8140326: G1: Consider putting regions where evacuation failed into next collection set

Co-authored-by: Albert Mingkun Yang <ayang@openjdk.org>
Reviewed-by: iwalulya, ayang
Thomas Schatzl 2023-08-08 10:29:14 +00:00
parent 28fd7a1739
commit 7e209528d3
36 changed files with 593 additions and 151 deletions


@ -2464,6 +2464,12 @@ void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType
_verifier->verify_after_gc();
verify_numa_regions("GC End");
_verifier->verify_region_sets_optional();
if (collector_state()->in_concurrent_start_gc()) {
log_debug(gc, verify)("Marking state");
_verifier->verify_marking_state();
}
phase_times()->record_verify_after_time_ms((Ticks::now() - start).seconds() * MILLIUNITS);
}
@ -2652,6 +2658,11 @@ void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
}
}
void G1CollectedHeap::retain_region(HeapRegion* hr) {
MutexLocker x(G1RareEvent_lock, Mutex::_no_safepoint_check_flag);
collection_set()->candidates()->add_retained_region_unsorted(hr);
}
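Note: retain_region() is called concurrently by the worker threads freeing the collection set (see the FreeCSetClosure changes further down), which is why the unsorted append to the retained candidates is guarded by the G1RareEvent_lock.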
void G1CollectedHeap::free_humongous_region(HeapRegion* hr,
FreeRegionList* free_list) {
assert(hr->is_humongous(), "this is only for humongous regions");
@ -2977,9 +2988,6 @@ void G1CollectedHeap::mark_evac_failure_object(uint worker_id, const oop obj, si
assert(!_cm->is_marked_in_bitmap(obj), "must be");
_cm->raw_mark_in_bitmap(obj);
if (collector_state()->in_concurrent_start_gc()) {
_cm->add_to_liveness(worker_id, obj, obj_size);
}
}
// Optimized nmethod scanning


@ -684,6 +684,8 @@ public:
// at the same time.
void free_region(HeapRegion* hr, FreeRegionList* free_list);
// Add the given region to the retained regions collection set candidates.
void retain_region(HeapRegion* hr);
// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
@ -1028,7 +1030,7 @@ public:
// Return "TRUE" iff the given object address is within the collection
// set. Assumes that the reference points into the heap.
inline bool is_in_cset(const HeapRegion *hr) const;
inline bool is_in_cset(const HeapRegion* hr) const;
inline bool is_in_cset(oop obj) const;
inline bool is_in_cset(HeapWord* addr) const;


@ -320,16 +320,21 @@ static int compare_region_idx(const uint a, const uint b) {
void G1CollectionSet::finalize_old_part(double time_remaining_ms) {
double non_young_start_time_sec = os::elapsedTime();
if (collector_state()->in_mixed_phase()) {
if (!candidates()->is_empty()) {
candidates()->verify();
G1CollectionCandidateRegionList initial_old_regions;
assert(_optional_old_regions.length() == 0, "must be");
_policy->select_candidates_from_marking(&candidates()->marking_regions(),
time_remaining_ms,
&initial_old_regions,
&_optional_old_regions);
time_remaining_ms = _policy->select_candidates_from_marking(&candidates()->marking_regions(),
time_remaining_ms,
&initial_old_regions,
&_optional_old_regions);
_policy->select_candidates_from_retained(&candidates()->retained_regions(),
time_remaining_ms,
&initial_old_regions,
&_optional_old_regions);
// Move initially selected old regions to collection set directly.
move_candidates_to_collection_set(&initial_old_regions);


@ -38,6 +38,15 @@ void G1CollectionCandidateList::set(G1CollectionCandidateList::CandidateInfo* ca
_candidates.appendAll(&a);
}
void G1CollectionCandidateList::append_unsorted(HeapRegion* r) {
CandidateInfo c(r, r->calc_gc_efficiency());
_candidates.append(c);
}
void G1CollectionCandidateList::sort_by_efficiency() {
_candidates.sort(compare);
}
void G1CollectionCandidateList::remove(G1CollectionCandidateRegionList* other) {
guarantee((uint)_candidates.length() >= other->length(), "must be");
@ -142,6 +151,7 @@ void G1CollectionCandidateRegionList::clear() {
G1CollectionSetCandidates::G1CollectionSetCandidates() :
_marking_regions(),
_retained_regions(),
_contains_map(nullptr),
_max_regions(0),
_last_marking_candidates_length(0)
@ -165,6 +175,7 @@ void G1CollectionSetCandidates::initialize(uint max_regions) {
void G1CollectionSetCandidates::clear() {
_marking_regions.clear();
_retained_regions.clear();
for (uint i = 0; i < _max_regions; i++) {
_contains_map[i] = CandidateOrigin::Invalid;
}
@ -188,8 +199,40 @@ void G1CollectionSetCandidates::set_candidates_from_marking(G1CollectionCandidat
verify();
}
void G1CollectionSetCandidates::sort_by_efficiency() {
// Regions from marking must always be sorted, so there is no reason to actually
// sort them.
_marking_regions.verify();
_retained_regions.sort_by_efficiency();
_retained_regions.verify();
}
void G1CollectionSetCandidates::add_retained_region_unsorted(HeapRegion* r) {
assert(!contains(r), "must not contain region %u", r->hrm_index());
_contains_map[r->hrm_index()] = CandidateOrigin::Retained;
_retained_regions.append_unsorted(r);
}
void G1CollectionSetCandidates::remove(G1CollectionCandidateRegionList* other) {
_marking_regions.remove(other);
// During removal, we exploit the fact that elements in the marking_regions,
// retained_regions and other list are sorted by gc_efficiency. Furthermore,
// all regions in the passed other list are in one of the two other lists.
//
// Split original list into elements for the marking list and elements from the
// retained list.
G1CollectionCandidateRegionList other_marking_regions;
G1CollectionCandidateRegionList other_retained_regions;
for (HeapRegion* r : *other) {
if (is_from_marking(r)) {
other_marking_regions.append(r);
} else {
other_retained_regions.append(r);
}
}
_marking_regions.remove(&other_marking_regions);
_retained_regions.remove(&other_retained_regions);
for (HeapRegion* r : *other) {
assert(contains(r), "must contain region %u", r->hrm_index());
@ -204,7 +247,15 @@ bool G1CollectionSetCandidates::is_empty() const {
}
bool G1CollectionSetCandidates::has_more_marking_candidates() const {
return _marking_regions.length() != 0;
return marking_regions_length() != 0;
}
uint G1CollectionSetCandidates::marking_regions_length() const {
return _marking_regions.length();
}
uint G1CollectionSetCandidates::retained_regions_length() const {
return _retained_regions.length();
}
#ifndef PRODUCT
@ -218,7 +269,7 @@ void G1CollectionSetCandidates::verify_helper(G1CollectionCandidateList* list, u
from_marking++;
}
const uint hrm_index = r->hrm_index();
assert(_contains_map[hrm_index] == CandidateOrigin::Marking,
assert(_contains_map[hrm_index] == CandidateOrigin::Marking || _contains_map[hrm_index] == CandidateOrigin::Retained,
"must be %u is %u", hrm_index, (uint)_contains_map[hrm_index]);
assert(verify_map[hrm_index] == CandidateOrigin::Invalid, "already added");
@ -235,9 +286,14 @@ void G1CollectionSetCandidates::verify() {
}
verify_helper(&_marking_regions, from_marking, verify_map);
assert(from_marking == marking_regions_length(), "must be");
uint from_marking_retained = 0;
verify_helper(&_retained_regions, from_marking_retained, verify_map);
assert(from_marking_retained == 0, "must be");
assert(length() >= marking_regions_length(), "must be");
// Check whether the _contains_map is consistent with the list.
for (uint i = 0; i < _max_regions; i++) {
assert(_contains_map[i] == verify_map[i] ||
@ -262,6 +318,7 @@ const char* G1CollectionSetCandidates::get_short_type_str(const HeapRegion* r) c
static const char* type_strings[] = {
"Ci", // Invalid
"Cm", // Marking
"Cr", // Retained
"Cv" // Verification
};


@ -100,6 +100,11 @@ public:
// Put the given set of candidates into this list, preserving the efficiency ordering.
void set(CandidateInfo* candidate_infos, uint num_infos);
// Add the given HeapRegion to this list at the end, (potentially) making the list unsorted.
void append_unsorted(HeapRegion* r);
// Restore sorting order by decreasing gc efficiency, using the existing efficiency
// values.
void sort_by_efficiency();
// Removes any HeapRegions stored in this list also in the other list. The other
// list may only contain regions in this list, sorted by gc efficiency. It need
// not be a prefix of this list. Returns the number of regions removed.
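The operations above (set, append_unsorted, sort_by_efficiency, remove) describe the whole lifecycle of a retained candidate entry: appended unsorted as evacuation failures are handled, re-sorted once afterwards, removed when picked into a collection set. A minimal standalone sketch of that lifecycle, with simplified types and a made-up efficiency metric (illustrative only, not JDK code):

#include <algorithm>
#include <cstdio>
#include <vector>

struct Candidate { unsigned region; double gc_efficiency; };

struct CandidateList {
  std::vector<Candidate> c;
  void append_unsorted(unsigned r, double eff) { c.push_back({r, eff}); }
  void sort_by_efficiency() {
    std::sort(c.begin(), c.end(),
              [](const Candidate& a, const Candidate& b) {
                return a.gc_efficiency > b.gc_efficiency; // decreasing order
              });
  }
};

int main() {
  CandidateList l;
  l.append_unsorted(10, 0.3); // appended as evacuation failures are handled
  l.append_unsorted(42, 0.9);
  l.sort_by_efficiency();     // one re-sort afterwards restores the ordering
  printf("best candidate: region %u\n", l.c.front().region);
  return 0;
}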
@ -129,13 +134,14 @@ public:
}
};
// Iterator for G1CollectionSetCandidates.
// Iterator for G1CollectionSetCandidates. There are no guarantees on the order
// of the regions returned.
class G1CollectionSetCandidatesIterator : public StackObj {
G1CollectionSetCandidates* _which;
uint _marking_position;
uint _position;
public:
G1CollectionSetCandidatesIterator(G1CollectionSetCandidates* which, uint marking_position);
G1CollectionSetCandidatesIterator(G1CollectionSetCandidates* which, uint position);
G1CollectionSetCandidatesIterator& operator++();
HeapRegion* operator*();
@ -146,26 +152,30 @@ public:
// Tracks all collection set candidates, i.e. regions that could/should be evacuated soon.
//
// These candidate regions are tracked in a list of regions, sorted by decreasing
// These candidate regions are tracked in two lists of regions, sorted by decreasing
// "gc efficiency".
//
// Currently there is only one type of such regions:
//
// * marking_regions: the set of regions selected by concurrent marking to be
// evacuated to keep overall heap occupancy stable.
// They are guaranteed to be evacuated and cleared out during
// the mixed phase.
//
// * retained_regions: set of regions selected for evacuation during evacuation
// failure.
// Any young collection will try to evacuate them.
//
class G1CollectionSetCandidates : public CHeapObj<mtGC> {
friend class G1CollectionSetCandidatesIterator;
enum class CandidateOrigin : uint8_t {
Invalid,
Marking, // This region has been determined as candidate by concurrent marking.
Retained, // This region has been added because it has been retained after evacuation.
Verify // Special value for verification.
};
G1CollectionCandidateList _marking_regions;
G1CollectionCandidateList _marking_regions; // Set of regions selected by concurrent marking.
G1CollectionCandidateList _retained_regions; // Set of regions selected from evacuation failed regions.
CandidateOrigin* _contains_map;
uint _max_regions;
@ -180,6 +190,7 @@ public:
~G1CollectionSetCandidates();
G1CollectionCandidateList& marking_regions() { return _marking_regions; }
G1CollectionCandidateList& retained_regions() { return _retained_regions; }
void initialize(uint max_regions);
@ -194,6 +205,11 @@ public:
// regions.
uint last_marking_candidates_length() const { return _last_marking_candidates_length; }
void sort_by_efficiency();
// Add the given region to the set of retained regions without regard to the
// gc efficiency sorting. The retained regions must be re-sorted manually later.
void add_retained_region_unsorted(HeapRegion* r);
// Remove the given regions from the candidates. All given regions must be part
// of the candidates.
void remove(G1CollectionCandidateRegionList* other);
@ -203,9 +219,10 @@ public:
const char* get_short_type_str(const HeapRegion* r) const;
bool is_empty() const;
bool has_more_marking_candidates() const;
uint marking_regions_length() const { return _marking_regions.length(); }
bool has_more_marking_candidates() const;
uint marking_regions_length() const;
uint retained_regions_length() const;
private:
void verify_helper(G1CollectionCandidateList* list, uint& from_marking, CandidateOrigin* verify_map) PRODUCT_RETURN;
@ -213,7 +230,7 @@ private:
public:
void verify() PRODUCT_RETURN;
uint length() const { return marking_regions_length(); }
uint length() const { return marking_regions_length() + retained_regions_length(); }
// Iteration
G1CollectionSetCandidatesIterator begin() {
@ -221,7 +238,7 @@ public:
}
G1CollectionSetCandidatesIterator end() {
return G1CollectionSetCandidatesIterator(this, marking_regions_length());
return G1CollectionSetCandidatesIterator(this, length());
}
};


@ -51,25 +51,28 @@ inline bool G1CollectionCandidateListIterator::operator!=(const G1CollectionCand
return !(*this == rhs);
}
inline G1CollectionSetCandidatesIterator::G1CollectionSetCandidatesIterator(G1CollectionSetCandidates* which, uint marking_position) :
_which(which), _marking_position(marking_position) {
inline G1CollectionSetCandidatesIterator::G1CollectionSetCandidatesIterator(G1CollectionSetCandidates* which, uint position) :
_which(which), _position(position) {
}
inline G1CollectionSetCandidatesIterator& G1CollectionSetCandidatesIterator::operator++() {
assert(_marking_position < _which->_marking_regions.length(),
"must not be at end already");
_marking_position++;
assert(_position < _which->length(), "must not be at end already");
_position++;
return *this;
}
inline HeapRegion* G1CollectionSetCandidatesIterator::operator*() {
return _which->_marking_regions.at(_marking_position)._r;
uint length = _which->marking_regions_length();
if (_position < length) {
return _which->_marking_regions.at(_position)._r;
} else {
return _which->_retained_regions.at(_position - length)._r;
}
}
inline bool G1CollectionSetCandidatesIterator::operator==(const G1CollectionSetCandidatesIterator& rhs) {
assert(_which == rhs._which, "iterator belongs to different array");
return _marking_position == rhs._marking_position;
return _position == rhs._position;
}
inline bool G1CollectionSetCandidatesIterator::operator!=(const G1CollectionSetCandidatesIterator& rhs) {

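A tiny standalone model of the concatenated lookup in operator*() above: a single position indexes the marking entries first, then the retained entries, so one iterator can traverse both lists back to back (illustrative only, not JDK code):

#include <cassert>
#include <cstddef>
#include <vector>

static int at(const std::vector<int>& marking,
              const std::vector<int>& retained,
              size_t pos) {
  return pos < marking.size() ? marking[pos]
                              : retained[pos - marking.size()];
}

int main() {
  std::vector<int> marking = {1, 2};
  std::vector<int> retained = {7};
  assert(at(marking, retained, 0) == 1);
  assert(at(marking, retained, 2) == 7); // position 2 falls into the retained list
  return 0;
}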

@ -31,13 +31,12 @@
#include "runtime/atomic.hpp"
#include "utilities/quickSort.hpp"
// Determine collection set candidates: For all regions determine whether they
// should be a collection set candidates, calculate their efficiency, sort and
// return them as G1CollectionSetCandidates instance.
// Determine collection set candidates (from marking): For all regions determine
// whether they should be a collection set candidate, calculate their efficiency,
// sort and put them into the candidates.
// Threads calculate the GC efficiency of the regions they get to process, and
// put them into some work area unsorted. At the end the array is sorted and
// copied into the G1CollectionSetCandidates instance; the caller will be the new
// owner of this object.
// put them into some work area without sorting. At the end that array is sorted and
// moved to the destination.
class G1BuildCandidateRegionsTask : public WorkerTask {
using CandidateInfo = G1CollectionCandidateList::CandidateInfo;
@ -152,12 +151,14 @@ class G1BuildCandidateRegionsTask : public WorkerTask {
// alloc region (we should not consider those for collection
// before we fill them up).
if (should_add(r) && !G1CollectedHeap::heap()->is_old_gc_alloc_region(r)) {
assert(r->rem_set()->is_complete(), "must be %u", r->hrm_index());
add_region(r);
} else if (r->is_old()) {
// Keep remembered sets for humongous regions, otherwise clean them out.
} else if (r->is_old() && !r->is_collection_set_candidate()) {
// Keep remembered sets for humongous regions and collection set candidates,
// otherwise clean them out.
r->rem_set()->clear(true /* only_cardset */);
} else {
assert(!r->is_old() || !r->rem_set()->is_tracked(),
assert(r->is_collection_set_candidate() || !r->is_old() || !r->rem_set()->is_tracked(),
"Missed to clear unused remembered set of region %u (%s) that is %s",
r->hrm_index(), r->get_type_str(), r->rem_set()->get_state_str());
}
@ -254,6 +255,10 @@ uint G1CollectionSetChooser::calculate_work_chunk_size(uint num_workers, uint nu
bool G1CollectionSetChooser::should_add(HeapRegion* hr) {
return !hr->is_young() &&
!hr->is_humongous() &&
// A region might have been retained (after evacuation failure) and already put
// into the candidates list during concurrent marking. These should keep being
// considered as retained regions.
!hr->is_collection_set_candidate() &&
region_occupancy_low_enough_for_evac(hr->live_bytes()) &&
hr->rem_set()->is_complete();
}


@ -47,9 +47,8 @@ public:
return live_bytes < mixed_gc_live_threshold_bytes();
}
// Determine whether to add the given region to the collection set candidates or
// not. Currently, we skip regions that we will never move during young gc, and
// regions which liveness is over the occupancy threshold.
// Determine whether to add the given region to the collection set candidates from
// marking or not. Currently, we skip regions whose live bytes are over the threshold.
// Regions also need a complete remembered set to be a candidate.
static bool should_add(HeapRegion* hr);


@ -325,6 +325,15 @@ uint G1CMRootMemRegions::num_root_regions() const {
return (uint)_num_root_regions;
}
bool G1CMRootMemRegions::contains(const MemRegion mr) const {
for (uint i = 0; i < _num_root_regions; i++) {
if (_root_regions[i].equals(mr)) {
return true;
}
}
return false;
}
void G1CMRootMemRegions::notify_scan_done() {
MutexLocker x(RootRegionScan_lock, Mutex::_no_safepoint_check_flag);
_scan_in_progress = false;
@ -994,6 +1003,10 @@ void G1ConcurrentMark::add_root_region(HeapRegion* r) {
root_regions()->add(r->top_at_mark_start(), r->top());
}
bool G1ConcurrentMark::is_root_region(HeapRegion* r) {
return root_regions()->contains(MemRegion(r->top_at_mark_start(), r->top()));
}
void G1ConcurrentMark::root_region_scan_abort_and_wait() {
root_regions()->abort();
root_regions()->wait_until_scan_finished();
@ -1362,8 +1375,8 @@ class G1ReclaimEmptyRegionsTask : public WorkerTask {
bool do_heap_region(HeapRegion *hr) {
if (hr->used() > 0 && hr->live_bytes() == 0 && !hr->is_young()) {
log_trace(gc)("Reclaimed empty old gen region %u (%s) bot " PTR_FORMAT,
hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
log_trace(gc, marking)("Reclaimed empty old gen region %u (%s) bot " PTR_FORMAT,
hr->hrm_index(), hr->get_short_type_str(), p2i(hr->bottom()));
_freed_bytes += hr->used();
hr->set_containing_set(nullptr);
if (hr->is_humongous()) {
@ -1867,6 +1880,7 @@ HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
HeapWord* bottom = curr_region->bottom();
HeapWord* limit = curr_region->top_at_mark_start();
log_trace(gc, marking)("Claim region %u bottom " PTR_FORMAT " tams " PTR_FORMAT, curr_region->hrm_index(), p2i(curr_region->bottom()), p2i(curr_region->top_at_mark_start()));
// notice that _finger == end cannot be guaranteed here since
// someone else might have moved the finger even further
assert(_finger >= end, "the finger should have moved forward");


@ -264,6 +264,10 @@ public:
// The number of root regions to scan.
uint num_root_regions() const;
// Is the given memregion contained in the root regions; the MemRegion must
// match exactly.
bool contains(const MemRegion mr) const;
void cancel_scan();
// Flag that we're done with root region scanning and notify anyone
@ -467,6 +471,8 @@ public:
// Live bytes in the given region as determined by concurrent marking, i.e. the amount of
// live bytes between bottom and TAMS.
size_t live_bytes(uint region) const { return _region_mark_stats[region]._live_words * HeapWordSize; }
// Set live bytes for concurrent marking.
void set_live_bytes(uint region, size_t live_bytes) { _region_mark_stats[region]._live_words = live_bytes / HeapWordSize; }
// Sets the internal top_at_region_start for the given region to current top of the region.
inline void update_top_at_rebuild_start(HeapRegion* r);
@ -555,6 +561,7 @@ public:
void scan_root_regions();
bool wait_until_root_region_scan_finished();
void add_root_region(HeapRegion* r);
bool is_root_region(HeapRegion* r);
void root_region_scan_abort_and_wait();
private:


@ -59,6 +59,8 @@
//
// We need to scrub and scan objects to rebuild remembered sets until parsable_bottom;
// we need to scan objects to rebuild remembered sets until tars.
// Regions might have been reclaimed while scrubbing them after having yielded for
// a pause.
class G1RebuildRSAndScrubTask : public WorkerTask {
G1ConcurrentMark* _cm;
HeapRegionClaimer _hr_claimer;
@ -86,11 +88,12 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
}
// Yield if enough has been processed; returns if the concurrent marking cycle
// has been aborted for any reason.
bool yield_if_necessary() {
// has been aborted for any reason. Yielded is set if there has been an actual
// yield for a pause.
bool yield_if_necessary(bool& yielded) {
if (_processed_words >= ProcessingYieldLimitInWords) {
reset_processed_words();
_cm->do_yield_check();
yielded = _cm->do_yield_check();
}
return _cm->has_aborted();
}
@ -101,15 +104,15 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
// Based on the results of G1RemSetTrackingPolicy::needs_scan_for_rebuild(),
// the value may be changed to null during rebuilding if the region has either:
// - been allocated after rebuild start, or
// - been eagerly reclaimed by a young collection (only humongous)
// - been reclaimed by a collection.
bool should_rebuild_or_scrub(HeapRegion* hr) const {
return _cm->top_at_rebuild_start(hr->hrm_index()) != nullptr;
}
// Helper used by both humongous objects and when chunking an object larger than the
// G1RebuildRemSetChunkSize. The heap region is needed to ensure a humongous object
// is not eagerly reclaimed during yielding.
// Returns whether marking has been aborted.
// G1RebuildRemSetChunkSize. The heap region is needed to check whether the region has
// been reclaimed during yielding.
// Returns true if marking has been aborted or false if completed.
bool scan_large_object(HeapRegion* hr, const oop obj, MemRegion scan_range) {
HeapWord* start = scan_range.start();
HeapWord* limit = scan_range.end();
@ -120,17 +123,18 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
// Update processed words and yield, for humongous objects we will yield
// after each chunk.
add_processed_words(mr.word_size());
bool mark_aborted = yield_if_necessary();
bool yielded = false;
bool mark_aborted = yield_if_necessary(yielded);
if (mark_aborted) {
return true;
} else if (!should_rebuild_or_scrub(hr)) {
// We need to check should_rebuild_or_scrub() again (for humongous objects)
// because the region might have been eagerly reclaimed during the yield.
log_trace(gc, marking)("Rebuild aborted for eagerly reclaimed humongous region: %u", hr->hrm_index());
} else if (yielded && !should_rebuild_or_scrub(hr)) {
// We need to check should_rebuild_or_scrub() again because the region might
// have been reclaimed during the yield.
log_trace(gc, marking)("Rebuild aborted for reclaimed region: %u", hr->hrm_index());
return false;
}
// Step to next chunk of the humongous object
// Step to next chunk of the large object.
start = mr.end();
} while (start < limit);
return false;
@ -188,9 +192,14 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
start = scrub_to_next_live(hr, start, limit);
}
bool mark_aborted = yield_if_necessary();
bool yielded = false;
bool mark_aborted = yield_if_necessary(yielded);
if (mark_aborted) {
return true;
} else if (yielded && !should_rebuild_or_scrub(hr)) {
// Region has been reclaimed while yielding. Exit, continuing with the next region.
log_trace(gc, marking)("Scan and scrub aborted for reclaimed region: %u", hr->hrm_index());
return false;
}
}
return false;
@ -203,9 +212,13 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
while (start < limit) {
start += scan_object(hr, start);
// Avoid stalling safepoints and stop iteration if mark cycle has been aborted.
bool mark_aborted = yield_if_necessary();
bool yielded = false;
bool mark_aborted = yield_if_necessary(yielded);
if (mark_aborted) {
return true;
} else if (yielded && !should_rebuild_or_scrub(hr)) {
log_trace(gc, marking)("Scan aborted for reclaimed region: %u", hr->hrm_index());
return false;
}
}
return false;
@ -216,14 +229,19 @@ class G1RebuildRSAndScrubTask : public WorkerTask {
bool scan_and_scrub_region(HeapRegion* hr, HeapWord* const pb) {
assert(should_rebuild_or_scrub(hr), "must be");
log_trace(gc, marking)("Scrub and rebuild region: " HR_FORMAT " pb: " PTR_FORMAT " TARS: " PTR_FORMAT,
HR_FORMAT_PARAMS(hr), p2i(pb), p2i(_cm->top_at_rebuild_start(hr->hrm_index())));
log_trace(gc, marking)("Scrub and rebuild region: " HR_FORMAT " pb: " PTR_FORMAT " TARS: " PTR_FORMAT " TAMS: " PTR_FORMAT,
HR_FORMAT_PARAMS(hr), p2i(pb), p2i(_cm->top_at_rebuild_start(hr->hrm_index())), p2i(hr->top_at_mark_start()));
if (scan_and_scrub_to_pb(hr, hr->bottom(), pb)) {
log_trace(gc, marking)("Scan and scrub aborted for region: %u", hr->hrm_index());
return true;
}
// Yielding during scrubbing and scanning might have reclaimed the region, so we
// need to re-check after the above.
if (!should_rebuild_or_scrub(hr)) {
return false;
}
// Scrubbing completed for this region - notify that we are done with it, resetting
// pb to bottom.
hr->note_end_of_scrubbing();


@ -63,7 +63,7 @@ public:
HeapRegionClaimer* hrclaimer,
uint worker_id) const;
// Return a G1AbstractSubTask which does necessary preparation for evacuation failure regions
// Return a G1AbstractSubTask which does necessary preparation for evacuation failed regions
G1AbstractSubTask* create_prepare_regions_task();
uint num_regions_failed_evacuation() const {


@ -25,6 +25,7 @@
#ifndef SHARE_GC_G1_G1EVACFAILUREREGIONS_INLINE_HPP
#define SHARE_GC_G1_G1EVACFAILUREREGIONS_INLINE_HPP
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
#include "runtime/atomic.hpp"
@ -37,8 +38,7 @@ bool G1EvacFailureRegions::record(uint region_idx) {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
HeapRegion* hr = g1h->region_at(region_idx);
G1CollectorState* state = g1h->collector_state();
hr->note_evacuation_failure(state->in_concurrent_start_gc());
hr->note_evacuation_failure();
}
return success;
}


@ -106,7 +106,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
#endif
_gc_par_phases[EagerlyReclaimHumongousObjects] = new WorkerDataArray<double>("EagerlyReclaimHumongousObjects", "Eagerly Reclaim Humongous Objects (ms):", max_gc_threads);
_gc_par_phases[RestorePreservedMarks] = new WorkerDataArray<double>("RestorePreservedMarks", "Restore Preserved Marks (ms):", max_gc_threads);
_gc_par_phases[ClearRetainedRegionBitmaps] = new WorkerDataArray<double>("ClearRetainedRegionsBitmap", "Clear Retained Region Bitmaps (ms):", max_gc_threads);
_gc_par_phases[ProcessEvacuationFailedRegions] = new WorkerDataArray<double>("ProcessEvacuationFailedRegions", "Process Evacuation Failed Regions (ms):", max_gc_threads);
_gc_par_phases[ScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards);
_gc_par_phases[ScanHR]->create_thread_work_items("Scanned Blocks:", ScanHRScannedBlocks);
@ -126,8 +126,10 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
_gc_par_phases[MergePSS]->create_thread_work_items("Copied Bytes", MergePSSCopiedBytes);
_gc_par_phases[MergePSS]->create_thread_work_items("LAB Waste", MergePSSLABWasteBytes);
_gc_par_phases[MergePSS]->create_thread_work_items("LAB Undo Waste", MergePSSLABUndoWasteBytes);
_gc_par_phases[MergePSS]->create_thread_work_items("Evac Fail Extra Cards", MergePSSEvacFailExtra);
_gc_par_phases[RestoreRetainedRegions]->create_thread_work_items("Evacuation Failure Regions:", RestoreRetainedRegionsNum);
_gc_par_phases[RestoreRetainedRegions]->create_thread_work_items("Evacuation Failed Regions:", RestoreRetainedRegionsFailedNum);
_gc_par_phases[RestoreRetainedRegions]->create_thread_work_items("New Retained Regions:", RestoreRetainedRegionsRetainedNum);
_gc_par_phases[RemoveSelfForwards]->create_thread_work_items("Forward Chunks:", RemoveSelfForwardChunksNum);
_gc_par_phases[RemoveSelfForwards]->create_thread_work_items("Empty Forward Chunks:", RemoveSelfForwardEmptyChunksNum);
@ -404,15 +406,20 @@ double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
const double pre_concurrent_start_ms = average_time_ms(ResetMarkingState) +
average_time_ms(NoteStartOfMark);
const double sum_ms = _cur_pre_evacuate_prepare_time_ms +
const double sum_ms = pre_concurrent_start_ms +
_cur_pre_evacuate_prepare_time_ms +
_recorded_young_cset_choice_time_ms +
_recorded_non_young_cset_choice_time_ms +
_cur_region_register_time +
_recorded_prepare_heap_roots_time_ms +
pre_concurrent_start_ms;
_recorded_prepare_heap_roots_time_ms;
info_time("Pre Evacuate Collection Set", sum_ms);
if (pre_concurrent_start_ms > 0.0) {
debug_phase(_gc_par_phases[ResetMarkingState]);
debug_phase(_gc_par_phases[NoteStartOfMark]);
}
debug_time("Pre Evacuate Prepare", _cur_pre_evacuate_prepare_time_ms);
debug_phase(_gc_par_phases[RetireTLABsAndFlushLogs], 1);
debug_phase(_gc_par_phases[NonJavaThreadFlushLogs], 1);
@ -421,11 +428,6 @@ double G1GCPhaseTimes::print_pre_evacuate_collection_set() const {
debug_time("Prepare Heap Roots", _recorded_prepare_heap_roots_time_ms);
if (pre_concurrent_start_ms > 0.0) {
debug_phase(_gc_par_phases[ResetMarkingState]);
debug_phase(_gc_par_phases[NoteStartOfMark]);
}
return sum_ms;
}
@ -504,7 +506,7 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed
if (evacuation_failed) {
debug_phase(_gc_par_phases[RecalculateUsed], 1);
debug_phase(_gc_par_phases[RestorePreservedMarks], 1);
debug_phase(_gc_par_phases[ClearRetainedRegionBitmaps], 1);
debug_phase(_gc_par_phases[ProcessEvacuationFailedRegions], 1);
}
#if COMPILER2_OR_JVMCI
debug_phase(_gc_par_phases[UpdateDerivedPointers], 1);


@ -88,7 +88,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
#endif
EagerlyReclaimHumongousObjects,
RestorePreservedMarks,
ClearRetainedRegionBitmaps,
ProcessEvacuationFailedRegions,
ResetMarkingState,
NoteStartOfMark,
GCParPhasesSentinel
@ -138,11 +138,13 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
MergePSSCopiedBytes,
MergePSSLABSize,
MergePSSLABWasteBytes,
MergePSSLABUndoWasteBytes
MergePSSLABUndoWasteBytes,
MergePSSEvacFailExtra
};
enum RestoreRetainedRegionsWorkItems {
RestoreRetainedRegionsNum,
RestoreRetainedRegionsFailedNum,
RestoreRetainedRegionsRetainedNum
};
enum RemoveSelfForwardsWorkItems {


@ -437,6 +437,78 @@ void G1HeapVerifier::verify_region_sets() {
_g1h->collection_set()->candidates()->verify();
}
class G1VerifyRegionMarkingStateClosure : public HeapRegionClosure {
class MarkedBytesClosure {
size_t _marked_words;
public:
MarkedBytesClosure() : _marked_words(0) { }
inline size_t apply(oop obj) {
size_t result = obj->size();
_marked_words += result;
return result;
}
size_t marked_bytes() const { return _marked_words * HeapWordSize; }
};
public:
virtual bool do_heap_region(HeapRegion* r) {
if (r->is_free()) {
return false;
}
G1ConcurrentMark* cm = G1CollectedHeap::heap()->concurrent_mark();
bool part_of_marking = r->is_old_or_humongous() && !r->is_collection_set_candidate();
if (part_of_marking) {
guarantee(r->bottom() != r->top_at_mark_start(), "region %u (%s) does not have TAMS set",
r->hrm_index(), r->get_short_type_str());
size_t marked_bytes = cm->live_bytes(r->hrm_index());
MarkedBytesClosure cl;
r->apply_to_marked_objects(cm->mark_bitmap(), &cl);
guarantee(cl.marked_bytes() == marked_bytes,
"region %u (%s) live bytes actual %zu and cache %zu differ",
r->hrm_index(), r->get_short_type_str(), cl.marked_bytes(), marked_bytes);
} else {
guarantee(r->bottom() == r->top_at_mark_start(),
"region %u (%s) has TAMS set " PTR_FORMAT " " PTR_FORMAT,
r->hrm_index(), r->get_short_type_str(), p2i(r->bottom()), p2i(r->top_at_mark_start()));
guarantee(cm->live_bytes(r->hrm_index()) == 0,
"region %u (%s) has %zu live bytes recorded",
r->hrm_index(), r->get_short_type_str(), cm->live_bytes(r->hrm_index()));
guarantee(cm->mark_bitmap()->get_next_marked_addr(r->bottom(), r->end()) == r->end(),
"region %u (%s) has mark",
r->hrm_index(), r->get_short_type_str());
guarantee(cm->is_root_region(r),
"region %u (%s) should be root region",
r->hrm_index(), r->get_short_type_str());
}
return false;
}
};
void G1HeapVerifier::verify_marking_state() {
assert(G1CollectedHeap::heap()->collector_state()->in_concurrent_start_gc(), "must be");
// Verify TAMSes, bitmaps and liveness statistics.
//
// - if part of marking: TAMS != bottom, liveness == 0, bitmap clear
// - if evacuation failed + part of marking: TAMS != bottom, liveness != 0, bitmap has at least one object set (corresponding to liveness)
// - if not part of marking: TAMS == bottom, liveness == 0, bitmap clear; must be in root region
// To compare the liveness recorded in G1ConcurrentMark with the actual values we
// need to flush the cache.
G1CollectedHeap::heap()->concurrent_mark()->flush_all_task_caches();
G1VerifyRegionMarkingStateClosure cl;
_g1h->heap_region_iterate(&cl);
}
void G1HeapVerifier::prepare_for_verify() {
if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
_g1h->ensure_parsability(false);


@ -70,6 +70,9 @@ public:
void verify_before_gc();
void verify_after_gc();
// Verify that marking state is set up correctly after a concurrent start pause.
void verify_marking_state();
void verify_bitmap_clear(bool above_tams_only);
// Do sanity check on the contents of the in-cset fast test table.


@ -88,7 +88,8 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
EVAC_FAILURE_INJECTOR_ONLY(_evac_failure_inject_counter(0) COMMA)
_preserved_marks(preserved_marks),
_evacuation_failed_info(),
_evac_failure_regions(evac_failure_regions)
_evac_failure_regions(evac_failure_regions),
_evac_failure_enqueued_cards(0)
{
// We allocate number of young gen regions in the collection set plus one
// entries, since entry 0 keeps track of surviving bytes for non-young regions.
@ -147,6 +148,10 @@ size_t G1ParScanThreadState::lab_undo_waste_words() const {
return _plab_allocator->undo_waste();
}
size_t G1ParScanThreadState::evac_failure_enqueued_cards() const {
return _evac_failure_enqueued_cards;
}
#ifdef ASSERT
void G1ParScanThreadState::verify_task(narrowOop* task) const {
assert(task != nullptr, "invariant");
@ -595,10 +600,12 @@ void G1ParScanThreadStateSet::flush_stats() {
size_t lab_waste_bytes = pss->lab_waste_words() * HeapWordSize;
size_t lab_undo_waste_bytes = pss->lab_undo_waste_words() * HeapWordSize;
size_t copied_bytes = pss->flush_stats(_surviving_young_words_total, _num_workers) * HeapWordSize;
size_t evac_fail_enqueued_cards = pss->evac_failure_enqueued_cards();
p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, copied_bytes, G1GCPhaseTimes::MergePSSCopiedBytes);
p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_waste_bytes, G1GCPhaseTimes::MergePSSLABWasteBytes);
p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, lab_undo_waste_bytes, G1GCPhaseTimes::MergePSSLABUndoWasteBytes);
p->record_or_add_thread_work_item(G1GCPhaseTimes::MergePSS, worker_id, evac_fail_enqueued_cards, G1GCPhaseTimes::MergePSSEvacFailExtra);
delete pss;
_states[worker_id] = nullptr;
@ -640,12 +647,9 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz
_evacuation_failed_info.register_copy_failure(word_sz);
// For iterating objects that failed evacuation currently we can reuse the
// existing closure to scan evacuated objects because:
// - for objects referring into the collection set we do not need to gather
// cards at this time. The regions they are in will be unconditionally turned
// to old regions without remembered sets.
// - since we are iterating from a collection set region (i.e. never a Survivor
// region), we always need to gather cards for this case.
// existing closure to scan evacuated objects; since we are iterating from a
// collection set region (i.e. never a Survivor region), we always need to
// gather cards for this case.
G1SkipCardEnqueueSetter x(&_scanner, false /* skip_card_enqueue */);
old->oop_iterate_backwards(&_scanner);


@ -109,6 +109,16 @@ class G1ParScanThreadState : public CHeapObj<mtGC> {
PreservedMarks* _preserved_marks;
EvacuationFailedInfo _evacuation_failed_info;
G1EvacFailureRegions* _evac_failure_regions;
// Number of additional cards into evacuation failed regions enqueued into
// the local DCQS. This is an approximation, as cards that would be added later
// outside of evacuation failure will not be subtracted again.
size_t _evac_failure_enqueued_cards;
// Enqueue the card if not already in the set; this is a best-effort attempt at
// detecting duplicates.
template <class T> bool enqueue_if_new(T* p);
// Enqueue the card of p into the (evacuation failed) region.
template <class T> void enqueue_card_into_evac_fail_region(T* p, oop obj);
bool inject_evacuation_failure(uint region_idx) EVAC_FAILURE_INJECTOR_RETURN_( return false; );
@ -152,6 +162,8 @@ public:
size_t lab_waste_words() const;
size_t lab_undo_waste_words() const;
size_t evac_failure_enqueued_cards() const;
// Pass locally gathered statistics to global state. Returns the total number of
// HeapWords copied.
size_t flush_stats(size_t* surviving_young_words, uint num_workers);


@ -96,6 +96,28 @@ G1OopStarChunkedList* G1ParScanThreadState::oops_into_optional_region(const Heap
return &_oops_into_optional_regions[hr->index_in_opt_cset()];
}
template <class T> bool G1ParScanThreadState::enqueue_if_new(T* p) {
size_t card_index = ct()->index_for(p);
// If the card hasn't been added to the buffer, do it.
if (_last_enqueued_card != card_index) {
_rdc_local_qset.enqueue(ct()->byte_for_index(card_index));
_last_enqueued_card = card_index;
return true;
} else {
return false;
}
}
template <class T> void G1ParScanThreadState::enqueue_card_into_evac_fail_region(T* p, oop obj) {
assert(!HeapRegion::is_in_same_region(p, obj), "Should have filtered out cross-region references already.");
assert(!_g1h->heap_region_containing(p)->is_survivor(), "Should have filtered out from-newly allocated survivor references already.");
assert(_g1h->heap_region_containing(obj)->in_collection_set(), "Only for enqueuing references into collection set regions");
if (enqueue_if_new(p)) {
_evac_failure_enqueued_cards++;
}
}
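A minimal standalone sketch of the duplicate filtering enqueue_if_new() performs: remembering the last enqueued card index drops the common case of consecutive references landing on the same card. The 512-byte card size matches G1's default; the container and types are illustrative assumptions, not JDK code:

#include <cstdint>
#include <cstdio>
#include <vector>

struct CardFilter {
  static constexpr uintptr_t kCardShift = 9; // 512-byte cards, as in G1
  uintptr_t last_card = ~uintptr_t(0);
  std::vector<uintptr_t> queue;

  bool enqueue_if_new(const void* p) {
    uintptr_t card = reinterpret_cast<uintptr_t>(p) >> kCardShift;
    if (card == last_card) {
      return false;          // duplicate of the previous enqueue; skip
    }
    last_card = card;
    queue.push_back(card);
    return true;
  }
};

int main() {
  CardFilter f;
  char region[2048];
  f.enqueue_if_new(region + 0);    // new card -> enqueued
  f.enqueue_if_new(region + 100);  // same card -> filtered out
  f.enqueue_if_new(region + 600);  // next card -> enqueued
  printf("enqueued %zu cards\n", f.queue.size());
  return 0;
}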
template <class T> void G1ParScanThreadState::write_ref_field_post(T* p, oop obj) {
assert(obj != nullptr, "Must be");
if (HeapRegion::is_in_same_region(p, obj)) {
@ -109,11 +131,13 @@ template <class T> void G1ParScanThreadState::write_ref_field_post(T* p, oop obj
}
G1HeapRegionAttr dest_attr = _g1h->region_attr(obj);
// References to the current collection set are references to objects that failed
// evacuation. Currently these regions are always relabelled as old without
// remembered sets, so skip them.
// evacuation. Proactively collect remembered sets (cards) for them, as they are
// likely sparsely populated (and have few references). We will decide later to keep
// or drop the region.
if (dest_attr.is_in_cset()) {
assert(obj->is_forwarded(), "evac-failed but not forwarded: " PTR_FORMAT, p2i(obj));
assert(obj->forwardee() == obj, "evac-failed but not self-forwarded: " PTR_FORMAT, p2i(obj));
enqueue_card_into_evac_fail_region(p, obj);
return;
}
enqueue_card_if_tracked(dest_attr, p, obj);
@ -137,12 +161,7 @@ template <class T> void G1ParScanThreadState::enqueue_card_if_tracked(G1HeapRegi
if (!region_attr.remset_is_tracked()) {
return;
}
size_t card_index = ct()->index_for(p);
// If the card hasn't been added to the buffer, do it.
if (_last_enqueued_card != card_index) {
_rdc_local_qset.enqueue(ct()->byte_for_index(card_index));
_last_enqueued_card = card_index;
}
enqueue_if_new(p);
}
#endif // SHARE_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP


@ -268,9 +268,14 @@ uint G1Policy::calculate_young_desired_length(size_t pending_cards, size_t rs_le
desired_eden_length_by_mmu = calculate_desired_eden_length_by_mmu();
double base_time_ms = predict_base_time_ms(pending_cards, rs_length);
double retained_time_ms = predict_retained_regions_evac_time();
double total_time_ms = base_time_ms + retained_time_ms;
log_trace(gc, ergo, heap)("Predicted total base time: total %f base_time %f retained_time %f",
total_time_ms, base_time_ms, retained_time_ms);
desired_eden_length_by_pause =
calculate_desired_eden_length_by_pause(base_time_ms,
calculate_desired_eden_length_by_pause(total_time_ms,
absolute_min_young_length - survivor_length,
absolute_max_young_length - survivor_length);
@ -513,6 +518,29 @@ double G1Policy::predict_survivor_regions_evac_time() const {
return survivor_regions_evac_time;
}
double G1Policy::predict_retained_regions_evac_time() const {
uint num_regions = 0;
double result = 0.0;
G1CollectionCandidateList& list = candidates()->retained_regions();
uint min_regions_left = MIN2(min_retained_old_cset_length(),
list.length());
for (HeapRegion* r : list) {
if (min_regions_left == 0) {
// Minimum number of regions considered. Exit.
break;
}
min_regions_left--;
result += predict_region_total_time_ms(r, collector_state()->in_young_only_phase());
num_regions++;
}
log_trace(gc, ergo, heap)("Selected %u of %u retained candidates taking %1.3fms additional time",
num_regions, list.length(), result);
return result;
}
G1GCPhaseTimes* G1Policy::phase_times() const {
// Lazy allocation because it must follow initialization of all the
// OopStorage objects by various other subsystems.
@ -623,6 +651,17 @@ void G1Policy::record_concurrent_refinement_stats(size_t pending_cards,
}
}
bool G1Policy::should_retain_evac_failed_region(uint index) const {
size_t live_bytes = _g1h->region_at(index)->live_bytes();
assert(live_bytes != 0,
"live bytes not set for %u used %zu garbage %zu cm-live %zu",
index, _g1h->region_at(index)->used(), _g1h->region_at(index)->garbage_bytes(), live_bytes);
size_t threshold = G1RetainRegionLiveThresholdPercent * HeapRegion::GrainBytes / 100;
return live_bytes < threshold;
}
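A worked example of the threshold computation above, as a standalone snippet (illustrative, not JDK code): assuming 8 MB regions (the actual HeapRegion::GrainBytes is chosen ergonomically) and the default G1RetainRegionLiveThresholdPercent of 85 introduced by this change, a region is retained only while its live data stays below about 6.8 MiB:

#include <cstddef>
#include <cstdio>

int main() {
  // Assumed values for illustration: 8 MB regions, default threshold 85%.
  const size_t grain_bytes = 8u * 1024 * 1024;  // stands in for HeapRegion::GrainBytes
  const size_t threshold_percent = 85;          // G1RetainRegionLiveThresholdPercent
  const size_t threshold = threshold_percent * grain_bytes / 100;
  // Regions with live_bytes below this are retained; prints 7130316.
  printf("retain threshold: %zu bytes\n", threshold);
  return 0;
}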
void G1Policy::record_young_collection_start() {
Ticks now = Ticks::now();
// We only need to do this here as the policy will only be applied
@ -1371,6 +1410,12 @@ size_t G1Policy::allowed_waste_in_collection_set() const {
return G1HeapWastePercent * _g1h->capacity() / 100;
}
uint G1Policy::min_retained_old_cset_length() const {
// Guarantee some progress with retained regions regardless of available time by
// taking at least one region.
return 1;
}
uint G1Policy::calc_min_old_cset_length(uint num_candidate_regions) const {
// The min old CSet region bound is based on the maximum desired
// number of mixed GCs after a cycle. I.e., even if some old regions
@ -1488,6 +1533,72 @@ double G1Policy::select_candidates_from_marking(G1CollectionCandidateList* marki
return time_remaining_ms;
}
void G1Policy::select_candidates_from_retained(G1CollectionCandidateList* retained_list,
double time_remaining_ms,
G1CollectionCandidateRegionList* initial_old_regions,
G1CollectionCandidateRegionList* optional_old_regions) {
uint const min_regions = min_retained_old_cset_length();
uint num_initial_regions_selected = 0;
uint num_optional_regions_selected = 0;
uint num_expensive_regions_selected = 0;
double predicted_initial_time_ms = 0.0;
double predicted_optional_time_ms = 0.0;
// We want to make sure that on the one hand we process the retained regions asap,
// but on the other hand do not take too many of them as optional regions.
// So we split the time budget into a budget for regions we unconditionally take
// into the initial old regions, and a budget for taking optional regions from
// the retained list.
double optional_time_remaining_ms = max_time_for_retaining();
time_remaining_ms = MIN2(time_remaining_ms, optional_time_remaining_ms);
log_debug(gc, ergo, cset)("Start adding retained candidates to collection set. "
"Min %u regions, "
"time remaining %1.2fms, optional remaining %1.2fms",
min_regions, time_remaining_ms, optional_time_remaining_ms);
for (HeapRegion* r : *retained_list) {
double predicted_time_ms = predict_region_total_time_ms(r, collector_state()->in_young_only_phase());
bool fits_in_remaining_time = predicted_time_ms <= time_remaining_ms;
if (fits_in_remaining_time || (num_expensive_regions_selected < min_regions)) {
predicted_initial_time_ms += predicted_time_ms;
if (!fits_in_remaining_time) {
num_expensive_regions_selected++;
}
initial_old_regions->append(r);
num_initial_regions_selected++;
} else if (predicted_time_ms <= optional_time_remaining_ms) {
predicted_optional_time_ms += predicted_time_ms;
optional_old_regions->append(r);
num_optional_regions_selected++;
} else {
// Fits neither initial nor optional time limit. Exit.
break;
}
time_remaining_ms = MAX2(0.0, time_remaining_ms - predicted_time_ms);
optional_time_remaining_ms = MAX2(0.0, optional_time_remaining_ms - predicted_time_ms);
}
uint num_regions_selected = num_initial_regions_selected + num_optional_regions_selected;
if (num_regions_selected == retained_list->length()) {
log_debug(gc, ergo, cset)("Retained candidates exhausted.");
}
if (num_expensive_regions_selected > 0) {
log_debug(gc, ergo, cset)("Added %u retained candidates to collection set although the predicted time was too high.",
num_expensive_regions_selected);
}
log_debug(gc, ergo, cset)("Finish adding retained candidates to collection set. Initial: %u, optional: %u, "
"predicted initial time: %1.2fms, predicted optional time: %1.2fms, "
"time remaining: %1.2fms optional time remaining %1.2fms",
num_initial_regions_selected, num_optional_regions_selected,
predicted_initial_time_ms, predicted_optional_time_ms, time_remaining_ms, optional_time_remaining_ms);
}
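A standalone sketch of the two-budget selection above, with made-up predictions (not JDK code): regions fitting the normal budget become initial regions, regions fitting only the larger optional budget become optional, and at least min_regions are taken as initial even when too expensive:

#include <cstdio>
#include <vector>

int main() {
  const unsigned min_regions = 1;       // stands in for min_retained_old_cset_length()
  double time_remaining_ms = 2.0;       // normal budget
  double optional_remaining_ms = 50.0;  // stands in for max_time_for_retaining()
  const std::vector<double> predicted_ms = {1.5, 3.0, 40.0, 30.0};

  unsigned initial = 0, optional = 0, expensive = 0;
  for (double t : predicted_ms) {
    bool fits = t <= time_remaining_ms;
    if (fits || expensive < min_regions) {
      if (!fits) {
        expensive++;                    // taken anyway to guarantee progress
      }
      initial++;
    } else if (t <= optional_remaining_ms) {
      optional++;
    } else {
      break;                            // fits neither budget; stop selecting
    }
    time_remaining_ms = (time_remaining_ms > t) ? time_remaining_ms - t : 0.0;
    optional_remaining_ms = (optional_remaining_ms > t) ? optional_remaining_ms - t : 0.0;
  }
  // Prints: initial=2 (expensive=1) optional=1
  printf("initial=%u (expensive=%u) optional=%u\n", initial, expensive, optional);
  return 0;
}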
void G1Policy::calculate_optional_collection_set_regions(G1CollectionCandidateRegionList* optional_regions,
double time_remaining_ms,
G1CollectionCandidateRegionList* selected_regions) {


@ -247,6 +247,7 @@ private:
size_t predict_bytes_to_copy(HeapRegion* hr) const;
double predict_survivor_regions_evac_time() const;
double predict_retained_regions_evac_time() const;
// Check whether a given young length (young_length) fits into the
// given target pause time and whether the prediction for the amount
@ -259,6 +260,8 @@ private:
public:
size_t pending_cards_at_gc_start() const { return _pending_cards_at_gc_start; }
// The minimum number of retained regions we will add to the CSet during a young GC.
uint min_retained_old_cset_length() const;
// Calculate the minimum number of old regions we'll add to the CSet
// during a single mixed GC given the initial number of regions selected during
// marking.
@ -346,6 +349,11 @@ public:
G1CollectionCandidateRegionList* initial_old_regions,
G1CollectionCandidateRegionList* optional_old_regions);
void select_candidates_from_retained(G1CollectionCandidateList* retained_list,
double time_remaining_ms,
G1CollectionCandidateRegionList* initial_old_regions,
G1CollectionCandidateRegionList* optional_old_regions);
// Calculate the number of optional regions from the given collection set candidates,
// the remaining time and the maximum number of these regions and return the number
// of actually selected regions in num_optional_regions.
@ -401,6 +409,11 @@ public:
void record_concurrent_refinement_stats(size_t pending_cards,
size_t thread_buffer_cards);
bool should_retain_evac_failed_region(HeapRegion* r) const {
return should_retain_evac_failed_region(r->hrm_index());
}
bool should_retain_evac_failed_region(uint index) const;
private:
//
// Survivor regions policy.
@ -427,6 +440,9 @@ public:
// remaining time is used to choose what regions to include in the evacuation.
double optional_evacuation_fraction() const { return 0.75; }
// Returns the maximum total time to reserve for handling retained regions.
double max_time_for_retaining() const { return max_pause_time_ms() * optional_prediction_fraction(); }
uint tenuring_threshold() const { return _tenuring_threshold; }
uint max_survivor_regions() {


@ -1102,10 +1102,11 @@ class G1MergeHeapRootsTask : public WorkerTask {
bool should_clear_region(HeapRegion* hr) const {
// The bitmap for young regions must obviously be clear as we never mark through them;
// old regions are only in the collection set after the concurrent cycle completed,
// so their bitmaps must also be clear except when the pause occurs during the
// Concurrent Cleanup for Next Mark phase. Only at that point the region's bitmap may
// contain marks while being in the collection set at the same time.
// old regions that are currently being marked through are only in the collection set
// after the concurrent cycle completed, so their bitmaps must also be clear except when
// the pause occurs during the Concurrent Cleanup for Next Mark phase.
// Only at that point the region's bitmap may contain marks while being in the collection
// set at the same time.
//
// There is one exception: shutdown might have aborted the Concurrent Cleanup for Next
// Mark phase midway, which might have also left stale marks in old generation regions.
@ -1130,6 +1131,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
} else {
assert_bitmap_clear(hr, _g1h->concurrent_mark()->mark_bitmap());
}
_g1h->concurrent_mark()->clear_statistics(hr);
return false;
}
};


@ -30,6 +30,7 @@
#include "gc/g1/g1Allocator.hpp"
#include "gc/g1/g1CardSetMemory.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSetCandidates.inline.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
@ -464,6 +465,13 @@ void G1YoungCollector::set_young_collection_default_active_worker_threads(){
}
void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info) {
// Must be before collection set calculation, requires collection set to not
// be calculated yet.
if (collector_state()->in_concurrent_start_gc()) {
concurrent_mark()->pre_concurrent_start(_gc_cause);
}
{
Ticks start = Ticks::now();
G1PreEvacuateCollectionSetBatchTask cl;
@ -508,10 +516,6 @@ void G1YoungCollector::pre_evacuate_collection_set(G1EvacInfo* evacuation_info)
DerivedPointerTable::clear();
#endif
if (collector_state()->in_concurrent_start_gc()) {
concurrent_mark()->pre_concurrent_start(_gc_cause);
}
evac_failure_injector()->arm_if_needed();
}
@ -950,6 +954,15 @@ void G1YoungCollector::post_evacuate_cleanup_2(G1ParScanThreadStateSet* per_thre
phase_times()->record_post_evacuate_cleanup_task_2_time((Ticks::now() - start).seconds() * 1000.0);
}
void G1YoungCollector::enqueue_candidates_as_root_regions() {
assert(collector_state()->in_concurrent_start_gc(), "must be");
G1CollectionSetCandidates* candidates = collection_set()->candidates();
for (HeapRegion* r : *candidates) {
_g1h->concurrent_mark()->add_root_region(r);
}
}
void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info,
G1ParScanThreadStateSet* per_thread_states) {
G1GCPhaseTimes* p = phase_times();
@ -972,6 +985,13 @@ void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info,
post_evacuate_cleanup_2(per_thread_states, evacuation_info);
// Regions in the collection set candidates are roots for the marking (they are
// not marked through, considering they are very likely to be reclaimed soon).
// They need to be enqueued explicitly, in contrast to survivor regions.
if (collector_state()->in_concurrent_start_gc()) {
enqueue_candidates_as_root_regions();
}
_evac_failure_regions.post_collection();
assert_used_and_recalculate_used_equal(_g1h);


@ -126,6 +126,8 @@ class G1YoungCollector {
void post_evacuate_cleanup_2(G1ParScanThreadStateSet* per_thread_states,
G1EvacInfo* evacuation_info);
// Enqueue collection set candidates as root regions.
void enqueue_candidates_as_root_regions();
void post_evacuate_collection_set(G1EvacInfo* evacuation_info,
G1ParScanThreadStateSet* per_thread_states);


@ -44,9 +44,8 @@ public:
if (_evac_failure_regions_num > 0) {
_evac_failure_regions.set_bit(r->hrm_index());
--_evac_failure_regions_num;
return false;
}
return true;
return _evac_failure_regions_num == 0;
}
};


@ -334,28 +334,45 @@ public:
size_t num_dirtied() const { return _num_dirtied; }
};
class G1PostEvacuateCollectionSetCleanupTask2::ClearRetainedRegionBitmaps : public G1AbstractSubTask {
class G1PostEvacuateCollectionSetCleanupTask2::ProcessEvacuationFailedRegionsTask : public G1AbstractSubTask {
G1EvacFailureRegions* _evac_failure_regions;
HeapRegionClaimer _claimer;
class ClearRetainedRegionBitmapsClosure : public HeapRegionClosure {
class ProcessEvacuationFailedRegionsClosure : public HeapRegionClosure {
public:
bool do_heap_region(HeapRegion* r) override {
assert(r->bottom() == r->top_at_mark_start(),
"TAMS should have been reset for region %u", r->hrm_index());
G1CollectedHeap::heap()->clear_bitmap_for_region(r);
G1CollectedHeap* g1h = G1CollectedHeap::heap();
G1ConcurrentMark* cm = g1h->concurrent_mark();
uint region = r->hrm_index();
assert(r->top_at_mark_start() == r->bottom(), "TAMS must not have been set for region %u", region);
assert(cm->live_bytes(region) == 0, "Marking live bytes must not be set for region %u", region);
// Concurrent mark does not mark through regions that we retain (they are root
// regions wrt marking), so we must clear the mark data (tams, bitmap) that was
// set eagerly or during evacuation failure.
bool clear_mark_data = !g1h->collector_state()->in_concurrent_start_gc() ||
g1h->policy()->should_retain_evac_failed_region(r);
if (clear_mark_data) {
g1h->clear_bitmap_for_region(r);
} else {
// This evacuation failed region is going to be marked through. Update mark data.
r->set_top_at_mark_start(r->top());
cm->set_live_bytes(r->hrm_index(), r->live_bytes());
assert(cm->mark_bitmap()->get_next_marked_addr(r->bottom(), r->top_at_mark_start()) != r->top_at_mark_start(),
"Marks must be on bitmap for region %u", r->hrm_index());
}
return false;
}
};
public:
ClearRetainedRegionBitmaps(G1EvacFailureRegions* evac_failure_regions) :
G1AbstractSubTask(G1GCPhaseTimes::ClearRetainedRegionBitmaps),
ProcessEvacuationFailedRegionsTask(G1EvacFailureRegions* evac_failure_regions) :
G1AbstractSubTask(G1GCPhaseTimes::ProcessEvacuationFailedRegions),
_evac_failure_regions(evac_failure_regions),
_claimer(0) {
assert(!G1CollectedHeap::heap()->collector_state()->in_concurrent_start_gc(),
"Should not clear bitmaps of retained regions during concurrent start");
}
void set_max_workers(uint max_workers) override {
@ -367,7 +384,7 @@ public:
}
void do_work(uint worker_id) override {
ClearRetainedRegionBitmapsClosure cl;
ProcessEvacuationFailedRegionsClosure cl;
_evac_failure_regions->par_iterate(&cl, &_claimer, worker_id);
}
};
@ -525,6 +542,7 @@ class FreeCSetClosure : public HeapRegionClosure {
Tickspan _non_young_time;
FreeCSetStats* _stats;
G1EvacFailureRegions* _evac_failure_regions;
uint _num_retained_regions;
void assert_tracks_surviving_words(HeapRegion* r) {
assert(r->young_index_in_cset() != 0 &&
@ -554,10 +572,18 @@ class FreeCSetClosure : public HeapRegionClosure {
p->record_or_add_thread_work_item(G1GCPhaseTimes::RestoreRetainedRegions,
_worker_id,
1,
G1GCPhaseTimes::RestoreRetainedRegionsNum);
G1GCPhaseTimes::RestoreRetainedRegionsFailedNum);
bool retain_region = _g1h->policy()->should_retain_evac_failed_region(r);
// Update the region state due to the failed evacuation.
r->handle_evacuation_failure();
r->handle_evacuation_failure(retain_region);
assert(r->is_old(), "must already be relabelled as old");
if (retain_region) {
_g1h->retain_region(r);
_num_retained_regions++;
}
assert(retain_region == r->rem_set()->is_tracked(), "When retaining a region, its remembered set should be kept.");
// Add region to old set, need to hold lock.
MutexLocker x(OldSets_lock, Mutex::_no_safepoint_check_flag);
@ -584,7 +610,8 @@ public:
_young_time(),
_non_young_time(),
_stats(stats),
_evac_failure_regions(evac_failure_regions) { }
_evac_failure_regions(evac_failure_regions),
_num_retained_regions(0) { }
virtual bool do_heap_region(HeapRegion* r) {
assert(r->in_collection_set(), "Invariant: %u missing from CSet", r->hrm_index());
@ -617,11 +644,13 @@ public:
pt->record_time_secs(G1GCPhaseTimes::NonYoungFreeCSet, _worker_id, _non_young_time.seconds());
}
}
uint num_retained_regions() const { return _num_retained_regions; }
};
class G1PostEvacuateCollectionSetCleanupTask2::FreeCollectionSetTask : public G1AbstractSubTask {
G1CollectedHeap* _g1h;
G1EvacInfo* _evacuation_info;
G1EvacInfo* _evacuation_info;
FreeCSetStats* _worker_stats;
HeapRegionClaimer _claimer;
const size_t* _surviving_young_words;
@ -659,12 +688,22 @@ public:
virtual ~FreeCollectionSetTask() {
Ticks serial_time = Ticks::now();
G1GCPhaseTimes* p = _g1h->phase_times();
bool has_new_retained_regions =
p->sum_thread_work_items(G1GCPhaseTimes::RestoreRetainedRegions, G1GCPhaseTimes::RestoreRetainedRegionsRetainedNum) != 0;
if (has_new_retained_regions) {
G1CollectionSetCandidates* candidates = _g1h->collection_set()->candidates();
candidates->sort_by_efficiency();
}
report_statistics();
for (uint worker = 0; worker < _active_workers; worker++) {
_worker_stats[worker].~FreeCSetStats();
}
FREE_C_HEAP_ARRAY(FreeCSetStats, _worker_stats);
_g1h->phase_times()->record_serial_free_cset_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);
p->record_serial_free_cset_time_ms((Ticks::now() - serial_time).seconds() * 1000.0);
_g1h->clear_collection_set();
}
@ -684,6 +723,10 @@ public:
_g1h->collection_set_par_iterate_all(&cl, &_claimer, worker_id);
// Report per-region type timings.
cl.report_timing();
_g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::RestoreRetainedRegions,
worker_id,
cl.num_retained_regions(),
G1GCPhaseTimes::RestoreRetainedRegionsRetainedNum);
}
};
@@ -726,10 +769,7 @@ G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2
if (evac_failure_regions->evacuation_failed()) {
add_parallel_task(new RestorePreservedMarksTask(per_thread_states->preserved_marks_set()));
// Keep marks on bitmaps in retained regions during concurrent start - they will all be old.
if (!G1CollectedHeap::heap()->collector_state()->in_concurrent_start_gc()) {
add_parallel_task(new ClearRetainedRegionBitmaps(evac_failure_regions));
}
add_parallel_task(new ProcessEvacuationFailedRegionsTask(evac_failure_regions));
}
add_parallel_task(new RedirtyLoggedCardsTask(per_thread_states->rdcqs(), evac_failure_regions));
if (UseTLAB && ResizeTLAB) {

View File

@@ -55,7 +55,7 @@ public:
// Second set of post evacuate collection set tasks containing (s means serial):
// - Eagerly Reclaim Humongous Objects (s)
// - Update Derived Pointers (s)
// - Clear Retained Region Bitmaps (on evacuation failure)
// - Clear Retained Region Data (on evacuation failure)
// - Redirty Logged Cards
// - Restore Preserved Marks (on evacuation failure)
// - Free Collection Set
@@ -66,7 +66,7 @@ class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedTask {
class UpdateDerivedPointersTask;
#endif
class ClearRetainedRegionBitmaps;
class ProcessEvacuationFailedRegionsTask;
class RedirtyLoggedCardsTask;
class RestorePreservedMarksTask;
class FreeCollectionSetTask;

View File

@@ -251,6 +251,12 @@
"Regions with live bytes exceeding this will not be collected.") \
range(0, 100) \
\
product(uintx, G1RetainRegionLiveThresholdPercent, 85, EXPERIMENTAL, \
"Threshold for evacuation failed regions to be considered for " \
"inclusion in the collection set candidates." \
"Regions with live bytes exceeding this will not be retained.") \
range(0, 100) \
\
product(uintx, G1HeapWastePercent, 5, \
"Amount of space, expressed as a percentage of the heap size, " \
"that G1 is willing not to collect to avoid expensive GCs.") \

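Being EXPERIMENTAL, the new flag must be unlocked explicitly. A usage sketch (MyApp is a placeholder): assuming the percentage is taken of the region size, with 8 MB regions and the default of 85 a failed region would be retained only while its live data stays below roughly 6.8 MB.

java -XX:+UnlockExperimentalVMOptions -XX:G1RetainRegionLiveThresholdPercent=85 MyApp
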
View File

@@ -44,6 +44,7 @@
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals_extension.hpp"
#include "utilities/powerOfTwo.hpp"
@@ -100,14 +101,14 @@ void HeapRegion::setup_heap_region_size(size_t max_heap_size) {
}
}
void HeapRegion::handle_evacuation_failure() {
void HeapRegion::handle_evacuation_failure(bool retain) {
uninstall_surv_rate_group();
clear_young_index_in_cset();
clear_index_in_opt_cset();
move_to_old();
_rem_set->clean_code_roots(this);
_rem_set->clear_locked(true /* only_cardset */);
_rem_set->clear_locked(true /* only_cardset */, retain /* keep_tracked */);
}
void HeapRegion::unlink_from_list() {
@@ -263,23 +264,12 @@ void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
used());
}
void HeapRegion::note_evacuation_failure(bool during_concurrent_start) {
void HeapRegion::note_evacuation_failure() {
// PB must be bottom - we only evacuate old gen regions after scrubbing, and
// young gen regions never have their PB set to anything other than bottom.
assert(parsable_bottom_acquire() == bottom(), "must be");
_garbage_bytes = 0;
if (during_concurrent_start) {
// Self-forwarding marks all objects. Adjust TAMS so that these marks are
// below it.
set_top_at_mark_start(top());
} else {
// Outside of the mixed phase all regions that had an evacuation failure must
// be young regions, and their TAMS is always bottom. Similarly, before the
// start of the mixed phase, we scrubbed and reset TAMS to bottom.
assert(top_at_mark_start() == bottom(), "must be");
}
}
void HeapRegion::note_self_forward_chunk_done(size_t garbage_bytes) {

View File

@@ -483,7 +483,7 @@ public:
// Notify the region that an evacuation failure occurred for an object within this
// region.
void note_evacuation_failure(bool during_concurrent_start);
void note_evacuation_failure();
// Notify the region that we have partially finished processing self-forwarded
// objects during evacuation failure handling.
@@ -529,7 +529,7 @@ public:
}
// Update the region state after a failed evacuation.
void handle_evacuation_failure();
void handle_evacuation_failure(bool retain);
// Iterate over the objects overlapping the given memory region, applying cl
// to all references in the region. This is a helper for

View File

@@ -281,8 +281,8 @@ inline void HeapRegion::reset_parsable_bottom() {
}
inline void HeapRegion::note_start_of_marking() {
assert(top_at_mark_start() == bottom(), "CA region's TAMS must always be at bottom");
if (is_old_or_humongous()) {
assert(top_at_mark_start() == bottom(), "Region's TAMS must always be at bottom");
if (is_old_or_humongous() && !is_collection_set_candidate()) {
set_top_at_mark_start(top());
}
}

View File

@@ -73,13 +73,17 @@ void HeapRegionRemSet::clear(bool only_cardset) {
clear_locked(only_cardset);
}
void HeapRegionRemSet::clear_locked(bool only_cardset) {
void HeapRegionRemSet::clear_locked(bool only_cardset, bool keep_tracked) {
if (!only_cardset) {
_code_roots.clear();
}
clear_fcc();
_card_set.clear();
set_state_untracked();
if (!keep_tracked) {
set_state_untracked();
} else {
assert(is_tracked(), "must be");
}
assert(occupied() == 0, "Should be clear.");
}
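
Usage sketch mirroring the handle_evacuation_failure change above: a retained region drops its card set contents but keeps its tracking state, so the remembered set continues to be maintained until the region is collected as a candidate.

// Illustrative call sites (cf. the clear_locked call in handle_evacuation_failure):
rem_set()->clear_locked(true /* only_cardset */, true  /* keep_tracked */); // region retained
rem_set()->clear_locked(true /* only_cardset */, false /* keep_tracked */); // region not retained
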

View File

@@ -118,7 +118,7 @@ public:
// The region is being reclaimed; clear its remset, and any mention of
// entries for this region in other remsets.
void clear(bool only_cardset = false);
void clear_locked(bool only_cardset = false);
void clear_locked(bool only_cardset = false, bool keep_tracked = false);
void reset_table_scanner();
@@ -156,7 +156,7 @@ public:
// Applies blk->do_code_blob() to each of the entries in _code_roots
void code_roots_do(CodeBlobClosure* blk) const;
// Clean out code roots not having an oop pointing into this region any more.
void clean_code_roots(HeapRegion* hr);
// Returns the number of elements in _code_roots

View File

@@ -173,6 +173,7 @@ public class TestGCLogMessages {
new LogMessageWithLevel("Merge Per-Thread State", Level.DEBUG),
new LogMessageWithLevel("LAB Waste", Level.DEBUG),
new LogMessageWithLevel("LAB Undo Waste", Level.DEBUG),
new LogMessageWithLevel("Evac Fail Extra Cards", Level.DEBUG),
new LogMessageWithLevel("Clear Logged Cards", Level.DEBUG),
new LogMessageWithLevel("Recalculate Used Memory", Level.DEBUG),
@@ -264,7 +265,9 @@ public class TestGCLogMessages {
new LogMessageWithLevel("Recalculate Used Memory", Level.DEBUG),
new LogMessageWithLevel("Restore Preserved Marks", Level.DEBUG),
new LogMessageWithLevel("Restore Retained Regions", Level.DEBUG),
new LogMessageWithLevel("Evacuation Failure Regions", Level.DEBUG),
new LogMessageWithLevel("Process Evacuation Failed Regions", Level.DEBUG),
new LogMessageWithLevel("Evacuation Failed Regions", Level.DEBUG),
new LogMessageWithLevel("New Retained Regions", Level.DEBUG),
};
private void testWithEvacuationFailureLogs() throws Exception {

View File

@@ -128,7 +128,7 @@ public class TestG1ParallelPhases {
"RestoreRetainedRegions",
"RemoveSelfForwards",
"RestorePreservedMarks",
"ClearRetainedRegionsBitmap",
"ProcessEvacuationFailedRegions",
// Generally optional phases.
"OptScanHR",
"OptMergeRS",