8256265: G1: Improve parallelism in regions that failed evacuation
Co-authored-by: Hamlin Li <mli@openjdk.org>
Co-authored-by: Albert Mingkun Yang <ayang@openjdk.org>
Reviewed-by: sjohanss, ayang
parent b31a03c60a
commit 15cb1fb788
@@ -144,6 +144,16 @@ void G1CollectedHeap::run_batch_task(G1BatchedTask* cl) {
  workers()->run_task(cl, num_workers);
}

uint G1CollectedHeap::get_chunks_per_region() {
  uint log_region_size = HeapRegion::LogOfHRGrainBytes;
  // Limit the expected input values to current known possible values of the
  // (log) region size. Adjust as necessary after testing if changing the permissible
  // values for region size.
  assert(log_region_size >= 20 && log_region_size <= 29,
         "expected value in [20,29], but got %u", log_region_size);
  return 1u << (log_region_size / 2 - 4);
}
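A minimal sketch (plain C++ outside HotSpot, values assumed for illustration, not part of the patch) of the arithmetic in get_chunks_per_region() above; with integer division it reproduces the chunk counts quoted in the comment below: 64 for 1M/2M regions, 128 for 4M/8M, 256 for 16M/32M.

// Standalone check of the chunks-per-region formula for the permitted log region sizes.
#include <cassert>
#include <cstdio>

static unsigned chunks_for_log_region_size(unsigned log_region_size) {
  assert(log_region_size >= 20 && log_region_size <= 29);
  return 1u << (log_region_size / 2 - 4);
}

int main() {
  std::printf("1M/2M:   %u %u\n", chunks_for_log_region_size(20), chunks_for_log_region_size(21)); // 64 64
  std::printf("4M/8M:   %u %u\n", chunks_for_log_region_size(22), chunks_for_log_region_size(23)); // 128 128
  std::printf("16M/32M: %u %u\n", chunks_for_log_region_size(24), chunks_for_log_region_size(25)); // 256 256
  return 0;
}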

HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                             MemRegion mr) {
  return new HeapRegion(hrs_index, bot(), mr, &_card_set_config);
@@ -3290,11 +3300,13 @@ HeapRegion* G1CollectedHeap::alloc_highest_free_region() {
  return NULL;
}

void G1CollectedHeap::mark_evac_failure_object(const oop obj) const {
  // All objects failing evacuation are live. What we'll do is
  // that we'll update the marking info so that they are
  // all below TAMS and explicitly marked.
void G1CollectedHeap::mark_evac_failure_object(uint worker_id, const oop obj, size_t obj_size) const {
  assert(!_cm->is_marked_in_bitmap(obj), "must be");

  _cm->raw_mark_in_bitmap(obj);
  if (collector_state()->in_concurrent_start_gc()) {
    _cm->add_to_liveness(worker_id, obj, obj_size);
  }
}

// Optimized nmethod scanning

@@ -523,6 +523,14 @@ public:
  // Run the given batch task using the workers.
  void run_batch_task(G1BatchedTask* cl);

  // Return "optimal" number of chunks per region we want to use for claiming areas
  // within a region to claim.
  // The returned value is a trade-off between granularity of work distribution and
  // memory usage and maintenance costs of that table.
  // Testing showed that 64 for 1M/2M region, 128 for 4M/8M regions, 256 for 16/32M regions,
  // and so on seems to be such a good trade-off.
  static uint get_chunks_per_region();

  G1Allocator* allocator() {
    return _allocator;
  }
@@ -1216,6 +1224,7 @@ public:

  bool is_marked(oop obj) const;

  inline static bool is_obj_filler(const oop obj);
  // Determine if an object is dead, given the object and also
  // the region to which the object belongs.
  inline bool is_obj_dead(const oop obj, const HeapRegion* hr) const;
@@ -1229,8 +1238,8 @@ public:
  inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
  inline bool is_obj_dead_full(const oop obj) const;

  // Mark the live object that failed evacuation in the prev bitmap.
  void mark_evac_failure_object(oop obj) const;
  // Mark the live object that failed evacuation in the bitmap.
  void mark_evac_failure_object(uint worker_id, oop obj, size_t obj_size) const;

  G1ConcurrentMark* concurrent_mark() const { return _cm; }


@@ -213,6 +213,11 @@ inline bool G1CollectedHeap::requires_barriers(stackChunkOop obj) const {
  return !heap_region_containing(obj)->is_young(); // is_in_young does an unnecessary NULL check
}

inline bool G1CollectedHeap::is_obj_filler(const oop obj) {
  Klass* k = obj->klass();
  return k == Universe::fillerArrayKlassObj() || k == vmClasses::FillerObject_klass();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj, const HeapRegion* hr) const {
  return hr->is_obj_dead(obj, hr->parsable_bottom());
}

@@ -603,7 +603,8 @@ private:
  // live_words data are current wrt to the _mark_bitmap. We use this information
  // to only clear ranges of the bitmap that require clearing.
  if (is_clear_concurrent_undo()) {
    // No need to clear bitmaps for empty regions.
    // No need to clear bitmaps for empty regions (which includes regions we
    // did not mark through).
    if (_cm->live_words(r->hrm_index()) == 0) {
      assert(_bitmap->get_next_marked_addr(r->bottom(), r->end()) == r->end(), "Should not have marked bits");
      return r->bottom();
@@ -652,7 +653,7 @@ private:
    }
    assert(cur >= end, "Must have completed iteration over the bitmap for region %u.", r->hrm_index());

    r->note_end_of_clearing();
    r->reset_top_at_mark_start();

    return false;
  }
@@ -1887,7 +1888,6 @@ void G1ConcurrentMark::flush_all_task_caches() {
void G1ConcurrentMark::clear_bitmap_for_region(HeapRegion* hr) {
  assert_at_safepoint();
  _mark_bitmap.clear_range(MemRegion(hr->bottom(), hr->end()));
  hr->note_end_of_clearing();
}

HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {

@@ -579,10 +579,10 @@ public:

  // Mark in the marking bitmap. Used during evacuation failure to
  // remember what objects need handling. Not for use during marking.
  inline void raw_mark_in_bitmap(oop p);
  inline void raw_mark_in_bitmap(oop obj);

  // Clears marks for all objects in the given region in the marking
  // bitmap. This should only be used clean the bitmap during a
  // bitmap. This should only be used to clean the bitmap during a
  // safepoint.
  void clear_bitmap_for_region(HeapRegion* hr);


@@ -280,8 +280,8 @@ inline bool G1CMTask::deal_with_reference(T* p) {
  return make_reference_grey(obj);
}

inline void G1ConcurrentMark::raw_mark_in_bitmap(oop p) {
  _mark_bitmap.par_mark(p);
inline void G1ConcurrentMark::raw_mark_in_bitmap(oop obj) {
  _mark_bitmap.par_mark(obj);
}

bool G1ConcurrentMark::is_marked_in_bitmap(oop p) const {

@@ -29,171 +29,190 @@
#include "gc/g1/g1EvacFailure.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionRemSet.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/bitMap.inline.hpp"

class RemoveSelfForwardPtrObjClosure {
  G1CollectedHeap* _g1h;
  G1ConcurrentMark* _cm;
  HeapRegion* _hr;
  size_t _marked_words;
  bool _during_concurrent_start;
  uint _worker_id;
  HeapWord* _last_forwarded_object_end;

public:
  RemoveSelfForwardPtrObjClosure(HeapRegion* hr,
                                 bool during_concurrent_start,
                                 uint worker_id) :
    _g1h(G1CollectedHeap::heap()),
    _cm(_g1h->concurrent_mark()),
    _hr(hr),
    _marked_words(0),
    _during_concurrent_start(during_concurrent_start),
    _worker_id(worker_id),
    _last_forwarded_object_end(hr->bottom()) { }

  size_t marked_bytes() { return _marked_words * HeapWordSize; }

  // Handle the marked objects in the region. These are self-forwarded objects
  // that need to be kept live. We need to update the remembered sets of these
  // objects. Further update the BOT and marks.
  // We can coalesce and overwrite the remaining heap contents with dummy objects
  // as they have either been dead or evacuated (which are unreferenced now, i.e.
  // dead too) already.
  size_t apply(oop obj) {
    HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
    size_t obj_size = obj->size();
    assert(_last_forwarded_object_end <= obj_addr, "should iterate in ascending address order");
    assert(_hr->is_in(obj_addr), "sanity");

    // The object failed to move.
    assert(obj->is_forwarded() && obj->forwardee() == obj, "sanity");

    zap_dead_objects(_last_forwarded_object_end, obj_addr);

    assert(_cm->is_marked_in_bitmap(obj), "should be correctly marked");
    if (_during_concurrent_start) {
      // If the evacuation failure occurs during concurrent start we should do
      // any additional necessary per-object actions.
      _cm->add_to_liveness(_worker_id, obj, obj_size);
    }

    _marked_words += obj_size;
    // Reset the markWord
    obj->init_mark();

    HeapWord* obj_end = obj_addr + obj_size;
    _last_forwarded_object_end = obj_end;
    _hr->update_bot_for_block(obj_addr, obj_end);
    return obj_size;
  }

  // Fill the memory area from start to end with filler objects, and update the BOT
  // accordingly.
  void zap_dead_objects(HeapWord* start, HeapWord* end) {
    if (start == end) {
      return;
    }

    _hr->fill_range_with_dead_objects(start, end);
  }

  void zap_remainder() {
    zap_dead_objects(_last_forwarded_object_end, _hr->top());
  }
};

class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  uint _worker_id;

  G1EvacFailureRegions* _evac_failure_regions;
class PhaseTimesStat {
  static constexpr G1GCPhaseTimes::GCParPhases phase_name =
    G1GCPhaseTimes::RemoveSelfForwards;

  G1GCPhaseTimes* _phase_times;
  uint _worker_id;
  Ticks _start;

public:
  RemoveSelfForwardPtrHRClosure(uint worker_id,
                                G1EvacFailureRegions* evac_failure_regions) :
    _g1h(G1CollectedHeap::heap()),
  PhaseTimesStat(G1GCPhaseTimes* phase_times, uint worker_id) :
    _phase_times(phase_times),
    _worker_id(worker_id),
    _evac_failure_regions(evac_failure_regions),
    _phase_times(G1CollectedHeap::heap()->phase_times()) {
    _start(Ticks::now()) { }

  ~PhaseTimesStat() {
    _phase_times->record_or_add_time_secs(phase_name,
                                          _worker_id,
                                          (Ticks::now() - _start).seconds());
  }

  size_t remove_self_forward_ptr_by_walking_hr(HeapRegion* hr,
                                               bool during_concurrent_start) {
    RemoveSelfForwardPtrObjClosure rspc(hr,
                                        during_concurrent_start,
                                        _worker_id);

    // All objects that failed evacuation has been marked in the bitmap.
    // Use the bitmap to apply the above closure to all failing objects.
    G1CMBitMap* bitmap = _g1h->concurrent_mark()->mark_bitmap();
    hr->apply_to_marked_objects(bitmap, &rspc);
    // Need to zap the remainder area of the processed region.
    rspc.zap_remainder();
    // Now clear all the marks to be ready for a new marking cyle.
    if (!during_concurrent_start) {
      assert(hr->top_at_mark_start() == hr->bottom(), "TAMS must be bottom to make all objects look live");
      _g1h->clear_bitmap_for_region(hr);
    } else {
      assert(hr->top_at_mark_start() == hr->top(), "TAMS must be top for bitmap to have any value");
      // Keep the bits.
    }
    // We never evacuate Old (non-humongous, non-archive) regions during scrubbing
    // (only afterwards); other regions (young, humongous, archive) never need
    // scrubbing, so the following must hold.
    assert(hr->parsable_bottom() == hr->bottom(), "PB must be bottom to make the whole area parsable");

    return rspc.marked_bytes();
  void register_empty_chunk() {
    _phase_times->record_or_add_thread_work_item(phase_name,
                                                 _worker_id,
                                                 1,
                                                 G1GCPhaseTimes::RemoveSelfForwardEmptyChunksNum);
  }

  bool do_heap_region(HeapRegion *hr) {
    assert(!hr->is_pinned(), "Unexpected pinned region at index %u", hr->hrm_index());
    assert(hr->in_collection_set(), "bad CS");
    assert(_evac_failure_regions->contains(hr->hrm_index()), "precondition");
  void register_nonempty_chunk() {
    _phase_times->record_or_add_thread_work_item(phase_name,
                                                 _worker_id,
                                                 1,
                                                 G1GCPhaseTimes::RemoveSelfForwardChunksNum);
  }

    hr->clear_index_in_opt_cset();
  void register_objects_count_and_size(size_t num_marked_obj, size_t marked_words) {
    _phase_times->record_or_add_thread_work_item(phase_name,
                                                 _worker_id,
                                                 num_marked_obj,
                                                 G1GCPhaseTimes::RemoveSelfForwardObjectsNum);

    bool during_concurrent_start = _g1h->collector_state()->in_concurrent_start_gc();

    hr->note_self_forwarding_removal_start(during_concurrent_start);

    _phase_times->record_or_add_thread_work_item(G1GCPhaseTimes::RestoreRetainedRegions,
                                                 _worker_id,
                                                 1,
                                                 G1GCPhaseTimes::RestoreRetainedRegionsNum);

    size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_concurrent_start);

    hr->rem_set()->clean_code_roots(hr);
    hr->rem_set()->clear_locked(true);

    hr->note_self_forwarding_removal_end(live_bytes);

    return false;
    size_t marked_bytes = marked_words * HeapWordSize;
    _phase_times->record_or_add_thread_work_item(phase_name,
                                                 _worker_id,
                                                 marked_bytes,
                                                 G1GCPhaseTimes::RemoveSelfForwardObjectsBytes);
  }
};

G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask(G1EvacFailureRegions* evac_failure_regions) :
// Fill the memory area from start to end with filler objects, and update the BOT
// accordingly. Since we clear and use the bitmap for marking objects that failed
// evacuation, there is no other work to be done there.
static size_t zap_dead_objects(HeapRegion* hr, HeapWord* start, HeapWord* end) {
  assert(start <= end, "precondition");
  if (start == end) {
    return 0;
  }

  hr->fill_range_with_dead_objects(start, end);
  return pointer_delta(end, start);
}

static void update_garbage_words_in_hr(HeapRegion* hr, size_t garbage_words) {
  if (garbage_words != 0) {
    hr->note_self_forward_chunk_done(garbage_words * HeapWordSize);
  }
}

static void prefetch_obj(HeapWord* obj_addr) {
  Prefetch::write(obj_addr, PrefetchScanIntervalInBytes);
}

void G1RemoveSelfForwardsTask::process_chunk(uint worker_id,
                                             uint chunk_idx) {
  PhaseTimesStat stat(_g1h->phase_times(), worker_id);

  G1CMBitMap* bitmap = _cm->mark_bitmap();
  const uint region_idx = _evac_failure_regions->get_region_idx(chunk_idx / _num_chunks_per_region);
  HeapRegion* hr = _g1h->region_at(region_idx);

  HeapWord* hr_bottom = hr->bottom();
  HeapWord* hr_top = hr->top();
  HeapWord* chunk_start = hr_bottom + (chunk_idx % _num_chunks_per_region) * _chunk_size;

  assert(chunk_start < hr->end(), "inv");
  if (chunk_start >= hr_top) {
    return;
  }

  HeapWord* chunk_end = MIN2(chunk_start + _chunk_size, hr_top);
  HeapWord* first_marked_addr = bitmap->get_next_marked_addr(chunk_start, hr_top);

  size_t garbage_words = 0;

  if (chunk_start == hr_bottom) {
    // This is the bottom-most chunk in this region; zap [bottom, first_marked_addr).
    garbage_words += zap_dead_objects(hr, hr_bottom, first_marked_addr);
  }

  if (first_marked_addr >= chunk_end) {
    stat.register_empty_chunk();
    update_garbage_words_in_hr(hr, garbage_words);
    return;
  }

  stat.register_nonempty_chunk();

  size_t num_marked_objs = 0;
  size_t marked_words = 0;

  HeapWord* obj_addr = first_marked_addr;
  assert(chunk_start <= obj_addr && obj_addr < chunk_end,
         "object " PTR_FORMAT " must be within chunk [" PTR_FORMAT ", " PTR_FORMAT "[",
         p2i(obj_addr), p2i(chunk_start), p2i(chunk_end));
  do {
    assert(bitmap->is_marked(obj_addr), "inv");
    prefetch_obj(obj_addr);

    oop obj = cast_to_oop(obj_addr);
    const size_t obj_size = obj->size();
    HeapWord* const obj_end_addr = obj_addr + obj_size;

    {
      // Process marked object.
      assert(obj->is_forwarded() && obj->forwardee() == obj, "must be self-forwarded");
      obj->init_mark();
      hr->update_bot_for_block(obj_addr, obj_end_addr);

      // Statistics
      num_marked_objs++;
      marked_words += obj_size;
    }

    assert(obj_end_addr <= hr_top, "inv");
    // Use hr_top as the limit so that we zap dead ranges up to the next
    // marked obj or hr_top.
    HeapWord* next_marked_obj_addr = bitmap->get_next_marked_addr(obj_end_addr, hr_top);
    garbage_words += zap_dead_objects(hr, obj_end_addr, next_marked_obj_addr);
    obj_addr = next_marked_obj_addr;
  } while (obj_addr < chunk_end);

  assert(marked_words > 0 && num_marked_objs > 0, "inv");

  stat.register_objects_count_and_size(num_marked_objs, marked_words);

  update_garbage_words_in_hr(hr, garbage_words);
}
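A minimal sketch of the chunk geometry that process_chunk() derives from a global chunk index, assuming a 2MB region (262144 HeapWords) and 64 chunks per region; all values here are illustrative stand-ins, not HotSpot state or part of the patch.

// Illustration only: map a global chunk index to (failed region, word range).
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main() {
  const unsigned chunks_per_region = 64;     // get_chunks_per_region() for 1M/2M regions
  const size_t   grain_words       = 262144; // 2MB region / 8-byte HeapWords (assumed)
  const size_t   chunk_size        = grain_words / chunks_per_region; // 4096 words per chunk

  const unsigned chunk_idx       = 130;                           // example global index
  const unsigned region_in_list  = chunk_idx / chunks_per_region; // index 2 in the failed-regions array
  const unsigned chunk_in_region = chunk_idx % chunks_per_region; // chunk 2 within that region

  const size_t chunk_start = chunk_in_region * chunk_size;                    // words from bottom()
  const size_t chunk_end   = std::min(chunk_start + chunk_size, grain_words); // clamped like MIN2(..., top)

  std::printf("failed region #%u, words [%zu, %zu) relative to bottom()\n",
              region_in_list, chunk_start, chunk_end);
  return 0;
}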

G1RemoveSelfForwardsTask::G1RemoveSelfForwardsTask(G1EvacFailureRegions* evac_failure_regions) :
  WorkerTask("G1 Remove Self-forwarding Pointers"),
  _g1h(G1CollectedHeap::heap()),
  _hrclaimer(_g1h->workers()->active_workers()),
  _evac_failure_regions(evac_failure_regions) { }
  _cm(_g1h->concurrent_mark()),
  _evac_failure_regions(evac_failure_regions),
  _chunk_bitmap(mtGC) {

void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
  RemoveSelfForwardPtrHRClosure rsfp_cl(worker_id, _evac_failure_regions);
  _num_evac_fail_regions = _evac_failure_regions->num_regions_failed_evacuation();
  _num_chunks_per_region = G1CollectedHeap::get_chunks_per_region();

  // Iterate through all regions that failed evacuation during the entire collection.
  _evac_failure_regions->par_iterate(&rsfp_cl, &_hrclaimer, worker_id);
  _chunk_size = static_cast<uint>(HeapRegion::GrainWords / _num_chunks_per_region);

  log_debug(gc, ergo)("Initializing removing self forwards with %u chunks per region",
                      _num_chunks_per_region);

  _chunk_bitmap.resize(_num_chunks_per_region * _num_evac_fail_regions);
}

uint G1ParRemoveSelfForwardPtrsTask::num_failed_regions() const {
  return _evac_failure_regions->num_regions_failed_evacuation();
void G1RemoveSelfForwardsTask::work(uint worker_id) {
  const uint total_workers = G1CollectedHeap::heap()->workers()->active_workers();
  const uint total_chunks = _num_chunks_per_region * _num_evac_fail_regions;
  const uint start_chunk_idx = worker_id * total_chunks / total_workers;

  for (uint i = 0; i < total_chunks; i++) {
    const uint chunk_idx = (start_chunk_idx + i) % total_chunks;
    if (claim_chunk(chunk_idx)) {
      process_chunk(worker_id, chunk_idx);
    }
  }
}
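A minimal, self-contained sketch of the staggered claiming scheme in work() above, assuming std::atomic flags in place of HotSpot's CHeapBitMap::par_set_bit() and made-up worker and chunk counts: each worker starts at its own offset into the global chunk index space and wraps around, so workers mostly touch disjoint chunks and only occasionally race on the same claim bit.

// Model of the chunk-claiming loop; not HotSpot code.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  const unsigned num_workers  = 4;
  const unsigned total_chunks = 256; // chunks_per_region * failed regions (assumed)

  std::vector<std::atomic<bool>> claimed(total_chunks);
  for (auto& c : claimed) c.store(false);

  auto worker = [&](unsigned worker_id) {
    unsigned processed = 0;
    const unsigned start = worker_id * total_chunks / num_workers; // staggered start
    for (unsigned i = 0; i < total_chunks; i++) {
      const unsigned chunk = (start + i) % total_chunks;
      bool expected = false;
      // Stand-in for claim_chunk(chunk), i.e. _chunk_bitmap.par_set_bit(chunk).
      if (claimed[chunk].compare_exchange_strong(expected, true)) {
        processed++; // a real worker would call process_chunk(worker_id, chunk) here
      }
    }
    std::printf("worker %u claimed %u chunks\n", worker_id, processed);
  };

  std::vector<std::thread> threads;
  for (unsigned w = 0; w < num_workers; w++) {
    threads.emplace_back(worker, w);
  }
  for (auto& t : threads) {
    t.join();
  }
  return 0;
}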

@@ -25,30 +25,37 @@
#ifndef SHARE_GC_G1_G1EVACFAILURE_HPP
#define SHARE_GC_G1_G1EVACFAILURE_HPP

#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/heapRegionManager.hpp"
#include "gc/shared/workerThread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/bitMap.hpp"

class G1CollectedHeap;
class G1ConcurrentMark;
class G1EvacFailureRegions;

// Task to fixup self-forwarding pointers
// installed as a result of an evacuation failure.
class G1ParRemoveSelfForwardPtrsTask: public WorkerTask {
protected:
// Task to fixup self-forwarding pointers within the objects installed as a result
// of an evacuation failure.
class G1RemoveSelfForwardsTask : public WorkerTask {
  G1CollectedHeap* _g1h;
  HeapRegionClaimer _hrclaimer;
  G1ConcurrentMark* _cm;

  G1EvacFailureRegions* _evac_failure_regions;
  uint volatile _num_failed_regions;
  CHeapBitMap _chunk_bitmap;

  uint _num_chunks_per_region;
  uint _num_evac_fail_regions;
  size_t _chunk_size;

  bool claim_chunk(uint chunk_idx) {
    return _chunk_bitmap.par_set_bit(chunk_idx);
  }

  void process_chunk(uint worker_id, uint chunk_idx);

public:
  G1ParRemoveSelfForwardPtrsTask(G1EvacFailureRegions* evac_failure_regions);
  explicit G1RemoveSelfForwardsTask(G1EvacFailureRegions* evac_failure_regions);

  void work(uint worker_id);

  uint num_failed_regions() const;
};

#endif // SHARE_GC_G1_G1EVACFAILURE_HPP

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -24,7 +24,8 @@

#include "precompiled.hpp"

#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1BatchedTask.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.inline.hpp"
#include "gc/g1/heapRegion.hpp"
#include "memory/allocation.hpp"
@@ -34,8 +35,7 @@
G1EvacFailureRegions::G1EvacFailureRegions() :
  _regions_failed_evacuation(mtGC),
  _evac_failure_regions(nullptr),
  _evac_failure_regions_cur_length(0),
  _max_regions(0) { }
  _evac_failure_regions_cur_length(0) { }

G1EvacFailureRegions::~G1EvacFailureRegions() {
  assert(_evac_failure_regions == nullptr, "not cleaned up");
@@ -43,29 +43,27 @@ G1EvacFailureRegions::~G1EvacFailureRegions() {

void G1EvacFailureRegions::pre_collection(uint max_regions) {
  Atomic::store(&_evac_failure_regions_cur_length, 0u);
  _max_regions = max_regions;
  _regions_failed_evacuation.resize(_max_regions);
  _evac_failure_regions = NEW_C_HEAP_ARRAY(uint, _max_regions, mtGC);
  _regions_failed_evacuation.resize(max_regions);
  _evac_failure_regions = NEW_C_HEAP_ARRAY(uint, max_regions, mtGC);
}

void G1EvacFailureRegions::post_collection() {
  _regions_failed_evacuation.resize(0);

  FREE_C_HEAP_ARRAY(uint, _evac_failure_regions);
  _evac_failure_regions = nullptr;
  _max_regions = 0; // To have any record() attempt fail in the future.
}

bool G1EvacFailureRegions::contains(uint region_idx) const {
  return _regions_failed_evacuation.par_at(region_idx, memory_order_relaxed);
}

void G1EvacFailureRegions::par_iterate(HeapRegionClosure* closure,
                                       HeapRegionClaimer* _hrclaimer,
                                       HeapRegionClaimer* hrclaimer,
                                       uint worker_id) const {
  G1CollectedHeap::heap()->par_iterate_regions_array(closure,
                                                     _hrclaimer,
                                                     hrclaimer,
                                                     _evac_failure_regions,
                                                     Atomic::load(&_evac_failure_regions_cur_length),
                                                     worker_id);
}

bool G1EvacFailureRegions::contains(uint region_idx) const {
  assert(region_idx < _max_regions, "must be");
  return _regions_failed_evacuation.par_at(region_idx, memory_order_relaxed);
}

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,8 @@
#include "runtime/atomic.hpp"
#include "utilities/bitMap.hpp"

class G1AbstractSubTask;
class G1HeapRegionChunkClosure;
class HeapRegionClosure;
class HeapRegionClaimer;

@@ -41,13 +43,16 @@ class G1EvacFailureRegions {
  uint* _evac_failure_regions;
  // Number of regions evacuation failed in the current collection.
  volatile uint _evac_failure_regions_cur_length;
  // Maximum of regions number.
  uint _max_regions;

public:
  G1EvacFailureRegions();
  ~G1EvacFailureRegions();

  uint get_region_idx(uint idx) const {
    assert(idx < _evac_failure_regions_cur_length, "precondition");
    return _evac_failure_regions[idx];
  }

  // Sets up the bitmap and failed regions array for addition.
  void pre_collection(uint max_regions);
  // Drops memory for internal data structures, but keep counts.
@@ -55,9 +60,12 @@ public:

  bool contains(uint region_idx) const;
  void par_iterate(HeapRegionClosure* closure,
                   HeapRegionClaimer* _hrclaimer,
                   HeapRegionClaimer* hrclaimer,
                   uint worker_id) const;

  // Return a G1AbstractSubTask which does necessary preparation for evacuation failure regions
  G1AbstractSubTask* create_prepare_regions_task();

  uint num_regions_failed_evacuation() const {
    return Atomic::load(&_evac_failure_regions_cur_length);
  }

@@ -1,5 +1,5 @@
/*
 * Copyright (c) 2021, Huawei Technologies Co., Ltd. All rights reserved.
 * Copyright (c) 2021, 2022, Huawei Technologies Co., Ltd. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -27,15 +27,18 @@

#include "gc/g1/g1EvacFailureRegions.hpp"
#include "runtime/atomic.hpp"
#include "utilities/bitMap.inline.hpp"

bool G1EvacFailureRegions::record(uint region_idx) {
  assert(region_idx < _max_regions, "must be");
  bool success = _regions_failed_evacuation.par_set_bit(region_idx,
                                                        memory_order_relaxed);
  if (success) {
    size_t offset = Atomic::fetch_and_add(&_evac_failure_regions_cur_length, 1u);
    _evac_failure_regions[offset] = region_idx;

    G1CollectedHeap* g1h = G1CollectedHeap::heap();
    HeapRegion* hr = g1h->region_at(region_idx);
    G1CollectorState* state = g1h->collector_state();
    hr->note_evacuation_failure(state->in_concurrent_start_gc());
  }
  return success;
}

@@ -103,6 +103,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
  _gc_par_phases[Other] = new WorkerDataArray<double>("Other", "GC Worker Other (ms):", max_gc_threads);
  _gc_par_phases[MergePSS] = new WorkerDataArray<double>("MergePSS", "Merge Per-Thread State (ms):", max_gc_threads);
  _gc_par_phases[RestoreRetainedRegions] = new WorkerDataArray<double>("RestoreRetainedRegions", "Restore Retained Regions (ms):", max_gc_threads);
  _gc_par_phases[RemoveSelfForwards] = new WorkerDataArray<double>("RemoveSelfForwards", "Remove Self Forwards (ms):", max_gc_threads);
  _gc_par_phases[ClearCardTable] = new WorkerDataArray<double>("ClearLoggedCards", "Clear Logged Cards (ms):", max_gc_threads);
  _gc_par_phases[RecalculateUsed] = new WorkerDataArray<double>("RecalculateUsed", "Recalculate Used Memory (ms):", max_gc_threads);
  _gc_par_phases[ResetHotCardCache] = new WorkerDataArray<double>("ResetHotCardCache", "Reset Hot Card Cache (ms):", max_gc_threads);
@@ -112,6 +113,7 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :
#endif
  _gc_par_phases[EagerlyReclaimHumongousObjects] = new WorkerDataArray<double>("EagerlyReclaimHumongousObjects", "Eagerly Reclaim Humongous Objects (ms):", max_gc_threads);
  _gc_par_phases[RestorePreservedMarks] = new WorkerDataArray<double>("RestorePreservedMarks", "Restore Preserved Marks (ms):", max_gc_threads);
  _gc_par_phases[ClearRetainedRegionBitmaps] = new WorkerDataArray<double>("ClearRetainedRegionsBitmap", "Clear Retained Region Bitmaps (ms):", max_gc_threads);

  _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Cards:", ScanHRScannedCards);
  _gc_par_phases[ScanHR]->create_thread_work_items("Scanned Blocks:", ScanHRScannedBlocks);
@@ -134,6 +136,11 @@ G1GCPhaseTimes::G1GCPhaseTimes(STWGCTimer* gc_timer, uint max_gc_threads) :

  _gc_par_phases[RestoreRetainedRegions]->create_thread_work_items("Evacuation Failure Regions:", RestoreRetainedRegionsNum);

  _gc_par_phases[RemoveSelfForwards]->create_thread_work_items("Forward Chunks:", RemoveSelfForwardChunksNum);
  _gc_par_phases[RemoveSelfForwards]->create_thread_work_items("Empty Forward Chunks:", RemoveSelfForwardEmptyChunksNum);
  _gc_par_phases[RemoveSelfForwards]->create_thread_work_items("Forward Objects:", RemoveSelfForwardObjectsNum);
  _gc_par_phases[RemoveSelfForwards]->create_thread_work_items("Forward Bytes:", RemoveSelfForwardObjectsBytes);

  _gc_par_phases[EagerlyReclaimHumongousObjects]->create_thread_work_items("Humongous Total", EagerlyReclaimNumTotal);
  _gc_par_phases[EagerlyReclaimHumongousObjects]->create_thread_work_items("Humongous Candidates", EagerlyReclaimNumCandidates);
  _gc_par_phases[EagerlyReclaimHumongousObjects]->create_thread_work_items("Humongous Reclaimed", EagerlyReclaimNumReclaimed);
@@ -483,6 +490,7 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed
  debug_phase(_gc_par_phases[RecalculateUsed], 1);
  if (evacuation_failed) {
    debug_phase(_gc_par_phases[RestoreRetainedRegions], 1);
    debug_phase(_gc_par_phases[RemoveSelfForwards], 2);
  }

  trace_phase(_gc_par_phases[RedirtyCards]);
@@ -490,6 +498,7 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed
  if (evacuation_failed) {
    debug_phase(_gc_par_phases[RecalculateUsed], 1);
    debug_phase(_gc_par_phases[RestorePreservedMarks], 1);
    debug_phase(_gc_par_phases[ClearRetainedRegionBitmaps], 1);
  }
  debug_phase(_gc_par_phases[ResetHotCardCache], 1);
  debug_phase(_gc_par_phases[PurgeCodeRoots], 1);

@@ -78,6 +78,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
    SampleCollectionSetCandidates,
    MergePSS,
    RestoreRetainedRegions,
    RemoveSelfForwards,
    ClearCardTable,
    RecalculateUsed,
    ResetHotCardCache,
@@ -87,6 +88,7 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
#endif
    EagerlyReclaimHumongousObjects,
    RestorePreservedMarks,
    ClearRetainedRegionBitmaps,
    CLDClearClaimedMarks,
    ResetMarkingState,
    NoteStartOfMark,
@@ -149,6 +151,13 @@ class G1GCPhaseTimes : public CHeapObj<mtGC> {
    RestoreRetainedRegionsNum,
  };

  enum RemoveSelfForwardsWorkItems {
    RemoveSelfForwardChunksNum,
    RemoveSelfForwardEmptyChunksNum,
    RemoveSelfForwardObjectsNum,
    RemoveSelfForwardObjectsBytes,
  };

  enum GCEagerlyReclaimHumongousObjectsItems {
    EagerlyReclaimNumTotal,
    EagerlyReclaimNumCandidates,

@@ -623,15 +623,14 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    // Objects failing evacuation will turn into old objects since the regions
    // are relabeled as such. We mark the failing objects in the marking bitmap
    // and later use it to handle all failed objects.
    _g1h->mark_evac_failure_object(old);

    if (_evac_failure_regions->record(r->hrm_index())) {
      _g1h->hr_printer()->evac_failure(r);
    }

    // Mark the failing object in the marking bitmap and later use the bitmap to handle
    // evacuation failure recovery.
    _g1h->mark_evac_failure_object(_worker_id, old, word_sz);

    _preserved_marks->push_if_necessary(old, m);

    ContinuationGCSupport::transform_stack_chunk(old);

@@ -103,21 +103,6 @@ class G1RemSetScanState : public CHeapObj<mtGC> {
  // to (>=) HeapRegion::CardsPerRegion (completely scanned).
  uint volatile* _card_table_scan_state;

  // Return "optimal" number of chunks per region we want to use for claiming areas
  // within a region to claim. Dependent on the region size as proxy for the heap
  // size, we limit the total number of chunks to limit memory usage and maintenance
  // effort of that table vs. granularity of distributing scanning work.
  // Testing showed that 64 for 1M/2M region, 128 for 4M/8M regions, 256 for 16/32M regions,
  // and so on seems to be such a good trade-off.
  static uint get_chunks_per_region(uint log_region_size) {
    // Limit the expected input values to current known possible values of the
    // (log) region size. Adjust as necessary after testing if changing the permissible
    // values for region size.
    assert(log_region_size >= 20 && log_region_size <= 29,
           "expected value in [20,29], but got %u", log_region_size);
    return 1u << (log_region_size / 2 - 4);
  }

  uint _scan_chunks_per_region; // Number of chunks per region.
  uint8_t _log_scan_chunks_per_region; // Log of number of chunks per region.
  bool* _region_scan_chunks;
@@ -284,7 +269,7 @@ public:
    _max_reserved_regions(0),
    _collection_set_iter_state(NULL),
    _card_table_scan_state(NULL),
    _scan_chunks_per_region(get_chunks_per_region(HeapRegion::LogOfHRGrainBytes)),
    _scan_chunks_per_region(G1CollectedHeap::get_chunks_per_region()),
    _log_scan_chunks_per_region(log2i(_scan_chunks_per_region)),
    _region_scan_chunks(NULL),
    _num_total_scan_chunks(0),
@@ -1268,7 +1253,7 @@ class G1MergeHeapRootsTask : public WorkerTask {

  void assert_bitmap_clear(HeapRegion* hr, const G1CMBitMap* bitmap) {
    assert(bitmap->get_next_marked_addr(hr->bottom(), hr->end()) == hr->end(),
           "Bitmap should have no mark for region %u", hr->hrm_index());
           "Bitmap should have no mark for region %u (%s)", hr->hrm_index(), hr->get_short_type_str());
  }

  bool should_clear_region(HeapRegion* hr) const {
@@ -1297,6 +1282,7 @@ class G1MergeHeapRootsTask : public WorkerTask {
    // so the bitmap for the regions in the collection set must be cleared if not already.
    if (should_clear_region(hr)) {
      _g1h->clear_bitmap_for_region(hr);
      hr->reset_top_at_mark_start();
    } else {
      assert_bitmap_clear(hr, _g1h->concurrent_mark()->mark_bitmap());
    }

@@ -97,19 +97,22 @@ public:
  }
};

class G1PostEvacuateCollectionSetCleanupTask1::RemoveSelfForwardPtrsTask : public G1AbstractSubTask {
  G1ParRemoveSelfForwardPtrsTask _task;
class G1PostEvacuateCollectionSetCleanupTask1::RestoreRetainedRegionsTask : public G1AbstractSubTask {
  G1RemoveSelfForwardsTask _task;
  G1EvacFailureRegions* _evac_failure_regions;

public:
  RemoveSelfForwardPtrsTask(G1EvacFailureRegions* evac_failure_regions) :
  RestoreRetainedRegionsTask(G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::RestoreRetainedRegions),
    _task(evac_failure_regions),
    _evac_failure_regions(evac_failure_regions) { }
    _evac_failure_regions(evac_failure_regions) {
  }

  double worker_cost() const override {
    assert(_evac_failure_regions->evacuation_failed(), "Should not call this if not executed");
    return _evac_failure_regions->num_regions_failed_evacuation();

    double workers_per_region = (double)G1CollectedHeap::get_chunks_per_region() / G1RestoreRetainedRegionChunksPerWorker;
    return workers_per_region * _evac_failure_regions->num_regions_failed_evacuation();
  }
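A rough sketch of the arithmetic behind worker_cost() above, under assumed inputs: with the default G1RestoreRetainedRegionChunksPerWorker of 16 and 64 chunks per region, each retained region accounts for 4 workers' worth of work, so three failed regions give a cost of 12. The values below are illustrative only.

// Not HotSpot code: just the worker_cost() arithmetic with assumed inputs.
#include <cstdio>

int main() {
  const double chunks_per_region = 64.0; // e.g. 1M/2M regions
  const double chunks_per_worker = 16.0; // default G1RestoreRetainedRegionChunksPerWorker
  const double failed_regions    = 3.0;  // assumed for illustration

  const double workers_per_region = chunks_per_region / chunks_per_worker; // 4.0
  std::printf("worker_cost = %.1f\n", workers_per_region * failed_regions); // 12.0
  return 0;
}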

  void do_work(uint worker_id) override {
@@ -128,10 +131,10 @@ G1PostEvacuateCollectionSetCleanupTask1::G1PostEvacuateCollectionSetCleanupTask1
  if (SampleCollectionSetCandidatesTask::should_execute()) {
    add_serial_task(new SampleCollectionSetCandidatesTask());
  }
  if (evacuation_failed) {
    add_parallel_task(new RemoveSelfForwardPtrsTask(evac_failure_regions));
  }
  add_parallel_task(G1CollectedHeap::heap()->rem_set()->create_cleanup_after_scan_heap_roots_task());
  if (evacuation_failed) {
    add_parallel_task(new RestoreRetainedRegionsTask(evac_failure_regions));
  }
}

class G1FreeHumongousRegionClosure : public HeapRegionClosure {
@@ -317,7 +320,6 @@ public:
};

class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
private:
  size_t _num_dirtied;
  G1CollectedHeap* _g1h;
  G1CardTable* _g1_ct;
@@ -333,7 +335,7 @@ class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
    return _g1h->is_in_cset(hr) && !_evac_failure_regions->contains(hr->hrm_index());
  }

public:
public:
  RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h, G1EvacFailureRegions* evac_failure_regions) :
    G1CardTableEntryClosure(),
    _num_dirtied(0),
@@ -354,6 +356,45 @@ class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
  size_t num_dirtied() const { return _num_dirtied; }
};

class G1PostEvacuateCollectionSetCleanupTask2::ClearRetainedRegionBitmaps : public G1AbstractSubTask {
  G1EvacFailureRegions* _evac_failure_regions;
  HeapRegionClaimer _claimer;

  class ClearRetainedRegionBitmapsClosure : public HeapRegionClosure {
  public:

    bool do_heap_region(HeapRegion* r) override {
      assert(r->bottom() == r->top_at_mark_start(),
             "TAMS should have been reset for region %u", r->hrm_index());
      G1CollectedHeap::heap()->clear_bitmap_for_region(r);
      return false;
    }
  };

public:

  ClearRetainedRegionBitmaps(G1EvacFailureRegions* evac_failure_regions) :
    G1AbstractSubTask(G1GCPhaseTimes::ClearRetainedRegionBitmaps),
    _evac_failure_regions(evac_failure_regions),
    _claimer(0) {
    assert(!G1CollectedHeap::heap()->collector_state()->in_concurrent_start_gc(),
           "Should not clear bitmaps of retained regions during concurrent start");
  }

  void set_max_workers(uint max_workers) override {
    _claimer.set_n_workers(max_workers);
  }

  double worker_cost() const override {
    return _evac_failure_regions->num_regions_failed_evacuation();
  }

  void do_work(uint worker_id) override {
    ClearRetainedRegionBitmapsClosure cl;
    _evac_failure_regions->par_iterate(&cl, &_claimer, worker_id);
  }
};

class G1PostEvacuateCollectionSetCleanupTask2::RedirtyLoggedCardsTask : public G1AbstractSubTask {
  G1RedirtyCardsQueueSet* _rdcqs;
  BufferNode* volatile _nodes;
@@ -529,6 +570,15 @@ class FreeCSetClosure : public HeapRegionClosure {
      // gen statistics, but we need to update old gen statistics.
      stats()->account_failed_region(r);

      G1GCPhaseTimes* p = _g1h->phase_times();
      assert(!r->is_pinned(), "Unexpected pinned region at index %u", r->hrm_index());
      assert(r->in_collection_set(), "bad CS");

      p->record_or_add_thread_work_item(G1GCPhaseTimes::RestoreRetainedRegions,
                                        _worker_id,
                                        1,
                                        G1GCPhaseTimes::RestoreRetainedRegionsNum);

      // Update the region state due to the failed evacuation.
      r->handle_evacuation_failure();

@@ -676,9 +726,13 @@ G1PostEvacuateCollectionSetCleanupTask2::G1PostEvacuateCollectionSetCleanupTask2

  if (evac_failure_regions->evacuation_failed()) {
    add_parallel_task(new RestorePreservedMarksTask(per_thread_states->preserved_marks_set()));
    // Keep marks on bitmaps in retained regions during concurrent start - they will all be old.
    if (!G1CollectedHeap::heap()->collector_state()->in_concurrent_start_gc()) {
      add_parallel_task(new ClearRetainedRegionBitmaps(evac_failure_regions));
    }
  }
  add_parallel_task(new RedirtyLoggedCardsTask(per_thread_states->rdcqs(), evac_failure_regions));
  add_parallel_task(new FreeCollectionSetTask(evacuation_info,
                                              per_thread_states->surviving_young_words(),
                                              evac_failure_regions));
                                              per_thread_states->surviving_young_words(),
                                              evac_failure_regions));
}

@@ -39,13 +39,13 @@ class G1ParScanThreadStateSet;
// - Merge PSS (s)
// - Recalculate Used (s)
// - Sample Collection Set Candidates (s)
// - Remove Self Forwards (on evacuation failure)
// - Clear Card Table
// - Restore retained regions (on evacuation failure)
class G1PostEvacuateCollectionSetCleanupTask1 : public G1BatchedTask {
  class MergePssTask;
  class RecalculateUsedTask;
  class SampleCollectionSetCandidatesTask;
  class RemoveSelfForwardPtrsTask;
  class RestoreRetainedRegionsTask;

public:
  G1PostEvacuateCollectionSetCleanupTask1(G1ParScanThreadStateSet* per_thread_states,
@@ -57,6 +57,7 @@ public:
// - Purge Code Roots (s)
// - Reset Hot Card Cache (s)
// - Update Derived Pointers (s)
// - Clear Retained Region Bitmaps (on evacuation failure)
// - Redirty Logged Cards
// - Restore Preserved Marks (on evacuation failure)
// - Free Collection Set
@@ -68,6 +69,7 @@ class G1PostEvacuateCollectionSetCleanupTask2 : public G1BatchedTask {
  class UpdateDerivedPointersTask;
#endif

  class ClearRetainedRegionBitmaps;
  class RedirtyLoggedCardsTask;
  class RestorePreservedMarksTask;
  class FreeCollectionSetTask;

@@ -372,6 +372,11 @@
          number of free regions and the expected survival rates in each \
          section of the heap.") \
          \
  product(uint, G1RestoreRetainedRegionChunksPerWorker, 16, DIAGNOSTIC, \
          "The number of chunks assigned per worker thread for " \
          "retained region restore purposes.") \
          range(1, 256) \
          \
  GC_G1_EVACUATION_FAILURE_FLAGS(develop, \
                                 develop_pd, \
                                 product, \

@@ -103,7 +103,11 @@ void HeapRegion::setup_heap_region_size(size_t max_heap_size) {
void HeapRegion::handle_evacuation_failure() {
  uninstall_surv_rate_group();
  clear_young_index_in_cset();
  set_old();
  clear_index_in_opt_cset();
  move_to_old();

  _rem_set->clean_code_roots(this);
  _rem_set->clear_locked(true /* only_cardset */);
}

void HeapRegion::unlink_from_list() {
@@ -268,9 +272,11 @@ void HeapRegion::report_region_type_change(G1HeapRegionTraceType::Type to) {
            used());
}

void HeapRegion::note_self_forwarding_removal_start(bool during_concurrent_start) {
  // We always scrub the region to make sure the entire region is
  // parsable after the self-forwarding point removal.
void HeapRegion::note_evacuation_failure(bool during_concurrent_start) {
  // PB must be bottom - we only evacuate old gen regions after scrubbing, and
  // young gen regions never have their PB set to anything other than bottom.
  assert(parsable_bottom_acquire() == bottom(), "must be");

  _garbage_bytes = 0;

  if (during_concurrent_start) {
@@ -285,14 +291,11 @@ void HeapRegion::note_self_forwarding_removal_start(bool during_concurrent_start
  }
}

void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) {
  assert(marked_bytes <= used(),
         "marked: " SIZE_FORMAT " used: " SIZE_FORMAT, marked_bytes, used());
  _garbage_bytes = used() - marked_bytes;
void HeapRegion::note_self_forward_chunk_done(size_t garbage_bytes) {
  Atomic::add(&_garbage_bytes, garbage_bytes, memory_order_relaxed);
}

// Code roots support

void HeapRegion::add_code_root(nmethod* nm) {
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->add_code_root(nm);
@@ -441,7 +444,7 @@ void HeapRegion::print_on(outputStream* st) const {
    st->print("| ");
  }
  st->print("|TAMS " PTR_FORMAT "| PB " PTR_FORMAT "| %s ",
            p2i(top_at_mark_start()), p2i(parsable_bottom_acquire()), rem_set()->get_state_str());
            p2i(top_at_mark_start()), p2i(parsable_bottom_acquire()), rem_set()->get_state_str());
  if (UseNUMA) {
    G1NUMA* numa = G1NUMA::numa();
    if (node_index() < numa->num_active_nodes()) {

@@ -143,9 +143,8 @@ private:
                        HeapWord* const pb,
                        HeapWord* first_block) const;

  static bool obj_is_filler(oop obj);

public:

  // Returns the address of the block reaching into or starting at addr.
  HeapWord* block_start(const void* addr) const;
  HeapWord* block_start(const void* addr, HeapWord* const pb) const;
@@ -244,11 +243,7 @@ private:
  // Amount of dead data in the region.
  size_t _garbage_bytes;

  void init_top_at_mark_start() {
    set_top_at_mark_start(bottom());
    _parsable_bottom = bottom();
    _garbage_bytes = 0;
  }
  inline void init_top_at_mark_start();

  // Data for young region survivor prediction.
  uint _young_index_in_cset;
@@ -376,7 +371,7 @@ public:
  inline void note_end_of_scrubbing();

  // Notify the region that the (corresponding) bitmap has been cleared.
  inline void note_end_of_clearing();
  inline void reset_top_at_mark_start();

  // During the concurrent scrubbing phase, can there be any areas with unloaded
  // classes or dead objects in this region?
@@ -505,13 +500,13 @@ public:
  // Clear the card table corresponding to this region.
  void clear_cardtable();

  // Notify the region that we are about to start processing
  // self-forwarded objects during evac failure handling.
  void note_self_forwarding_removal_start(bool during_concurrent_start);
  // Notify the region that an evacuation failure occurred for an object within this
  // region.
  void note_evacuation_failure(bool during_concurrent_start);

  // Notify the region that we have finished processing self-forwarded
  // objects during evac failure handling.
  void note_self_forwarding_removal_end(size_t marked_bytes);
  // Notify the region that we have partially finished processing self-forwarded
  // objects during evacuation failure handling.
  void note_self_forward_chunk_done(size_t garbage_bytes);

  uint index_in_opt_cset() const {
    assert(has_index_in_opt_cset(), "Opt cset index not set.");

@@ -139,11 +139,6 @@ inline bool HeapRegion::block_is_obj(const HeapWord* const p, HeapWord* const pb
  return is_marked_in_bitmap(cast_to_oop(p));
}

inline bool HeapRegion::obj_is_filler(const oop obj) {
  Klass* k = obj->klass();
  return k == Universe::fillerArrayKlassObj() || k == vmClasses::FillerObject_klass();
}

inline bool HeapRegion::is_obj_dead(const oop obj, HeapWord* const pb) const {
  assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));

@@ -159,7 +154,7 @@ inline bool HeapRegion::is_obj_dead(const oop obj, HeapWord* const pb) const {
  }

  // This object is in the parsable part of the heap, live unless scrubbed.
  return obj_is_filler(obj);
  return G1CollectedHeap::is_obj_filler(obj);
}

inline HeapWord* HeapRegion::next_live_in_unparsable(G1CMBitMap* const bitmap, const HeapWord* p, HeapWord* const limit) const {
@@ -201,7 +196,7 @@ inline void HeapRegion::reset_skip_compacting_after_full_gc() {

  _garbage_bytes = 0;

  set_top_at_mark_start(bottom());
  reset_top_at_mark_start();

  reset_after_full_gc_common();
}
@@ -322,7 +317,13 @@ inline void HeapRegion::note_end_of_scrubbing() {
  reset_parsable_bottom();
}

inline void HeapRegion::note_end_of_clearing() {
inline void HeapRegion::init_top_at_mark_start() {
  reset_top_at_mark_start();
  _parsable_bottom = bottom();
  _garbage_bytes = 0;
}

inline void HeapRegion::reset_top_at_mark_start() {
  // We do not need a release store here because
  //
  // - if this method is called during concurrent bitmap clearing, we do not read

@@ -125,10 +125,12 @@ public class TestG1ParallelPhases {
    // Some GC phases may or may not occur depending on environment. Filter them out
    // since we can not reliably guarantee that they occur (or not).
    Set<String> optPhases = of(
        // The following two phases only occur on evacuation failure.
        // The following phases only occur on evacuation failure.
        "RestoreRetainedRegions",
        "RemoveSelfForwards",
        "RestorePreservedMarks",

        "ClearRetainedRegionsBitmap",
        // Generally optional phases.
        "OptScanHR",
        "OptMergeRS",
        "OptCodeRoots",