8278917: Use Prev Bitmap for recording evac failed objects

Reviewed-by: ayang, mli, tschatzl
This commit is contained in:
Stefan Johansson 2021-12-21 14:03:08 +00:00
parent 29bd73638a
commit f4f2f32cd1
16 changed files with 118 additions and 308 deletions

View File

@ -2957,14 +2957,18 @@ void G1CollectedHeap::record_obj_copy_mem_stats() {
create_g1_evac_summary(&_old_evac_stats));
}
// Clears the prev mark bitmap over the whole [bottom, end) range of the
// given region. Called from free_region() (under G1VerifyBitmaps) and from
// G1ClearBitmapClosure to prepare old collection-set regions, so the prev
// bitmap can later record objects that failed evacuation.
void G1CollectedHeap::clear_prev_bitmap_for_region(HeapRegion* hr) {
MemRegion mr(hr->bottom(), hr->end());
concurrent_mark()->clear_range_in_prev_bitmap(mr);
}
void G1CollectedHeap::free_region(HeapRegion* hr, FreeRegionList* free_list) {
assert(!hr->is_free(), "the region should not be free");
assert(!hr->is_empty(), "the region should not be empty");
assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
if (G1VerifyBitmaps) {
MemRegion mr(hr->bottom(), hr->end());
concurrent_mark()->clear_range_in_prev_bitmap(mr);
clear_prev_bitmap_for_region(hr);
}
// Clear the card counts for this region.

View File

@ -622,6 +622,8 @@ public:
// for all regions.
void verify_region_attr_remset_is_tracked() PRODUCT_RETURN;
void clear_prev_bitmap_for_region(HeapRegion* hr);
bool is_user_requested_concurrent_full_gc(GCCause::Cause cause);
// This is called at the start of either a concurrent cycle or a Full
@ -1252,6 +1254,9 @@ public:
inline bool is_obj_dead_full(const oop obj, const HeapRegion* hr) const;
inline bool is_obj_dead_full(const oop obj) const;
// Mark the live object that failed evacuation in the prev bitmap.
inline void mark_evac_failure_object(const oop obj, uint worker_id) const;
G1ConcurrentMark* concurrent_mark() const { return _cm; }
// Refinement

View File

@ -29,6 +29,7 @@
#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1ConcurrentMark.inline.hpp"
#include "gc/g1/g1EvacFailureRegions.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
@ -247,6 +248,13 @@ inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
return is_obj_dead_full(obj, heap_region_containing(obj));
}
// Records a live object that failed evacuation by marking it in the prev
// bitmap (par_mark allows concurrent calls from multiple GC workers).
// NOTE(review): 'worker_id' is unused in this body — presumably kept for
// interface symmetry or future per-worker accounting; confirm with callers.
inline void G1CollectedHeap::mark_evac_failure_object(const oop obj, uint worker_id) const {
// All objects failing evacuation are live. What we'll do is
// that we'll update the prev marking info so that they are
// all under PTAMS and explicitly marked.
_cm->par_mark_in_prev_bitmap(obj);
}
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
_humongous_reclaim_candidates.set_candidate(region, value);

View File

@ -563,7 +563,7 @@ public:
void cleanup();
// Mark in the previous bitmap. Caution: the prev bitmap is usually read-only, so use
// this carefully.
inline void mark_in_prev_bitmap(oop p);
inline void par_mark_in_prev_bitmap(oop p);
// Clears marks for all objects in the given range, for the prev or
// next bitmaps. Caution: the previous bitmap is usually

View File

@ -268,9 +268,8 @@ inline bool G1CMTask::deal_with_reference(T* p) {
return make_reference_grey(obj);
}
inline void G1ConcurrentMark::mark_in_prev_bitmap(oop p) {
assert(!_prev_mark_bitmap->is_marked(p), "sanity");
_prev_mark_bitmap->mark(p);
inline void G1ConcurrentMark::par_mark_in_prev_bitmap(oop p) {
_prev_mark_bitmap->par_mark(p);
}
bool G1ConcurrentMark::is_marked_in_prev_bitmap(oop p) const {

View File

@ -37,7 +37,7 @@
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
class RemoveSelfForwardPtrObjClosure {
G1CollectedHeap* _g1h;
G1ConcurrentMark* _cm;
HeapRegion* _hr;
@ -60,13 +60,13 @@ public:
size_t marked_bytes() { return _marked_words * HeapWordSize; }
// Iterate over the live objects in the region to find self-forwarded objects
// Handle the marked objects in the region. These are self-forwarded objects
// that need to be kept live. We need to update the remembered sets of these
// objects. Further update the BOT and marks.
// We can coalesce and overwrite the remaining heap contents with dummy objects
// as they have either been dead or evacuated (which are unreferenced now, i.e.
// dead too) already.
void do_object(oop obj) {
size_t apply(oop obj) {
HeapWord* obj_addr = cast_from_oop<HeapWord*>(obj);
assert(_last_forwarded_object_end <= obj_addr, "should iterate in ascending address order");
assert(_hr->is_in(obj_addr), "sanity");
@ -75,12 +75,9 @@ public:
assert(obj->is_forwarded() && obj->forwardee() == obj, "sanity");
zap_dead_objects(_last_forwarded_object_end, obj_addr);
// We consider all objects that we find self-forwarded to be
// live. What we'll do is that we'll update the prev marking
// info so that they are all under PTAMS and explicitly marked.
if (!_cm->is_marked_in_prev_bitmap(obj)) {
_cm->mark_in_prev_bitmap(obj);
}
// Zapping clears the bitmap, make sure it didn't clear too much.
assert(_cm->is_marked_in_prev_bitmap(obj), "should be correctly marked");
if (_during_concurrent_start) {
// For the next marking info we'll only mark the
// self-forwarded objects explicitly if we are during
@ -92,7 +89,7 @@ public:
// explicitly and all objects in the CSet are considered
// (implicitly) live. So, we won't mark them explicitly and
// we'll leave them over NTAMS.
_cm->mark_in_next_bitmap(_worker_id, _hr, obj);
_cm->mark_in_next_bitmap(_worker_id, obj);
}
size_t obj_size = obj->size();
@ -102,6 +99,7 @@ public:
HeapWord* obj_end = obj_addr + obj_size;
_last_forwarded_object_end = obj_end;
_hr->alloc_block_in_bot(obj_addr, obj_end);
return obj_size;
}
// Fill the memory area from start to end with filler objects, and update the BOT
@ -161,8 +159,11 @@ public:
RemoveSelfForwardPtrObjClosure rspc(hr,
during_concurrent_start,
_worker_id);
// Iterates evac failure objs which are recorded during evacuation.
hr->process_and_drop_evac_failure_objs(&rspc);
// All objects that failed evacuation has been marked in the prev bitmap.
// Use the bitmap to apply the above closure to all failing objects.
G1CMBitMap* bitmap = const_cast<G1CMBitMap*>(_g1h->concurrent_mark()->prev_mark_bitmap());
hr->apply_to_marked_objects(bitmap, &rspc);
// Need to zap the remainder area of the processed region.
rspc.zap_remainder();
@ -172,26 +173,26 @@ public:
bool do_heap_region(HeapRegion *hr) {
assert(!hr->is_pinned(), "Unexpected pinned region at index %u", hr->hrm_index());
assert(hr->in_collection_set(), "bad CS");
assert(_evac_failure_regions->contains(hr->hrm_index()), "precondition");
if (_evac_failure_regions->contains(hr->hrm_index())) {
hr->clear_index_in_opt_cset();
hr->clear_index_in_opt_cset();
bool during_concurrent_start = _g1h->collector_state()->in_concurrent_start_gc();
bool during_concurrent_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();
bool during_concurrent_start = _g1h->collector_state()->in_concurrent_start_gc();
bool during_concurrent_mark = _g1h->collector_state()->mark_or_rebuild_in_progress();
hr->note_self_forwarding_removal_start(during_concurrent_start,
during_concurrent_mark);
_g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr);
hr->note_self_forwarding_removal_start(during_concurrent_start,
during_concurrent_mark);
hr->reset_bot();
hr->reset_bot();
size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_concurrent_start);
size_t live_bytes = remove_self_forward_ptr_by_walking_hr(hr, during_concurrent_start);
hr->rem_set()->clean_strong_code_roots(hr);
hr->rem_set()->clear_locked(true);
hr->rem_set()->clean_strong_code_roots(hr);
hr->rem_set()->clear_locked(true);
hr->note_self_forwarding_removal_end(live_bytes);
_g1h->verifier()->check_bitmaps("Self-Forwarding Ptr Removal", hr);
hr->note_self_forwarding_removal_end(live_bytes);
}
return false;
}
};

View File

@ -1,123 +0,0 @@
/*
* Copyright (c) 2021, Huawei and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1EvacFailureObjectsSet.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1SegmentedArray.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "utilities/quickSort.hpp"
const G1SegmentedArrayAllocOptions G1EvacFailureObjectsSet::_alloc_options =
G1SegmentedArrayAllocOptions((uint)sizeof(OffsetInRegion), SegmentLength, UINT_MAX, Alignment);
G1SegmentedArrayFreeList<mtGC> G1EvacFailureObjectsSet::_free_segment_list;
#ifdef ASSERT
// Debug-only sanity check: a recorded offset must fit inside one heap
// region, i.e. be smaller than the region size in words.
void G1EvacFailureObjectsSet::assert_is_valid_offset(size_t offset) const {
const uint max_offset = 1u << (HeapRegion::LogOfHRGrainBytes - LogHeapWordSize);
assert(offset < max_offset, "must be, but is " SIZE_FORMAT, offset);
}
#endif
// Converts a recorded word offset back into the oop at _bottom + offset.
oop G1EvacFailureObjectsSet::from_offset(OffsetInRegion offset) const {
assert_is_valid_offset(offset);
return cast_to_oop(_bottom + offset);
}
// Converts an oop inside this set's region into a compact word offset from
// _bottom. The debug assert verifies the round-trip through from_offset(),
// i.e. that the narrowing to OffsetInRegion loses no information.
G1EvacFailureObjectsSet::OffsetInRegion G1EvacFailureObjectsSet::to_offset(oop obj) const {
const HeapWord* o = cast_from_oop<const HeapWord*>(obj);
size_t offset = pointer_delta(o, _bottom);
assert(obj == from_offset(static_cast<OffsetInRegion>(offset)), "must be");
return static_cast<OffsetInRegion>(offset);
}
// 'region_idx' is only retained in debug builds (used by record() to check
// that recorded oops belong to this region); 'bottom' anchors offsets.
G1EvacFailureObjectsSet::G1EvacFailureObjectsSet(uint region_idx, HeapWord* bottom) :
DEBUG_ONLY(_region_idx(region_idx) COMMA)
_bottom(bottom),
_offsets(&_alloc_options, &_free_segment_list) {
// OffsetInRegion is a uint, so region offsets must be representable in 32 bits.
assert(HeapRegion::LogOfHRGrainBytes < 32, "must be");
}
// Helper class to join, sort and iterate over the previously collected segmented
// array of objects that failed evacuation.
class G1EvacFailureObjectsIterationHelper {
typedef G1EvacFailureObjectsSet::OffsetInRegion OffsetInRegion;
G1EvacFailureObjectsSet* _objects_set;
const G1SegmentedArray<OffsetInRegion, mtGC>* _segments;
// Flat copy of every recorded offset; allocated in process_and_drop().
OffsetInRegion* _offset_array;
uint _array_length;
// QuickSort comparator: orders offsets (and thus objects) by ascending address.
static int order_oop(OffsetInRegion a, OffsetInRegion b) {
return static_cast<int>(a-b);
}
// Copies all segments into _offset_array (via do_segment callbacks), then sorts.
void join_and_sort() {
_segments->iterate_segments(*this);
QuickSort::sort(_offset_array, _array_length, order_oop, true);
}
// Applies the closure to each recorded object, in sorted (ascending) order.
void iterate(ObjectClosure* closure) {
for (uint i = 0; i < _array_length; i++) {
oop cur = _objects_set->from_offset(_offset_array[i]);
closure->do_object(cur);
}
}
public:
G1EvacFailureObjectsIterationHelper(G1EvacFailureObjectsSet* collector) :
_objects_set(collector),
_segments(&_objects_set->_offsets),
_offset_array(nullptr),
_array_length(0) { }
// Flattens, sorts and visits all recorded objects; frees the temporary
// array again before returning.
void process_and_drop(ObjectClosure* closure) {
uint num = _segments->num_allocated_slots();
_offset_array = NEW_C_HEAP_ARRAY(OffsetInRegion, num, mtGC);
join_and_sort();
assert(_array_length == num, "must be %u, %u", _array_length, num);
iterate(closure);
FREE_C_HEAP_ARRAY(OffsetInRegion, _offset_array);
}
// Callback of G1SegmentedArray::iterate_segments
void do_segment(G1SegmentedArraySegment<mtGC>* segment, uint length) {
segment->copy_to(&_offset_array[_array_length]);
_array_length += length;
}
};
// Applies 'closure' to all previously recorded objects in ascending address
// order, then empties the set. Asserted to run only at a safepoint.
void G1EvacFailureObjectsSet::process_and_drop(ObjectClosure* closure) {
assert_at_safepoint();
G1EvacFailureObjectsIterationHelper helper(this);
helper.process_and_drop(closure);
// Discard all recorded offsets; the set is empty afterwards.
_offsets.drop_all();
}

View File

@ -1,82 +0,0 @@
/*
* Copyright (c) 2021, Huawei and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1EVACFAILUREOBJECTSSET_HPP
#define SHARE_GC_G1_G1EVACFAILUREOBJECTSSET_HPP
#include "gc/g1/g1SegmentedArray.hpp"
#include "memory/iterator.hpp"
#include "oops/oop.hpp"
class G1EvacFailureObjectsIterationHelper;
// This class collects addresses of objects that failed evacuation in a specific
// heap region.
// Provides sorted iteration of these objects for processing during the remove
// self forwards phase.
class G1EvacFailureObjectsSet {
friend class G1EvacFailureObjectsIterationHelper;
public:
// Storage type of an object that failed evacuation within a region. Given
// heap region size and possible object locations within a region, it is
// sufficient to use an uint here to save some space instead of full pointers.
typedef uint OffsetInRegion;
private:
// Sizing parameters for the backing segmented array (slots per segment and
// slot alignment — units assumed per G1SegmentedArrayAllocOptions; confirm there).
static const uint SegmentLength = 256;
static const uint Alignment = 4;
static const G1SegmentedArrayAllocOptions _alloc_options;
// This free list is shared among the evacuation failure processing of all regions.
static G1SegmentedArrayFreeList<mtGC> _free_segment_list;
// Index of the owning region; debug-only, used by record() to validate oops.
DEBUG_ONLY(const uint _region_idx;)
// Region bottom
const HeapWord* _bottom;
// Offsets within region containing objects that failed evacuation.
G1SegmentedArray<OffsetInRegion, mtGC> _offsets;
void assert_is_valid_offset(size_t offset) const NOT_DEBUG_RETURN;
// Converts between an offset within a region and an oop address.
oop from_offset(OffsetInRegion offset) const;
OffsetInRegion to_offset(oop obj) const;
public:
G1EvacFailureObjectsSet(uint region_idx, HeapWord* bottom);
// Record an object that failed evacuation.
inline void record(oop obj);
// Apply the given ObjectClosure to all objects that failed evacuation and
// empties the list after processing.
// Objects are passed in increasing address order.
void process_and_drop(ObjectClosure* closure);
};
#endif //SHARE_GC_G1_G1EVACFAILUREOBJECTSSET_HPP

View File

@ -1,40 +0,0 @@
/*
* Copyright (c) 2021, Huawei and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1EVACFAILUREOBJECTSSET_INLINE_HPP
#define SHARE_GC_G1_G1EVACFAILUREOBJECTSSET_INLINE_HPP
#include "gc/g1/g1EvacFailureObjectsSet.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1SegmentedArray.inline.hpp"
#include "gc/g1/heapRegion.inline.hpp"
// Records 'obj' as having failed evacuation, stored as a compact word
// offset from the region bottom.
void G1EvacFailureObjectsSet::record(oop obj) {
assert(obj != NULL, "must be");
// Debug check: the object must lie in the region this set covers.
assert(_region_idx == G1CollectedHeap::heap()->heap_region_containing(obj)->hrm_index(), "must be");
OffsetInRegion* e = _offsets.allocate();
*e = to_offset(obj);
}
#endif //SHARE_GC_G1_G1EVACFAILUREOBJECTSSET_INLINE_HPP

View File

@ -58,10 +58,16 @@ public:
}
};
// Clears the mark bit for 'obj' so the bitmap can be reused without a
// separate full clearing pass; only marked (live) objects are compacted.
void G1FullGCCompactTask::G1CompactRegionClosure::clear_in_prev_bitmap(oop obj) {
assert(_bitmap->is_marked(obj), "Should only compact marked objects");
_bitmap->clear(obj);
}
size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
size_t size = obj->size();
if (!obj->is_forwarded()) {
// Object not moving
// Object not moving, but clear the mark to allow reuse of the bitmap.
clear_in_prev_bitmap(obj);
return size;
}
@ -74,6 +80,9 @@ size_t G1FullGCCompactTask::G1CompactRegionClosure::apply(oop obj) {
cast_to_oop(destination)->init_mark();
assert(cast_to_oop(destination)->klass() != NULL, "should have a class");
// Clear the mark for the compacted object to allow reuse of the
// bitmap without an additional clearing step.
clear_in_prev_bitmap(obj);
return size;
}
@ -82,13 +91,15 @@ void G1FullGCCompactTask::compact_region(HeapRegion* hr) {
assert(!hr->is_humongous(), "Should be no humongous regions in compaction queue");
if (!collector()->is_free(hr->hrm_index())) {
// The compaction closure not only copies the object to the new
// location, but also clears the bitmap for it. This is needed
// for bitmap verification and to be able to use the prev_bitmap
// for evacuation failures in the next young collection. Testing
// showed that it was better overall to clear bit by bit, compared
// to clearing the whole region at the end. This difference was
// clearly seen for regions with few marks.
G1CompactRegionClosure compact(collector()->mark_bitmap());
hr->apply_to_marked_objects(collector()->mark_bitmap(), &compact);
// Clear the liveness information for this region if necessary i.e. if we actually look at it
// for bitmap verification. Otherwise it is sufficient that we move the TAMS to bottom().
if (G1VerifyBitmaps) {
collector()->mark_bitmap()->clear_region(hr);
}
}
hr->reset_compacted_after_full_gc();

View File

@ -50,7 +50,7 @@ public:
class G1CompactRegionClosure : public StackObj {
G1CMBitMap* _bitmap;
void clear_in_prev_bitmap(oop object);
public:
G1CompactRegionClosure(G1CMBitMap* bitmap) : _bitmap(bitmap) { }
size_t apply(oop object);

View File

@ -616,9 +616,11 @@ oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m, siz
if (forward_ptr == NULL) {
// Forward-to-self succeeded. We are the "owner" of the object.
HeapRegion* r = _g1h->heap_region_containing(old);
// Records evac failure objs, this will help speed up iteration
// of these objs later in *remove self forward* phase of post evacuation.
r->record_evac_failure_obj(old);
// Objects failing evacuation will turn into old objects since the regions
// are relabeled as such. We mark the failing objects in the prev bitmap and
// later use it to handle all failed objects.
_g1h->mark_evac_failure_object(old, _worker_id);
if (_evac_failure_regions->record(r->hrm_index())) {
_g1h->hr_printer()->evac_failure(r);

View File

@ -1259,6 +1259,46 @@ class G1MergeHeapRootsTask : public WorkerTask {
G1MergeCardSetStats stats() const { return _stats; }
};
// Closure to clear the prev bitmap for any old region in the collection set.
// This is needed to be able to use the bitmap for evacuation failure handling.
class G1ClearBitmapClosure : public HeapRegionClosure {
G1CollectedHeap* _g1h;
// Debug helper: verifies no bit is set in [bottom, end) of the given region.
void assert_bitmap_clear(HeapRegion* hr, const G1CMBitMap* bitmap) {
assert(bitmap->get_next_marked_addr(hr->bottom(), hr->end()) == hr->end(),
"Bitmap should have no mark for young regions");
}
public:
G1ClearBitmapClosure(G1CollectedHeap* g1h) : _g1h(g1h) { }
bool do_heap_region(HeapRegion* hr) {
assert(_g1h->is_in_cset(hr), "Should only be used iterating the collection set");
// Young regions should always have cleared bitmaps, so only clear old.
if (hr->is_old()) {
_g1h->clear_prev_bitmap_for_region(hr);
} else {
assert(hr->is_young(), "Should only be young and old regions in collection set");
assert_bitmap_clear(hr, _g1h->concurrent_mark()->prev_mark_bitmap());
}
// Returning false continues the collection-set iteration.
return false;
}
};
// Helper that applies two closures to each region while
// iterating through the collection set.
class G1CombinedClosure : public HeapRegionClosure {
HeapRegionClosure* _closure1;
HeapRegionClosure* _closure2;
public:
G1CombinedClosure(HeapRegionClosure* cl1, HeapRegionClosure* cl2) :
_closure1(cl1),
_closure2(cl2) { }
bool do_heap_region(HeapRegion* hr) {
// Stops the iteration as soon as either closure requests it. Note the
// short-circuit: _closure2 is not applied to a region for which
// _closure1 already returned true.
return _closure1->do_heap_region(hr) ||
_closure2->do_heap_region(hr);
}
};
// Visitor for the remembered sets of humongous candidate regions to merge their
// remembered set into the card table.
class G1FlushHumongousCandidateRemSets : public HeapRegionClosure {
@ -1426,9 +1466,12 @@ public:
G1GCParPhaseTimesTracker x(p, merge_remset_phase, worker_id, !_initial_evacuation /* allow_multiple_record */);
G1MergeCardSetStats stats;
{
G1MergeCardSetClosure cl(_scan_state);
g1h->collection_set_iterate_increment_from(&cl, &_hr_claimer, worker_id);
stats = cl.stats();
G1MergeCardSetClosure merge(_scan_state);
G1ClearBitmapClosure clear(g1h);
G1CombinedClosure combined(&merge, &clear);
g1h->collection_set_iterate_increment_from(&combined, &_hr_claimer, worker_id);
stats = merge.stats();
}
for (uint i = 0; i < G1GCPhaseTimes::MergeRSContainersSentinel; i++) {

View File

@ -107,10 +107,6 @@ void HeapRegion::handle_evacuation_failure() {
_next_marked_bytes = 0;
}
// Applies 'closure' to all recorded evac-failure objects in this region and
// empties the set; forwards to G1EvacFailureObjectsSet::process_and_drop.
void HeapRegion::process_and_drop_evac_failure_objs(ObjectClosure* closure) {
_evac_failure_objs.process_and_drop(closure);
}
void HeapRegion::unlink_from_list() {
set_next(NULL);
set_prev(NULL);
@ -246,8 +242,7 @@ HeapRegion::HeapRegion(uint hrm_index,
_prev_marked_bytes(0), _next_marked_bytes(0),
_young_index_in_cset(-1),
_surv_rate_group(NULL), _age_index(G1SurvRateGroup::InvalidAgeIndex), _gc_efficiency(-1.0),
_node_index(G1NUMA::UnknownNodeIndex),
_evac_failure_objs(hrm_index, _bottom)
_node_index(G1NUMA::UnknownNodeIndex)
{
assert(Universe::on_page_boundary(mr.start()) && Universe::on_page_boundary(mr.end()),
"invalid space boundaries");

View File

@ -26,7 +26,6 @@
#define SHARE_GC_G1_HEAPREGION_HPP
#include "gc/g1/g1BlockOffsetTable.hpp"
#include "gc/g1/g1EvacFailureObjectsSet.hpp"
#include "gc/g1/g1HeapRegionTraceType.hpp"
#include "gc/g1/g1SurvRateGroup.hpp"
#include "gc/g1/heapRegionTracer.hpp"
@ -268,8 +267,6 @@ private:
uint _node_index;
G1EvacFailureObjectsSet _evac_failure_objs;
void report_region_type_change(G1HeapRegionTraceType::Type to);
// Returns whether the given object address refers to a dead object, and either the
@ -566,11 +563,6 @@ public:
// Update the region state after a failed evacuation.
void handle_evacuation_failure();
// Record an object that failed evacuation within this region.
void record_evac_failure_obj(oop obj);
// Applies the given closure to all previously recorded objects
// that failed evacuation in ascending address order.
void process_and_drop_evac_failure_objs(ObjectClosure* closure);
// Iterate over the objects overlapping the given memory region, applying cl
// to all references in the region. This is a helper for

View File

@ -30,7 +30,6 @@
#include "gc/g1/g1BlockOffsetTable.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ConcurrentMarkBitMap.inline.hpp"
#include "gc/g1/g1EvacFailureObjectsSet.inline.hpp"
#include "gc/g1/g1Predictions.hpp"
#include "gc/g1/g1SegmentedArray.inline.hpp"
#include "oops/oop.inline.hpp"
@ -439,8 +438,4 @@ inline void HeapRegion::record_surv_words_in_group(size_t words_survived) {
_surv_rate_group->record_surviving_words(age_in_group, words_survived);
}
// Records an object in this region that failed evacuation; forwards to the
// per-region G1EvacFailureObjectsSet.
inline void HeapRegion::record_evac_failure_obj(oop obj) {
_evac_failure_objs.record(obj);
}
#endif // SHARE_GC_G1_HEAPREGION_INLINE_HPP