Mirror of https://github.com/openjdk/jdk.git (synced 2026-04-18 19:00:28 +00:00)

8071280: Specialize HeapRegion::oops_on_card_seq_iterate_careful() for use during concurrent refinement and updating the rset
Reviewed-by: kbarrett, sangheki, ehelin

Commit: 92a1acb43b
Parent: 8ce49ec34e
@@ -1348,7 +1348,6 @@ public:
   // bitmap off to the side.
   void doConcurrentMark();
 
-  bool isMarkedPrev(oop obj) const;
   bool isMarkedNext(oop obj) const;
 
   // Determine if an object is dead, given the object and also
@@ -1357,8 +1356,7 @@ public:
   // is not marked, and c) it is not in an archive region.
   bool is_obj_dead(const oop obj, const HeapRegion* hr) const {
     return
-      !hr->obj_allocated_since_prev_marking(obj) &&
-      !isMarkedPrev(obj) &&
+      hr->is_obj_dead(obj, _cm->prevMarkBitMap()) &&
       !hr->is_archive();
   }
 
@@ -134,10 +134,6 @@ inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
   return _task_queues->queue(i);
 }
 
-inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
-  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
-}
-
 inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
   return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
 }
@@ -686,9 +686,12 @@ bool G1RemSet::refine_card(jbyte* card_ptr,
                                            worker_i);
   update_rs_oop_cl.set_from(r);
 
-  bool card_processed =
-    r->oops_on_card_seq_iterate_careful(dirty_region,
-                                        &update_rs_oop_cl);
+  bool card_processed;
+  if (_g1->is_gc_active()) {
+    card_processed = r->oops_on_card_seq_iterate_careful<true>(dirty_region, &update_rs_oop_cl);
+  } else {
+    card_processed = r->oops_on_card_seq_iterate_careful<false>(dirty_region, &update_rs_oop_cl);
+  }
 
   // If unable to process the card then we encountered an unparsable
   // part of the heap (e.g. a partially allocated object) while
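The hunk above replaces the single call with a branch that bakes the result of is_gc_active() into a template argument, so each instantiation of oops_on_card_seq_iterate_careful() sees the flag as a compile-time constant rather than re-testing it per object. A minimal, self-contained sketch of that dispatch pattern follows; the names in it are illustrative only and do not come from the patch.

#include <cstdio>

// Hypothetical worker: the bool template parameter stands in for is_gc_active,
// so branches on it inside hot loops fold away in each instantiation.
template <bool gc_active>
static int process_card(int value) {
  if (gc_active) {          // compile-time constant here
    return value * 2;       // "at a safepoint" path
  } else {
    return value + 1;       // "concurrent refinement" path
  }
}

int main() {
  bool at_safepoint = false;                      // runtime state, tested once
  int r = at_safepoint ? process_card<true>(21)   // one branch at the call site
                       : process_card<false>(21); // selects the specialized body
  std::printf("%d\n", r);
  return 0;
}

The call site pays for the runtime check once and then runs a body in which the flag is a constant, which is exactly the shape refine_card() takes after this change.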
@@ -349,112 +349,6 @@ void HeapRegion::note_self_forwarding_removal_end(size_t marked_bytes) {
   _prev_marked_bytes = marked_bytes;
 }
 
-// Humongous objects are allocated directly in the old-gen. Need
-// special handling for concurrent processing encountering an
-// in-progress allocation.
-static bool do_oops_on_card_in_humongous(MemRegion mr,
-                                         G1UpdateRSOrPushRefOopClosure* cl,
-                                         HeapRegion* hr,
-                                         G1CollectedHeap* g1h) {
-  assert(hr->is_humongous(), "precondition");
-  HeapRegion* sr = hr->humongous_start_region();
-  oop obj = oop(sr->bottom());
-
-  // If concurrent and klass_or_null is NULL, then space has been
-  // allocated but the object has not yet been published by setting
-  // the klass. That can only happen if the card is stale. However,
-  // we've already set the card clean, so we must return failure,
-  // since the allocating thread could have performed a write to the
-  // card that might be missed otherwise.
-  if (!g1h->is_gc_active() && (obj->klass_or_null_acquire() == NULL)) {
-    return false;
-  }
-
-  // We have a well-formed humongous object at the start of sr.
-  // Only filler objects follow a humongous object in the containing
-  // regions, and we can ignore those. So only process the one
-  // humongous object.
-  if (!g1h->is_obj_dead(obj, sr)) {
-    if (obj->is_objArray() || (sr->bottom() < mr.start())) {
-      // objArrays are always marked precisely, so limit processing
-      // with mr. Non-objArrays might be precisely marked, and since
-      // it's humongous it's worthwhile avoiding full processing.
-      // However, the card could be stale and only cover filler
-      // objects. That should be rare, so not worth checking for;
-      // instead let it fall out from the bounded iteration.
-      obj->oop_iterate(cl, mr);
-    } else {
-      // If obj is not an objArray and mr contains the start of the
-      // obj, then this could be an imprecise mark, and we need to
-      // process the entire object.
-      obj->oop_iterate(cl);
-    }
-  }
-  return true;
-}
-
-bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
-                                                  G1UpdateRSOrPushRefOopClosure* cl) {
-  assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-
-  // Special handling for humongous regions.
-  if (is_humongous()) {
-    return do_oops_on_card_in_humongous(mr, cl, this, g1h);
-  }
-  assert(is_old(), "precondition");
-
-  // Because mr has been trimmed to what's been allocated in this
-  // region, the parts of the heap that are examined here are always
-  // parsable; there's no need to use klass_or_null to detect
-  // in-progress allocation.
-
-  // Cache the boundaries of the memory region in some const locals
-  HeapWord* const start = mr.start();
-  HeapWord* const end = mr.end();
-
-  // Find the obj that extends onto mr.start().
-  // Update BOT as needed while finding start of (possibly dead)
-  // object containing the start of the region.
-  HeapWord* cur = block_start(start);
-
-#ifdef ASSERT
-  {
-    assert(cur <= start,
-           "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start));
-    HeapWord* next = cur + block_size(cur);
-    assert(start < next,
-           "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next));
-  }
-#endif
-
-  do {
-    oop obj = oop(cur);
-    assert(obj->is_oop(true), "Not an oop at " PTR_FORMAT, p2i(cur));
-    assert(obj->klass_or_null() != NULL,
-           "Unparsable heap at " PTR_FORMAT, p2i(cur));
-
-    if (g1h->is_obj_dead(obj, this)) {
-      // Carefully step over dead object.
-      cur += block_size(cur);
-    } else {
-      // Step over live object, and process its references.
-      cur += obj->size();
-      // Non-objArrays are usually marked imprecise at the object
-      // start, in which case we need to iterate over them in full.
-      // objArrays are precisely marked, but can still be iterated
-      // over in full if completely covered.
-      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
-        obj->oop_iterate(cl);
-      } else {
-        obj->oop_iterate(cl, mr);
-      }
-    }
-  } while (cur < end);
-
-  return true;
-}
-
 // Code roots support
 
 void HeapRegion::add_strong_code_root(nmethod* nm) {
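The klass_or_null comment in the removed block above (and in its templated replacement later in this change) describes a publication protocol: the allocating thread initializes the object first and installs the klass pointer last, so a concurrent reader that sees a non-NULL klass via an acquire load also sees a fully initialized object, while a NULL klass means "not yet published, back off". A rough standalone illustration of that release/acquire ordering with std::atomic follows; the types are hypothetical, not HotSpot code.

#include <atomic>
#include <cstddef>

struct Klass;                          // stand-in for the real metadata type

struct FakeObject {
  std::atomic<Klass*> klass{nullptr};  // published last
  size_t payload = 0;                  // initialized before publication
};

// Writer: initialize everything, then publish with a release store,
// mirroring the "set the klass last" step the comment refers to.
void publish(FakeObject* obj, Klass* k) {
  obj->payload = 42;
  obj->klass.store(k, std::memory_order_release);
}

// Reader: acquire-load the klass; if it is still nullptr the object is not
// yet published and the caller must give up (the "return false" above).
bool try_read(const FakeObject* obj, size_t* out) {
  if (obj->klass.load(std::memory_order_acquire) == nullptr) {
    return false;                      // unpublished: card stays unprocessed
  }
  *out = obj->payload;                 // safe: release/acquire orders the writes
  return true;
}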
@@ -56,6 +56,7 @@
 // room for filler objects to pad out to the end of the region.
 
 class G1CollectedHeap;
+class G1CMBitMapRO;
 class HeapRegionRemSet;
 class HeapRegionRemSetIterator;
 class HeapRegion;
@@ -248,6 +249,13 @@ class HeapRegion: public G1ContiguousSpace {
 
   void report_region_type_change(G1HeapRegionTraceType::Type to);
 
+  // Returns whether the given object address refers to a dead object, and either the
+  // size of the object (if live) or the size of the block (if dead) in size.
+  // May
+  // - only called with obj < top()
+  // - not called on humongous objects or archive regions
+  inline bool is_obj_dead_with_size(const oop obj, G1CMBitMapRO* prev_bitmap, size_t* size) const;
+
  protected:
   // The index of this region in the heap region sequence.
   uint _hrm_index;
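The size out-parameter is what lets the card-scanning loop later in this change advance block by block with a single liveness-plus-size query per block, whether the block is a live object or a dead stretch. A toy sketch of that calling shape follows; the function names and the liveness rule are fabricated for illustration.

#include <cstddef>

// Stand-in for is_obj_dead_with_size(): report liveness and write the block
// size (object size if live, dead-block size if dead) through *size.
static bool is_dead_with_size(const unsigned char* addr, size_t* size) {
  *size = 16;                      // pretend every block is 16 bytes
  return *addr == 0;               // pretend a zero first byte means "dead"
}

// Walk [start, end) one block at a time, the same shape as the do/while loop
// in the templated oops_on_card_seq_iterate_careful() later in this change.
static size_t count_live_blocks(const unsigned char* start, const unsigned char* end) {
  size_t live = 0;
  const unsigned char* cur = start;
  do {
    size_t size;
    bool dead = is_dead_with_size(cur, &size);
    cur += size;                   // advance by the reported size either way
    if (!dead) {
      live++;                      // only live blocks get processed further
    }
  } while (cur < end);
  return live;
}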
@@ -311,6 +319,18 @@ class HeapRegion: public G1ContiguousSpace {
   // for the collection set.
   double _predicted_elapsed_time_ms;
 
+  // Iterate over the references in a humongous objects and apply the given closure
+  // to them.
+  // Humongous objects are allocated directly in the old-gen. So we need special
+  // handling for concurrent processing encountering an in-progress allocation.
+  template <class Closure, bool is_gc_active>
+  inline bool do_oops_on_card_in_humongous(MemRegion mr,
+                                           Closure* cl,
+                                           G1CollectedHeap* g1h);
+
+  // Returns the block size of the given (dead, potentially having its class unloaded) object
+  // starting at p extending to at most the prev TAMS using the given mark bitmap.
+  inline size_t block_size_using_bitmap(const HeapWord* p, const G1CMBitMapRO* prev_bitmap) const;
  public:
   HeapRegion(uint hrm_index,
              G1BlockOffsetTable* bot,
@@ -357,6 +377,9 @@ class HeapRegion: public G1ContiguousSpace {
   // All allocated blocks are occupied by objects in a HeapRegion
   bool block_is_obj(const HeapWord* p) const;
 
+  // Returns whether the given object is dead based on TAMS and bitmap.
+  bool is_obj_dead(const oop obj, const G1CMBitMapRO* prev_bitmap) const;
+
   // Returns the object size for all valid block starts
   // and the amount of unallocated words if called on top()
   size_t block_size(const HeapWord* p) const;
@@ -652,16 +675,16 @@ class HeapRegion: public G1ContiguousSpace {
 
   // Iterate over the objects overlapping part of a card, applying cl
   // to all references in the region. This is a helper for
-  // G1RemSet::refine_card, and is tightly coupled with it.
-  // mr: the memory region covered by the card, trimmed to the
+  // G1RemSet::refine_card*, and is tightly coupled with them.
+  // mr is the memory region covered by the card, trimmed to the
   // allocated space for this region. Must not be empty.
   // This region must be old or humongous.
   // Returns true if the designated objects were successfully
   // processed, false if an unparsable part of the heap was
   // encountered; that only happens when invoked concurrently with the
   // mutator.
-  bool oops_on_card_seq_iterate_careful(MemRegion mr,
-                                        G1UpdateRSOrPushRefOopClosure* cl);
+  template <bool is_gc_active, class Closure>
+  inline bool oops_on_card_seq_iterate_careful(MemRegion mr, Closure* cl);
 
   size_t recorded_rs_length() const { return _recorded_rs_length; }
   double predicted_elapsed_time_ms() const { return _predicted_elapsed_time_ms; }
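Turning the closure argument into a template parameter means the region code no longer names one specific closure class (the old signature was hard-wired to G1UpdateRSOrPushRefOopClosure); any closure type that oop_iterate() accepts will do, and the calls resolve at compile time. A generic sketch of that shape follows, using invented types rather than the HotSpot closure hierarchy.

#include <cstddef>

// A pretend "object" that hands each of its references to a closure.
struct FakeObj {
  void* refs[4] = {nullptr, nullptr, nullptr, nullptr};

  // Analogue of oop_iterate(cl): works for any type providing do_oop().
  template <class Closure>
  void oop_iterate(Closure* cl) {
    for (void*& r : refs) {
      cl->do_oop(&r);
    }
  }
};

// One possible closure: counts non-null references.
struct CountClosure {
  size_t count = 0;
  void do_oop(void** p) { if (*p != nullptr) count++; }
};

// Analogue of the templated iteration entry point: the closure type is a
// template parameter, so different callers can pass different closures
// without this code naming any of them.
template <class Closure>
void iterate_region(FakeObj* objs, size_t n, Closure* cl) {
  for (size_t i = 0; i < n; i++) {
    objs[i].oop_iterate(cl);
  }
}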
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -112,6 +112,24 @@ G1ContiguousSpace::block_start_const(const void* p) const {
   return _bot_part.block_start_const(p);
 }
 
+inline bool HeapRegion::is_obj_dead_with_size(const oop obj, G1CMBitMapRO* prev_bitmap, size_t* size) const {
+  HeapWord* addr = (HeapWord*) obj;
+
+  assert(addr < top(), "must be");
+  assert(!is_archive(), "Archive regions should not have references into interesting regions.");
+  assert(!is_humongous(), "Humongous objects not handled here");
+  bool obj_is_dead = is_obj_dead(obj, prev_bitmap);
+
+  if (ClassUnloadingWithConcurrentMark && obj_is_dead) {
+    assert(!block_is_obj(addr), "must be");
+    *size = block_size_using_bitmap(addr, prev_bitmap);
+  } else {
+    assert(block_is_obj(addr), "must be");
+    *size = obj->size();
+  }
+  return obj_is_dead;
+}
+
 inline bool
 HeapRegion::block_is_obj(const HeapWord* p) const {
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
@@ -126,8 +144,27 @@ HeapRegion::block_is_obj(const HeapWord* p) const {
   return p < top();
 }
 
-inline size_t
-HeapRegion::block_size(const HeapWord *addr) const {
+inline size_t HeapRegion::block_size_using_bitmap(const HeapWord* addr, const G1CMBitMapRO* prev_bitmap) const {
+  assert(ClassUnloadingWithConcurrentMark,
+         "All blocks should be objects if class unloading isn't used, so this method should not be called. "
+         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
+         "addr: " PTR_FORMAT,
+         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));
+
+  // Old regions' dead objects may have dead classes
+  // We need to find the next live object using the bitmap
+  HeapWord* next = prev_bitmap->getNextMarkedWordAddress(addr, prev_top_at_mark_start());
+
+  assert(next > addr, "must get the next live object");
+  return pointer_delta(next, addr);
+}
+
+inline bool HeapRegion::is_obj_dead(const oop obj, const G1CMBitMapRO* prev_bitmap) const {
+  assert(is_in_reserved(obj), "Object " PTR_FORMAT " must be in region", p2i(obj));
+  return !obj_allocated_since_prev_marking(obj) && !prev_bitmap->isMarked((HeapWord*)obj);
+}
+
+inline size_t HeapRegion::block_size(const HeapWord *addr) const {
   if (addr == top()) {
     return pointer_delta(end(), addr);
   }
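block_size_using_bitmap() measures a dead block as the distance from addr to the next bit set in the previous marking bitmap, bounded by the previous TAMS, with pointer_delta() expressing that distance in HeapWords. A toy version of the same scan over a plain bool array follows; it is illustrative only, since the real G1CMBitMapRO works on heap addresses.

#include <cstddef>

// Find the first marked index in [from, limit), or limit if none is marked.
// Rough analogue of getNextMarkedWordAddress(addr, prev_top_at_mark_start()).
static size_t next_marked_index(const bool* marks, size_t from, size_t limit) {
  for (size_t i = from; i < limit; i++) {
    if (marks[i]) {
      return i;
    }
  }
  return limit;
}

// The dead "block size" starting at addr_index is the distance to the next
// marked index, the counterpart of pointer_delta(next, addr).
static size_t dead_block_size(const bool* marks, size_t addr_index, size_t limit) {
  return next_marked_index(marks, addr_index, limit) - addr_index;
}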
@@ -136,21 +173,7 @@ HeapRegion::block_size(const HeapWord *addr) const {
     return oop(addr)->size();
   }
 
-  assert(ClassUnloadingWithConcurrentMark,
-         "All blocks should be objects if G1 Class Unloading isn't used. "
-         "HR: [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ") "
-         "addr: " PTR_FORMAT,
-         p2i(bottom()), p2i(top()), p2i(end()), p2i(addr));
-
-  // Old regions' dead objects may have dead classes
-  // We need to find the next live object in some other
-  // manner than getting the oop size
-  G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  HeapWord* next = g1h->concurrent_mark()->prevMarkBitMap()->
-      getNextMarkedWordAddress(addr, prev_top_at_mark_start());
-
-  assert(next > addr, "must get the next live object");
-  return pointer_delta(next, addr);
+  return block_size_using_bitmap(addr, G1CollectedHeap::heap()->concurrent_mark()->prevMarkBitMap());
 }
 
 inline HeapWord* HeapRegion::par_allocate_no_bot_updates(size_t min_word_size,
@@ -230,4 +253,110 @@ inline bool HeapRegion::in_collection_set() const {
   return G1CollectedHeap::heap()->is_in_cset(this);
 }
 
+template <class Closure, bool is_gc_active>
+bool HeapRegion::do_oops_on_card_in_humongous(MemRegion mr,
+                                              Closure* cl,
+                                              G1CollectedHeap* g1h) {
+  assert(is_humongous(), "precondition");
+  HeapRegion* sr = humongous_start_region();
+  oop obj = oop(sr->bottom());
+
+  // If concurrent and klass_or_null is NULL, then space has been
+  // allocated but the object has not yet been published by setting
+  // the klass. That can only happen if the card is stale. However,
+  // we've already set the card clean, so we must return failure,
+  // since the allocating thread could have performed a write to the
+  // card that might be missed otherwise.
+  if (!is_gc_active && (obj->klass_or_null_acquire() == NULL)) {
+    return false;
+  }
+
+  // We have a well-formed humongous object at the start of sr.
+  // Only filler objects follow a humongous object in the containing
+  // regions, and we can ignore those. So only process the one
+  // humongous object.
+  if (!g1h->is_obj_dead(obj, sr)) {
+    if (obj->is_objArray() || (sr->bottom() < mr.start())) {
+      // objArrays are always marked precisely, so limit processing
+      // with mr. Non-objArrays might be precisely marked, and since
+      // it's humongous it's worthwhile avoiding full processing.
+      // However, the card could be stale and only cover filler
+      // objects. That should be rare, so not worth checking for;
+      // instead let it fall out from the bounded iteration.
+      obj->oop_iterate(cl, mr);
+    } else {
+      // If obj is not an objArray and mr contains the start of the
+      // obj, then this could be an imprecise mark, and we need to
+      // process the entire object.
+      obj->oop_iterate(cl);
+    }
+  }
+  return true;
+}
+
+template <bool is_gc_active, class Closure>
+bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
+                                                  Closure* cl) {
+  assert(MemRegion(bottom(), end()).contains(mr), "Card region not in heap region");
+  G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+  // Special handling for humongous regions.
+  if (is_humongous()) {
+    return do_oops_on_card_in_humongous<Closure, is_gc_active>(mr, cl, g1h);
+  }
+  assert(is_old(), "precondition");
+
+  // Because mr has been trimmed to what's been allocated in this
+  // region, the parts of the heap that are examined here are always
+  // parsable; there's no need to use klass_or_null to detect
+  // in-progress allocation.
+
+  // Cache the boundaries of the memory region in some const locals
+  HeapWord* const start = mr.start();
+  HeapWord* const end = mr.end();
+
+  // Find the obj that extends onto mr.start().
+  // Update BOT as needed while finding start of (possibly dead)
+  // object containing the start of the region.
+  HeapWord* cur = block_start(start);
+
+#ifdef ASSERT
+  {
+    assert(cur <= start,
+           "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start));
+    HeapWord* next = cur + block_size(cur);
+    assert(start < next,
+           "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next));
+  }
+#endif
+
+  G1CMBitMapRO* bitmap = g1h->concurrent_mark()->prevMarkBitMap();
+  do {
+    oop obj = oop(cur);
+    assert(obj->is_oop(true), "Not an oop at " PTR_FORMAT, p2i(cur));
+    assert(obj->klass_or_null() != NULL,
+           "Unparsable heap at " PTR_FORMAT, p2i(cur));
+
+    size_t size;
+    bool is_dead = is_obj_dead_with_size(obj, bitmap, &size);
+
+    cur += size;
+    if (!is_dead) {
+      // Process live object's references.
+
+      // Non-objArrays are usually marked imprecise at the object
+      // start, in which case we need to iterate over them in full.
+      // objArrays are precisely marked, but can still be iterated
+      // over in full if completely covered.
+      if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
+        obj->oop_iterate(cl);
+      } else {
+        obj->oop_iterate(cl, mr);
+      }
+    }
+  } while (cur < end);
+
+  return true;
+}
+
 #endif // SHARE_VM_GC_G1_HEAPREGION_INLINE_HPP