8166607: G1 needs klass_or_null_acquire
Use acquire where needed, split refinement humongous and non to simplify.
Reviewed-by: tschatzl, ehelin
parent 1170f8e5c8
commit 213906ee83
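The change hinges on the standard release/acquire publication idiom: the allocating thread initializes an object and then publishes it by storing the klass pointer with release semantics, so a concurrent refinement thread must read the klass with acquire semantics and treat NULL as "allocation still in progress". The sketch below is not HotSpot code; it uses std::atomic and made-up names (FakeObj, allocate_and_publish, try_examine) purely to illustrate that ordering argument.

// Illustrative only: a writer publishes an object by release-storing a
// non-null "klass" pointer after initializing the payload; a reader
// acquire-loads the pointer and backs off on NULL, just as the G1
// refinement code returns failure for an in-progress allocation.
#include <atomic>
#include <cassert>
#include <thread>

struct FakeObj {
  int payload = 0;                          // initialized before publication
  std::atomic<const char*> klass{nullptr};  // NULL until published
};

void allocate_and_publish(FakeObj& o) {
  o.payload = 42;                                         // plain stores first
  o.klass.store("SomeKlass", std::memory_order_release);  // publish last
}

bool try_examine(const FakeObj& o) {
  // Analogue of klass_or_null_acquire(): the acquire load pairs with the
  // writer's release store, so a non-NULL klass guarantees the payload
  // stores are visible to this thread.
  if (o.klass.load(std::memory_order_acquire) == nullptr) {
    return false;  // not yet published; caller must retry or defer
  }
  assert(o.payload == 42);
  return true;
}

int main() {
  FakeObj o;
  std::thread t(allocate_and_publish, std::ref(o));
  while (!try_examine(o)) { /* spin until published */ }
  t.join();
  return 0;
}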
@@ -227,7 +227,7 @@ HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr_slow(HeapWord
   while (n <= next_boundary) {
     q = n;
     oop obj = oop(q);
-    if (obj->klass_or_null() == NULL) return q;
+    if (obj->klass_or_null_acquire() == NULL) return q;
     n += block_size(q);
   }
   assert(q <= next_boundary && n > next_boundary, "Consequence of loop");
@@ -136,7 +136,7 @@ inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr_const(
   while (n <= addr) {
     q = n;
     oop obj = oop(q);
-    if (obj->klass_or_null() == NULL) {
+    if (obj->klass_or_null_acquire() == NULL) {
       return q;
     }
     n += block_size(q);
@@ -148,7 +148,7 @@ inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr_const(
 
 inline HeapWord* G1BlockOffsetTablePart::forward_to_block_containing_addr(HeapWord* q,
                                                                           const void* addr) {
-  if (oop(q)->klass_or_null() == NULL) {
+  if (oop(q)->klass_or_null_acquire() == NULL) {
     return q;
   }
   HeapWord* n = q + block_size(q);
@@ -675,9 +675,10 @@ bool G1RemSet::refine_card(jbyte* card_ptr,
                                           card_ptr);
 
   // If unable to process the card then we encountered an unparsable
-  // part of the heap (e.g. a partially allocated object). Redirty
-  // and re-enqueue: if we put off the card until a GC pause, then the
-  // allocation will have completed.
+  // part of the heap (e.g. a partially allocated object) while
+  // processing a stale card. Despite the card being stale, redirty
+  // and re-enqueue, because we've already cleaned the card. Without
+  // this we could incorrectly discard a non-stale card.
   if (!card_processed) {
     assert(!_g1->is_gc_active(), "Unparsable heap during GC");
     // The card might have gotten re-dirtied and re-enqueued while we
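The comment above encodes a small protocol between the mutator and the refinement thread: the refinement thread cleans the card, issues a StoreLoad barrier, and only then examines the heap; if that examination fails on an unparsable (in-progress) allocation, the already-cleaned card must be redirtied and re-enqueued, or a genuine update recorded on it could be lost. Below is a minimal sketch of that control flow with std::atomic stand-ins; refine_card_sketch and scan_card are hypothetical names, not HotSpot's card table or DirtyCardQueue API.

// Illustrative only: hypothetical names, not the HotSpot card-table API.
#include <atomic>

enum { kCleanCard = 0, kDirtyCard = 1 };

// 'scan_card' stands in for the heap scan (oops_on_card_seq_iterate_careful
// in the patch); it returns false if it hits an unparsable part of the heap.
bool refine_card_sketch(std::atomic<int>& card, bool (*scan_card)()) {
  if (card.load(std::memory_order_relaxed) != kDirtyCard) {
    return true;                                  // stale card, nothing to do
  }
  card.store(kCleanCard, std::memory_order_relaxed);
  // Analogue of OrderAccess::storeload(): the clean must be ordered before
  // the reads of the heap words covered by the card.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  if (!scan_card()) {
    // We already cleaned the card, so dropping it now could discard a
    // real update; redirty it so it is processed again later.
    card.store(kDirtyCard, std::memory_order_relaxed);
    return false;
  }
  return true;
}

int main() {
  std::atomic<int> card{kDirtyCard};
  return refine_card_sketch(card, [] { return true; }) ? 0 : 1;
}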
@@ -352,6 +352,49 @@ void HeapRegion::note_self_forwarding_removal_end(bool during_initial_mark,
   _prev_marked_bytes = marked_bytes;
 }
 
+// Humongous objects are allocated directly in the old-gen. Need
+// special handling for concurrent processing encountering an
+// in-progress allocation.
+static bool do_oops_on_card_in_humongous(MemRegion mr,
+                                         FilterOutOfRegionClosure* cl,
+                                         HeapRegion* hr,
+                                         G1CollectedHeap* g1h) {
+  assert(hr->is_humongous(), "precondition");
+  HeapRegion* sr = hr->humongous_start_region();
+  oop obj = oop(sr->bottom());
+
+  // If concurrent and klass_or_null is NULL, then space has been
+  // allocated but the object has not yet been published by setting
+  // the klass. That can only happen if the card is stale. However,
+  // we've already set the card clean, so we must return failure,
+  // since the allocating thread could have performed a write to the
+  // card that might be missed otherwise.
+  if (!g1h->is_gc_active() && (obj->klass_or_null_acquire() == NULL)) {
+    return false;
+  }
+
+  // Only filler objects follow a humongous object in the containing
+  // regions, and we can ignore those. So only process the one
+  // humongous object.
+  if (!g1h->is_obj_dead(obj, sr)) {
+    if (obj->is_objArray() || (sr->bottom() < mr.start())) {
+      // objArrays are always marked precisely, so limit processing
+      // with mr. Non-objArrays might be precisely marked, and since
+      // it's humongous it's worthwhile avoiding full processing.
+      // However, the card could be stale and only cover filler
+      // objects. That should be rare, so not worth checking for;
+      // instead let it fall out from the bounded iteration.
+      obj->oop_iterate(cl, mr);
+    } else {
+      // If obj is not an objArray and mr contains the start of the
+      // obj, then this could be an imprecise mark, and we need to
+      // process the entire object.
+      obj->oop_iterate(cl);
+    }
+  }
+  return true;
+}
+
 bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
                                                   FilterOutOfRegionClosure* cl,
                                                   jbyte* card_ptr) {
@@ -369,7 +412,6 @@ bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
   if (mr.is_empty()) {
     return true;
   }
-  // Otherwise, find the obj that extends onto mr.start().
 
   // The intersection of the incoming mr (for the card) and the
   // allocated part of the region is non-empty. This implies that
@@ -387,54 +429,52 @@ bool HeapRegion::oops_on_card_seq_iterate_careful(MemRegion mr,
   // We must complete this write before we do any of the reads below.
   OrderAccess::storeload();
 
+  // Special handling for humongous regions.
+  if (is_humongous()) {
+    return do_oops_on_card_in_humongous(mr, cl, this, g1h);
+  }
+
+  // During GC we limit mr by scan_top. So we never get here with an
+  // mr covering objects allocated during GC. Non-humongous objects
+  // are only allocated in the old-gen during GC. So the parts of the
+  // heap that may be examined here are always parsable; there's no
+  // need to use klass_or_null here to detect in-progress allocations.
+
   // Cache the boundaries of the memory region in some const locals
   HeapWord* const start = mr.start();
   HeapWord* const end = mr.end();
 
-  // Update BOT as needed while finding start of (potential) object.
+  // Find the obj that extends onto mr.start().
+  // Update BOT as needed while finding start of (possibly dead)
+  // object containing the start of the region.
   HeapWord* cur = block_start(start);
-  assert(cur <= start, "Postcondition");
 
-  oop obj;
-
-  HeapWord* next = cur;
-  do {
-    cur = next;
-    obj = oop(cur);
-    if (obj->klass_or_null() == NULL) {
-      // Ran into an unparseable point.
-      assert(!g1h->is_gc_active(),
-             "Unparsable heap during GC at " PTR_FORMAT, p2i(cur));
-      return false;
-    }
-    // Otherwise...
-    next = cur + block_size(cur);
-  } while (next <= start);
-
-  // If we finish the above loop...We have a parseable object that
-  // begins on or before the start of the memory region, and ends
-  // inside or spans the entire region.
-  assert(cur <= start, "Loop postcondition");
-  assert(obj->klass_or_null() != NULL, "Loop postcondition");
+#ifdef ASSERT
+  {
+    assert(cur <= start,
+           "cur: " PTR_FORMAT ", start: " PTR_FORMAT, p2i(cur), p2i(start));
+    HeapWord* next = cur + block_size(cur);
+    assert(start < next,
+           "start: " PTR_FORMAT ", next: " PTR_FORMAT, p2i(start), p2i(next));
+  }
+#endif
 
   do {
-    obj = oop(cur);
-    assert((cur + block_size(cur)) > (HeapWord*)obj, "Loop invariant");
-    if (obj->klass_or_null() == NULL) {
-      // Ran into an unparseable point.
-      assert(!g1h->is_gc_active(),
-             "Unparsable heap during GC at " PTR_FORMAT, p2i(cur));
-      return false;
-    }
+    oop obj = oop(cur);
+    assert(obj->is_oop(true), "Not an oop at " PTR_FORMAT, p2i(cur));
+    assert(obj->klass_or_null() != NULL,
+           "Unparsable heap at " PTR_FORMAT, p2i(cur));
 
-    // Advance the current pointer. "obj" still points to the object to iterate.
-    cur = cur + block_size(cur);
-
-    if (!g1h->is_obj_dead(obj)) {
-      // Non-objArrays are sometimes marked imprecise at the object start. We
-      // always need to iterate over them in full.
-      // We only iterate over object arrays in full if they are completely contained
-      // in the memory region.
+    if (g1h->is_obj_dead(obj, this)) {
+      // Carefully step over dead object.
+      cur += block_size(cur);
+    } else {
+      // Step over live object, and process its references.
+      cur += obj->size();
+      // Non-objArrays are usually marked imprecise at the object
+      // start, in which case we need to iterate over them in full.
+      // objArrays are precisely marked, but can still be iterated
+      // over in full if completely covered.
       if (!obj->is_objArray() || (((HeapWord*)obj) >= start && cur <= end)) {
         obj->oop_iterate(cl);
       } else {
@@ -658,7 +658,7 @@ class HeapRegion: public G1ContiguousSpace {
   // mr: the memory region covered by the card.
   // card_ptr: if we decide that the card is not young and we iterate
   // over it, we'll clean the card before we start the iteration.
-  // Returns true if card was successfully processed, false if an
+  // Returns true if the card was successfully processed, false if an
   // unparsable part of the heap was encountered, which should only
   // happen when invoked concurrently with the mutator.
   bool oops_on_card_seq_iterate_careful(MemRegion mr,