8035401: Fix visibility of G1ParScanThreadState members

After JDK-8035400 there were several opportunities to tighten the visibility of members of the G1ParScanThreadState class.

Reviewed-by: brutisso, mgerdin
Thomas Schatzl 2014-06-26 15:48:05 +02:00
parent 0bd028231f
commit b4e0c547b8
4 changed files with 131 additions and 117 deletions
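
In outline, the change gives G1ParScanThreadState a narrow public interface and hides the allocation and dispatch helpers behind it. A condensed sketch of the resulting class, abbreviated from the full header diff below (not the verbatim header):

class G1ParScanThreadState : public StackObj {
 private:                  // was "protected"
  G1CollectedHeap* _g1h;
  RefToScanQueue*  _refs;  // the refs() accessor is gone; asserts use queue_is_empty()

  // Allocation helpers, now private and defined out of line.
  inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
  inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
  inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
  void retire_alloc_buffers();

  // Renamed from deal_with_reference(StarTask) and made private.
  inline void dispatch_reference(StarTask ref);

 public:
  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
  ~G1ParScanThreadState();  // now out of line; retires buffers, frees _surviving_young_words_base

#ifdef ASSERT
  bool queue_is_empty() const { return _refs->is_empty(); }
#endif

  oop copy_to_survivor_space(oop const obj);
  void trim_queue();
  inline void steal_and_trim_queue(RefToScanQueueSet* task_queues);  // new: absorbs the steal loop
};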

src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

@@ -4669,24 +4669,10 @@ bool G1ParEvacuateFollowersClosure::offer_termination() {
}
void G1ParEvacuateFollowersClosure::do_void() {
- StarTask stolen_task;
G1ParScanThreadState* const pss = par_scan_state();
pss->trim_queue();
do {
- while (queues()->steal(pss->queue_num(), pss->hash_seed(), stolen_task)) {
- assert(pss->verify_task(stolen_task), "sanity");
- if (stolen_task.is_narrow()) {
- pss->deal_with_reference((narrowOop*) stolen_task);
- } else {
- pss->deal_with_reference((oop*) stolen_task);
- }
- // We've just processed a reference and we might have made
- // available new entries on the queues. So we have to make sure
- // we drain the queues as necessary.
- pss->trim_queue();
- }
+ pss->steal_and_trim_queue(queues());
} while (!offer_termination());
}
@@ -4732,8 +4718,7 @@ protected:
}
public:
- G1ParTask(G1CollectedHeap* g1h,
- RefToScanQueueSet *task_queues)
+ G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet *task_queues)
: AbstractGangTask("G1 collection"),
_g1h(g1h),
_queues(task_queues),
@@ -4831,7 +4816,7 @@ public:
pss.print_termination_stats(worker_id);
}
- assert(pss.refs()->is_empty(), "should be empty");
+ assert(pss.queue_is_empty(), "should be empty");
// Close the inner scope so that the ResourceMark and HandleMark
// destructors are executed here and are included as part of the
@@ -5355,8 +5340,7 @@ public:
pss.set_evac_failure_closure(&evac_failure_cl);
- assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+ assert(pss.queue_is_empty(), "both queue and overflow should be empty");
G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
@@ -5410,7 +5394,7 @@ public:
G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
drain_queue.do_void();
// Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
- assert(pss.refs()->is_empty(), "should be");
+ assert(pss.queue_is_empty(), "should be");
}
};
@@ -5477,7 +5461,7 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
pss.set_evac_failure_closure(&evac_failure_cl);
- assert(pss.refs()->is_empty(), "pre-condition");
+ assert(pss.queue_is_empty(), "pre-condition");
G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
@@ -5525,7 +5509,7 @@ void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
_gc_tracer_stw->report_gc_reference_stats(stats);
// We have completed copying any necessary live referent objects.
- assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
+ assert(pss.queue_is_empty(), "both queue and overflow should be empty");
double ref_proc_time = os::elapsedTime() - ref_proc_start;
g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);

src/share/vm/gc_implementation/g1/g1ParScanThreadState.cpp

@@ -69,6 +69,11 @@ G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num,
_start = os::elapsedTime();
}
+ G1ParScanThreadState::~G1ParScanThreadState() {
+ retire_alloc_buffers();
+ FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
+ }
void
G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
{
@@ -139,14 +144,14 @@ void G1ParScanThreadState::trim_queue() {
StarTask ref;
do {
// Drain the overflow stack first, so other threads can steal.
- while (refs()->pop_overflow(ref)) {
- deal_with_reference(ref);
+ while (_refs->pop_overflow(ref)) {
+ dispatch_reference(ref);
}
- while (refs()->pop_local(ref)) {
- deal_with_reference(ref);
+ while (_refs->pop_local(ref)) {
+ dispatch_reference(ref);
}
- } while (!refs()->is_empty());
+ } while (!_refs->is_empty());
}
@@ -249,3 +254,56 @@ oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
}
return obj;
}
+ HeapWord* G1ParScanThreadState::allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
+ HeapWord* obj = NULL;
+ size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
+ if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
+ G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
+ add_to_alloc_buffer_waste(alloc_buf->words_remaining());
+ alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+ HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
+ if (buf == NULL) {
+ return NULL; // Let caller handle allocation failure.
+ }
+ // Otherwise.
+ alloc_buf->set_word_size(gclab_word_size);
+ alloc_buf->set_buf(buf);
+ obj = alloc_buf->allocate(word_sz);
+ assert(obj != NULL, "buffer was definitely big enough...");
+ } else {
+ obj = _g1h->par_allocate_during_gc(purpose, word_sz);
+ }
+ return obj;
+ }
+ void G1ParScanThreadState::undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
+ if (alloc_buffer(purpose)->contains(obj)) {
+ assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
+ "should contain whole object");
+ alloc_buffer(purpose)->undo_allocation(obj, word_sz);
+ } else {
+ CollectedHeap::fill_with_object(obj, word_sz);
+ add_to_undo_waste(word_sz);
+ }
+ }
+ HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
+ HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
+ if (obj != NULL) {
+ return obj;
+ }
+ return allocate_slow(purpose, word_sz);
+ }
+ void G1ParScanThreadState::retire_alloc_buffers() {
+ for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
+ size_t waste = _alloc_buffers[ap]->words_remaining();
+ add_to_alloc_buffer_waste(waste);
+ _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
+ true /* end_of_gc */,
+ false /* retain */);
+ }
+ }
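
A note on the waste guard in allocate_slow above: a request that fails PLAB allocation triggers retiring and refilling the buffer only when it is smaller than the tolerated waste fraction of a full PLAB; larger requests are allocated directly via par_allocate_during_gc. A standalone sketch with hypothetical sizes (gclab_word_size is made up here; ParallelGCBufferWastePct defaults to 10 in HotSpot):

#include <cstdio>

int main() {
  const unsigned long gclab_word_size = 4096;         // hypothetical desired PLAB size, in words
  const unsigned long ParallelGCBufferWastePct = 10;  // HotSpot default

  const unsigned long requests[] = {64, 400, 500};
  for (unsigned long word_sz : requests) {
    // Same predicate as allocate_slow: refill only if the request is smaller
    // than the allowed waste fraction (10%) of a full PLAB.
    bool refill = word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct;
    // 64 and 400 words (< 409.6) retire and refill the PLAB; 500 does not.
    printf("word_sz=%lu -> %s\n", word_sz,
           refill ? "retire + refill PLAB" : "direct par_allocate_during_gc");
  }
  return 0;
}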

src/share/vm/gc_implementation/g1/g1ParScanThreadState.hpp

@@ -39,7 +39,7 @@ class HeapRegion;
class outputStream;
class G1ParScanThreadState : public StackObj {
- protected:
+ private:
G1CollectedHeap* _g1h;
RefToScanQueue* _refs;
DirtyCardQueue _dcq;
@@ -98,14 +98,10 @@ protected:
}
}
public:
G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
- ~G1ParScanThreadState() {
- retire_alloc_buffers();
- FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
- }
+ ~G1ParScanThreadState();
- RefToScanQueue* refs() { return _refs; }
ageTable* age_table() { return &_age_table; }
G1ParGCAllocBuffer* alloc_buffer(GCAllocPurpose purpose) {
@@ -116,6 +112,8 @@ public:
size_t undo_waste() const { return _undo_waste; }
#ifdef ASSERT
+ bool queue_is_empty() const { return _refs->is_empty(); }
bool verify_ref(narrowOop* ref) const;
bool verify_ref(oop* ref) const;
bool verify_task(StarTask ref) const;
@@ -123,56 +121,24 @@ public:
template <class T> void push_on_queue(T* ref) {
assert(verify_ref(ref), "sanity");
- refs()->push(ref);
+ _refs->push(ref);
}
template <class T> inline void update_rs(HeapRegion* from, T* p, int tid);
- HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz) {
- HeapWord* obj = NULL;
- size_t gclab_word_size = _g1h->desired_plab_sz(purpose);
- if (word_sz * 100 < gclab_word_size * ParallelGCBufferWastePct) {
- G1ParGCAllocBuffer* alloc_buf = alloc_buffer(purpose);
- add_to_alloc_buffer_waste(alloc_buf->words_remaining());
- alloc_buf->retire(false /* end_of_gc */, false /* retain */);
+ private:
- HeapWord* buf = _g1h->par_allocate_during_gc(purpose, gclab_word_size);
- if (buf == NULL) return NULL; // Let caller handle allocation failure.
- // Otherwise.
- alloc_buf->set_word_size(gclab_word_size);
- alloc_buf->set_buf(buf);
+ inline HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz);
+ inline HeapWord* allocate_slow(GCAllocPurpose purpose, size_t word_sz);
+ inline void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz);
- obj = alloc_buf->allocate(word_sz);
- assert(obj != NULL, "buffer was definitely big enough...");
- } else {
- obj = _g1h->par_allocate_during_gc(purpose, word_sz);
- }
- return obj;
- }
- HeapWord* allocate(GCAllocPurpose purpose, size_t word_sz) {
- HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
- if (obj != NULL) return obj;
- return allocate_slow(purpose, word_sz);
- }
- void undo_allocation(GCAllocPurpose purpose, HeapWord* obj, size_t word_sz) {
- if (alloc_buffer(purpose)->contains(obj)) {
- assert(alloc_buffer(purpose)->contains(obj + word_sz - 1),
- "should contain whole object");
- alloc_buffer(purpose)->undo_allocation(obj, word_sz);
- } else {
- CollectedHeap::fill_with_object(obj, word_sz);
- add_to_undo_waste(word_sz);
- }
- }
public:
void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
_evac_failure_cl = evac_failure_cl;
}
- OopsInHeapRegionClosure* evac_failure_closure() {
- return _evac_failure_cl;
- }
+ OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }
int* hash_seed() { return &_hash_seed; }
uint queue_num() { return _queue_num; }
@@ -201,10 +167,8 @@ public:
return os::elapsedTime() - _start;
}
- static void
- print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
- void
- print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
+ static void print_termination_stats_hdr(outputStream* const st = gclog_or_tty);
+ void print_termination_stats(int i, outputStream* const st = gclog_or_tty) const;
size_t* surviving_young_words() {
// We add on to hide entry 0 which accumulates surviving words for
@@ -213,15 +177,7 @@ public:
}
private:
- void retire_alloc_buffers() {
- for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
- size_t waste = _alloc_buffers[ap]->words_remaining();
- add_to_alloc_buffer_waste(waste);
- _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
- true /* end_of_gc */,
- false /* retain */);
- }
- }
+ void retire_alloc_buffers();
#define G1_PARTIAL_ARRAY_MASK 0x2
@@ -254,39 +210,18 @@ public:
inline void do_oop_partial_array(oop* p);
// This method is applied to the fields of the objects that have just been copied.
- template <class T> void do_oop_evac(T* p, HeapRegion* from) {
- assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
- "Reference should not be NULL here as such are never pushed to the task queue.");
- oop obj = oopDesc::load_decode_heap_oop_not_null(p);
- // Although we never intentionally push references outside of the collection
- // set, due to (benign) races in the claim mechanism during RSet scanning more
- // than one thread might claim the same card. So the same card may be
- // processed multiple times. So redo this check.
- if (_g1h->in_cset_fast_test(obj)) {
- oop forwardee;
- if (obj->is_forwarded()) {
- forwardee = obj->forwardee();
- } else {
- forwardee = copy_to_survivor_space(obj);
- }
- assert(forwardee != NULL, "forwardee should not be NULL");
- oopDesc::encode_store_heap_oop(p, forwardee);
- }
- assert(obj != NULL, "Must be");
- update_rs(from, p, queue_num());
- }
- public:
- oop copy_to_survivor_space(oop const obj);
+ template <class T> inline void do_oop_evac(T* p, HeapRegion* from);
template <class T> inline void deal_with_reference(T* ref_to_scan);
- inline void deal_with_reference(StarTask ref);
+ inline void dispatch_reference(StarTask ref);
+ public:
+ oop copy_to_survivor_space(oop const obj);
- public:
void trim_queue();
+ inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
};
#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP

src/share/vm/gc_implementation/g1/g1ParScanThreadState.inline.hpp

@@ -43,6 +43,30 @@ template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p,
}
}
+ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
+ assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
+ "Reference should not be NULL here as such are never pushed to the task queue.");
+ oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+ // Although we never intentionally push references outside of the collection
+ // set, due to (benign) races in the claim mechanism during RSet scanning more
+ // than one thread might claim the same card. So the same card may be
+ // processed multiple times. So redo this check.
+ if (_g1h->in_cset_fast_test(obj)) {
+ oop forwardee;
+ if (obj->is_forwarded()) {
+ forwardee = obj->forwardee();
+ } else {
+ forwardee = copy_to_survivor_space(obj);
+ }
+ assert(forwardee != NULL, "forwardee should not be NULL");
+ oopDesc::encode_store_heap_oop(p, forwardee);
+ }
+ assert(obj != NULL, "Must be");
+ update_rs(from, p, queue_num());
+ }
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
assert(has_partial_array_mask(p), "invariant");
oop from_obj = clear_partial_array_mask(p);
@@ -104,7 +128,7 @@ template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_
}
}
- inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
+ inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
assert(verify_task(ref), "sanity");
if (ref.is_narrow()) {
deal_with_reference((narrowOop*)ref);
@@ -113,5 +137,18 @@ inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
}
}
+ void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
+ StarTask stolen_task;
+ while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
+ assert(verify_task(stolen_task), "sanity");
+ dispatch_reference(stolen_task);
+ // We've just processed a reference and we might have made
+ // available new entries on the queues. So we have to make sure
+ // we drain the queues as necessary.
+ trim_queue();
+ }
+ }
#endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */