8220240: Refactor shared dirty card queue

Add G1SharedDirtyCardQueue class.

Reviewed-by: tschatzl, lkorinth
This commit is contained in:
Kim Barrett 2019-03-22 15:42:43 -04:00
parent 54f31c6089
commit 3bb3dbc47e
12 changed files with 192 additions and 100 deletions

View File

@ -58,7 +58,8 @@ G1BarrierSet::G1BarrierSet(G1CardTable* card_table) :
_satb_mark_queue_buffer_allocator("SATB Buffer Allocator", G1SATBBufferSize),
_dirty_card_queue_buffer_allocator("DC Buffer Allocator", G1UpdateBufferSize),
_satb_mark_queue_set(),
_dirty_card_queue_set()
_dirty_card_queue_set(),
_shared_dirty_card_queue(&_dirty_card_queue_set)
{}
void G1BarrierSet::enqueue(oop pre_val) {

View File

@ -27,6 +27,7 @@
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SATBMarkQueueSet.hpp"
#include "gc/g1/g1SharedDirtyCardQueue.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
@ -42,6 +43,7 @@ class G1BarrierSet: public CardTableBarrierSet {
BufferNode::Allocator _dirty_card_queue_buffer_allocator;
G1SATBMarkQueueSet _satb_mark_queue_set;
G1DirtyCardQueueSet _dirty_card_queue_set;
G1SharedDirtyCardQueue _shared_dirty_card_queue;
static G1BarrierSet* g1_barrier_set() {
return barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
@ -91,6 +93,10 @@ class G1BarrierSet: public CardTableBarrierSet {
return g1_barrier_set()->_dirty_card_queue_set;
}
static G1SharedDirtyCardQueue& shared_dirty_card_queue() {
return g1_barrier_set()->_shared_dirty_card_queue;
}
// Callbacks for runtime accesses.
template <DecoratorSet decorators, typename BarrierSetT = G1BarrierSet>
class AccessBarrier: public ModRefBarrierSet::AccessBarrier<decorators, BarrierSetT> {

View File

@ -1682,12 +1682,10 @@ jint G1CollectedHeap::initialize() {
// later, based on the concurrent refinement object.
G1BarrierSet::dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
&bs->dirty_card_queue_buffer_allocator(),
Shared_DirtyCardQ_lock,
true); // init_free_ids
dirty_card_queue_set().initialize(DirtyCardQ_CBL_mon,
&bs->dirty_card_queue_buffer_allocator(),
Shared_DirtyCardQ_lock);
&bs->dirty_card_queue_buffer_allocator());
// Create the hot card cache.
_hot_card_cache = new G1HotCardCache(this);

View File

@ -56,21 +56,18 @@ public:
}
};
G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset, bool permanent) :
G1DirtyCardQueue::G1DirtyCardQueue(G1DirtyCardQueueSet* qset) :
// Dirty card queues are always active, so we create them with their
// active field set to true.
PtrQueue(qset, permanent, true /* active */)
PtrQueue(qset, true /* active */)
{ }
G1DirtyCardQueue::~G1DirtyCardQueue() {
if (!is_permanent()) {
flush();
}
flush();
}
G1DirtyCardQueueSet::G1DirtyCardQueueSet(bool notify_when_complete) :
PtrQueueSet(notify_when_complete),
_shared_dirty_card_queue(this, true /* permanent */),
_free_ids(NULL),
_processed_buffers_mut(0),
_processed_buffers_rs_thread(0),
@ -90,10 +87,8 @@ uint G1DirtyCardQueueSet::num_par_ids() {
void G1DirtyCardQueueSet::initialize(Monitor* cbl_mon,
BufferNode::Allocator* allocator,
Mutex* lock,
bool init_free_ids) {
PtrQueueSet::initialize(cbl_mon, allocator);
_shared_dirty_card_queue.set_lock(lock);
if (init_free_ids) {
_free_ids = new G1FreeIdSet(0, num_par_ids());
}
@ -217,13 +212,7 @@ void G1DirtyCardQueueSet::abandon_logs() {
} closure;
Threads::threads_do(&closure);
shared_dirty_card_queue()->reset();
}
void G1DirtyCardQueueSet::concatenate_log(G1DirtyCardQueue& dcq) {
if (!dcq.is_empty()) {
dcq.flush();
}
G1BarrierSet::shared_dirty_card_queue().reset();
}
void G1DirtyCardQueueSet::concatenate_logs() {
@ -234,16 +223,16 @@ void G1DirtyCardQueueSet::concatenate_logs() {
size_t old_limit = max_completed_buffers();
set_max_completed_buffers(MaxCompletedBuffersUnlimited);
class ConcatenateThreadLogClosure : public ThreadClosure {
G1DirtyCardQueueSet* _qset;
public:
ConcatenateThreadLogClosure(G1DirtyCardQueueSet* qset) : _qset(qset) {}
struct ConcatenateThreadLogClosure : public ThreadClosure {
virtual void do_thread(Thread* t) {
_qset->concatenate_log(G1ThreadLocalData::dirty_card_queue(t));
G1DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(t);
if (!dcq.is_empty()) {
dcq.flush();
}
}
} closure(this);
} closure;
Threads::threads_do(&closure);
concatenate_log(_shared_dirty_card_queue);
G1BarrierSet::shared_dirty_card_queue().flush();
set_max_completed_buffers(old_limit);
}

View File

@ -48,7 +48,7 @@ public:
// A ptrQueue whose elements are "oops", pointers to object heads.
class G1DirtyCardQueue: public PtrQueue {
public:
G1DirtyCardQueue(G1DirtyCardQueueSet* qset, bool permanent = false);
G1DirtyCardQueue(G1DirtyCardQueueSet* qset);
// Flush before destroying; queue may be used to capture pending work while
// doing something else, with auto-flush on completion.
@ -70,11 +70,7 @@ public:
};
class G1DirtyCardQueueSet: public PtrQueueSet {
G1DirtyCardQueue _shared_dirty_card_queue;
// Apply the closure to the elements of "node" from its index to
// buffer_size. If all closure applications return true, then
// returns true. Stops processing after the first closure
@ -116,15 +112,12 @@ class G1DirtyCardQueueSet: public PtrQueueSet {
// Current buffer node used for parallel iteration.
BufferNode* volatile _cur_par_buffer_node;
void concatenate_log(G1DirtyCardQueue& dcq);
public:
G1DirtyCardQueueSet(bool notify_when_complete = true);
~G1DirtyCardQueueSet();
void initialize(Monitor* cbl_mon,
BufferNode::Allocator* allocator,
Mutex* lock,
bool init_free_ids = false);
// The number of parallel ids that can be claimed to allow collector or
@ -147,10 +140,6 @@ public:
// by reset_for_par_iteration.
void par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl);
G1DirtyCardQueue* shared_dirty_card_queue() {
return &_shared_dirty_card_queue;
}
// If a full collection is happening, reset partial logs, and ignore
// completed ones: the full collection will make them all irrelevant.
void abandon_logs();

View File

@ -35,6 +35,7 @@
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/g1SharedDirtyCardQueue.hpp"
#include "gc/g1/heapRegion.inline.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
@ -519,9 +520,7 @@ void G1RemSet::oops_into_collection_set_do(G1ParScanThreadState* pss, uint worke
}
void G1RemSet::prepare_for_oops_into_collection_set_do() {
G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
dcqs.concatenate_logs();
G1BarrierSet::dirty_card_queue_set().concatenate_logs();
_scan_state->reset();
}
@ -660,29 +659,30 @@ void G1RemSet::refine_card_concurrently(CardValue* card_ptr,
assert(!dirty_region.is_empty(), "sanity");
G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_i);
bool card_processed =
r->oops_on_card_seq_iterate_careful<false>(dirty_region, &conc_refine_cl);
if (r->oops_on_card_seq_iterate_careful<false>(dirty_region, &conc_refine_cl)) {
_num_conc_refined_cards++; // Unsynchronized update, only used for logging.
return;
}
// If unable to process the card then we encountered an unparsable
// part of the heap (e.g. a partially allocated object) while
// processing a stale card. Despite the card being stale, redirty
// and re-enqueue, because we've already cleaned the card. Without
// this we could incorrectly discard a non-stale card.
if (!card_processed) {
// The card might have gotten re-dirtied and re-enqueued while we
// worked. (In fact, it's pretty likely.)
if (*card_ptr != G1CardTable::dirty_card_val()) {
*card_ptr = G1CardTable::dirty_card_val();
MutexLockerEx x(Shared_DirtyCardQ_lock,
Mutex::_no_safepoint_check_flag);
G1DirtyCardQueue* sdcq =
G1BarrierSet::dirty_card_queue_set().shared_dirty_card_queue();
sdcq->enqueue(card_ptr);
}
} else {
_num_conc_refined_cards++; // Unsynchronized update, only used for logging.
// part of the heap (e.g. a partially allocated object, so only
// temporarily a problem) while processing a stale card. Despite
// the card being stale, we can't simply ignore it, because we've
// already marked the card cleaned, so taken responsibility for
// ensuring the card gets scanned.
//
// However, the card might have gotten re-dirtied and re-enqueued
// while we worked. (In fact, it's pretty likely.)
if (*card_ptr == G1CardTable::dirty_card_val()) {
return;
}
// Re-dirty the card and enqueue in the *shared* queue. Can't use
// the thread-local queue, because that might be the queue that is
// being processed by us; we could be a Java thread conscripted to
// perform refinement on our queue's current buffer.
*card_ptr = G1CardTable::dirty_card_val();
G1BarrierSet::shared_dirty_card_queue().enqueue(card_ptr);
}
bool G1RemSet::refine_card_during_gc(CardValue* card_ptr,

View File

@ -0,0 +1,73 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1DirtyCardQueue.hpp"
#include "gc/g1/g1SharedDirtyCardQueue.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
// Construct a shared queue that hands completed buffers to "qset".
// No buffer is allocated up front; one is obtained lazily from the
// qset's allocator on the first enqueue.
G1SharedDirtyCardQueue::G1SharedDirtyCardQueue(G1DirtyCardQueueSet* qset) :
_qset(qset),
_buffer(NULL),
_index(0)
{}
// Flush any pending entries to the qset before destruction, so logged
// cards are not lost when the queue goes away.
G1SharedDirtyCardQueue::~G1SharedDirtyCardQueue() {
flush();
}
// Thread-safe insertion of a card pointer: all enqueuers serialize on
// Shared_DirtyCardQ_lock (taken without a safepoint check).
void G1SharedDirtyCardQueue::enqueue(void* card_ptr) {
  MutexLockerEx ml(Shared_DirtyCardQ_lock, Mutex::_no_safepoint_check_flag);
  bool need_new_buffer = (_index == 0);
  if (need_new_buffer) {
    // Either there is no buffer yet, or the current one is full: hand
    // the current buffer (if any) to the qset and install a fresh one.
    flush();
    _buffer = _qset->allocate_buffer();
    _index = _qset->buffer_size();
    assert(_index != 0, "invariant");
  }
  // Entries are filled from the high end of the buffer toward index 0.
  _index -= 1;
  _buffer[_index] = card_ptr;
}
// Hand off the current buffer (if any) to the qset and clear the queue.
// An untouched buffer is returned to the allocator rather than being
// enqueued as completed work.  Not thread-safe.
void G1SharedDirtyCardQueue::flush() {
  if (_buffer == NULL) {
    assert(_index == 0, "invariant");
    return;
  }
  BufferNode* node = BufferNode::make_node_from_buffer(_buffer, _index);
  _buffer = NULL;
  _index = 0;
  if (node->index() == _qset->buffer_size()) {
    // index == capacity means no entries were ever added; recycle it.
    _qset->deallocate_buffer(node);
  } else {
    _qset->enqueue_completed_buffer(node);
  }
}
// Discard all pending entries without enqueueing them.  Not thread-safe.
void G1SharedDirtyCardQueue::reset() {
  // An empty buffer is represented by index == capacity (entries fill
  // toward zero); with no buffer at all, the index is simply zero.
  _index = (_buffer == NULL) ? 0 : _qset->buffer_size();
}

View File

@ -0,0 +1,60 @@
/*
* Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_G1_G1SHAREDDIRTYCARDQUEUE_HPP
#define SHARE_GC_G1_G1SHAREDDIRTYCARDQUEUE_HPP
#include "utilities/globalDefinitions.hpp"
class G1DirtyCardQueueSet;
// A dirty card queue providing thread-safe enqueue.  A shared global
// instance can be used for cases where a thread-local dirty card queue
// can't be used, e.g. when the thread's own queue is the one currently
// being processed (see G1RemSet::refine_card_concurrently).
class G1SharedDirtyCardQueue {
// The qset to which full buffers are handed off and from which
// buffers are allocated; set once at construction.
G1DirtyCardQueueSet* const _qset;
// Current logging buffer, or NULL if none has been allocated yet.
void** _buffer;
// Index of the next slot to fill; entries occupy [_index, capacity).
// Zero means the buffer is full (or there is no buffer).
size_t _index;
// Noncopyable
G1SharedDirtyCardQueue(const G1SharedDirtyCardQueue&);
G1SharedDirtyCardQueue& operator=(const G1SharedDirtyCardQueue&);
public:
G1SharedDirtyCardQueue(G1DirtyCardQueueSet* qset);
~G1SharedDirtyCardQueue(); // flushes the queue.
// Thread-safe addition to shared logging buffer.
void enqueue(void* card_ptr);
// Flush any pending entries to the qset and remove the buffer.
// Not thread-safe.
void flush();
// Discard any pending entries.
// Not thread-safe.
void reset();
};
#endif // SHARE_GC_G1_G1SHAREDDIRTYCARDQUEUE_HPP

View File

@ -36,18 +36,16 @@
#include <new>
PtrQueue::PtrQueue(PtrQueueSet* qset, bool permanent, bool active) :
PtrQueue::PtrQueue(PtrQueueSet* qset, bool active) :
_qset(qset),
_active(active),
_permanent(permanent),
_index(0),
_capacity_in_bytes(0),
_buf(NULL),
_lock(NULL)
_buf(NULL)
{}
PtrQueue::~PtrQueue() {
assert(_permanent || (_buf == NULL), "queue must be flushed before delete");
assert(_buf == NULL, "queue must be flushed before delete");
}
void PtrQueue::flush_impl() {
@ -271,23 +269,13 @@ void PtrQueue::handle_zero_index() {
return;
}
if (_lock) {
assert(_lock->owned_by_self(), "Required.");
BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
_buf = NULL; // clear shared _buf field
qset()->enqueue_completed_buffer(node);
assert(_buf == NULL, "multiple enqueuers appear to be racing");
} else {
BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
if (qset()->process_or_enqueue_completed_buffer(node)) {
// Recycle the buffer. No allocation.
assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
assert(capacity() == qset()->buffer_size(), "invariant");
reset();
return;
}
BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
if (qset()->process_or_enqueue_completed_buffer(node)) {
// Recycle the buffer. No allocation.
assert(_buf == BufferNode::make_buffer_from_node(node), "invariant");
assert(capacity() == qset()->buffer_size(), "invariant");
reset();
return;
}
}
// Set capacity in case this is the first allocation.

View File

@ -54,11 +54,6 @@ class PtrQueue {
// Whether updates should be logged.
bool _active;
// If true, the queue is permanent, and doesn't need to deallocate
// its buffer in the destructor (since that obtains a lock which may not
// be legally locked by then.
const bool _permanent;
// The (byte) index at which an object was last enqueued. Starts at
// capacity_in_bytes (indicating an empty buffer) and goes towards zero.
// Value is always pointer-size aligned.
@ -111,27 +106,20 @@ protected:
return byte_index_to_index(capacity_in_bytes());
}
// If there is a lock associated with this buffer, this is that lock.
Mutex* _lock;
PtrQueueSet* qset() { return _qset; }
bool is_permanent() const { return _permanent; }
// Process queue entries and release resources.
void flush_impl();
// Initialize this queue to contain a null buffer, and be part of the
// given PtrQueueSet.
PtrQueue(PtrQueueSet* qset, bool permanent = false, bool active = false);
PtrQueue(PtrQueueSet* qset, bool active = false);
// Requires queue flushed or permanent.
// Requires queue flushed.
~PtrQueue();
public:
// Associate a lock with a ptr queue.
void set_lock(Mutex* lock) { _lock = lock; }
// Forcibly set empty.
void reset() {
if (_buf != NULL) {

View File

@ -35,14 +35,14 @@
#include "runtime/threadSMR.hpp"
#include "runtime/vmThread.hpp"
SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset, bool permanent) :
SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset) :
// SATB queues are only active during marking cycles. We create
// them with their active field set to false. If a thread is
// created during a cycle and its SATB queue needs to be activated
// before the thread starts running, we'll need to set its active
// field to true. This must be done in the collector-specific
// BarrierSet thread attachment protocol.
PtrQueue(qset, permanent, false /* active */)
PtrQueue(qset, false /* active */)
{ }
void SATBMarkQueue::flush() {

View File

@ -55,7 +55,7 @@ private:
inline void apply_filter(Filter filter_out);
public:
SATBMarkQueue(SATBMarkQueueSet* qset, bool permanent = false);
SATBMarkQueue(SATBMarkQueueSet* qset);
// Process queue entries and free resources.
void flush();