8372250: Merge PtrQueue into SATBMarkQueue

Reviewed-by: kbarrett, iwalulya, tschatzl, wkemper

commit 0eb2bcd260 (parent 6ec36d348b)

@@ -26,7 +26,7 @@
 #include "gc/g1/g1HeapSizingPolicy.hpp"
 #include "gc/g1/jvmFlagConstraintsG1.hpp"
 #include "gc/shared/bufferNode.hpp"
-#include "gc/shared/ptrQueue.hpp"
+#include "gc/shared/satbMarkQueue.hpp"
 #include "runtime/globals_extension.hpp"
 #include "utilities/globalDefinitions.hpp"
 
@@ -70,8 +70,8 @@
   nonstatic_field(G1HeapRegionSetBase, _length, uint) \
   \
   nonstatic_field(SATBMarkQueue, _active, bool) \
-  nonstatic_field(PtrQueue, _buf, void**) \
-  nonstatic_field(PtrQueue, _index, size_t)
+  nonstatic_field(SATBMarkQueue, _buf, void**) \
+  nonstatic_field(SATBMarkQueue, _index, size_t)
 
 #define VM_INT_CONSTANTS_G1GC(declare_constant, declare_constant_with_value) \
   declare_constant(G1HeapRegionType::FreeTag) \
@@ -96,7 +96,6 @@
   declare_toplevel_type(G1HeapRegionManager) \
   declare_toplevel_type(G1HeapRegionSetBase) \
   declare_toplevel_type(G1MonitoringSupport) \
-  declare_toplevel_type(PtrQueue) \
   declare_toplevel_type(G1HeapRegionType) \
   declare_toplevel_type(SATBMarkQueue) \
   \
@@ -1,111 +0,0 @@
-/*
- * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "gc/shared/bufferNode.hpp"
-#include "gc/shared/ptrQueue.hpp"
-
-PtrQueue::PtrQueue(PtrQueueSet* qset) :
-  _index(0),
-  _buf(nullptr)
-{}
-
-PtrQueue::~PtrQueue() {
-  assert(_buf == nullptr, "queue must be flushed before delete");
-}
-
-size_t PtrQueue::current_capacity() const {
-  if (_buf == nullptr) {
-    return 0;
-  } else {
-    return BufferNode::make_node_from_buffer(_buf)->capacity();
-  }
-}
-
-PtrQueueSet::PtrQueueSet(BufferNode::Allocator* allocator) :
-  _allocator(allocator)
-{}
-
-PtrQueueSet::~PtrQueueSet() {}
-
-void PtrQueueSet::reset_queue(PtrQueue& queue) {
-  queue.set_index(queue.current_capacity());
-}
-
-void PtrQueueSet::flush_queue(PtrQueue& queue) {
-  void** buffer = queue.buffer();
-  if (buffer != nullptr) {
-    size_t index = queue.index();
-    queue.set_buffer(nullptr);
-    queue.set_index(0);
-    BufferNode* node = BufferNode::make_node_from_buffer(buffer, index);
-    if (index == node->capacity()) {
-      deallocate_buffer(node);
-    } else {
-      enqueue_completed_buffer(node);
-    }
-  }
-}
-
-bool PtrQueueSet::try_enqueue(PtrQueue& queue, void* value) {
-  size_t index = queue.index();
-  if (index == 0) return false;
-  void** buffer = queue.buffer();
-  assert(buffer != nullptr, "no buffer but non-zero index");
-  buffer[--index] = value;
-  queue.set_index(index);
-  return true;
-}
-
-void PtrQueueSet::retry_enqueue(PtrQueue& queue, void* value) {
-  assert(queue.index() != 0, "precondition");
-  assert(queue.buffer() != nullptr, "precondition");
-  size_t index = queue.index();
-  queue.buffer()[--index] = value;
-  queue.set_index(index);
-}
-
-BufferNode* PtrQueueSet::exchange_buffer_with_new(PtrQueue& queue) {
-  BufferNode* node = nullptr;
-  void** buffer = queue.buffer();
-  if (buffer != nullptr) {
-    node = BufferNode::make_node_from_buffer(buffer, queue.index());
-  }
-  install_new_buffer(queue);
-  return node;
-}
-
-void PtrQueueSet::install_new_buffer(PtrQueue& queue) {
-  BufferNode* node = _allocator->allocate();
-  queue.set_buffer(BufferNode::make_buffer_from_node(node));
-  queue.set_index(node->capacity());
-}
-
-void** PtrQueueSet::allocate_buffer() {
-  BufferNode* node = _allocator->allocate();
-  return BufferNode::make_buffer_from_node(node);
-}
-
-void PtrQueueSet::deallocate_buffer(BufferNode* node) {
-  _allocator->release(node);
-}
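
The deleted PtrQueueSet code above is the buffer protocol that the rest of this commit re-homes into SATBMarkQueueSet. If the countdown-index scheme is unfamiliar, the following self-contained sketch mirrors it outside HotSpot: the index starts at the buffer's capacity and counts down, a zero index means "no room (or no buffer installed)", and a retired buffer is either discarded (if still empty) or handed off as a completed buffer. ToyQueue, ToyQueueSet, the capacity of 4, and the std::vector completed list are illustrative stand-ins, not HotSpot types.

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

// Minimal stand-in for a PtrQueue/SATBMarkQueue: a buffer plus a countdown index.
struct ToyQueue {
  void** buf = nullptr;   // current buffer, or null if none installed
  size_t index = 0;       // free element slots remaining; counts down to 0
};

// Minimal stand-in for the queue set that owns the shared buffer resources.
class ToyQueueSet {
  static const size_t kCapacity = 4;   // stand-in for BufferNode capacity
  std::vector<void**> completed_;      // buffers handed off for later processing

public:
  // Fast path: succeeds only if a buffer is installed and has room.
  bool try_enqueue(ToyQueue& q, void* value) {
    if (q.index == 0) return false;    // no buffer, or buffer full
    assert(q.buf != nullptr && "no buffer but non-zero index");
    q.buf[--q.index] = value;          // fill from the top of the buffer down
    return true;
  }

  // Slow path: retire the old buffer (if any) and install a fresh one.
  void handle_full(ToyQueue& q) {
    if (q.buf != nullptr) {
      completed_.push_back(q.buf);     // hand the full buffer off
    }
    q.buf = new void*[kCapacity];
    q.index = kCapacity;               // empty buffer: index == capacity
  }

  void enqueue(ToyQueue& q, void* value) {
    if (try_enqueue(q, value)) return;
    handle_full(q);
    bool ok = try_enqueue(q, value);   // like retry_enqueue: must succeed now
    assert(ok);
    (void)ok;
  }

  // Flush: discard an untouched buffer, otherwise treat it as completed.
  void flush(ToyQueue& q) {
    if (q.buf == nullptr) return;
    if (q.index == kCapacity) {
      delete[] q.buf;                  // never written to: just deallocate
    } else {
      completed_.push_back(q.buf);     // partially filled: still worth processing
    }
    q.buf = nullptr;
    q.index = 0;
  }

  size_t completed_count() const { return completed_.size(); }

  ~ToyQueueSet() {
    for (void** b : completed_) delete[] b;
  }
};

int main() {
  ToyQueueSet qset;
  ToyQueue q;
  int dummy[9];
  for (int& d : dummy) qset.enqueue(q, &d);   // two buffers fill up along the way
  qset.flush(q);                              // third buffer holds one entry
  std::cout << qset.completed_count() << " completed buffers\n";   // prints 3
  return 0;
}

In HotSpot the completed list is the lock-free BufferNode::Stack that appears later in satbMarkQueue.hpp, and buffers come from a BufferNode::Allocator free list; the toy collapses both into a vector purely to keep the example short.
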
@@ -1,168 +0,0 @@
-/*
- * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_GC_SHARED_PTRQUEUE_HPP
-#define SHARE_GC_SHARED_PTRQUEUE_HPP
-
-#include "gc/shared/bufferNode.hpp"
-#include "utilities/align.hpp"
-#include "utilities/debug.hpp"
-#include "utilities/globalDefinitions.hpp"
-#include "utilities/sizes.hpp"
-
-// There are various techniques that require threads to be able to log
-// addresses. For example, a generational write barrier might log
-// the addresses of modified old-generation objects. This type supports
-// this operation.
-
-class PtrQueueSet;
-class PtrQueue {
-  friend class VMStructs;
-
-  NONCOPYABLE(PtrQueue);
-
-  // The (byte) index at which an object was last enqueued. Starts at
-  // capacity (in bytes) (indicating an empty buffer) and goes towards zero.
-  // Value is always pointer-size aligned.
-  size_t _index;
-
-  static const size_t _element_size = sizeof(void*);
-
-  static size_t byte_index_to_index(size_t ind) {
-    assert(is_aligned(ind, _element_size), "precondition");
-    return ind / _element_size;
-  }
-
-  static size_t index_to_byte_index(size_t ind) {
-    return ind * _element_size;
-  }
-
-protected:
-  // The buffer.
-  void** _buf;
-
-  // Initialize this queue to contain a null buffer, and be part of the
-  // given PtrQueueSet.
-  PtrQueue(PtrQueueSet* qset);
-
-  // Requires queue flushed.
-  ~PtrQueue();
-
-public:
-
-  void** buffer() const { return _buf; }
-  void set_buffer(void** buffer) { _buf = buffer; }
-
-  size_t index() const {
-    return byte_index_to_index(_index);
-  }
-
-  void set_index(size_t new_index) {
-    assert(new_index <= current_capacity(), "precondition");
-    _index = index_to_byte_index(new_index);
-  }
-
-  // Returns the capacity of the buffer, or 0 if the queue doesn't currently
-  // have a buffer.
-  size_t current_capacity() const;
-
-  bool is_empty() const { return index() == current_capacity(); }
-  size_t size() const { return current_capacity() - index(); }
-
-protected:
-  // To support compiler.
-  template<typename Derived>
-  static ByteSize byte_offset_of_index() {
-    return byte_offset_of(Derived, _index);
-  }
-
-  static constexpr ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
-
-  template<typename Derived>
-  static ByteSize byte_offset_of_buf() {
-    return byte_offset_of(Derived, _buf);
-  }
-
-  static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
-};
-
-// A PtrQueueSet represents resources common to a set of pointer queues.
-// In particular, the individual queues allocate buffers from this shared
-// set, and return completed buffers to the set.
-class PtrQueueSet {
-  BufferNode::Allocator* _allocator;
-
-  NONCOPYABLE(PtrQueueSet);
-
-protected:
-  // Create an empty ptr queue set.
-  PtrQueueSet(BufferNode::Allocator* allocator);
-  ~PtrQueueSet();
-
-  // Discard any buffered enqueued data.
-  void reset_queue(PtrQueue& queue);
-
-  // If queue has any buffered enqueued data, transfer it to this qset.
-  // Otherwise, deallocate queue's buffer.
-  void flush_queue(PtrQueue& queue);
-
-  // Add value to queue's buffer, returning true. If buffer is full
-  // or if queue doesn't have a buffer, does nothing and returns false.
-  bool try_enqueue(PtrQueue& queue, void* value);
-
-  // Add value to queue's buffer. The queue must have a non-full buffer.
-  // Used after an initial try_enqueue has failed and the situation resolved.
-  void retry_enqueue(PtrQueue& queue, void* value);
-
-  // Installs a new buffer into queue.
-  // Returns the old buffer, or null if queue didn't have a buffer.
-  BufferNode* exchange_buffer_with_new(PtrQueue& queue);
-
-  // Installs a new buffer into queue.
-  void install_new_buffer(PtrQueue& queue);
-
-public:
-
-  // Return the associated BufferNode allocator.
-  BufferNode::Allocator* allocator() const { return _allocator; }
-
-  // Return the buffer for a BufferNode of size buffer_capacity().
-  void** allocate_buffer();
-
-  // Return an empty buffer to the free list. The node is required
-  // to have been allocated with a size of buffer_capacity().
-  void deallocate_buffer(BufferNode* node);
-
-  // A completed buffer is a buffer the mutator is finished with, and
-  // is ready to be processed by the collector. It need not be full.
-
-  // Adds node to the completed buffer list.
-  virtual void enqueue_completed_buffer(BufferNode* node) = 0;
-
-  size_t buffer_capacity() const {
-    return _allocator->buffer_capacity();
-  }
-};
-
-#endif // SHARE_GC_SHARED_PTRQUEUE_HPP
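
Two details of the deleted header carry over unchanged into SATBMarkQueue: _index is stored as a byte offset (always pointer-size aligned) rather than an element count, with byte_index_to_index/index_to_byte_index converting at the C++ boundary, and byte_offset_of_index/byte_offset_of_buf publish the field offsets so generated code can update the queue directly. The snippet below is an illustrative, standalone rendering of that byte arithmetic with an assumed capacity of 256 elements; it is not the barrier code HotSpot actually emits.

#include <cstddef>
#include <cstdio>

int main() {
  const size_t element_size = sizeof(void*);            // 8 on a 64-bit VM
  const size_t capacity_in_elements = 256;              // assumed buffer capacity

  void*  storage[capacity_in_elements];
  void** buf = storage;
  size_t index = capacity_in_elements * element_size;   // empty: byte index == capacity in bytes

  // One "enqueue" the way emitted code can do it: bump the byte index down,
  // then store at (byte address of buf) + index, with no extra scaling.
  int sample;
  index -= element_size;
  *reinterpret_cast<void**>(reinterpret_cast<char*>(buf) + index) = &sample;

  // The C++ accessors convert back to an element index (index() in the header).
  printf("element index after one enqueue: %zu of %zu\n",
         index / element_size, capacity_in_elements);   // 255 of 256
  return 0;
}

Keeping the index in bytes lets a generated barrier fold the decrement into the store addressing without scaling by sizeof(void*); that, at least, appears to be the point of the "To support compiler." helpers, which SATBMarkQueue now provides directly instead of inheriting.
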
@@ -36,14 +36,19 @@
 #include "utilities/globalCounter.inline.hpp"
 
 SATBMarkQueue::SATBMarkQueue(SATBMarkQueueSet* qset) :
-  PtrQueue(qset),
+  _buf(nullptr),
+  _index(0),
   // SATB queues are only active during marking cycles. We create them
   // with their active field set to false. If a thread is created
   // during a cycle, it's SATB queue needs to be activated before the
   // thread starts running. This is handled by the collector-specific
   // BarrierSet thread attachment protocol.
   _active(false)
-{ }
+{}
+
+SATBMarkQueue::~SATBMarkQueue() {
+  assert(_buf == nullptr, "queue must be flushed before delete");
+}
 
 #ifndef PRODUCT
 // Helpful for debugging
@@ -64,7 +69,7 @@ void SATBMarkQueue::print(const char* name) {
 #endif // PRODUCT
 
 SATBMarkQueueSet::SATBMarkQueueSet(BufferNode::Allocator* allocator) :
-  PtrQueueSet(allocator),
+  _allocator(allocator),
   _list(),
   _count_and_process_flag(0),
   _process_completed_buffers_threshold(SIZE_MAX),
@@ -214,13 +219,6 @@ bool SATBMarkQueueSet::apply_closure_to_completed_buffer(SATBBufferClosure* cl)
   }
 }
 
-void SATBMarkQueueSet::flush_queue(SATBMarkQueue& queue) {
-  // Filter now to possibly save work later. If filtering empties the
-  // buffer then flush_queue can deallocate the buffer.
-  filter(queue);
-  PtrQueueSet::flush_queue(queue);
-}
-
 void SATBMarkQueueSet::enqueue_known_active(SATBMarkQueue& queue, oop obj) {
   assert(queue.is_active(), "precondition");
   void* value = cast_from_oop<void*>(obj);
@@ -355,3 +353,76 @@ void SATBMarkQueueSet::abandon_partial_marking() {
   } closure(*this);
   Threads::threads_do(&closure);
 }
+
+size_t SATBMarkQueue::current_capacity() const {
+  if (_buf == nullptr) {
+    return 0;
+  } else {
+    return BufferNode::make_node_from_buffer(_buf)->capacity();
+  }
+}
+
+void SATBMarkQueueSet::reset_queue(SATBMarkQueue& queue) {
+  queue.set_index(queue.current_capacity());
+}
+
+void SATBMarkQueueSet::flush_queue(SATBMarkQueue& queue) {
+  // Filter now to possibly save work later. If filtering empties the
+  // buffer then flush_queue can deallocate the buffer.
+  filter(queue);
+  void** buffer = queue.buffer();
+  if (buffer != nullptr) {
+    size_t index = queue.index();
+    queue.set_buffer(nullptr);
+    queue.set_index(0);
+    BufferNode* node = BufferNode::make_node_from_buffer(buffer, index);
+    if (index == node->capacity()) {
+      deallocate_buffer(node);
+    } else {
+      enqueue_completed_buffer(node);
+    }
+  }
+}
+
+bool SATBMarkQueueSet::try_enqueue(SATBMarkQueue& queue, void* value) {
+  size_t index = queue.index();
+  if (index == 0) return false;
+  void** buffer = queue.buffer();
+  assert(buffer != nullptr, "no buffer but non-zero index");
+  buffer[--index] = value;
+  queue.set_index(index);
+  return true;
+}
+
+void SATBMarkQueueSet::retry_enqueue(SATBMarkQueue& queue, void* value) {
+  assert(queue.index() != 0, "precondition");
+  assert(queue.buffer() != nullptr, "precondition");
+  size_t index = queue.index();
+  queue.buffer()[--index] = value;
+  queue.set_index(index);
+}
+
+BufferNode* SATBMarkQueueSet::exchange_buffer_with_new(SATBMarkQueue& queue) {
+  BufferNode* node = nullptr;
+  void** buffer = queue.buffer();
+  if (buffer != nullptr) {
+    node = BufferNode::make_node_from_buffer(buffer, queue.index());
+  }
+  install_new_buffer(queue);
+  return node;
+}
+
+void SATBMarkQueueSet::install_new_buffer(SATBMarkQueue& queue) {
+  BufferNode* node = _allocator->allocate();
+  queue.set_buffer(BufferNode::make_buffer_from_node(node));
+  queue.set_index(node->capacity());
+}
+
+void** SATBMarkQueueSet::allocate_buffer() {
+  BufferNode* node = _allocator->allocate();
+  return BufferNode::make_buffer_from_node(node);
+}
+
+void SATBMarkQueueSet::deallocate_buffer(BufferNode* node) {
+  _allocator->release(node);
+}
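
A buffer retired by flush_queue or exchange_buffer_with_new becomes a BufferNode that remembers the index at which the mutator stopped, so its live entries occupy slots [index, capacity) and size() is capacity - index. The standalone sketch below shows that layout together with a consumer shaped like SATBBufferClosure::do_buffer(void** buffer, size_t size); PrintClosure and the capacity of 8 are invented for the example.

#include <cstddef>
#include <cstdio>

// Consumer with the same shape as SATBBufferClosure::do_buffer.
struct PrintClosure {
  void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      printf("entry %zu: %p\n", i, buffer[i]);
    }
  }
};

int main() {
  const size_t capacity = 8;
  void* buf[capacity] = {};
  size_t index = capacity;        // empty buffer

  int a, b, c;
  buf[--index] = &a;              // enqueues fill the buffer from the top down
  buf[--index] = &b;
  buf[--index] = &c;

  // Hand-off: the live entries start at 'index' and run to the end.
  PrintClosure cl;
  cl.do_buffer(&buf[index], capacity - index);   // size == 3
  return 0;
}
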
@@ -25,11 +25,15 @@
 #ifndef SHARE_GC_SHARED_SATBMARKQUEUE_HPP
 #define SHARE_GC_SHARED_SATBMARKQUEUE_HPP
 
-#include "gc/shared/ptrQueue.hpp"
+#include "gc/shared/bufferNode.hpp"
 #include "memory/allocation.hpp"
 #include "memory/padded.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/atomic.hpp"
+#include "utilities/align.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/sizes.hpp"
 
 class Thread;
 class Monitor;
@@ -45,12 +49,33 @@ public:
   virtual void do_buffer(void** buffer, size_t size) = 0;
 };
 
-// A PtrQueue whose elements are (possibly stale) pointers to object heads.
-class SATBMarkQueue: public PtrQueue {
+// A queue whose elements are (possibly stale) pointers to object heads.
+class SATBMarkQueue {
   friend class VMStructs;
   friend class SATBMarkQueueSet;
 
 private:
+  NONCOPYABLE(SATBMarkQueue);
+
+  // The buffer.
+  void** _buf;
+
+  // The (byte) index at which an object was last enqueued. Starts at
+  // capacity (in bytes) (indicating an empty buffer) and goes towards zero.
+  // Value is always pointer-size aligned.
+  size_t _index;
+
+  static const size_t _element_size = sizeof(void*);
+
+  static size_t byte_index_to_index(size_t ind) {
+    assert(is_aligned(ind, _element_size), "precondition");
+    return ind / _element_size;
+  }
+
+  static size_t index_to_byte_index(size_t ind) {
+    return ind * _element_size;
+  }
+
   // Per-queue (so thread-local) cache of the SATBMarkQueueSet's
   // active state, to support inline barriers in compiled code.
   bool _active;
@@ -58,6 +83,29 @@ private:
 public:
   SATBMarkQueue(SATBMarkQueueSet* qset);
 
+  // Queue must be flushed
+  ~SATBMarkQueue();
+
+  void** buffer() const { return _buf; }
+
+  void set_buffer(void** buffer) { _buf = buffer; }
+
+  size_t index() const {
+    return byte_index_to_index(_index);
+  }
+
+  void set_index(size_t new_index) {
+    assert(new_index <= current_capacity(), "precondition");
+    _index = index_to_byte_index(new_index);
+  }
+
+  // Returns the capacity of the buffer, or 0 if the queue doesn't currently
+  // have a buffer.
+  size_t current_capacity() const;
+
+  bool is_empty() const { return index() == current_capacity(); }
+  size_t size() const { return current_capacity() - index(); }
+
   bool is_active() const { return _active; }
   void set_active(bool value) { _active = value; }
 
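
The _active flag, mirrored per queue, is what keeps the SATB write barrier cheap outside of marking: the barrier tests a thread-local boolean before doing any logging, and only logs the value a field held before it is overwritten (the snapshot-at-the-beginning invariant). The sketch below is a conceptual, standalone illustration of such a pre-write barrier, not the barrier G1 or Shenandoah actually emit; Object, ToySATBQueue, and store_ref are invented names.

#include <cstdio>
#include <vector>

struct Object;                       // stand-in for oop; never dereferenced here

struct ToySATBQueue {
  bool active = false;               // mirrors SATBMarkQueue::_active
  std::vector<Object*> log;          // stand-in for the real buffer

  // Pre-write barrier: called before a reference field is overwritten.
  void pre_write(Object** field) {
    if (!active) return;             // fast path outside marking cycles
    Object* pre_val = *field;        // the value about to be lost
    if (pre_val != nullptr) {
      log.push_back(pre_val);        // record it for the concurrent marker
    }
  }
};

void store_ref(ToySATBQueue& q, Object** field, Object* new_val) {
  q.pre_write(field);                // barrier first...
  *field = new_val;                  // ...then the actual store
}

int main() {
  Object* objA = reinterpret_cast<Object*>(0x1000);   // fake addresses for the demo
  Object* objB = reinterpret_cast<Object*>(0x2000);
  Object* field = objA;

  ToySATBQueue q;
  store_ref(q, &field, objB);        // marking inactive: nothing logged
  q.active = true;                   // concurrent marking starts
  store_ref(q, &field, nullptr);     // previous value (objB) gets logged
  printf("logged %zu reference(s)\n", q.log.size());  // prints 1
  return 0;
}
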
@@ -68,14 +116,16 @@ public:
 
   // Compiler support.
   static ByteSize byte_offset_of_index() {
-    return PtrQueue::byte_offset_of_index<SATBMarkQueue>();
+    return byte_offset_of(SATBMarkQueue, _index);
   }
-  using PtrQueue::byte_width_of_index;
+
+  static constexpr ByteSize byte_width_of_index() { return in_ByteSize(sizeof(size_t)); }
 
   static ByteSize byte_offset_of_buf() {
-    return PtrQueue::byte_offset_of_buf<SATBMarkQueue>();
+    return byte_offset_of(SATBMarkQueue, _buf);
   }
-  using PtrQueue::byte_width_of_buf;
+
+  static ByteSize byte_width_of_buf() { return in_ByteSize(_element_size); }
 
   static ByteSize byte_offset_of_active() {
     return byte_offset_of(SATBMarkQueue, _active);
@@ -84,7 +134,18 @@ public:
   static ByteSize byte_width_of_active() { return in_ByteSize(sizeof(bool)); }
 };
 
-class SATBMarkQueueSet: public PtrQueueSet {
+// A SATBMarkQueueSet represents resources common to a set of SATBMarkQueues.
+// In particular, the individual queues allocate buffers from this shared
+// set, and return completed buffers to the set.
+// A completed buffer is a buffer the mutator is finished with, and
+// is ready to be processed by the collector. It need not be full.
+class SATBMarkQueueSet {
+
+  BufferNode::Allocator* _allocator;
+
+  NONCOPYABLE(SATBMarkQueueSet);
+
   DEFINE_PAD_MINUS_SIZE(1, DEFAULT_PADDING_SIZE, 0);
   PaddedEnd<BufferNode::Stack> _list;
@@ -99,6 +160,24 @@ class SATBMarkQueueSet: public PtrQueueSet {
   BufferNode* get_completed_buffer();
   void abandon_completed_buffers();
 
+  // Discard any buffered enqueued data.
+  void reset_queue(SATBMarkQueue& queue);
+
+  // Add value to queue's buffer, returning true. If buffer is full
+  // or if queue doesn't have a buffer, does nothing and returns false.
+  bool try_enqueue(SATBMarkQueue& queue, void* value);
+
+  // Add value to queue's buffer. The queue must have a non-full buffer.
+  // Used after an initial try_enqueue has failed and the situation resolved.
+  void retry_enqueue(SATBMarkQueue& queue, void* value);
+
+  // Installs a new buffer into queue.
+  // Returns the old buffer, or null if queue didn't have a buffer.
+  BufferNode* exchange_buffer_with_new(SATBMarkQueue& queue);
+
+  // Installs a new buffer into queue.
+  void install_new_buffer(SATBMarkQueue& queue);
+
 #ifdef ASSERT
   void dump_active_states(bool expected_active);
   void verify_active_states(bool expected_active);
@@ -106,6 +185,7 @@ class SATBMarkQueueSet: public PtrQueueSet {
 
 protected:
   SATBMarkQueueSet(BufferNode::Allocator* allocator);
+
   ~SATBMarkQueueSet();
 
   void handle_zero_index(SATBMarkQueue& queue);
@@ -131,6 +211,7 @@ public:
   void set_process_completed_buffers_threshold(size_t value);
 
   size_t buffer_enqueue_threshold() const { return _buffer_enqueue_threshold; }
+
   void set_buffer_enqueue_threshold_percentage(uint value);
 
   // If there exists some completed buffer, pop and process it, and
@@ -144,7 +225,7 @@ public:
   // Add obj to queue. This qset and the queue must be active.
   void enqueue_known_active(SATBMarkQueue& queue, oop obj);
   virtual void filter(SATBMarkQueue& queue) = 0;
-  virtual void enqueue_completed_buffer(BufferNode* node);
+  void enqueue_completed_buffer(BufferNode* node);
 
   // The number of buffers in the list. Racy and not updated atomically
   // with the set of completed buffers.
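
The pure virtual filter declared just above is what the flush_queue comment ("Filter now to possibly save work later") relies on: a collector drops entries that no longer need processing before a buffer is retired, and a buffer that filters down to empty can simply be deallocated. The standalone sketch below only shows the invariant such a filter has to maintain — survivors packed toward the high end of the buffer, index raised accordingly; filter_buffer is an invented helper, not the HotSpot implementation.

#include <cstddef>
#include <cstdio>

// Keep only entries accepted by 'keep'. Live entries occupy [index, capacity)
// both before and after the call; the new element index is returned.
template <typename KeepFn>
size_t filter_buffer(void** buf, size_t index, size_t capacity, KeepFn keep) {
  size_t dst = capacity;
  for (size_t src = capacity; src-- > index; ) {   // walk the live range top-down
    if (keep(buf[src])) {
      buf[--dst] = buf[src];                       // pack survivors at the high end
    }
  }
  return dst;                                      // == capacity if nothing survived
}

int main() {
  const size_t capacity = 8;
  void* buf[capacity] = {};
  size_t index = capacity;

  int values[5] = {0, 1, 0, 2, 0};                 // 0 plays the role of "no work needed"
  for (int& v : values) {
    buf[--index] = &v;                             // five live entries, index == 3
  }

  index = filter_buffer(buf, index, capacity, [](void* p) {
    return *static_cast<int*>(p) != 0;             // drop entries needing no processing
  });

  printf("%zu live entries after filtering\n", capacity - index);   // prints 2
  return 0;
}
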
@@ -157,6 +238,20 @@ public:
     return (_count_and_process_flag.load_relaxed() & 1) != 0;
   }
 
+  // Return the associated BufferNode allocator.
+  BufferNode::Allocator* allocator() const { return _allocator; }
+
+  // Return the buffer for a BufferNode of size buffer_capacity().
+  void** allocate_buffer();
+
+  // Return an empty buffer to the free list. The node is required
+  // to have been allocated with a size of buffer_capacity().
+  void deallocate_buffer(BufferNode* node);
+
+  size_t buffer_capacity() const {
+    return _allocator->buffer_capacity();
+  }
+
 #ifndef PRODUCT
   // Helpful for debugging
   void print_all(const char* msg);