8376126
Hi all,

please review this change that converts the remaining volatile declarations in `G1ConcurrentMark` to use `Atomic<T>`. These volatiles indicate concurrent phased access (e.g. thread A changes the variable, then thread B reads it while A is dormant, or the variable is only ever updated in one direction), where concurrency safety is provided by barriers between these phases. The exception is `G1ConcurrentMark::_finger`, which was apparently overlooked in an earlier change.

Testing: gha

Thanks,
Thomas
parent 025041ba04
commit 47925be92a
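For readers unfamiliar with the pattern the message describes, the sketch below shows phased access in stand-alone form. std::atomic with relaxed ordering stands in for HotSpot's Atomic<T> and the load_relaxed/store_relaxed calls seen throughout the diff; the thread join plays the role of the barrier between phases. This is a minimal illustration, not HotSpot code.

#include <atomic>
#include <cassert>
#include <thread>

// Set in one phase, read in the next; the two threads never touch the
// flag concurrently without an intervening barrier.
std::atomic<bool> has_aborted{false};

int main() {
  // Phase 1: a marking thread requests an abort; relaxed is enough
  // because no other thread reads the flag during this phase.
  std::thread marker([] {
    has_aborted.store(true, std::memory_order_relaxed);
  });
  marker.join(); // the barrier between phases: join orders the store

  // Phase 2: the coordinator reads the flag; the join above already
  // guarantees the store is visible, so a relaxed load suffices.
  assert(has_aborted.load(std::memory_order_relaxed));
  return 0;
}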
src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

@@ -518,8 +518,8 @@ G1ConcurrentMark::G1ConcurrentMark(G1CollectedHeap* g1h,
   _max_concurrent_workers(0),

   _region_mark_stats(NEW_C_HEAP_ARRAY(G1RegionMarkStats, _g1h->max_num_regions(), mtGC)),
-  _top_at_mark_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_num_regions(), mtGC)),
-  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(HeapWord*, _g1h->max_num_regions(), mtGC)),
+  _top_at_mark_starts(NEW_C_HEAP_ARRAY(Atomic<HeapWord*>, _g1h->max_num_regions(), mtGC)),
+  _top_at_rebuild_starts(NEW_C_HEAP_ARRAY(Atomic<HeapWord*>, _g1h->max_num_regions(), mtGC)),
   _needs_remembered_set_rebuild(false)
 {
   assert(G1CGC_lock != nullptr, "CGC_lock must be initialized");
@@ -565,7 +565,7 @@ PartialArrayStateManager* G1ConcurrentMark::partial_array_state_manager() const
 }

 void G1ConcurrentMark::reset() {
-  _has_aborted = false;
+  _has_aborted.store_relaxed(false);

   reset_marking_for_restart();

@@ -577,7 +577,7 @@ void G1ConcurrentMark::reset() {

   uint max_num_regions = _g1h->max_num_regions();
   for (uint i = 0; i < max_num_regions; i++) {
-    _top_at_rebuild_starts[i] = nullptr;
+    ::new (&_top_at_rebuild_starts[i]) Atomic<HeapWord*>{};
     _region_mark_stats[i].clear();
   }

@@ -589,7 +589,7 @@ void G1ConcurrentMark::clear_statistics(G1HeapRegion* r) {
   for (uint j = 0; j < _max_num_tasks; ++j) {
     _tasks[j]->clear_mark_stats_cache(region_idx);
   }
-  _top_at_rebuild_starts[region_idx] = nullptr;
+  ::new (&_top_at_rebuild_starts[region_idx]) Atomic<HeapWord*>{};
   _region_mark_stats[region_idx].clear();
 }

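The two hunks above replace a plain null assignment with placement new: NEW_C_HEAP_ARRAY returns raw C-heap storage on which no constructors have run, so re-running the Atomic<HeapWord*> constructor in place is the clean way to put a slot back into a known state. Below is a minimal sketch of the same idiom, with std::atomic standing in for Atomic<T>, malloc standing in for NEW_C_HEAP_ARRAY, and a hypothetical Word element type.

#include <atomic>
#include <cstdlib>
#include <new>

using Word = void*;

int main() {
  const std::size_t n = 16;
  // Raw allocation: no std::atomic constructors have run yet.
  auto* slots = static_cast<std::atomic<Word>*>(
      std::malloc(n * sizeof(std::atomic<Word>)));
  // Construct (or later reconstruct) each slot in place;
  // value-initialization leaves it holding a null Word.
  for (std::size_t i = 0; i < n; i++) {
    ::new (&slots[i]) std::atomic<Word>{};
  }
  // Ordinary atomic use afterwards.
  slots[0].store(nullptr, std::memory_order_relaxed);
  std::free(slots);
  return 0;
}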
@@ -625,7 +625,7 @@ void G1ConcurrentMark::reset_marking_for_restart() {
   }

   clear_has_overflown();
-  _finger = _heap.start();
+  _finger.store_relaxed(_heap.start());

   for (uint i = 0; i < _max_num_tasks; ++i) {
     G1CMTaskQueue* queue = _task_queues->queue(i);
@@ -647,14 +647,14 @@ void G1ConcurrentMark::set_concurrency(uint active_tasks) {
 void G1ConcurrentMark::set_concurrency_and_phase(uint active_tasks, bool concurrent) {
   set_concurrency(active_tasks);

-  _concurrent = concurrent;
+  _concurrent.store_relaxed(concurrent);

   if (!concurrent) {
     // At this point we should be in a STW phase, and completed marking.
     assert_at_safepoint_on_vm_thread();
     assert(out_of_regions(),
            "only way to get here: _finger: " PTR_FORMAT ", _heap_end: " PTR_FORMAT,
-           p2i(_finger), p2i(_heap.end()));
+           p2i(finger()), p2i(_heap.end()));
   }
 }

@@ -685,8 +685,8 @@ void G1ConcurrentMark::reset_at_marking_complete() {
 }

 G1ConcurrentMark::~G1ConcurrentMark() {
-  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_mark_starts);
-  FREE_C_HEAP_ARRAY(HeapWord*, _top_at_rebuild_starts);
+  FREE_C_HEAP_ARRAY(Atomic<HeapWord*>, _top_at_mark_starts);
+  FREE_C_HEAP_ARRAY(Atomic<HeapWord*>, _top_at_rebuild_starts);
   FREE_C_HEAP_ARRAY(G1RegionMarkStats, _region_mark_stats);
   // The G1ConcurrentMark instance is never freed.
   ShouldNotReachHere();
@@ -1151,7 +1151,7 @@ void G1ConcurrentMark::concurrent_cycle_start() {
 }

 uint G1ConcurrentMark::completed_mark_cycles() const {
-  return AtomicAccess::load(&_completed_mark_cycles);
+  return _completed_mark_cycles.load_relaxed();
 }

 void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
@@ -1160,7 +1160,7 @@ void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
   _g1h->trace_heap_after_gc(_gc_tracer_cm);

   if (mark_cycle_completed) {
-    AtomicAccess::inc(&_completed_mark_cycles, memory_order_relaxed);
+    _completed_mark_cycles.add_then_fetch(1u, memory_order_relaxed);
   }

   if (has_aborted()) {
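The two counter hunks above swap the AtomicAccess free functions for the member API; the ordering stays relaxed because _completed_mark_cycles is a monotonic counter whose readers only need an eventually-visible value, not ordering with surrounding accesses. A small sketch of the same pattern, with std::atomic standing in for Atomic<uint> (fetch_add plus the increment corresponds to add_then_fetch in the diff):

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> completed_mark_cycles{0};

// Bumped once per completed mark cycle.
void concurrent_cycle_end(bool mark_cycle_completed) {
  if (mark_cycle_completed) {
    completed_mark_cycles.fetch_add(1, std::memory_order_relaxed);
  }
}

// Readers tolerate a slightly stale value, so relaxed is sufficient.
uint32_t completed_cycles() {
  return completed_mark_cycles.load(std::memory_order_relaxed);
}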
@@ -1174,7 +1174,7 @@ void G1ConcurrentMark::concurrent_cycle_end(bool mark_cycle_completed) {
 }

 void G1ConcurrentMark::mark_from_roots() {
-  _restart_for_overflow = false;
+  _restart_for_overflow.store_relaxed(false);

   uint active_workers = calc_active_marking_workers();

@@ -1343,7 +1343,7 @@ void G1ConcurrentMark::remark() {
     }
   } else {
     // We overflowed. Restart concurrent marking.
-    _restart_for_overflow = true;
+    _restart_for_overflow.store_relaxed(true);

     verify_during_pause(G1HeapVerifier::G1VerifyRemark, VerifyLocation::RemarkOverflow);

@@ -1772,44 +1772,45 @@ void G1ConcurrentMark::clear_bitmap_for_region(G1HeapRegion* hr) {
 }

 G1HeapRegion* G1ConcurrentMark::claim_region(uint worker_id) {
-  // "checkpoint" the finger
-  HeapWord* finger = _finger;
+  // "Checkpoint" the finger.
+  HeapWord* local_finger = finger();

-  while (finger < _heap.end()) {
-    assert(_g1h->is_in_reserved(finger), "invariant");
+  while (local_finger < _heap.end()) {
+    assert(_g1h->is_in_reserved(local_finger), "invariant");

-    G1HeapRegion* curr_region = _g1h->heap_region_containing_or_null(finger);
+    G1HeapRegion* curr_region = _g1h->heap_region_containing_or_null(local_finger);
     // Make sure that the reads below do not float before loading curr_region.
     OrderAccess::loadload();
     // Above heap_region_containing may return null as we always scan claim
     // until the end of the heap. In this case, just jump to the next region.
-    HeapWord* end = curr_region != nullptr ? curr_region->end() : finger + G1HeapRegion::GrainWords;
+    HeapWord* end = curr_region != nullptr ? curr_region->end() : local_finger + G1HeapRegion::GrainWords;

     // Is the gap between reading the finger and doing the CAS too long?
-    HeapWord* res = AtomicAccess::cmpxchg(&_finger, finger, end);
-    if (res == finger && curr_region != nullptr) {
-      // we succeeded
+    HeapWord* res = _finger.compare_exchange(local_finger, end);
+    if (res == local_finger && curr_region != nullptr) {
+      // We succeeded.
       HeapWord* bottom = curr_region->bottom();
       HeapWord* limit = top_at_mark_start(curr_region);

       log_trace(gc, marking)("Claim region %u bottom " PTR_FORMAT " tams " PTR_FORMAT, curr_region->hrm_index(), p2i(curr_region->bottom()), p2i(top_at_mark_start(curr_region)));
-      // notice that _finger == end cannot be guaranteed here since,
-      // someone else might have moved the finger even further
-      assert(_finger >= end, "the finger should have moved forward");
+      // Notice that _finger == end cannot be guaranteed here since,
+      // someone else might have moved the finger even further.
+      assert(finger() >= end, "The finger should have moved forward");

       if (limit > bottom) {
         return curr_region;
       } else {
         assert(limit == bottom,
-               "the region limit should be at bottom");
+               "The region limit should be at bottom");
         // We return null and the caller should try calling
         // claim_region() again.
         return nullptr;
       }
     } else {
-      assert(_finger > finger, "the finger should have moved forward");
-      // read it again
-      finger = _finger;
+      // Read the finger again.
+      HeapWord* next_finger = finger();
+      assert(next_finger > local_finger, "The finger should have moved forward " PTR_FORMAT " " PTR_FORMAT, p2i(local_finger), p2i(next_finger));
+      local_finger = next_finger;
     }
   }

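claim_region above is the most involved conversion: workers snapshot the global finger, compute the end of the region it points into, and try to advance the finger with a compare-exchange, so that exactly one worker claims each region. The sketch below reduces that loop to its core under assumed names (kRegionWords and the flat dummy heap are illustrative only). Note that std::atomic's compare_exchange_strong, unlike the Atomic<T>::compare_exchange in the diff, returns bool and refreshes the expected value on failure, which shortens the retry path.

#include <atomic>
#include <cstddef>

using HeapWord = void*;

constexpr std::size_t kRegionWords = 1024;
HeapWord heap[8 * kRegionWords];       // dummy heap: 8 regions
HeapWord* const heap_end = heap + 8 * kRegionWords;

std::atomic<HeapWord*> finger{heap};   // next unclaimed address

// Returns the start of the claimed region, or nullptr once the whole
// heap has been claimed.
HeapWord* claim_region() {
  HeapWord* local_finger = finger.load(std::memory_order_relaxed);
  while (local_finger < heap_end) {
    HeapWord* end = local_finger + kRegionWords;
    // The winner advances the finger past this region and owns
    // [local_finger, end).
    if (finger.compare_exchange_strong(local_finger, end)) {
      return local_finger;
    }
    // On failure, local_finger was refreshed with the current finger;
    // loop and retry from there.
  }
  return nullptr;
}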
@@ -1948,7 +1949,7 @@ bool G1ConcurrentMark::concurrent_cycle_abort() {

 void G1ConcurrentMark::abort_marking_threads() {
   assert(!_root_regions.scan_in_progress(), "still doing root region scan");
-  _has_aborted = true;
+  _has_aborted.store_relaxed(true);
   _first_overflow_barrier_sync.abort();
   _second_overflow_barrier_sync.abort();
 }

src/hotspot/share/gc/g1/g1ConcurrentMark.hpp

@@ -368,7 +368,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {

   // For grey objects
   G1CMMarkStack _global_mark_stack; // Grey objects behind global finger
-  HeapWord* volatile _finger;       // The global finger, region aligned,
+  Atomic<HeapWord*> _finger;        // The global finger, region aligned,
                                     // always pointing to the end of the
                                     // last claimed region

@@ -395,19 +395,19 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
   WorkerThreadsBarrierSync _second_overflow_barrier_sync;

   // Number of completed mark cycles.
-  volatile uint _completed_mark_cycles;
+  Atomic<uint> _completed_mark_cycles;

   // This is set by any task, when an overflow on the global data
   // structures is detected
-  volatile bool _has_overflown;
+  Atomic<bool> _has_overflown;
   // True: marking is concurrent, false: we're in remark
-  volatile bool _concurrent;
+  Atomic<bool> _concurrent;
   // Set at the end of a Full GC so that marking aborts
-  volatile bool _has_aborted;
+  Atomic<bool> _has_aborted;

   // Used when remark aborts due to an overflow to indicate that
   // another concurrent marking phase should start
-  volatile bool _restart_for_overflow;
+  Atomic<bool> _restart_for_overflow;

   ConcurrentGCTimer* _gc_timer_cm;

@@ -461,8 +461,8 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {

   void print_and_reset_taskqueue_stats();

-  HeapWord* finger() { return _finger; }
-  bool concurrent() { return _concurrent; }
+  HeapWord* finger() { return _finger.load_relaxed(); }
+  bool concurrent() { return _concurrent.load_relaxed(); }
   uint active_tasks() { return _num_active_tasks; }
   TaskTerminator* terminator() { return &_terminator; }

@@ -487,7 +487,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
   // to satisfy an allocation without doing a GC. This is fine, because all
   // objects in those regions will be considered live anyway because of
   // SATB guarantees (i.e. their TAMS will be equal to bottom).
-  bool out_of_regions() { return _finger >= _heap.end(); }
+  bool out_of_regions() { return finger() >= _heap.end(); }

   // Returns the task with the given id
   G1CMTask* task(uint id) {
@@ -499,10 +499,10 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {

   // Access / manipulation of the overflow flag which is set to
   // indicate that the global stack has overflown
-  bool has_overflown() { return _has_overflown; }
-  void set_has_overflown() { _has_overflown = true; }
-  void clear_has_overflown() { _has_overflown = false; }
-  bool restart_for_overflow() { return _restart_for_overflow; }
+  bool has_overflown() { return _has_overflown.load_relaxed(); }
+  void set_has_overflown() { _has_overflown.store_relaxed(true); }
+  void clear_has_overflown() { _has_overflown.store_relaxed(false); }
+  bool restart_for_overflow() { return _restart_for_overflow.load_relaxed(); }

   // Methods to enter the two overflow sync barriers
   void enter_first_sync_barrier(uint worker_id);
@@ -516,12 +516,12 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
   G1RegionMarkStats* _region_mark_stats;
   // Top pointer for each region at the start of marking. Must be valid for all committed
   // regions.
-  HeapWord* volatile* _top_at_mark_starts;
+  Atomic<HeapWord*>* _top_at_mark_starts;
   // Top pointer for each region at the start of the rebuild remembered set process
   // for regions which remembered sets need to be rebuilt. A null for a given region
   // means that this region does not be scanned during the rebuilding remembered
   // set phase at all.
-  HeapWord* volatile* _top_at_rebuild_starts;
+  Atomic<HeapWord*>* _top_at_rebuild_starts;
   // True when Remark pause selected regions for rebuilding.
   bool _needs_remembered_set_rebuild;
 public:
@@ -676,7 +676,7 @@ public:

   uint completed_mark_cycles() const;

-  bool has_aborted() { return _has_aborted; }
+  bool has_aborted() { return _has_aborted.load_relaxed(); }

   void print_summary_info();

src/hotspot/share/gc/g1/g1ConcurrentMark.inline.hpp

@@ -194,11 +194,11 @@ inline void G1CMTask::process_array_chunk(objArrayOop obj, size_t start, size_t
 inline void G1ConcurrentMark::update_top_at_mark_start(G1HeapRegion* r) {
   uint const region = r->hrm_index();
   assert(region < _g1h->max_num_regions(), "Tried to access TAMS for region %u out of bounds", region);
-  _top_at_mark_starts[region] = r->top();
+  _top_at_mark_starts[region].store_relaxed(r->top());
 }

 inline void G1ConcurrentMark::reset_top_at_mark_start(G1HeapRegion* r) {
-  _top_at_mark_starts[r->hrm_index()] = r->bottom();
+  _top_at_mark_starts[r->hrm_index()].store_relaxed(r->bottom());
 }

 inline HeapWord* G1ConcurrentMark::top_at_mark_start(const G1HeapRegion* r) const {
@@ -207,7 +207,7 @@ inline HeapWord* G1ConcurrentMark::top_at_mark_start(const G1HeapRegion* r) const

 inline HeapWord* G1ConcurrentMark::top_at_mark_start(uint region) const {
   assert(region < _g1h->max_num_regions(), "Tried to access TARS for region %u out of bounds", region);
-  return _top_at_mark_starts[region];
+  return _top_at_mark_starts[region].load_relaxed();
 }

 inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
@@ -217,7 +217,7 @@ inline bool G1ConcurrentMark::obj_allocated_since_mark_start(oop obj) const {
 }

 inline HeapWord* G1ConcurrentMark::top_at_rebuild_start(G1HeapRegion* r) const {
-  return _top_at_rebuild_starts[r->hrm_index()];
+  return _top_at_rebuild_starts[r->hrm_index()].load_relaxed();
 }

 inline void G1ConcurrentMark::update_top_at_rebuild_start(G1HeapRegion* r) {
@@ -225,10 +225,10 @@ inline void G1ConcurrentMark::update_top_at_rebuild_start(G1HeapRegion* r) {

   uint const region = r->hrm_index();
   assert(region < _g1h->max_num_regions(), "Tried to access TARS for region %u out of bounds", region);
-  assert(_top_at_rebuild_starts[region] == nullptr,
+  assert(top_at_rebuild_start(r) == nullptr,
          "TARS for region %u has already been set to " PTR_FORMAT " should be null",
-         region, p2i(_top_at_rebuild_starts[region]));
-  _top_at_rebuild_starts[region] = r->top();
+         region, p2i(top_at_rebuild_start(r)));
+  _top_at_rebuild_starts[region].store_relaxed(r->top());
 }

 inline void G1CMTask::update_liveness(oop const obj, const size_t obj_size) {