Mirror of https://github.com/openjdk/jdk.git, synced 2026-01-28 03:58:21 +00:00

8376353: Convert PSParallelCompact to use Atomic&lt;T&gt;

Hi all, please review this change to convert `PSParallelCompact` to use `Atomic<T>`.

Testing: tier1-5, gha

Thanks,
Thomas
parent 37cb22826a
commit a4346031d7
src/hotspot/share/gc/parallel/psParallelCompact.cpp

@@ -30,6 +30,7 @@
 #include "code/codeCache.hpp"
 #include "code/nmethod.hpp"
 #include "compiler/oopMap.hpp"
+#include "cppstdlib/new.hpp"
 #include "gc/parallel/objectStartArray.inline.hpp"
 #include "gc/parallel/parallelArguments.hpp"
 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
@@ -135,8 +136,8 @@ bool ParallelCompactData::RegionData::is_clear() {
          (_source_region == 0) &&
          (_partial_obj_addr == nullptr) &&
          (_partial_obj_size == 0) &&
-         (_dc_and_los == 0) &&
-         (_shadow_state == 0);
+         (dc_and_los() == 0) &&
+         (shadow_state() == 0);
 }
 
 #ifdef ASSERT
@@ -145,8 +146,8 @@ void ParallelCompactData::RegionData::verify_clear() {
   assert(_source_region == 0, "inv");
   assert(_partial_obj_addr == nullptr, "inv");
   assert(_partial_obj_size == 0, "inv");
-  assert(_dc_and_los == 0, "inv");
-  assert(_shadow_state == 0, "inv");
+  assert(dc_and_los() == 0, "inv");
+  assert(shadow_state() == 0, "inv");
 }
 #endif
 
@@ -296,7 +297,9 @@ void ParallelCompactData::clear_range(size_t beg_region, size_t end_region) {
   assert(end_region <= _region_count, "end_region out of range");
 
-  const size_t region_cnt = end_region - beg_region;
-  memset(_region_data + beg_region, 0, region_cnt * sizeof(RegionData));
+  for (size_t i = beg_region; i < end_region; i++) {
+    ::new (&_region_data[i]) RegionData{};
+  }
 }
 
 // The total live words on src_region would overflow the target space, so find
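Note: once `RegionData` holds `Atomic<T>` members it is no longer trivially copyable, so wiping a range with `memset` is no longer valid; instead each slot's default constructor is re-run in place. A minimal sketch of the pattern, using `std::atomic` as a stand-in for HotSpot's `Atomic<T>` (types and names here are illustrative):

```cpp
#include <atomic>
#include <cstddef>
#include <new>

// Stand-in for RegionData: a struct with std::atomic members has a deleted
// copy constructor, hence is not trivially copyable, so memset-clearing it
// is not allowed.
struct Record {
  std::atomic<unsigned> dc_and_los{0};
  std::atomic<int>      shadow_state{0};
};

void clear_range(Record* data, std::size_t beg, std::size_t end) {
  for (std::size_t i = beg; i < end; i++) {
    // Record is trivially destructible, so no destructor call is needed;
    // placement new with {} value-initializes the slot back to the
    // all-clear state the old memset produced.
    ::new (&data[i]) Record{};
  }
}
```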
@@ -1294,7 +1297,7 @@ void PSParallelCompact::marking_phase(ParallelOldTracer *gc_tracer) {
 }
 
 template<typename Func>
-void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe) {
+void PSParallelCompact::adjust_in_space_helper(SpaceId id, Atomic<uint>* claim_counter, Func&& on_stripe) {
   MutableSpace* sp = PSParallelCompact::space(id);
   HeapWord* const bottom = sp->bottom();
   HeapWord* const top = sp->top();
@@ -1307,7 +1310,7 @@ void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_
   const size_t stripe_size = num_regions_per_stripe * region_size;
 
   while (true) {
-    uint counter = AtomicAccess::fetch_then_add(claim_counter, num_regions_per_stripe);
+    uint counter = claim_counter->fetch_then_add(num_regions_per_stripe);
     HeapWord* cur_stripe = bottom + counter * region_size;
     if (cur_stripe >= top) {
       break;
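The claim loop above is a standard work-distribution idiom: each worker atomically bumps a shared counter, and the pre-increment value it receives names a disjoint stripe. A self-contained sketch of the shape of that loop, modeled with `std::atomic` (the bounds check and names are illustrative, not the HotSpot code):

```cpp
#include <atomic>
#include <cstdint>

// fetch_then_add in HotSpot returns the value *before* the addition, like
// std::atomic's fetch_add, so concurrent workers receive disjoint,
// monotonically increasing stripe indices.
void process_stripes(std::atomic<uint32_t>& claim_counter,
                     uint32_t num_regions_per_stripe,
                     uint32_t total_regions) {
  while (true) {
    uint32_t counter = claim_counter.fetch_add(num_regions_per_stripe,
                                               std::memory_order_relaxed);
    if (counter >= total_regions) {
      break;  // every stripe has been handed out
    }
    // ... process regions [counter, counter + num_regions_per_stripe) ...
  }
}
```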
@@ -1317,7 +1320,7 @@ void PSParallelCompact::adjust_in_space_helper(SpaceId id, volatile uint* claim_
   }
 }
 
-void PSParallelCompact::adjust_in_old_space(volatile uint* claim_counter) {
+void PSParallelCompact::adjust_in_old_space(Atomic<uint>* claim_counter) {
   // Regions in old-space shouldn't be split.
   assert(!_space_info[old_space_id].split_info().is_valid(), "inv");
 
@@ -1348,7 +1351,7 @@ void PSParallelCompact::adjust_in_old_space(volatile uint* claim_counter) {
   });
 }
 
-void PSParallelCompact::adjust_in_young_space(SpaceId id, volatile uint* claim_counter) {
+void PSParallelCompact::adjust_in_young_space(SpaceId id, Atomic<uint>* claim_counter) {
   adjust_in_space_helper(id, claim_counter, [](HeapWord* stripe_start, HeapWord* stripe_end) {
     HeapWord* obj_start = stripe_start;
     while (obj_start < stripe_end) {
@@ -1362,7 +1365,7 @@ void PSParallelCompact::adjust_in_young_space(SpaceId id, volatile uint* claim_c
   });
 }
 
-void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counters) {
+void PSParallelCompact::adjust_pointers_in_spaces(uint worker_id, Atomic<uint>* claim_counters) {
   auto start_time = Ticks::now();
   adjust_in_old_space(&claim_counters[0]);
   for (uint id = eden_space_id; id < last_space_id; ++id) {
@@ -1376,12 +1379,12 @@ class PSAdjustTask final : public WorkerTask {
   WeakProcessor::Task _weak_proc_task;
   OopStorageSetStrongParState<false, false> _oop_storage_iter;
   uint _nworkers;
-  volatile bool _code_cache_claimed;
-  volatile uint _claim_counters[PSParallelCompact::last_space_id] = {};
+  Atomic<bool> _code_cache_claimed;
+  Atomic<uint> _claim_counters[PSParallelCompact::last_space_id];
 
   bool try_claim_code_cache_task() {
-    return AtomicAccess::load(&_code_cache_claimed) == false
-        && AtomicAccess::cmpxchg(&_code_cache_claimed, false, true) == false;
+    return _code_cache_claimed.load_relaxed() == false
+        && _code_cache_claimed.compare_set(false, true);
   }
 
 public:
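`try_claim_code_cache_task()` is the usual test-then-CAS claim: the relaxed load cheaply filters out the common already-claimed case, and only a thread that still observes `false` pays for the compare-and-swap, which exactly one thread can win. Modeled with `std::atomic` (hypothetical helper name):

```cpp
#include <atomic>

// The relaxed load is a fast-path check; threads that see `true` return
// immediately without contending on the CAS.
bool try_claim(std::atomic<bool>& claimed) {
  if (claimed.load(std::memory_order_relaxed)) {
    return false;               // already claimed, skip the CAS
  }
  bool expected = false;
  return claimed.compare_exchange_strong(expected, true);  // one winner
}
```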
@@ -1393,6 +1396,9 @@ public:
     _nworkers(nworkers),
     _code_cache_claimed(false) {
 
+    for (unsigned int i = PSParallelCompact::old_space_id; i < PSParallelCompact::last_space_id; ++i) {
+      ::new (&_claim_counters[i]) Atomic<uint>{};
+    }
     ClassLoaderDataGraph::verify_claimed_marks_cleared(ClassLoaderData::_claim_stw_fullgc_adjust);
   }
 
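The braced placement-new form matters here: for a trivially constructible wrapper, `::new (p) T{}` value-initializes (zeroes) the object, while `::new (p) T` would default-initialize it and could leave the payload indeterminate. A minimal illustration with a hypothetical stand-in type (assuming the counters rely on value-initialization for their cleared state, as the braces suggest):

```cpp
#include <new>

struct Counter { unsigned v; };  // stand-in for a trivial Atomic<uint>

void reinit(Counter* p) {
  ::new (p) Counter{};  // value-initialized: v == 0 is guaranteed
  // ::new (p) Counter; // default-initialized: v would be indeterminate
}
```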
src/hotspot/share/gc/parallel/psParallelCompact.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2025, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2026, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@
 #include "gc/shared/referenceProcessor.hpp"
 #include "gc/shared/taskTerminator.hpp"
 #include "oops/oop.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
 
 class ParallelScavengeHeap;
@@ -236,7 +236,7 @@ public:
   // in this region (words). This does not include the partial object
   // extending onto the region (if any), or the part of an object that extends
   // onto the next region (if any).
-  size_t live_obj_size() const { return _dc_and_los & los_mask; }
+  size_t live_obj_size() const { return dc_and_los() & los_mask; }
 
   // Total live data that lies within the region (words).
   size_t data_size() const { return partial_obj_size() + live_obj_size(); }
@@ -268,9 +268,9 @@ public:
   // Minor subtlety: claimed() returns true if the region is marked
   // completed(), which is desirable since a region must be claimed before it
   // can be completed.
-  bool available() const { return _dc_and_los < dc_one; }
-  bool claimed() const { return _dc_and_los >= dc_claimed; }
-  bool completed() const { return _dc_and_los >= dc_completed; }
+  bool available() const { return dc_and_los() < dc_one; }
+  bool claimed() const { return dc_and_los() >= dc_claimed; }
+  bool completed() const { return dc_and_los() >= dc_completed; }
 
   // These are not atomic.
   void set_destination(HeapWord* addr) { _destination = addr; }
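Because the destination count sits in the most significant bits of `_dc_and_los` and the live-object size in the low bits, a single unsigned comparison of the whole word classifies a region: any count, claimed mark, or completed mark dominates the size bits. A sketch of the encoding; the concrete shift and sentinel values below are assumptions modeled on this header, and only their relative ordering matters for the predicates:

```cpp
#include <cstdint>

using region_sz_t = uint32_t;

// Illustrative layout: count/state in the high bits, size in the low bits.
constexpr int         dc_shift     = 27;
constexpr region_sz_t dc_mask      = ~region_sz_t(0) << dc_shift;
constexpr region_sz_t los_mask     = ~dc_mask;
constexpr region_sz_t dc_one       = region_sz_t(1) << dc_shift;
constexpr region_sz_t dc_claimed   = region_sz_t(0x8) << dc_shift;
constexpr region_sz_t dc_completed = region_sz_t(0xc) << dc_shift;

// One load, one compare: the high bits dominate any live-obj size.
bool available(region_sz_t v) { return v <  dc_one; }      // count == 0
bool claimed  (region_sz_t v) { return v >= dc_claimed; }
bool completed(region_sz_t v) { return v >= dc_completed; }
```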
@@ -315,7 +315,7 @@ public:
   // Return to the normal path here
   inline void shadow_to_normal();
 
-  int shadow_state() { return _shadow_state; }
+  int shadow_state() { return _shadow_state.load_relaxed(); }
 
   bool is_clear();
 
@@ -339,9 +339,10 @@ public:
   size_t _source_region;
   HeapWord* _partial_obj_addr;
   region_sz_t _partial_obj_size;
-  region_sz_t volatile _dc_and_los;
-  int volatile _shadow_state;
+  Atomic<region_sz_t> _dc_and_los;
+  Atomic<int> _shadow_state;
 
+  region_sz_t dc_and_los() const { return _dc_and_los.load_relaxed(); }
 #ifdef ASSERT
 public:
   uint _pushed;   // 0 until region is pushed onto a stack
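This hunk shows the shape of the whole change: a raw `volatile` field plus free `AtomicAccess::` calls becomes a typed wrapper whose member functions name the access, and the new private `dc_and_los()` accessor centralizes the relaxed load for the read-side call sites. A rough model of the wrapper surface as this patch uses it, built on `std::atomic` (a sketch of the idea, not HotSpot's actual `Atomic<T>` definition):

```cpp
#include <atomic>

template <typename T>
class AtomicModel {
  std::atomic<T> _value{};
 public:
  T    load_relaxed() const  { return _value.load(std::memory_order_relaxed); }
  void store_relaxed(T v)    { _value.store(v, std::memory_order_relaxed); }
  T    fetch_then_add(T v)   { return _value.fetch_add(v); }        // old value
  T    add_then_fetch(T v)   { return _value.fetch_add(v) + v; }    // new value
  bool compare_set(T expected, T desired) {                         // success flag
    return _value.compare_exchange_strong(expected, desired);
  }
  T compare_exchange(T expected, T desired) {                       // prior value
    // On failure `expected` is overwritten with the observed value; on
    // success it already equals it, so this returns the prior value either way.
    _value.compare_exchange_strong(expected, desired);
    return expected;
  }
};
```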
@@ -411,7 +412,7 @@ private:
 inline uint
 ParallelCompactData::RegionData::destination_count_raw() const
 {
-  return _dc_and_los & dc_mask;
+  return dc_and_los() & dc_mask;
 }
 
 inline uint
@@ -425,26 +426,26 @@ ParallelCompactData::RegionData::set_destination_count(uint count)
 {
   assert(count <= (dc_completed >> dc_shift), "count too large");
   const region_sz_t live_sz = (region_sz_t) live_obj_size();
-  _dc_and_los = (count << dc_shift) | live_sz;
+  _dc_and_los.store_relaxed((count << dc_shift) | live_sz);
 }
 
 inline void ParallelCompactData::RegionData::set_live_obj_size(size_t words)
 {
   assert(words <= los_mask, "would overflow");
-  _dc_and_los = destination_count_raw() | (region_sz_t)words;
+  _dc_and_los.store_relaxed(destination_count_raw() | (region_sz_t)words);
 }
 
 inline void ParallelCompactData::RegionData::decrement_destination_count()
 {
-  assert(_dc_and_los < dc_claimed, "already claimed");
-  assert(_dc_and_los >= dc_one, "count would go negative");
-  AtomicAccess::add(&_dc_and_los, dc_mask);
+  assert(dc_and_los() < dc_claimed, "already claimed");
+  assert(dc_and_los() >= dc_one, "count would go negative");
+  _dc_and_los.add_then_fetch(dc_mask);
 }
 
 inline void ParallelCompactData::RegionData::set_completed()
 {
   assert(claimed(), "must be claimed first");
-  _dc_and_los = dc_completed | (region_sz_t) live_obj_size();
+  _dc_and_los.store_relaxed(dc_completed | (region_sz_t) live_obj_size());
 }
 
 // MT-unsafe claiming of a region. Should only be used during single threaded
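The `add_then_fetch(dc_mask)` in `decrement_destination_count()` looks odd but is two's-complement subtraction: with the count field in the top bits, `dc_mask == 2^32 - dc_one`, i.e. `-dc_one` modulo 2^32, so a single atomic add decrements the count by exactly one without disturbing the low size bits (the asserts above guarantee no borrow into them). A compile-time check of that identity, using the same assumed constants as the earlier sketch:

```cpp
#include <cstdint>

using region_sz_t = uint32_t;
constexpr int         dc_shift = 27;  // assumed, as above
constexpr region_sz_t dc_one   = region_sz_t(1) << dc_shift;
constexpr region_sz_t dc_mask  = ~region_sz_t(0) << dc_shift;

// Adding dc_mask wraps around to subtracting dc_one (mod 2^32), so an
// atomic add of dc_mask decrements the destination-count field.
static_assert(dc_mask == region_sz_t(0) - dc_one,
              "add(dc_mask) == subtract(dc_one) in modular arithmetic");
```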
@@ -452,7 +453,7 @@ inline void ParallelCompactData::RegionData::set_completed()
 inline bool ParallelCompactData::RegionData::claim_unsafe()
 {
   if (available()) {
-    _dc_and_los |= dc_claimed;
+    _dc_and_los.store_relaxed(dc_and_los() | dc_claimed);
     return true;
   }
   return false;
@@ -461,36 +462,35 @@ inline bool ParallelCompactData::RegionData::claim_unsafe()
 inline void ParallelCompactData::RegionData::add_live_obj(size_t words)
 {
   assert(words <= (size_t)los_mask - live_obj_size(), "overflow");
-  AtomicAccess::add(&_dc_and_los, static_cast<region_sz_t>(words));
+  _dc_and_los.add_then_fetch(static_cast<region_sz_t>(words));
 }
 
 inline bool ParallelCompactData::RegionData::claim()
 {
   const region_sz_t los = static_cast<region_sz_t>(live_obj_size());
-  const region_sz_t old = AtomicAccess::cmpxchg(&_dc_and_los, los, dc_claimed | los);
-  return old == los;
+  return _dc_and_los.compare_set(los, dc_claimed | los);
 }
 
 inline bool ParallelCompactData::RegionData::mark_normal() {
-  return AtomicAccess::cmpxchg(&_shadow_state, UnusedRegion, NormalRegion) == UnusedRegion;
+  return _shadow_state.compare_set(UnusedRegion, NormalRegion);
 }
 
 inline bool ParallelCompactData::RegionData::mark_shadow() {
-  if (_shadow_state != UnusedRegion) return false;
-  return AtomicAccess::cmpxchg(&_shadow_state, UnusedRegion, ShadowRegion) == UnusedRegion;
+  if (shadow_state() != UnusedRegion) return false;
+  return _shadow_state.compare_set(UnusedRegion, ShadowRegion);
 }
 
 inline void ParallelCompactData::RegionData::mark_filled() {
-  int old = AtomicAccess::cmpxchg(&_shadow_state, ShadowRegion, FilledShadow);
+  int old = _shadow_state.compare_exchange(ShadowRegion, FilledShadow);
   assert(old == ShadowRegion, "Fail to mark the region as filled");
 }
 
 inline bool ParallelCompactData::RegionData::mark_copied() {
-  return AtomicAccess::cmpxchg(&_shadow_state, FilledShadow, CopiedShadow) == FilledShadow;
+  return _shadow_state.compare_set(FilledShadow, CopiedShadow);
 }
 
 void ParallelCompactData::RegionData::shadow_to_normal() {
-  int old = AtomicAccess::cmpxchg(&_shadow_state, ShadowRegion, NormalRegion);
+  int old = _shadow_state.compare_exchange(ShadowRegion, NormalRegion);
   assert(old == ShadowRegion, "Fail to mark the region as finish");
 }
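Every shadow-region transition above goes through a CAS, so racing workers agree on a single winner; the API split is that `compare_set` reports success as a bool, while `compare_exchange` returns the prior value, which the asserts use for diagnostics. A compact model of the state machine (state names from the source, everything else illustrative):

```cpp
#include <atomic>
#include <cassert>

// States from the source; the numeric values here are illustrative.
// UnusedRegion -> {NormalRegion | ShadowRegion}; a shadow region advances
// ShadowRegion -> FilledShadow -> CopiedShadow, or falls back to NormalRegion.
enum ShadowState { UnusedRegion, ShadowRegion, FilledShadow, CopiedShadow, NormalRegion };

// compare_set-style transition: true iff `state` moved from `from` to `to`.
bool transition(std::atomic<int>& state, int from, int to) {
  return state.compare_exchange_strong(from, to);
}

void demo(std::atomic<int>& state) {
  if (transition(state, UnusedRegion, ShadowRegion)) {            // mark_shadow()
    bool filled = transition(state, ShadowRegion, FilledShadow);  // mark_filled()
    assert(filled && "only the filling worker reaches this transition");
    (void)filled;
  }
}
```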
@@ -764,13 +764,13 @@ public:
   static bool invoke(bool clear_all_soft_refs, bool should_do_max_compaction);
 
   template<typename Func>
-  static void adjust_in_space_helper(SpaceId id, volatile uint* claim_counter, Func&& on_stripe);
+  static void adjust_in_space_helper(SpaceId id, Atomic<uint>* claim_counter, Func&& on_stripe);
 
-  static void adjust_in_old_space(volatile uint* claim_counter);
+  static void adjust_in_old_space(Atomic<uint>* claim_counter);
 
-  static void adjust_in_young_space(SpaceId id, volatile uint* claim_counter);
+  static void adjust_in_young_space(SpaceId id, Atomic<uint>* claim_counter);
 
-  static void adjust_pointers_in_spaces(uint worker_id, volatile uint* claim_counter);
+  static void adjust_pointers_in_spaces(uint worker_id, Atomic<uint>* claim_counter);
 
   static void post_initialize();
   // Perform initialization for PSParallelCompact that requires