Hi all,

  please review this change to use `Atomic<T>` instead of `AtomicAccess` in `CardTableBarrierSet` and its subclasses. Since this modifies `CardTableBarrierSet::_card_table`, the change has some fan-out.
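
  As a rough illustration of the pattern (not HotSpot's actual `runtime/atomic.hpp` implementation), the sketch below models an `Atomic<T>` field with relaxed accessors on top of `std::atomic`. The `CardTableLike`/`BarrierSetLike` names are made up for the example; only the `load_relaxed()`/`store_relaxed()` accessor shape and the swap mirror what the patch does.

```cpp
// Minimal standalone sketch of the field-wrapping pattern. The Atomic<T>
// here is a toy built on std::atomic with relaxed ordering; it is not the
// HotSpot class, and CardTableLike/BarrierSetLike are illustrative stand-ins.
#include <atomic>
#include <cstdio>

template <typename T>
class Atomic {
  std::atomic<T> _value;
public:
  explicit Atomic(T v) : _value(v) {}
  T load_relaxed() const { return _value.load(std::memory_order_relaxed); }
  void store_relaxed(T v) { _value.store(v, std::memory_order_relaxed); }
};

struct CardTableLike { const char* name; };

class BarrierSetLike {
  // Before: a plain CardTableLike* field read via AtomicAccess-style calls.
  // After: the field itself is Atomic<T>, so every access has to go through
  // load_relaxed()/store_relaxed() and cannot be a plain non-atomic read.
  Atomic<CardTableLike*> _card_table;
  Atomic<CardTableLike*> _refinement_table;
public:
  BarrierSetLike(CardTableLike* ct, CardTableLike* rt)
    : _card_table(ct), _refinement_table(rt) {}

  CardTableLike* card_table() const { return _card_table.load_relaxed(); }
  CardTableLike* refinement_table() const { return _refinement_table.load_relaxed(); }

  // Mirrors swap_global_card_table(): swap the two references without
  // further synchronization.
  void swap_tables() {
    CardTableLike* temp = card_table();
    _card_table.store_relaxed(refinement_table());
    _refinement_table.store_relaxed(temp);
  }
};

int main() {
  CardTableLike card{"card"}, refinement{"refinement"};
  BarrierSetLike bs(&card, &refinement);
  bs.swap_tables();
  std::printf("card_table is now: %s\n", bs.card_table()->name); // prints "refinement"
  return 0;
}
```

  The practical effect of the type change is that a raw `_card_table` access no longer compiles as a plain pointer read; all reads and writes are funneled through the explicit relaxed accessors.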

Testing: gha

Thanks,
  Thomas
Thomas Schatzl 2026-01-20 09:11:54 +01:00
parent 66e950e9b6
commit 75013a45ec
7 changed files with 34 additions and 29 deletions

View File

@ -1,5 +1,5 @@
/*
-* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,13 +64,13 @@ G1BarrierSet::G1BarrierSet(G1CardTable* card_table,
{}
G1BarrierSet::~G1BarrierSet() {
-delete _refinement_table;
+delete refinement_table();
}
void G1BarrierSet::swap_global_card_table() {
-G1CardTable* temp = static_cast<G1CardTable*>(_card_table);
-_card_table = _refinement_table;
-_refinement_table = temp;
+G1CardTable* temp = static_cast<G1CardTable*>(card_table());
+_card_table.store_relaxed(refinement_table());
+_refinement_table.store_relaxed(temp);
}
void G1BarrierSet::update_card_table_base(Thread* thread) {
@ -80,7 +80,7 @@ void G1BarrierSet::update_card_table_base(Thread* thread) {
assert(thread->is_Java_thread(), "may only update card table base of JavaThreads, not %s", thread->name());
}
#endif
-G1ThreadLocalData::set_byte_map_base(thread, _card_table->byte_map_base());
+G1ThreadLocalData::set_byte_map_base(thread, card_table()->byte_map_base());
}
template <class T> void
@ -135,10 +135,10 @@ void G1BarrierSet::write_region(MemRegion mr) {
// marks next time.
// If we write to the old card table (after the switching, then the refinement
// table) the oncoming handshake will do the memory synchronization.
-CardTable* card_table = AtomicAccess::load(&_card_table);
+CardTable* local_card_table = card_table();
-volatile CardValue* byte = card_table->byte_for(mr.start());
-CardValue* last_byte = card_table->byte_for(mr.last());
+volatile CardValue* byte = local_card_table->byte_for(mr.start());
+CardValue* last_byte = local_card_table->byte_for(mr.last());
// Dirty cards only if necessary.
for (; byte <= last_byte; byte++) {
@ -190,6 +190,6 @@ void G1BarrierSet::on_thread_detach(Thread* thread) {
}
void G1BarrierSet::print_on(outputStream* st) const {
-_card_table->print_on(st, "Card");
-_refinement_table->print_on(st, "Refinement");
+card_table()->print_on(st, "Card");
+refinement_table()->print_on(st, "Refinement");
}

View File

@ -1,5 +1,5 @@
/*
-* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2001, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,6 +29,7 @@
#include "gc/shared/bufferNode.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/cardTableBarrierSet.hpp"
#include "runtime/atomic.hpp"
class G1CardTable;
class Thread;
@ -66,7 +67,7 @@ class G1BarrierSet: public CardTableBarrierSet {
BufferNode::Allocator _satb_mark_queue_buffer_allocator;
G1SATBMarkQueueSet _satb_mark_queue_set;
-G1CardTable* _refinement_table;
+Atomic<G1CardTable*> _refinement_table;
public:
G1BarrierSet(G1CardTable* card_table, G1CardTable* refinement_table);
@ -76,7 +77,7 @@ class G1BarrierSet: public CardTableBarrierSet {
return barrier_set_cast<G1BarrierSet>(BarrierSet::barrier_set());
}
-G1CardTable* refinement_table() const { return _refinement_table; }
+G1CardTable* refinement_table() const { return _refinement_table.load_relaxed(); }
// Swap the global card table references, without synchronization.
void swap_global_card_table();

View File

@ -1,5 +1,5 @@
/*
-* Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2016, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,8 +73,8 @@ inline void G1BarrierSet::write_ref_field_post(T* field) {
// Make sure that the card table reference is read only once. Otherwise the compiler
// might reload that value in the two accesses below, that could cause writes to
// the wrong card table.
-CardTable* card_table = AtomicAccess::load(&_card_table);
-CardValue* byte = card_table->byte_for(field);
+CardTable* local_card_table = card_table();
+CardValue* byte = local_card_table->byte_for(field);
if (*byte == G1CardTable::clean_card_val()) {
*byte = G1CardTable::dirty_card_val();
}

View File

@ -1,5 +1,5 @@
/*
-* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -73,15 +73,15 @@ CardTableBarrierSet::CardTableBarrierSet(CardTable* card_table) :
{}
CardTableBarrierSet::~CardTableBarrierSet() {
-delete _card_table;
+delete card_table();
}
void CardTableBarrierSet::write_region(MemRegion mr) {
-_card_table->dirty_MemRegion(mr);
+card_table()->dirty_MemRegion(mr);
}
void CardTableBarrierSet::print_on(outputStream* st) const {
-_card_table->print_on(st);
+card_table()->print_on(st);
}
// Helper for ReduceInitialCardMarks. For performance,
@ -116,7 +116,7 @@ void CardTableBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop ne
if (!ReduceInitialCardMarks) {
return;
}
-if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
+if (new_obj->is_typeArray() || card_table()->is_in_young(new_obj)) {
// Arrays of non-references don't need a post-barrier.
} else {
MemRegion mr(cast_from_oop<HeapWord*>(new_obj), new_obj->size());

View File

@ -1,5 +1,5 @@
/*
-* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,7 @@
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/cardTable.hpp"
#include "memory/memRegion.hpp"
#include "runtime/atomic.hpp"
#include "utilities/align.hpp"
// This kind of "BarrierSet" allows a "CollectedHeap" to detect and
@ -48,7 +49,7 @@ class CardTableBarrierSet: public BarrierSet {
protected:
typedef CardTable::CardValue CardValue;
-CardTable* _card_table;
+Atomic<CardTable*> _card_table;
CardTableBarrierSet(BarrierSetAssembler* barrier_set_assembler,
BarrierSetC1* barrier_set_c1,
@ -85,7 +86,8 @@ public:
// at the address "start", which may not necessarily be HeapWord-aligned
inline void write_ref_array(HeapWord* start, size_t count);
-CardTable* card_table() const { return _card_table; }
+CardTable* card_table() { return _card_table.load_relaxed(); }
+CardTable* card_table() const { return _card_table.load_relaxed(); }
virtual void on_slowpath_allocation_exit(JavaThread* thread, oop new_obj);

View File

@ -1,5 +1,5 @@
/*
-* Copyright (c) 2000, 2025, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2000, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@
template <DecoratorSet decorators, typename T>
inline void CardTableBarrierSet::write_ref_field_post(T* field) {
-volatile CardValue* byte = _card_table->byte_for(field);
+volatile CardValue* byte = card_table()->byte_for(field);
*byte = CardTable::dirty_card_val();
}

View File

@ -1,5 +1,5 @@
/*
-* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
+* Copyright (c) 2018, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,6 +48,7 @@
#if INCLUDE_ZGC
#include "gc/z/vmStructs_z.hpp"
#endif
#include "runtime/atomic.hpp"
#define VM_STRUCTS_GC(nonstatic_field, \
volatile_static_field, \
@ -88,7 +89,7 @@
nonstatic_field(CardTable, _byte_map_size, const size_t) \
nonstatic_field(CardTable, _byte_map, CardTable::CardValue*) \
nonstatic_field(CardTable, _byte_map_base, CardTable::CardValue*) \
-nonstatic_field(CardTableBarrierSet, _card_table, CardTable*) \
+nonstatic_field(CardTableBarrierSet, _card_table, Atomic<CardTable*>) \
\
static_field(CollectedHeap, _lab_alignment_reserve, size_t) \
nonstatic_field(CollectedHeap, _reserved, MemRegion) \
@ -149,6 +150,7 @@
\
declare_toplevel_type(BarrierSet*) \
declare_toplevel_type(CardTable*) \
+declare_toplevel_type(Atomic<CardTable*>) \
declare_toplevel_type(CardTable*const) \
declare_toplevel_type(CardTableBarrierSet*) \
declare_toplevel_type(CardTableBarrierSet**) \