8382636: Shenandoah: Use self-forwarding to handle OOM during evacuation

Reviewed-by: wkemper, kdnilsen
This commit is contained in:
Roman Kennke 2026-04-30 20:26:44 +00:00
parent edff559cef
commit 02ad0712a8
29 changed files with 242 additions and 596 deletions

View File

@ -44,6 +44,16 @@ void ShenandoahArguments::initialize() {
vm_exit_during_initialization("Shenandoah GC is not supported on this platform.");
#endif
// Shenandoah relies on the object header bits (including the self-forwarded bit
// at markWord::self_fwd_mask_in_place) being preserved across monitor inflation,
// which only holds with UseObjectMonitorTable.
if (!UseObjectMonitorTable) {
if (FLAG_IS_CMDLINE(UseObjectMonitorTable)) {
vm_exit_during_initialization("Shenandoah requires UseObjectMonitorTable");
}
FLAG_SET_DEFAULT(UseObjectMonitorTable, true);
}
#if 0 // leave this block as stepping stone for future platforms
log_warning(gc)("Shenandoah GC is not fully supported on this platform:");
log_warning(gc)(" concurrent modes are not supported, only STW cycles are enabled;");

View File

@ -34,7 +34,6 @@
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahCardTable.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahGeneration.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
@ -102,7 +101,6 @@ inline oop ShenandoahBarrierSet::load_reference_barrier_mutator(oop obj, T* load
if (obj == fwd) {
assert(_heap->is_evacuation_in_progress(), "evac should be in progress");
Thread* const t = Thread::current();
ShenandoahEvacOOMScope scope(t);
fwd = _heap->evacuate_object(obj, t);
}
@ -124,7 +122,6 @@ inline oop ShenandoahBarrierSet::load_reference_barrier(oop obj) {
oop fwd = resolve_forwarded_not_null(obj);
if (obj == fwd && _heap->is_evacuation_in_progress()) {
Thread* t = Thread::current();
ShenandoahEvacOOMScope oom_evac_scope(t);
return _heap->evacuate_object(obj, t);
}
return fwd;
@ -511,7 +508,6 @@ template <class T>
void ShenandoahBarrierSet::arraycopy_evacuation(T* src, size_t count) {
assert(_heap->is_evacuation_in_progress(), "only during evacuation");
if (need_bulk_update(reinterpret_cast<HeapWord*>(src))) {
ShenandoahEvacOOMScope oom_evac;
arraycopy_work<T, true, true, false>(src, count);
}
}

View File

@ -29,7 +29,6 @@
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.inline.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.hpp"
@ -77,7 +76,6 @@ public:
void ShenandoahBarrierSet::clone_evacuation(oop obj) {
assert(_heap->is_evacuation_in_progress(), "only during evacuation");
if (need_bulk_update(cast_from_oop<HeapWord*>(obj))) {
ShenandoahEvacOOMScope oom_evac_scope;
ShenandoahUpdateRefsForOopClosure</* has_fwd = */ true, /* evac = */ true, /* enqueue */ false> cl;
obj->oop_iterate(&cl);
}

View File

@ -30,7 +30,6 @@
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
@ -132,22 +131,12 @@ void ShenandoahKeepAliveClosure::do_oop_work(T* p) {
template <bool CONCURRENT, bool STABLE_THREAD>
void ShenandoahEvacuateUpdateRootClosureBase<CONCURRENT, STABLE_THREAD>::do_oop(oop* p) {
if (CONCURRENT) {
ShenandoahEvacOOMScope scope;
do_oop_work(p);
} else {
do_oop_work(p);
}
do_oop_work(p);
}
template <bool CONCURRENT, bool STABLE_THREAD>
void ShenandoahEvacuateUpdateRootClosureBase<CONCURRENT, STABLE_THREAD>::do_oop(narrowOop* p) {
if (CONCURRENT) {
ShenandoahEvacOOMScope scope;
do_oop_work(p);
} else {
do_oop_work(p);
}
do_oop_work(p);
}
template <bool CONCURRENT, bool STABLE_THREAD>

View File

@ -27,7 +27,6 @@
#include "code/nmethod.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
@ -117,7 +116,6 @@ public:
// Heal oops
if (_bs->is_armed(nm)) {
ShenandoahEvacOOMScope oom_evac_scope;
ShenandoahNMethod::heal_nmethod_metadata(nm_data);
// Must remain armed to complete remaining work in nmethod entry barrier
assert(_bs->is_armed(nm), "Should remain armed");

View File

@ -854,8 +854,6 @@ public:
}
void work(uint worker_id) override {
// ShenandoahEvacOOMScope has to be setup by ShenandoahContextEvacuateUpdateRootsClosure.
// Otherwise, may deadlock with watermark lock
ShenandoahContextEvacuateUpdateRootsClosure oops_cl;
ShenandoahConcurrentEvacThreadClosure thr_cl(&oops_cl);
_java_threads.threads_do(&thr_cl, worker_id);
@ -969,9 +967,8 @@ public:
void work(uint worker_id) override {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahSuspendibleThreadSetJoiner sts_join;
SuspendibleThreadSetJoiner sts_join;
{
ShenandoahEvacOOMScope oom;
// jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
// may race against OopStorage::release() calls.
ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl(_generation);
@ -1044,9 +1041,6 @@ public:
void do_nmethod(nmethod* n) {
ShenandoahNMethod* data = ShenandoahNMethod::gc_data(n);
ShenandoahNMethodLocker locker(data->lock());
// Setup EvacOOM scope below reentrant lock to avoid deadlock with
// nmethod_entry_barrier
ShenandoahEvacOOMScope oom;
data->oops_do(&_cl, /* fix_relocations = */ true);
ShenandoahNMethod::disarm_nmethod(n);
}
@ -1071,7 +1065,6 @@ public:
void work(uint worker_id) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
{
ShenandoahEvacOOMScope oom;
{
// vm_roots and weak_roots are OopStorage backed roots, concurrent iteration
// may race against OopStorage::release() calls.
@ -1086,7 +1079,6 @@ public:
}
}
// Cannot setup ShenandoahEvacOOMScope here, due to potential deadlock with nmethod_entry_barrier.
if (!ShenandoahHeap::heap()->unload_classes()) {
ShenandoahWorkerTimingsTracker timer(_phase, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
ShenandoahEvacUpdateCodeCacheClosure cl;

View File

@ -58,7 +58,7 @@ public:
void work(uint worker_id) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahWorkerTimingsTracker timer(ShenandoahPhaseTimings::conc_mark, ShenandoahPhaseTimings::ParallelMark, worker_id, true);
ShenandoahSuspendibleThreadSetJoiner stsj;
SuspendibleThreadSetJoiner stsj;
StringDedup::Requests requests;
_cm->mark_loop(worker_id, _terminator, GENERATION, true /*cancellable*/,
ShenandoahStringDedup::is_enabled() ? ENQUEUE_DEDUP : NO_DEDUP,

View File

@ -96,6 +96,16 @@ void ShenandoahDegenGC::op_degenerated() {
// some phase, we have to upgrade the Degenerate GC to Full GC.
heap->clear_cancelled_gc();
// If we degenerated from evacuation or update-refs, some objects in cset may
// have been self-forwarded by the failing thread. Clear those marks now so
// the remainder of this cycle (re-evac, update-refs, verification) sees a
// clean forwarding state.
if (_degen_point == ShenandoahDegenPoint::_degenerated_evac ||
_degen_point == ShenandoahDegenPoint::_degenerated_update_refs) {
ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_un_self_forward);
heap->un_self_forward_cset_regions();
}
// If it's passive mode with ShenandoahCardBarrier turned on: clean the write table
// without swapping the tables since no scan happens in passive mode anyway
if (ShenandoahCardBarrier && !heap->mode()->is_generational()) {
@ -305,6 +315,8 @@ void ShenandoahDegenGC::op_degenerated() {
ShouldNotReachHere();
}
DEBUG_ONLY(heap->assert_no_self_forwards());
if (ShenandoahVerify) {
heap->verifier()->verify_after_degenerated(_generation);
}

View File

@ -1,190 +0,0 @@
/*
* Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "gc/shenandoah/shenandoahEvacOOMHandler.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/os.hpp"
const jint ShenandoahEvacOOMCounter::OOM_MARKER_MASK = 0x80000000;
ShenandoahEvacOOMCounter::ShenandoahEvacOOMCounter() :
_bits(0) {
}
void ShenandoahEvacOOMCounter::decrement() {
assert(unmasked_count() > 0, "sanity");
// NOTE: It's ok to simply decrement, even with mask set, because unmasked value is positive.
_bits.fetch_then_sub(1);
}
void ShenandoahEvacOOMCounter::clear() {
assert(unmasked_count() == 0, "sanity");
_bits.release_store_fence((jint)0);
}
void ShenandoahEvacOOMCounter::set_oom_bit(bool decrement) {
jint threads_in_evac = _bits.load_acquire();
while (true) {
jint newval = decrement
? (threads_in_evac - 1) | OOM_MARKER_MASK
: threads_in_evac | OOM_MARKER_MASK;
jint other = _bits.compare_exchange(threads_in_evac, newval);
if (other == threads_in_evac) {
// Success: wait for other threads to get out of the protocol and return.
break;
} else {
// Failure: try again with updated new value.
threads_in_evac = other;
}
}
}
bool ShenandoahEvacOOMCounter::try_increment()
{
jint threads_in_evac = _bits.load_acquire();
while (true) {
// Cannot enter evacuation if OOM_MARKER_MASK is set.
if ((threads_in_evac & OOM_MARKER_MASK) != 0) {
return false;
}
jint other = _bits.compare_exchange(threads_in_evac, threads_in_evac + 1);
if (other == threads_in_evac) {
// Success: caller may safely enter evacuation
return true;
} else {
threads_in_evac = other;
}
}
}
ShenandoahEvacOOMHandler::ShenandoahEvacOOMHandler() :
_num_counters(calc_num_counters()) {
assert(_num_counters > 0, "sanity");
assert(is_power_of_2(_num_counters), "must be");
_threads_in_evac = NEW_C_HEAP_ARRAY(ShenandoahEvacOOMCounter, _num_counters, mtGC);
for (int i = 0; i < _num_counters; i++) {
new (&_threads_in_evac[i]) ShenandoahEvacOOMCounter();
}
}
int ShenandoahEvacOOMHandler::calc_num_counters() {
// Scale the number of counter buckets with the number of CPUs to
// minimise contention. Also make sure the number is a power of two
// so we can map hash values to buckets with a simple mask.
const int nproc = os::active_processor_count();
const int clamped = MAX2(1, MIN2(nproc, 128));
return round_up_power_of_2(clamped);
}
uint64_t ShenandoahEvacOOMHandler::hash_pointer(const void* p) {
// Bit mixing function from MurmurHash3
uint64_t key = (uintptr_t)p;
key ^= (key >> 33);
key *= UINT64_C(0xff51afd7ed558ccd);
key ^= (key >> 33);
key *= UINT64_C(0xc4ceb9fe1a85ec53);
key ^= (key >> 33);
return key;
}
ShenandoahEvacOOMCounter* ShenandoahEvacOOMHandler::counter_for_thread(Thread* t) {
const uint64_t key = hash_pointer(t);
return &_threads_in_evac[key & (_num_counters - 1)];
}
void ShenandoahEvacOOMHandler::wait_for_one_counter(ShenandoahEvacOOMCounter* ptr) {
// We might be racing against handle_out_of_memory_during_evacuation()
// setting the OOM_MARKER_MASK bit so we must make sure it is set here
// *and* the counter is zero.
while (ptr->load_acquire() != ShenandoahEvacOOMCounter::OOM_MARKER_MASK) {
os::naked_short_sleep(1);
}
}
void ShenandoahEvacOOMHandler::wait_for_no_evac_threads() {
// Once the OOM_MARKER_MASK bit is set the counter can only decrease
// so it's safe to check each bucket in turn.
for (int i = 0; i < _num_counters; i++) {
wait_for_one_counter(&_threads_in_evac[i]);
}
// At this point we are sure that no threads can evacuate anything. Raise
// the thread-local oom_during_evac flag to indicate that any attempt
// to evacuate should simply return the forwarding pointer instead (which is safe now).
ShenandoahThreadLocalData::set_oom_during_evac(Thread::current(), true);
}
void ShenandoahEvacOOMHandler::register_thread(Thread* thr) {
assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
ShenandoahEvacOOMCounter* counter = counter_for_thread(thr);
if (!counter->try_increment()) {
// Counter has OOM_MARKER_MASK set, loop until no more threads in evac
wait_for_no_evac_threads();
}
}
void ShenandoahEvacOOMHandler::unregister_thread(Thread* thr) {
if (!ShenandoahThreadLocalData::is_oom_during_evac(thr)) {
counter_for_thread(thr)->decrement();
} else {
// If we get here, the current thread has already gone through the
// OOM-during-evac protocol and has thus either never entered or successfully left
// the evacuation region. Simply flip its TL oom-during-evac flag back off.
ShenandoahThreadLocalData::set_oom_during_evac(thr, false);
}
assert(!ShenandoahThreadLocalData::is_oom_during_evac(thr), "TL oom-during-evac must be turned off");
}
void ShenandoahEvacOOMHandler::handle_out_of_memory_during_evacuation() {
assert(ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "sanity");
assert(!ShenandoahThreadLocalData::is_oom_during_evac(Thread::current()), "TL oom-during-evac must not be set");
ShenandoahEvacOOMCounter* self = counter_for_thread(Thread::current());
assert(self->unmasked_count() > 0, "sanity");
for (int i = 0; i < _num_counters; i++) {
ShenandoahEvacOOMCounter* counter = &_threads_in_evac[i];
counter->set_oom_bit(counter == self);
}
wait_for_no_evac_threads();
}
void ShenandoahEvacOOMHandler::clear() {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
for (int i = 0; i < _num_counters; i++) {
_threads_in_evac[i].clear();
}
}
bool ShenandoahEvacOOMHandler::is_active() {
return ShenandoahThreadLocalData::evac_oom_scope_level(Thread::current()) > 0;
}

View File

@ -1,171 +0,0 @@
/*
* Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP
#include "gc/shenandoah/shenandoahPadding.hpp"
#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "runtime/javaThread.hpp"
#include "utilities/globalDefinitions.hpp"
/**
* Striped counter used to implement the OOM protocol described below.
*/
class ShenandoahEvacOOMCounter {
private:
// Combination of a 31-bit counter and 1-bit OOM marker.
Atomic<jint> _bits;
// This class must be at least a cache line in size to prevent false sharing.
shenandoah_padding_minus_size(0, sizeof(jint));
public:
static const jint OOM_MARKER_MASK;
ShenandoahEvacOOMCounter();
void decrement();
bool try_increment();
void clear();
void set_oom_bit(bool decrement);
inline jint unmasked_count();
inline jint load_acquire();
};
/**
* Provides safe handling of out-of-memory situations during evacuation.
*
* When a Java thread encounters out-of-memory while evacuating an object in a
* load-reference-barrier (i.e. it cannot copy the object to to-space), it does not
* necessarily follow that we can return immediately from the LRB (and store to from-space).
*
* In the very basic case, on such a failure we may wait until the evacuation is over,
* and then resolve the forwarded copy, and do the store there. This is possible
* because other threads might still have space in their GCLABs, and successfully
* evacuate the object.
*
* But, there is a race due to non-atomic evac_in_progress transition. Consider
* thread A is stuck waiting for the evacuation to be over -- it cannot leave with
* from-space copy yet. Control thread drops evacuation_in_progress preparing for
* next STW phase that has to recover from OOME. Thread B misses that update, and
* successfully evacuates the object, does the write to to-copy. But, before
* Thread B is able to install the fwdptr, thread A discovers evac_in_progress is
* down, exits from here, reads the fwdptr, discovers old from-copy, and stores there.
* Thread B then wakes up and installs to-copy. This breaks to-space invariant, and
* silently corrupts the heap: we accepted two writes to separate copies of the object.
*
* The way it is solved here is to maintain a counter of threads inside the
* 'evacuation path'. The 'evacuation path' is the part of evacuation that does the actual
* allocation, copying and CASing of the copy object, and is protected by this
* OOM-during-evac-handler. The handler allows multiple threads to enter and exit
* evacuation path, but on OOME it requires all threads that experienced OOME to wait
* for current threads to leave, and blocks other threads from entering. The counter state
* is striped across multiple cache lines to reduce contention when many threads attempt
* to enter or leave the protocol at the same time.
*
* Detailed state change:
*
* Upon entry of the evac-path, entering thread will attempt to increase the counter,
* using a CAS. Depending on the result of the CAS:
* - success: carry on with evac
* - failure:
* - if offending value is a valid counter, then try again
* - if offending value is OOM-during-evac special value: loop until
* counter drops to 0, then exit with resolving the ptr
*
* Upon exit, exiting thread will decrease the counter using atomic dec.
*
* Upon OOM-during-evac, any thread will attempt to CAS OOM-during-evac
* special value into the counter. Depending on result:
* - success: busy-loop until counter drops to zero, then exit with resolve
* - failure:
* - offender is valid counter update: try again
* - offender is OOM-during-evac: busy loop until counter drops to
* zero, then exit with resolve
*/
class ShenandoahEvacOOMHandler {
private:
const int _num_counters;
shenandoah_padding(0);
ShenandoahEvacOOMCounter* _threads_in_evac;
ShenandoahEvacOOMCounter* counter_for_thread(Thread* t);
void wait_for_no_evac_threads();
void wait_for_one_counter(ShenandoahEvacOOMCounter* ptr);
static uint64_t hash_pointer(const void* p);
static int calc_num_counters();
public:
ShenandoahEvacOOMHandler();
/**
* Attempt to enter the protected evacuation path.
*
* Note that this method returns void. After it returns, it is normally safe to
* continue with evacuation. If the OOM protocol was triggered while entering,
* the thread-local oom-during-evac flag has been set instead: the caller must
* not evacuate, and may safely continue with a simple resolve (if Java thread).
*/
inline void enter_evacuation(Thread* t);
/**
* Leave evacuation path.
*/
inline void leave_evacuation(Thread* t);
/**
* Signal out-of-memory during evacuation. It will prevent any other threads
* from entering the evacuation path, then wait until all threads have left the
* evacuation path, and then return. It is then safe to continue with a simple resolve.
*/
void handle_out_of_memory_during_evacuation();
void clear();
/**
* Returns true if current thread is in evacuation OOM protocol.
*/
static bool is_active();
private:
// Register/Unregister thread to evacuation OOM protocol
void register_thread(Thread* t);
void unregister_thread(Thread* t);
};
class ShenandoahEvacOOMScope : public StackObj {
private:
Thread* const _thread;
public:
inline ShenandoahEvacOOMScope();
inline ShenandoahEvacOOMScope(Thread* t);
inline ~ShenandoahEvacOOMScope();
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_HPP

View File

@ -1,82 +0,0 @@
/*
* Copyright (c) 2020, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_INLINE_HPP
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
jint ShenandoahEvacOOMCounter::load_acquire() {
return _bits.load_acquire();
}
jint ShenandoahEvacOOMCounter::unmasked_count() {
return _bits.load_acquire() & ~OOM_MARKER_MASK;
}
void ShenandoahEvacOOMHandler::enter_evacuation(Thread* thr) {
uint8_t level = ShenandoahThreadLocalData::push_evac_oom_scope(thr);
if (level == 0) {
// Entering top level scope, register this thread.
register_thread(thr);
} else if (!ShenandoahThreadLocalData::is_oom_during_evac(thr)) {
ShenandoahEvacOOMCounter* counter = counter_for_thread(thr);
jint threads_in_evac = counter->load_acquire();
// If OOM is in progress, handle it.
if ((threads_in_evac & ShenandoahEvacOOMCounter::OOM_MARKER_MASK) != 0) {
counter->decrement();
wait_for_no_evac_threads();
}
}
}
void ShenandoahEvacOOMHandler::leave_evacuation(Thread* thr) {
uint8_t level = ShenandoahThreadLocalData::pop_evac_oom_scope(thr);
// Not top level, just return
if (level > 1) {
return;
}
// Leaving top level scope, unregister this thread.
unregister_thread(thr);
}
ShenandoahEvacOOMScope::ShenandoahEvacOOMScope() :
_thread(Thread::current()) {
ShenandoahHeap::heap()->enter_evacuation(_thread);
}
ShenandoahEvacOOMScope::ShenandoahEvacOOMScope(Thread* t) :
_thread(t) {
ShenandoahHeap::heap()->enter_evacuation(_thread);
}
ShenandoahEvacOOMScope::~ShenandoahEvacOOMScope() {
ShenandoahHeap::heap()->leave_evacuation(_thread);
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHEVACOOMHANDLER_INLINE_HPP

View File

@ -25,43 +25,68 @@
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHFORWARDING_HPP
#include "oops/markWord.hpp"
#include "oops/oop.hpp"
#include "utilities/globalDefinitions.hpp"
class ShenandoahForwarding {
public:
/* Gets forwardee from the given object.
/* Gets forwardee from the given object. For a self-forwarded object
* (evacuation failure), returns the object itself.
*/
static inline oop get_forwardee(oop obj);
/* Gets forwardee from the given object. Only from mutator thread.
* For a self-forwarded object, returns the object itself.
*/
static inline oop get_forwardee_mutator(oop obj);
/* Returns the raw value from forwardee slot.
/* Returns the raw value from forwardee slot. For a self-forwarded
* object, returns the object itself.
*/
static inline oop get_forwardee_raw(oop obj);
/* Returns the raw value from forwardee slot without any checks.
* Used for quick verification.
* Used for quick verification. For a self-forwarded object,
* returns the object itself.
*/
static inline oop get_forwardee_raw_unchecked(oop obj);
/**
* Returns true if the object is forwarded, false otherwise.
* Returns true if the object is forwarded (including self-forwarded),
* false otherwise.
*/
static inline bool is_forwarded(oop obj);
/**
* Returns true iff obj has been self-forwarded (i.e. evacuation has
* failed for this object in the current cycle).
*/
static inline bool is_self_forwarded(oop obj);
/* Tries to atomically update forwardee in $holder object to $update.
* Assumes $holder points at itself.
* Asserts $holder is in from-space.
* Asserts $update is in to-space.
*
* Returns the new object 'update' upon success, or
* the new forwardee that a competing thread installed.
* the new forwardee that a competing thread installed. If another
* thread self-forwarded the object, returns the object itself.
*/
static inline oop try_update_forwardee(oop obj, oop update);
/* Tries to atomically self-forward obj. Used by the evacuation path
* when the copy allocation fails: the failing thread installs the
* self-forwarded bit so other threads see the object as "already
* handled" and return it unchanged.
*
* Returns nullptr on success (we installed the self-forward), or
* the winning forwardee when another thread raced ahead (either a
* real forwardee pointing at a copy, or obj itself if the winner
* also self-forwarded).
*/
static inline oop try_forward_to_self(oop obj, markWord old_mark);
static inline size_t size(oop obj);
static inline Klass* klass(oop obj);
};

View File

@ -48,6 +48,8 @@ inline oop ShenandoahForwarding::get_forwardee_raw_unchecked(oop obj) {
return cast_to_oop(fwdptr);
}
}
// Self-forwarded (evacuation failure): the object stays put; the
// self-fwd bit is set alongside normal lock bits.
return obj;
}
@ -61,9 +63,9 @@ inline oop ShenandoahForwarding::get_forwardee_mutator(oop obj) {
HeapWord* fwdptr = (HeapWord*) mark.clear_lock_bits().to_pointer();
assert(fwdptr != nullptr, "Forwarding pointer is never null here");
return cast_to_oop(fwdptr);
} else {
return obj;
}
// Self-forwarded or not forwarded: return the object itself.
return obj;
}
inline oop ShenandoahForwarding::get_forwardee(oop obj) {
@ -72,7 +74,11 @@ inline oop ShenandoahForwarding::get_forwardee(oop obj) {
}
inline bool ShenandoahForwarding::is_forwarded(oop obj) {
return obj->mark().is_marked();
return obj->mark().is_forwarded();
}
inline bool ShenandoahForwarding::is_self_forwarded(oop obj) {
return obj->mark().is_self_forwarded();
}
inline oop ShenandoahForwarding::try_update_forwardee(oop obj, oop update) {
@ -80,14 +86,50 @@ inline oop ShenandoahForwarding::try_update_forwardee(oop obj, oop update) {
if (old_mark.is_marked()) {
return cast_to_oop(old_mark.clear_lock_bits().to_pointer());
}
if (old_mark.is_self_forwarded()) {
// Another thread lost the evacuation race; the object stays put.
return obj;
}
markWord new_mark = markWord::encode_pointer_as_mark(update);
markWord prev_mark = obj->cas_set_mark(new_mark, old_mark, memory_order_conservative);
if (prev_mark == old_mark) {
return update;
} else {
}
// Concurrent writers on a cset object's mark can only be other evacuation
// threads installing forwarding (real or self). Mutators cannot reach the
// mark of a not-yet-forwarded cset object: LRB + stack watermark barriers
// redirect all reference uses before a Java-level operation can touch it.
// So the only possible failure modes are a regular forwardee (marked) or
// a self-forward (possibly with mutator lock/hash mods layered on top
// after the self-forward became visible).
if (prev_mark.is_marked()) {
return cast_to_oop(prev_mark.clear_lock_bits().to_pointer());
}
assert(prev_mark.is_self_forwarded(),
"concurrent writers on cset objects must install forwarding: prev=" INTPTR_FORMAT,
prev_mark.value());
return obj;
}
inline oop ShenandoahForwarding::try_forward_to_self(oop obj, markWord old_mark) {
assert(!old_mark.is_forwarded(),
"caller must pass a non-forwarded mark: old=" INTPTR_FORMAT, old_mark.value());
markWord new_mark = old_mark.set_self_forwarded();
markWord prev_mark = obj->cas_set_mark(new_mark, old_mark, memory_order_conservative);
if (prev_mark == old_mark) {
// We installed the self-forward.
return nullptr;
}
// Same invariant as in try_update_forwardee: the only races on a
// cset object's mark come from other evac threads installing forwarding.
if (prev_mark.is_marked()) {
return cast_to_oop(prev_mark.clear_lock_bits().to_pointer());
}
assert(prev_mark.is_self_forwarded(),
"concurrent writers on cset objects must install forwarding: prev=" INTPTR_FORMAT,
prev_mark.value());
return obj;
}
inline Klass* ShenandoahForwarding::klass(oop obj) {

View File

@ -137,6 +137,15 @@ void ShenandoahFullGC::op_full(GCCause::Cause cause) {
void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
ShenandoahHeap* heap = ShenandoahHeap::heap();
// A full GC may be entered directly, or as an upgrade from a failed
// degenerated GC. In the latter case, self-forwarded objects may be
// present from the failed evacuation. Drain those marks before any phase
// (verify, update_roots, phase1_mark_heap) walks headers.
{
ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_un_self_forward);
heap->un_self_forward_cset_regions();
}
if (heap->mode()->is_generational()) {
ShenandoahGenerationalFullGC::prepare();
}
@ -266,6 +275,8 @@ void ShenandoahFullGC::do_it(GCCause::Cause gc_cause) {
heap->set_full_gc_move_in_progress(false);
heap->set_full_gc_in_progress(false);
DEBUG_ONLY(heap->assert_no_self_forwards());
if (ShenandoahVerify) {
heap->verifier()->verify_after_fullgc(_generation);
}

View File

@ -63,7 +63,7 @@ ShenandoahGenerationalEvacuationTask::ShenandoahGenerationalEvacuationTask(Shena
void ShenandoahGenerationalEvacuationTask::work(uint worker_id) {
if (_concurrent) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahSuspendibleThreadSetJoiner stsj;
SuspendibleThreadSetJoiner stsj;
do_work();
} else {
ShenandoahParallelWorkerSession worker_session(worker_id);
@ -73,7 +73,6 @@ void ShenandoahGenerationalEvacuationTask::work(uint worker_id) {
void ShenandoahGenerationalEvacuationTask::do_work() {
if (_only_promote_regions) {
// No allocations will be made, do not enter oom-during-evac protocol.
assert(_heap->collection_set()->is_empty(), "Should not have a collection set here");
promote_regions();
} else {
@ -122,7 +121,6 @@ void ShenandoahGenerationalEvacuationTask::evacuate_and_promote_regions() {
if (r->is_cset()) {
assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
ShenandoahEvacOOMScope oom_evac_scope;
_heap->marked_object_iterate(r, &cl);
} else {
promoter.maybe_promote_region(r);

View File

@ -199,13 +199,6 @@ void ShenandoahGenerationalHeap::promote_regions_in_place(ShenandoahGeneration*
oop ShenandoahGenerationalHeap::evacuate_object(oop p, Thread* thread) {
assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
// This thread went through the OOM during evac protocol and it is safe to return
// the forward pointer. It must not attempt to evacuate anymore.
return ShenandoahBarrierSet::resolve_forwarded(p);
}
assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
ShenandoahHeapRegion* from_region = heap_region_containing(p);
assert(!from_region->is_humongous(), "never evacuate humongous objects");
@ -329,8 +322,23 @@ oop ShenandoahGenerationalHeap::try_evacuate_object(oop p, Thread* thread, uint
}
control_thread()->handle_alloc_failure_evac(size);
oom_evac_handler()->handle_out_of_memory_during_evacuation();
return ShenandoahBarrierSet::resolve_forwarded(p);
// Install the self-forwarded bit so other evacuators/LRBs see the
// object as "already handled, do not try to evacuate". The CAS may
// fail if another thread concurrently installed a real forwardee or
// self-forwarded first.
markWord old_mark = p->mark();
if (old_mark.is_forwarded()) {
return ShenandoahForwarding::get_forwardee(p);
}
oop winner = ShenandoahForwarding::try_forward_to_self(p, old_mark);
if (winner == nullptr) {
// We own the self-forwarding. Flag the from-region so the degen/full
// GC entry drain knows to scan it for self_fwd bits to clear.
heap_region_containing(p)->set_has_self_forwards();
return p;
}
return winner;
}
if (ShenandoahEvacTracking) {
@ -717,7 +725,7 @@ public:
void work(uint worker_id) override {
if (CONCURRENT) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahSuspendibleThreadSetJoiner stsj;
SuspendibleThreadSetJoiner stsj;
do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
} else {
ShenandoahParallelWorkerSession worker_session(worker_id);

View File

@ -1149,7 +1149,7 @@ public:
void work(uint worker_id) {
if (_concurrent) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahSuspendibleThreadSetJoiner stsj;
SuspendibleThreadSetJoiner stsj;
do_work();
} else {
ShenandoahParallelWorkerSession worker_session(worker_id);
@ -1163,10 +1163,7 @@ private:
ShenandoahHeapRegion* r;
while ((r =_cs->claim_next()) != nullptr) {
assert(r->has_live(), "Region %zu should have been reclaimed early", r->index());
{
ShenandoahEvacOOMScope oom_evac_scope;
_sh->marked_object_iterate(r, &cl);
}
_sh->marked_object_iterate(r, &cl);
if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
break;
@ -1303,13 +1300,6 @@ void ShenandoahHeap::concurrent_final_roots(HandshakeClosure* handshake_closure)
oop ShenandoahHeap::evacuate_object(oop p, Thread* thread) {
assert(thread == Thread::current(), "Expected thread parameter to be current thread.");
if (ShenandoahThreadLocalData::is_oom_during_evac(thread)) {
// This thread went through the OOM during evac protocol. It is safe to return
// the forward pointer. It must not attempt to evacuate any other objects.
return ShenandoahBarrierSet::resolve_forwarded(p);
}
assert(ShenandoahThreadLocalData::is_evac_allowed(thread), "must be enclosed in oom-evac scope");
ShenandoahHeapRegion* r = heap_region_containing(p);
assert(!r->is_humongous(), "never evacuate humongous objects");
@ -1348,9 +1338,22 @@ oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapReg
if (copy == nullptr) {
control_thread()->handle_alloc_failure_evac(size);
_oom_evac_handler.handle_out_of_memory_during_evacuation();
return ShenandoahBarrierSet::resolve_forwarded(p);
// Install the self-forwarded bit on p so other evacuators/LRBs see
// the object as "already handled, do not try to evacuate". The CAS
// may fail if another thread concurrently installed a real forwardee
// (they succeeded where we failed) or self-forwarded first.
markWord old_mark = p->mark();
if (old_mark.is_forwarded()) {
return ShenandoahForwarding::get_forwardee(p);
}
oop winner = ShenandoahForwarding::try_forward_to_self(p, old_mark);
if (winner == nullptr) {
// We own the self-forwarding. Flag the region so the degen/full GC
// entry drain knows to scan it for self_fwd bits to clear.
from_region->set_has_self_forwards();
return p;
}
return winner;
}
if (ShenandoahEvacTracking) {
@ -1405,6 +1408,71 @@ oop ShenandoahHeap::try_evacuate_object(oop p, Thread* thread, ShenandoahHeapReg
}
}
// Clear the self_fwd bit on a live cset object, if set. Runs at a safepoint,
// so a plain store is sufficient — no concurrent writers to the mark word.
class ShenandoahUnSelfForwardObjectClosure : public ObjectClosure {
public:
void do_object(oop obj) override {
markWord m = obj->mark();
if (m.is_self_forwarded()) {
obj->set_mark(m.unset_self_forwarded());
}
}
};
// Parallel task over flagged cset regions. Iterates the live objects via the
// mark bitmap (skipping evacuated and never-marked memory), clears self_fwd
// bits, and resets the region flag once done.
class ShenandoahUnSelfForwardTask : public WorkerTask {
private:
ShenandoahHeap* const _heap;
ShenandoahCollectionSet* const _cs;
public:
ShenandoahUnSelfForwardTask(ShenandoahHeap* heap, ShenandoahCollectionSet* cs) :
WorkerTask("Shenandoah Un-Self-Forward"),
_heap(heap),
_cs(cs) {}
void work(uint worker_id) override {
ShenandoahParallelWorkerSession worker_session(worker_id);
ShenandoahUnSelfForwardObjectClosure cl;
ShenandoahHeapRegion* r;
while ((r = _cs->claim_next()) != nullptr) {
if (r->has_self_forwards()) {
_heap->marked_object_iterate(r, &cl);
r->clear_has_self_forwards();
}
}
}
};
// Parallel drain over the collection set: clear self-forwarded marks left by
// failed evacuations. Must run at a safepoint (degenerated/full GC entry),
// before anything else interprets the cset objects' mark words.
void ShenandoahHeap::un_self_forward_cset_regions() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  ShenandoahCollectionSet* cs = collection_set();
  if (cs == nullptr || cs->is_empty()) {
    return;
  }
  cs->clear_current_index();
  ShenandoahUnSelfForwardTask task(this, cs);
  workers()->run_task(&task);
  // Reset the claim index unconditionally: the parallel task leaves it
  // exhausted, and later phases may iterate the same collection set. Without
  // this, product builds would leave different iterator state than debug
  // builds (where assert_no_self_forwards() re-clears it).
  cs->clear_current_index();
  DEBUG_ONLY(assert_no_self_forwards());
}
#ifdef ASSERT
// Debug-only check: after the drain, no cset region may still carry the
// has_self_forwards flag. Single-threaded walk; safepoint-only.
void ShenandoahHeap::assert_no_self_forwards() const {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
  ShenandoahCollectionSet* cs = collection_set();
  if (cs == nullptr) {
    return;
  }
  cs->clear_current_index();
  for (ShenandoahHeapRegion* region = cs->next(); region != nullptr; region = cs->next()) {
    assert(!region->has_self_forwards(), "region still flagged after drain");
  }
  cs->clear_current_index();
}
#endif
void ShenandoahHeap::trash_cset_regions() {
ShenandoahHeapLocker locker(lock());
@ -2508,7 +2576,7 @@ public:
void work(uint worker_id) {
if (CONCURRENT) {
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahSuspendibleThreadSetJoiner stsj;
SuspendibleThreadSetJoiner stsj;
do_work<ShenandoahConcUpdateRefsClosure>(worker_id);
} else {
ShenandoahParallelWorkerSession worker_session(worker_id);

View File

@ -33,7 +33,6 @@
#include "gc/shenandoah/shenandoahAllocRequest.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahController.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahEvacTracker.hpp"
#include "gc/shenandoah/shenandoahGenerationType.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
@ -558,8 +557,6 @@ public:
ShenandoahPhaseTimings* phase_timings() const { return _phase_timings; }
ShenandoahEvacOOMHandler* oom_evac_handler() { return &_oom_evac_handler; }
ShenandoahEvacuationTracker* evac_tracker() const {
return _evac_tracker;
}
@ -793,7 +790,6 @@ public:
//
private:
ShenandoahCollectionSet* _collection_set;
ShenandoahEvacOOMHandler _oom_evac_handler;
oop try_evacuate_object(oop src, Thread* thread, ShenandoahHeapRegion* from_region, ShenandoahAffiliation target_gen);
@ -813,12 +809,16 @@ public:
inline bool in_collection_set_loc(void* loc) const;
// Evacuates or promotes object src. Returns the evacuated object, either evacuated
// by this thread, or by some other thread.
// by this thread, or by some other thread. On allocation failure, installs the
// self-forwarded bit on src, flags src's region, and returns src.
virtual oop evacuate_object(oop src, Thread* thread);
// Call before/after evacuation.
inline void enter_evacuation(Thread* t);
inline void leave_evacuation(Thread* t);
// Parallel scan of flagged cset regions to clear self-forwarded bits on live
// objects. Must be called at a safepoint; intended for the degenerated and
// full GC entry paths.
void un_self_forward_cset_regions();
DEBUG_ONLY(void assert_no_self_forwards() const;)
// ---------- Helper functions
//

View File

@ -100,14 +100,6 @@ inline ShenandoahHeapRegion* ShenandoahHeap::heap_region_containing(const void*
return result;
}
inline void ShenandoahHeap::enter_evacuation(Thread* t) {
_oom_evac_handler.enter_evacuation(t);
}
inline void ShenandoahHeap::leave_evacuation(Thread* t) {
_oom_evac_handler.leave_evacuation(t);
}
template <class T>
inline void ShenandoahHeap::non_conc_update_with_forwarded(T* p) {
T o = RawAccess<>::oop_load(p);
@ -258,7 +250,6 @@ inline bool ShenandoahHeap::cancelled_gc() const {
inline bool ShenandoahHeap::check_cancelled_gc_and_yield(bool sts_active) {
if (sts_active && !cancelled_gc()) {
assert(!ShenandoahEvacOOMHandler::is_active(), "Potential deadlock: cannot yield while OOM evac handler is active");
if (SuspendibleThreadSet::should_yield()) {
SuspendibleThreadSet::yield();
}
@ -273,7 +264,6 @@ inline GCCause::Cause ShenandoahHeap::cancelled_cause() const {
inline void ShenandoahHeap::clear_cancelled_gc() {
_cancelled_gc.set(GCCause::_no_gc);
reset_cancellation_time();
_oom_evac_handler.clear();
}
inline GCCause::Cause ShenandoahHeap::clear_cancellation(const GCCause::Cause expected) {

View File

@ -90,6 +90,7 @@ ShenandoahHeapRegion::ShenandoahHeapRegion(HeapWord* start, size_t index, bool c
SpaceMangler::mangle_region(MemRegion(_bottom, _end));
}
_recycling.unset();
_has_self_forwards.unset();
}
void ShenandoahHeapRegion::report_illegal_transition(const char *method) {
@ -572,6 +573,7 @@ void ShenandoahHeapRegion::recycle_internal() {
reset_alloc_metadata();
heap->marking_context()->reset_top_at_mark_start(this);
set_update_watermark(bottom());
clear_has_self_forwards();
if (is_old()) {
heap->old_generation()->clear_cards_for(this);
}

View File

@ -274,6 +274,11 @@ private:
ShenandoahSharedFlag _recycling; // Used to indicate that the region is being recycled; see try_recycle*().
// Set when an evacuation failure self-forwarded at least one object in this
// region. The drain at degen/full GC entry scans flagged regions and clears
// the self_fwd bits (plain stores at a safepoint). Safety-net reset on region recycle.
ShenandoahSharedFlag _has_self_forwards;
bool _needs_bitmap_reset;
public:
@ -531,6 +536,13 @@ public:
_needs_bitmap_reset = false;
}
// Self-forward accounting: set by an evacuating thread after it successfully
// installs a self-forward mark on an object in this region. Tested and cleared
// at the drain phase (degen/full GC entry) and again on region recycle.
bool has_self_forwards() const { return _has_self_forwards.is_set(); }
void set_has_self_forwards() { _has_self_forwards.set(); }
void clear_has_self_forwards() { _has_self_forwards.unset(); }
private:
void decrement_humongous_waste();
void do_commit();

View File

@ -199,7 +199,7 @@ void ShenandoahMark::mark_loop_work(T* cl, ShenandoahLiveData* live_data, uint w
if (work == 0) {
// No work encountered in current stride, try to terminate.
// Need to leave the STS here otherwise it might block safepoints.
ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE);
SuspendibleThreadSetLeaver stsl(CANCELLABLE);
ShenandoahTerminatorTerminator tt(heap);
if (terminator->offer_termination(&tt)) return;
}

View File

@ -126,7 +126,6 @@ void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
if (heap->is_concurrent_weak_root_in_progress() ||
heap->is_concurrent_strong_root_in_progress()) {
ShenandoahEvacOOMScope evac_scope;
heal_nmethod_metadata(data);
} else if (heap->is_concurrent_mark_in_progress()) {
ShenandoahKeepAliveClosure cl;

View File

@ -26,7 +26,6 @@
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahParallelCleaning.hpp"
#include "runtime/safepoint.hpp"

View File

@ -138,6 +138,7 @@ class outputStream;
\
f(degen_gc_gross, "Pause Degenerated GC (G)") \
f(degen_gc, "Pause Degenerated GC (N)") \
f(degen_gc_un_self_forward, " Un-Self-Forward") \
f(degen_gc_stw_mark, " Degen STW Mark") \
SHENANDOAH_PAR_PHASE_DO(degen_gc_stw_mark_, " DSM: ", f) \
f(degen_gc_mark, " Degen Mark") \
@ -170,6 +171,7 @@ class outputStream;
\
f(full_gc_gross, "Pause Full GC (G)") \
f(full_gc, "Pause Full GC (N)") \
f(full_gc_un_self_forward, " Un-Self-Forward") \
f(full_gc_heapdump_pre, " Pre Heap Dump") \
f(full_gc_prepare, " Prepare") \
f(full_gc_update_roots, " Update Roots") \

View File

@ -798,7 +798,7 @@ void ShenandoahScanRememberedTask::work(uint worker_id) {
if (_is_concurrent) {
// This sets up a thread local reference to the worker_id which is needed by the weak reference processor.
ShenandoahConcurrentWorkerSession worker_session(worker_id);
ShenandoahSuspendibleThreadSetJoiner stsj;
SuspendibleThreadSetJoiner stsj;
do_work(worker_id);
} else {
// This sets up a thread local reference to the worker_id which is needed by the weak reference processor.

View File

@ -31,8 +31,6 @@
ShenandoahThreadLocalData::ShenandoahThreadLocalData() :
_gc_state(0),
_oom_scope_nesting_level(0),
_oom_during_evac(false),
_satb_mark_queue(&ShenandoahBarrierSet::satb_mark_queue_set()),
_card_table(nullptr),
_gclab(nullptr),

View File

@ -45,9 +45,6 @@
class ShenandoahThreadLocalData {
private:
char _gc_state;
// Evacuation OOM state
uint8_t _oom_scope_nesting_level;
bool _oom_during_evac;
SATBMarkQueue _satb_mark_queue;
@ -160,39 +157,6 @@ public:
return data(thread)->_shenandoah_plab;
}
// Evacuation OOM handling
static bool is_oom_during_evac(Thread* thread) {
return data(thread)->_oom_during_evac;
}
static void set_oom_during_evac(Thread* thread, bool oom) {
data(thread)->_oom_during_evac = oom;
}
static uint8_t evac_oom_scope_level(Thread* thread) {
return data(thread)->_oom_scope_nesting_level;
}
// Push the scope one level deeper, return previous level
static uint8_t push_evac_oom_scope(Thread* thread) {
uint8_t level = evac_oom_scope_level(thread);
assert(level < 254, "Overflow nesting level"); // UINT8_MAX = 255
data(thread)->_oom_scope_nesting_level = level + 1;
return level;
}
// Pop the scope by one level, return previous level
static uint8_t pop_evac_oom_scope(Thread* thread) {
uint8_t level = evac_oom_scope_level(thread);
assert(level > 0, "Underflow nesting level");
data(thread)->_oom_scope_nesting_level = level - 1;
return level;
}
static bool is_evac_allowed(Thread* thread) {
return evac_oom_scope_level(thread) > 0;
}
// Offsets
static ByteSize satb_mark_queue_index_offset() {
return satb_mark_queue_offset() + SATBMarkQueue::byte_offset_of_index();

View File

@ -221,30 +221,6 @@ public:
~ShenandoahParallelWorkerSession();
};
class ShenandoahSuspendibleThreadSetJoiner {
private:
SuspendibleThreadSetJoiner _joiner;
public:
ShenandoahSuspendibleThreadSetJoiner(bool active = true) : _joiner(active) {
assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be joined before evac scope");
}
~ShenandoahSuspendibleThreadSetJoiner() {
assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be left after evac scope");
}
};
class ShenandoahSuspendibleThreadSetLeaver {
private:
SuspendibleThreadSetLeaver _leaver;
public:
ShenandoahSuspendibleThreadSetLeaver(bool active = true) : _leaver(active) {
assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be left after evac scope");
}
~ShenandoahSuspendibleThreadSetLeaver() {
assert(!ShenandoahThreadLocalData::is_evac_allowed(Thread::current()), "STS should be joined before evac scope");
}
};
// Regions cannot be uncommitted when concurrent reset is zeroing out the bitmaps.
// This CADR class enforces this by forbidding region uncommits while it is in scope.
class ShenandoahNoUncommitMark : public StackObj {