8192647: GClocker induced GCs can starve threads requiring memory leading to OOME

Reviewed-by: tschatzl, iwalulya, egahlin
This commit is contained in:
Albert Mingkun Yang 2025-02-25 11:14:20 +00:00
parent aa70f0ae8b
commit a9c9f7f0cb
41 changed files with 171 additions and 943 deletions

View File

@ -1873,7 +1873,7 @@ bool G1CollectedHeap::try_collect(GCCause::Cause cause,
return try_collect_concurrently(cause,
counters_before.total_collections(),
counters_before.old_marking_cycles_started());
} else if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
} else if (cause == GCCause::_wb_young_gc
DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
// Schedule a standard evacuation pause. We're setting word_size

View File

@ -188,6 +188,7 @@ void ParallelScavengeHeap::post_initialize() {
PSPromotionManager::initialize();
ScavengableNMethods::initialize(&_is_scavengable);
GCLocker::initialize();
}
void ParallelScavengeHeap::update_counters() {
@ -288,7 +289,6 @@ HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size,
uint loop_count = 0;
uint gc_count = 0;
uint gclocker_stalled_count = 0;
while (result == nullptr) {
// We don't want to have multiple collections for a single filled generation.
@ -318,37 +318,10 @@ HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size,
return result;
}
}
if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
return nullptr;
}
// Failed to allocate without a gc.
if (GCLocker::is_active_and_needs_gc()) {
// If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and
// GC allowed. When the critical section clears, a GC is
// initiated by the last thread exiting the critical section; so
// we retry the allocation sequence from the beginning of the loop,
// rather than causing more, now probably unnecessary, GC attempts.
JavaThread* jthr = JavaThread::current();
if (!jthr->in_critical()) {
MutexUnlocker mul(Heap_lock);
GCLocker::stall_until_clear();
gclocker_stalled_count += 1;
continue;
} else {
if (CheckJNICalls) {
fatal("Possible deadlock due to allocating while"
" in jni critical section");
}
return nullptr;
}
}
}
if (result == nullptr) {
// Generate a VM operation
assert(result == nullptr, "inv");
{
VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
VMThread::execute(&op);
@ -358,13 +331,6 @@ HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size,
if (op.prologue_succeeded()) {
assert(is_in_or_null(op.result()), "result not in heap");
// If GC was locked out during VM operation then retry allocation
// and/or stall as necessary.
if (op.gc_locked()) {
assert(op.result() == nullptr, "must be null if gc_locked() is true");
continue; // retry and/or stall as necessary
}
// Exit the loop if the gc time limit has been exceeded.
// The allocation must have failed above ("result" guarding
// this path is null) and the most recent collection has exceeded the
@ -416,8 +382,8 @@ HeapWord* ParallelScavengeHeap::allocate_old_gen_and_record(size_t size) {
}
HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
if (!should_alloc_in_eden(size) || GCLocker::is_active_and_needs_gc()) {
// Size is too big for eden, or gc is locked out.
if (!should_alloc_in_eden(size)) {
// Size is too big for eden.
return allocate_old_gen_and_record(size);
}
@ -425,9 +391,6 @@ HeapWord* ParallelScavengeHeap::mem_allocate_old_gen(size_t size) {
}
void ParallelScavengeHeap::do_full_collection(bool clear_all_soft_refs) {
if (GCLocker::check_active_before_gc()) {
return;
}
PSParallelCompact::invoke(clear_all_soft_refs);
}
@ -446,11 +409,6 @@ HeapWord* ParallelScavengeHeap::satisfy_failed_allocation(size_t size, bool is_t
HeapWord* result = nullptr;
GCLocker::check_active_before_gc();
if (GCLocker::is_active_and_needs_gc()) {
return expand_heap_and_allocate(size, is_tlab);
}
// If young-gen can handle this allocation, attempt young-gc firstly.
bool should_run_young_gc = is_tlab || should_alloc_in_eden(size);
collect_at_safepoint(!should_run_young_gc);
@ -544,10 +502,6 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
full_gc_count = total_full_collections();
}
if (GCLocker::should_discard(cause, gc_count)) {
return;
}
while (true) {
VM_ParallelGCCollect op(gc_count, full_gc_count, cause);
VMThread::execute(&op);
@ -562,22 +516,9 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
return;
}
}
if (GCLocker::is_active_and_needs_gc()) {
// If GCLocker is active, wait until clear before retrying.
GCLocker::stall_until_clear();
}
}
}
void ParallelScavengeHeap::try_collect_at_safepoint(bool full) {
assert(SafepointSynchronize::is_at_safepoint(), "precondition");
if (GCLocker::check_active_before_gc()) {
return;
}
collect_at_safepoint(full);
}
bool ParallelScavengeHeap::must_clear_all_soft_refs() {
return _gc_cause == GCCause::_metadata_GC_clear_soft_refs ||
_gc_cause == GCCause::_wb_full_gc;
@ -889,11 +830,11 @@ GrowableArray<MemoryPool*> ParallelScavengeHeap::memory_pools() {
}
void ParallelScavengeHeap::pin_object(JavaThread* thread, oop obj) {
GCLocker::lock_critical(thread);
GCLocker::enter(thread);
}
void ParallelScavengeHeap::unpin_object(JavaThread* thread, oop obj) {
GCLocker::unlock_critical(thread);
GCLocker::exit(thread);
}
void ParallelScavengeHeap::update_parallel_worker_threads_cpu_time() {

View File

@ -95,8 +95,6 @@ class ParallelScavengeHeap : public CollectedHeap {
void update_parallel_worker_threads_cpu_time();
void collect_at_safepoint(bool full);
bool must_clear_all_soft_refs();
HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
@ -198,7 +196,7 @@ public:
// Support for System.gc()
void collect(GCCause::Cause cause) override;
void try_collect_at_safepoint(bool full);
void collect_at_safepoint(bool full);
void ensure_parsability(bool retire_tlabs) override;
void resize_all_tlabs() override;

View File

@ -217,9 +217,6 @@ bool PSOldGen::expand(size_t bytes) {
success = expand_to_reserved();
}
if (success && GCLocker::is_active_and_needs_gc()) {
log_debug(gc)("Garbage collection disabled, expanded heap instead");
}
return success;
}

View File

@ -993,10 +993,6 @@ bool PSParallelCompact::invoke_no_policy(bool clear_all_soft_refs) {
assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
assert(ref_processor() != nullptr, "Sanity");
if (GCLocker::check_active_before_gc()) {
return false;
}
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCIdMark gc_id_mark;

View File

@ -43,14 +43,10 @@ void VM_ParallelCollectForAllocation::doit() {
GCCauseSetter gccs(heap, _gc_cause);
_result = heap->satisfy_failed_allocation(_word_size, _is_tlab);
if (_result == nullptr && GCLocker::is_active_and_needs_gc()) {
set_gc_locked();
}
}
static bool is_cause_full(GCCause::Cause cause) {
return (cause != GCCause::_gc_locker) && (cause != GCCause::_wb_young_gc)
return (cause != GCCause::_wb_young_gc)
DEBUG_ONLY(&& (cause != GCCause::_scavenge_alot));
}
@ -64,5 +60,5 @@ void VM_ParallelGCCollect::doit() {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
GCCauseSetter gccs(heap, _gc_cause);
heap->try_collect_at_safepoint(_full);
heap->collect_at_safepoint(_full);
}

View File

@ -367,18 +367,6 @@ bool DefNewGeneration::expand(size_t bytes) {
SpaceMangler::mangle_region(mangle_region);
}
// Do not attempt an expand-to-the reserve size. The
// request should properly observe the maximum size of
// the generation so an expand-to-reserve should be
// unnecessary. Also a second call to expand-to-reserve
// value potentially can cause an undue expansion.
// For example if the first expand fail for unknown reasons,
// but the second succeeds and expands the heap to its maximum
// value.
if (GCLocker::is_active()) {
log_debug(gc)("Garbage collection disabled, expanded heap instead");
}
return success;
}

View File

@ -101,6 +101,7 @@ SerialHeap::SerialHeap() :
_old_pool(nullptr) {
_young_manager = new GCMemoryManager("Copy");
_old_manager = new GCMemoryManager("MarkSweepCompact");
GCLocker::initialize();
}
void SerialHeap::initialize_serviceability() {
@ -167,11 +168,11 @@ void SerialHeap::complete_loaded_archive_space(MemRegion archive_space) {
}
void SerialHeap::pin_object(JavaThread* thread, oop obj) {
GCLocker::lock_critical(thread);
GCLocker::enter(thread);
}
void SerialHeap::unpin_object(JavaThread* thread, oop obj) {
GCLocker::unlock_critical(thread);
GCLocker::exit(thread);
}
jint SerialHeap::initialize() {
@ -282,12 +283,10 @@ size_t SerialHeap::max_capacity() const {
// Return true if any of the following is true:
// . the allocation won't fit into the current young gen heap
// . gc locker is occupied (jni critical section)
// . heap memory is tight
bool SerialHeap::should_try_older_generation_allocation(size_t word_size) const {
size_t young_capacity = _young_gen->capacity_before_gc();
return (word_size > heap_word_size(young_capacity))
|| GCLocker::is_active_and_needs_gc()
|| _is_heap_almost_full;
}
@ -306,14 +305,11 @@ HeapWord* SerialHeap::expand_heap_and_allocate(size_t size, bool is_tlab) {
return result;
}
HeapWord* SerialHeap::mem_allocate_work(size_t size,
bool is_tlab) {
HeapWord* SerialHeap::mem_allocate_work(size_t size, bool is_tlab) {
HeapWord* result = nullptr;
// Loop until the allocation is satisfied, or unsatisfied after GC.
for (uint try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
for (uint try_count = 1; /* return or throw */; try_count += 1) {
// First allocation attempt is lock-free.
DefNewGeneration *young = _young_gen;
if (young->should_allocate(size, is_tlab)) {
@ -337,45 +333,6 @@ HeapWord* SerialHeap::mem_allocate_work(size_t size,
return result;
}
if (GCLocker::is_active_and_needs_gc()) {
if (is_tlab) {
return nullptr; // Caller will retry allocating individual object.
}
if (!is_maximal_no_gc()) {
// Try and expand heap to satisfy request.
result = expand_heap_and_allocate(size, is_tlab);
// Result could be null if we are out of space.
if (result != nullptr) {
return result;
}
}
if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
return nullptr; // We didn't get to do a GC and we didn't get any memory.
}
// If this thread is not in a jni critical section, we stall
// the requestor until the critical section has cleared and
// GC allowed. When the critical section clears, a GC is
// initiated by the last thread exiting the critical section; so
// we retry the allocation sequence from the beginning of the loop,
// rather than causing more, now probably unnecessary, GC attempts.
JavaThread* jthr = JavaThread::current();
if (!jthr->in_critical()) {
MutexUnlocker mul(Heap_lock);
// Wait for JNI critical section to be exited
GCLocker::stall_until_clear();
gclocker_stalled_count += 1;
continue;
} else {
if (CheckJNICalls) {
fatal("Possible deadlock due to allocating while"
" in jni critical section");
}
return nullptr;
}
}
// Read the gc count while the heap lock is held.
gc_count_before = total_collections();
}
@ -384,10 +341,6 @@ HeapWord* SerialHeap::mem_allocate_work(size_t size,
VMThread::execute(&op);
if (op.prologue_succeeded()) {
result = op.result();
if (op.gc_locked()) {
assert(result == nullptr, "must be null if gc_locked() is true");
continue; // Retry and/or stall as necessary.
}
assert(result == nullptr || is_in_reserved(result),
"result not in heap");
@ -397,8 +350,8 @@ HeapWord* SerialHeap::mem_allocate_work(size_t size,
// Give a warning if we seem to be looping forever.
if ((QueuedAllocationWarningCount > 0) &&
(try_count % QueuedAllocationWarningCount == 0)) {
log_warning(gc, ergo)("SerialHeap::mem_allocate_work retries %d times,"
" size=%zu %s", try_count, size, is_tlab ? "(TLAB)" : "");
log_warning(gc, ergo)("SerialHeap::mem_allocate_work retries %d times,"
" size=%zu %s", try_count, size, is_tlab ? "(TLAB)" : "");
}
}
}
@ -517,16 +470,6 @@ HeapWord* SerialHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
HeapWord* result = nullptr;
GCLocker::check_active_before_gc();
if (GCLocker::is_active_and_needs_gc()) {
// GC locker is active; instead of a collection we will attempt
// to expand the heap, if there's room for expansion.
if (!is_maximal_no_gc()) {
result = expand_heap_and_allocate(size, is_tlab);
}
return result; // Could be null if we are out of space.
}
// If young-gen can handle this allocation, attempt young-gc firstly.
bool should_run_young_gc = _young_gen->should_allocate(size, is_tlab);
collect_at_safepoint(!should_run_young_gc);
@ -550,7 +493,7 @@ HeapWord* SerialHeap::satisfy_failed_allocation(size_t size, bool is_tlab) {
{
UIntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted
const bool clear_all_soft_refs = true;
do_full_collection_no_gc_locker(clear_all_soft_refs);
do_full_collection(clear_all_soft_refs);
}
result = attempt_allocation(size, is_tlab, false /* first_only */);
@ -632,14 +575,6 @@ void SerialHeap::scan_evacuated_objs(YoungGenScanClosure* young_cl,
guarantee(young_gen()->promo_failure_scan_is_complete(), "Failed to finish scan");
}
void SerialHeap::try_collect_at_safepoint(bool full) {
assert(SafepointSynchronize::is_at_safepoint(), "precondition");
if (GCLocker::check_active_before_gc()) {
return;
}
collect_at_safepoint(full);
}
void SerialHeap::collect_at_safepoint(bool full) {
assert(!GCLocker::is_active(), "precondition");
bool clear_soft_refs = must_clear_all_soft_refs();
@ -651,7 +586,7 @@ void SerialHeap::collect_at_safepoint(bool full) {
}
// Upgrade to Full-GC if young-gc fails
}
do_full_collection_no_gc_locker(clear_soft_refs);
do_full_collection(clear_soft_refs);
}
// public collection interfaces
@ -669,12 +604,7 @@ void SerialHeap::collect(GCCause::Cause cause) {
full_gc_count_before = total_full_collections();
}
if (GCLocker::should_discard(cause, gc_count_before)) {
return;
}
bool should_run_young_gc = (cause == GCCause::_wb_young_gc)
|| (cause == GCCause::_gc_locker)
DEBUG_ONLY(|| (cause == GCCause::_scavenge_alot));
while (true) {
@ -683,7 +613,6 @@ void SerialHeap::collect(GCCause::Cause cause) {
full_gc_count_before,
cause);
VMThread::execute(&op);
if (!GCCause::is_explicit_full_gc(cause)) {
return;
}
@ -695,22 +624,10 @@ void SerialHeap::collect(GCCause::Cause cause) {
return;
}
}
if (GCLocker::is_active_and_needs_gc()) {
// If GCLocker is active, wait until clear before retrying.
GCLocker::stall_until_clear();
}
}
}
void SerialHeap::do_full_collection(bool clear_all_soft_refs) {
if (GCLocker::check_active_before_gc()) {
return;
}
do_full_collection_no_gc_locker(clear_all_soft_refs);
}
void SerialHeap::do_full_collection_no_gc_locker(bool clear_all_soft_refs) {
IsSTWGCActiveMark gc_active_mark;
SvcGCMarker sgcm(SvcGCMarker::FULL);
GCIdMark gc_id_mark;

View File

@ -108,9 +108,6 @@ private:
bool first_only);
void do_full_collection(bool clear_all_soft_refs) override;
void do_full_collection_no_gc_locker(bool clear_all_soft_refs);
void collect_at_safepoint(bool full);
// Does the "cause" of GC indicate that
// we absolutely __must__ clear soft refs?
@ -147,7 +144,7 @@ public:
HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
// Callback from VM_SerialGCCollect.
void try_collect_at_safepoint(bool full);
void collect_at_safepoint(bool full);
// Perform a full collection of the heap; intended for use in implementing
// "System.gc". This implies as full a collection as the CollectedHeap
@ -257,8 +254,7 @@ private:
// Try to allocate space by expanding the heap.
HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
HeapWord* mem_allocate_work(size_t size,
bool is_tlab);
HeapWord* mem_allocate_work(size_t size, bool is_tlab);
MemoryPool* _eden_pool;
MemoryPool* _survivor_pool;

View File

@ -30,14 +30,10 @@ void VM_SerialCollectForAllocation::doit() {
GCCauseSetter gccs(gch, _gc_cause);
_result = gch->satisfy_failed_allocation(_word_size, _tlab);
assert(_result == nullptr || gch->is_in_reserved(_result), "result not in heap");
if (_result == nullptr && GCLocker::is_active_and_needs_gc()) {
set_gc_locked();
}
}
void VM_SerialGCCollect::doit() {
SerialHeap* gch = SerialHeap::heap();
GCCauseSetter gccs(gch, _gc_cause);
gch->try_collect_at_safepoint(_full);
gch->collect_at_safepoint(_full);
}

View File

@ -100,9 +100,6 @@ bool TenuredGeneration::expand(size_t bytes, size_t expand_bytes) {
if (!success) {
success = grow_to_reserved();
}
if (success && GCLocker::is_active_and_needs_gc()) {
log_trace(gc, heap)("Garbage collection disabled, expanded heap instead");
}
return success;
}

View File

@ -29,7 +29,7 @@
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/gcHeapSummary.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
@ -350,36 +350,10 @@ MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loa
return result;
}
if (GCLocker::is_active_and_needs_gc()) {
// If the GCLocker is active, just expand and allocate.
// If that does not succeed, wait if this thread is not
// in a critical section itself.
result = loader_data->metaspace_non_null()->expand_and_allocate(word_size, mdtype);
if (result != nullptr) {
return result;
}
JavaThread* jthr = JavaThread::current();
if (!jthr->in_critical()) {
// Wait for JNI critical section to be exited
GCLocker::stall_until_clear();
// The GC invoked by the last thread leaving the critical
// section will be a young collection and a full collection
// is (currently) needed for unloading classes so continue
// to the next iteration to get a full GC.
continue;
} else {
if (CheckJNICalls) {
fatal("Possible deadlock due to allocating while"
" in jni critical section");
}
return nullptr;
}
}
{ // Need lock to get self consistent gc_count's
MutexLocker ml(Heap_lock);
gc_count = Universe::heap()->total_collections();
full_gc_count = Universe::heap()->total_full_collections();
gc_count = total_collections();
full_gc_count = total_full_collections();
}
// Generate a VM operation
@ -389,13 +363,8 @@ MetaWord* CollectedHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loa
gc_count,
full_gc_count,
GCCause::_metadata_GC_threshold);
VMThread::execute(&op);
// If GC was locked out, try again. Check before checking success because the
// prologue could have succeeded and the GC still have been locked out.
if (op.gc_locked()) {
continue;
}
VMThread::execute(&op);
if (op.prologue_succeeded()) {
return op.result();

View File

@ -41,9 +41,6 @@ const char* GCCause::to_string(GCCause::Cause cause) {
case _jvmti_force_gc:
return "JvmtiEnv ForceGarbageCollection";
case _gc_locker:
return "GCLocker Initiated GC";
case _heap_inspection:
return "Heap Inspection Initiated GC";

View File

@ -47,7 +47,6 @@ class GCCause : public AllStatic {
_scavenge_alot,
_allocation_profiler,
_jvmti_force_gc,
_gc_locker,
_heap_inspection,
_heap_dump,
_wb_young_gc,

View File

@ -29,15 +29,13 @@
#include "memory/universe.hpp"
#include "logging/log.hpp"
#include "runtime/atomic.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/spinYield.hpp"
#include "runtime/threadSMR.hpp"
#include "utilities/ticks.hpp"
volatile jint GCLocker::_jni_lock_count = 0;
volatile bool GCLocker::_needs_gc = false;
unsigned int GCLocker::_total_collections = 0;
// GCLockerTimingDebugLogger tracks specific timing information for GC lock waits.
class GCLockerTimingDebugLogger : public StackObj {
const char* _log_message;
@ -46,7 +44,9 @@ class GCLockerTimingDebugLogger : public StackObj {
public:
GCLockerTimingDebugLogger(const char* log_message) : _log_message(log_message) {
assert(_log_message != nullptr, "GC locker debug message must be set.");
_start = Ticks::now();
if (log_is_enabled(Debug, gc, jni)) {
_start = Ticks::now();
}
}
~GCLockerTimingDebugLogger() {
@ -59,138 +59,89 @@ public:
}
};
#ifdef ASSERT
volatile jint GCLocker::_debug_jni_lock_count = 0;
#endif
Monitor* GCLocker::_lock;
volatile bool GCLocker::_is_gc_request_pending;
DEBUG_ONLY(uint64_t GCLocker::_verify_in_cr_count;)
void GCLocker::initialize() {
assert(Heap_lock != nullptr, "inv");
_lock = Heap_lock;
_is_gc_request_pending = false;
DEBUG_ONLY(_verify_in_cr_count = 0;)
}
bool GCLocker::is_active() {
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); /* empty */) {
if (cur->in_critical_atomic()) {
return true;
}
}
return false;
}
void GCLocker::block() {
assert(_lock->is_locked(), "precondition");
assert(Atomic::load(&_is_gc_request_pending) == false, "precondition");
GCLockerTimingDebugLogger logger("Thread blocked to start GC.");
Atomic::store(&_is_gc_request_pending, true);
// The _is_gc_request_pending and _jni_active_critical (inside
// in_critical_atomic()) variables form a Dekker duality. On the GC side, the
// _is_gc_request_pending is set and _jni_active_critical is subsequently
// loaded. For Java threads, the opposite is true, just like a Dekker lock.
// That's why there is a fence to order the accesses involved in the Dekker
// synchronization.
OrderAccess::fence();
JavaThread* java_thread = JavaThread::current();
ThreadBlockInVM tbivm(java_thread);
// Wait for threads leaving critical section
SpinYield spin_yield;
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur = jtiwh.next(); /* empty */) {
while (cur->in_critical_atomic()) {
spin_yield.wait();
}
}
#ifdef ASSERT
void GCLocker::verify_critical_count() {
if (SafepointSynchronize::is_at_safepoint()) {
assert(!needs_gc() || _debug_jni_lock_count == _jni_lock_count, "must agree");
int count = 0;
// Count the number of threads with critical operations in progress
JavaThreadIteratorWithHandle jtiwh;
for (; JavaThread *thr = jtiwh.next(); ) {
if (thr->in_critical()) {
count++;
}
}
if (_jni_lock_count != count) {
log_error(gc, verify)("critical counts don't match: %d != %d", _jni_lock_count, count);
jtiwh.rewind();
for (; JavaThread *thr = jtiwh.next(); ) {
if (thr->in_critical()) {
log_error(gc, verify)(PTR_FORMAT " in_critical %d", p2i(thr), thr->in_critical());
}
}
}
assert(_jni_lock_count == count, "must be equal");
}
}
// In debug mode track the locking state at all times
void GCLocker::increment_debug_jni_lock_count() {
assert(_debug_jni_lock_count >= 0, "bad value");
Atomic::inc(&_debug_jni_lock_count);
}
void GCLocker::decrement_debug_jni_lock_count() {
assert(_debug_jni_lock_count > 0, "bad value");
Atomic::dec(&_debug_jni_lock_count);
}
// Matching the storestore in GCLocker::exit.
OrderAccess::loadload();
assert(Atomic::load(&_verify_in_cr_count) == 0, "inv");
#endif
void GCLocker::log_debug_jni(const char* msg) {
Log(gc, jni) log;
if (log.is_debug()) {
ResourceMark rm; // JavaThread::name() allocates to convert to UTF8
log.debug("%s Thread \"%s\" %d locked.", msg, Thread::current()->name(), _jni_lock_count);
}
}
bool GCLocker::is_at_safepoint() {
return SafepointSynchronize::is_at_safepoint();
void GCLocker::unblock() {
assert(_lock->is_locked(), "precondition");
assert(Atomic::load(&_is_gc_request_pending) == true, "precondition");
Atomic::store(&_is_gc_request_pending, false);
}
bool GCLocker::check_active_before_gc() {
assert(SafepointSynchronize::is_at_safepoint(), "only read at safepoint");
if (is_active() && !_needs_gc) {
verify_critical_count();
_needs_gc = true;
GCLockerTracer::start_gc_locker(_jni_lock_count);
log_debug_jni("Setting _needs_gc.");
}
return is_active();
}
void GCLocker::enter_slow(JavaThread* current_thread) {
assert(current_thread == JavaThread::current(), "Must be this thread");
void GCLocker::stall_until_clear() {
assert(!JavaThread::current()->in_critical(), "Would deadlock");
MonitorLocker ml(JNICritical_lock);
if (needs_gc()) {
GCLockerTracer::inc_stall_count();
log_debug_jni("Allocation failed. Thread stalled by JNI critical section.");
GCLockerTimingDebugLogger logger("Thread stalled by JNI critical section.");
// Wait for _needs_gc to be cleared
while (needs_gc()) {
ml.wait();
}
}
}
bool GCLocker::should_discard(GCCause::Cause cause, uint total_collections) {
return (cause == GCCause::_gc_locker) &&
(_total_collections != total_collections);
}
void GCLocker::jni_lock(JavaThread* thread) {
assert(!thread->in_critical(), "shouldn't currently be in a critical region");
MonitorLocker ml(JNICritical_lock);
// Block entering threads if there's a pending GC request.
if (needs_gc()) {
log_debug_jni("Blocking thread as there is a pending GC request");
GCLockerTimingDebugLogger logger("Thread blocked to enter critical region.");
while (needs_gc()) {
// There's at least one thread that has not left the critical region (CR)
// completely. When that last thread (no new threads can enter CR due to the
// blocking) exits CR, it calls `jni_unlock`, which sets `_needs_gc`
// to false and wakes up all blocked threads.
// We would like to assert #threads in CR to be > 0, `_jni_lock_count > 0`
// in the code, but it's too strong; it's possible that the last thread
// has called `jni_unlock`, but not yet finished the call, e.g. initiating
// a GCCause::_gc_locker GC.
ml.wait();
}
}
thread->enter_critical();
_jni_lock_count++;
increment_debug_jni_lock_count();
}
void GCLocker::jni_unlock(JavaThread* thread) {
assert(thread->in_last_critical(), "should be exiting critical region");
MutexLocker mu(JNICritical_lock);
_jni_lock_count--;
decrement_debug_jni_lock_count();
log_debug_jni("Thread exiting critical region.");
thread->exit_critical();
if (needs_gc() && !is_active_internal()) {
// We're the last thread out. Request a GC.
// Capture the current total collections, to allow detection of
// other collections that make this one unnecessary. The value of
// total_collections() is only changed at a safepoint, so there
// must not be a safepoint between the lock becoming inactive and
// getting the count, else there may be unnecessary GCLocker GCs.
_total_collections = Universe::heap()->total_collections();
GCLockerTracer::report_gc_locker();
GCLockerTimingDebugLogger logger("Thread blocked to enter critical region.");
while (true) {
{
// Must give up the lock while at a safepoint
MutexUnlocker munlock(JNICritical_lock);
log_debug_jni("Last thread exiting. Performing GC after exiting critical section.");
Universe::heap()->collect(GCCause::_gc_locker);
// There is a pending gc request and _lock is locked. Wait for the
// completion of a gc. It's enough to do an empty locker section.
MutexLocker locker(_lock);
}
_needs_gc = false;
JNICritical_lock->notify_all();
current_thread->enter_critical();
// Same as fast path.
OrderAccess::fence();
if (!Atomic::load(&_is_gc_request_pending)) {
return;
}
current_thread->exit_critical();
}
}

View File

@ -27,126 +27,43 @@
#include "gc/shared/gcCause.hpp"
#include "memory/allStatic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "runtime/mutex.hpp"
class JavaThread;
// The direct lock/unlock calls do not force a collection if an unlock
// decrements the count to zero. Avoid calling these if at all possible.
// GCLocker provides synchronization between the garbage collector (GC) and
// threads using JNI critical APIs. When threads enter a critical region (CR),
// certain GC implementations may suspend garbage collection until all such
// threads have exited.
//
// Threads that need to trigger a GC should use the `block()` and `unblock()`
// APIs. `block()` will block the caller and prevent new threads from entering
// the CR.
//
// Threads entering or exiting a CR must call the `enter` and `exit` APIs to
// ensure proper synchronization with the GC.
class GCLocker: public AllStatic {
private:
// The _jni_lock_count keeps track of the number of threads that are
// currently in a critical region. It's only kept up to date when
// _needs_gc is true. The current value is computed during
// safepointing and decremented during the slow path of GCLocker
// unlocking.
static volatile jint _jni_lock_count; // number of jni active instances.
static volatile bool _needs_gc; // heap is filling, we need a GC
static uint _total_collections; // value for _gc_locker collection
static Monitor* _lock;
static volatile bool _is_gc_request_pending;
#ifdef ASSERT
// This lock count is updated for all operations and is used to
// validate the jni_lock_count that is computed during safepoints.
static volatile jint _debug_jni_lock_count;
// Debug-only: to track the number of java threads in critical-region.
static uint64_t _verify_in_cr_count;
#endif
static void enter_slow(JavaThread* current_thread);
// At a safepoint, visit all threads and count the number of active
// critical sections. This is used to ensure that all active
// critical sections are exited before a new one is started.
static void verify_critical_count() NOT_DEBUG_RETURN;
public:
static void initialize();
static void jni_lock(JavaThread* thread);
static void jni_unlock(JavaThread* thread);
// To query current GCLocker state. Can become outdated if called outside a safepoint.
static bool is_active();
static bool is_active_internal() {
verify_critical_count();
return _jni_lock_count > 0;
}
// For use by Java threads requesting GC.
static void block();
static void unblock();
static void log_debug_jni(const char* msg);
static bool is_at_safepoint();
public:
// Accessors
static bool is_active() {
assert(GCLocker::is_at_safepoint(), "only read at safepoint");
return is_active_internal();
}
static bool needs_gc() { return _needs_gc; }
// Shorthand
static bool is_active_and_needs_gc() {
// Use is_active_internal since _needs_gc can change from true to
// false outside of a safepoint, triggering the assert in
// is_active.
return needs_gc() && is_active_internal();
}
// In debug mode track the locking state at all times
static void increment_debug_jni_lock_count() NOT_DEBUG_RETURN;
static void decrement_debug_jni_lock_count() NOT_DEBUG_RETURN;
// Set the current lock count
static void set_jni_lock_count(int count) {
_jni_lock_count = count;
verify_critical_count();
}
// Sets _needs_gc if is_active() is true. Returns is_active().
static bool check_active_before_gc();
// Return true if the designated collection is a GCLocker request
// that should be discarded. Returns true if cause == GCCause::_gc_locker
// and the given total collection value indicates a collection has been
// done since the GCLocker request was made.
static bool should_discard(GCCause::Cause cause, uint total_collections);
// Stalls the caller (who should not be in a jni critical section)
// until needs_gc() clears. Note however that needs_gc() may be
// set at a subsequent safepoint and/or cleared under the
// JNICritical_lock, so the caller may not safely assert upon
// return from this method that "!needs_gc()" since that is
// not a stable predicate.
static void stall_until_clear();
// The following two methods are used for JNI critical regions.
// If we find that we failed to perform a GC because the GCLocker
// was active, arrange for one as soon as possible by allowing
// all threads in critical regions to complete, but not allowing
// other critical regions to be entered. The reasons for that are:
// 1) a GC request won't be starved by overlapping JNI critical
// region activities, which can cause unnecessary OutOfMemory errors.
// 2) even if allocation requests can still be satisfied before GC locker
// becomes inactive, for example, in tenured generation possibly with
// heap expansion, those allocations can trigger lots of safepointing
// attempts (ineffective GC attempts) and require Heap_lock which
// slow down allocations tremendously.
//
// Note that critical regions can be nested in a single thread, so
// we must allow threads already in critical regions to continue.
//
// JNI critical regions are the only participants in this scheme
// because they are, by spec, well bounded while in a critical region.
//
// Each of the following two method is split into a fast path and a
// slow path. JNICritical_lock is only grabbed in the slow path.
// _needs_gc is initially false and every java thread will go
// through the fast path, which simply increments or decrements the
// current thread's critical count. When GC happens at a safepoint,
// GCLocker::is_active() is checked. Since there is no safepoint in
// the fast path of lock_critical() and unlock_critical(), there is
// no race condition between the fast path and GC. After _needs_gc
// is set at a safepoint, every thread will go through the slow path
// after the safepoint. Since after a safepoint, each of the
// following two methods is either entered from the method entry and
// falls into the slow path, or is resumed from the safepoints in
// the method, which only exist in the slow path. So when _needs_gc
// is set, the slow path is always taken, till _needs_gc is cleared.
inline static void lock_critical(JavaThread* thread);
inline static void unlock_critical(JavaThread* thread);
// For use by Java threads entering/leaving critical-region.
inline static void enter(JavaThread* current_thread);
inline static void exit(JavaThread* current_thread);
};
#endif // SHARE_GC_SHARED_GCLOCKER_HPP

View File

@ -29,30 +29,39 @@
#include "runtime/javaThread.inline.hpp"
void GCLocker::lock_critical(JavaThread* thread) {
if (!thread->in_critical()) {
if (needs_gc()) {
// jni_lock call calls enter_critical under the lock so that the
// global lock count and per thread count are in agreement.
jni_lock(thread);
return;
void GCLocker::enter(JavaThread* current_thread) {
assert(current_thread == JavaThread::current(), "Must be this thread");
if (!current_thread->in_critical()) {
current_thread->enter_critical();
// Matching the fence in GCLocker::block.
OrderAccess::fence();
if (Atomic::load(&_is_gc_request_pending)) {
current_thread->exit_critical();
// slow-path
enter_slow(current_thread);
}
increment_debug_jni_lock_count();
DEBUG_ONLY(Atomic::add(&_verify_in_cr_count, (uint64_t)1);)
} else {
current_thread->enter_critical();
}
thread->enter_critical();
}
void GCLocker::unlock_critical(JavaThread* thread) {
if (thread->in_last_critical()) {
if (needs_gc()) {
// jni_unlock call calls exit_critical under the lock so that
// the global lock count and per thread count are in agreement.
jni_unlock(thread);
return;
}
decrement_debug_jni_lock_count();
void GCLocker::exit(JavaThread* current_thread) {
assert(current_thread == JavaThread::current(), "Must be this thread");
#ifdef ASSERT
if (current_thread->in_last_critical()) {
Atomic::add(&_verify_in_cr_count, (uint64_t)-1);
// Matching the loadload in GCLocker::block.
OrderAccess::storestore();
}
thread->exit_critical();
#endif
current_thread->exit_critical();
}
#endif // SHARE_GC_SHARED_GCLOCKER_INLINE_HPP

View File

@ -212,19 +212,4 @@ class DefNewTracer : public YoungGCTracer, public CHeapObj<mtGC> {
DefNewTracer() : YoungGCTracer(DefNew) {}
};
class GCLockerTracer : public AllStatic {
#if INCLUDE_JFR
private:
static Ticks _needs_gc_start_timestamp;
static volatile jint _jni_lock_count;
static volatile jint _stall_count;
#endif
static bool is_started() NOT_JFR_RETURN_(false);
public:
static void start_gc_locker(jint jni_lock_count) NOT_JFR_RETURN();
static void inc_stall_count() NOT_JFR_RETURN();
static void report_gc_locker() NOT_JFR_RETURN();
};
#endif // SHARE_GC_SHARED_GCTRACE_HPP

View File

@ -358,49 +358,3 @@ void GCTracer::send_phase_events(TimePartitions* time_partitions) const {
phase->accept(&phase_reporter);
}
}
#if INCLUDE_JFR
// Backing state for the JFR "GCLocker" event. _needs_gc_start_timestamp
// doubles as the "started" flag: it is non-default exactly between a
// successful start_gc_locker() and the reset in report_gc_locker().
Ticks GCLockerTracer::_needs_gc_start_timestamp;
volatile jint GCLockerTracer::_jni_lock_count = 0;
volatile jint GCLockerTracer::_stall_count = 0;
// True if start_gc_locker() has stamped a start time that has not yet
// been consumed (and reset) by report_gc_locker().
bool GCLockerTracer::is_started() {
return _needs_gc_start_timestamp != Ticks();
}
// Begin tracking a GCLocker episode. Must be called at a safepoint with
// no episode in progress. Recording only starts if the JFR event is
// enabled; otherwise this is a no-op and is_started() stays false.
void GCLockerTracer::start_gc_locker(const jint jni_lock_count) {
assert(SafepointSynchronize::is_at_safepoint(), "sanity");
assert(!is_started(), "sanity");
assert(_jni_lock_count == 0, "sanity");
assert(_stall_count == 0, "sanity");
if (EventGCLocker::is_enabled()) {
_needs_gc_start_timestamp.stamp();
_jni_lock_count = jni_lock_count;
}
}
// Count one thread stalled by the GC locker; ignored unless an episode
// is being recorded (keeps the counter zero for the asserts above).
void GCLockerTracer::inc_stall_count() {
if (is_started()) {
_stall_count++;
}
}
// Emit the GCLocker event (if commit criteria are met) and reset all
// tracking state so a new episode can start.
void GCLockerTracer::report_gc_locker() {
if (is_started()) {
EventGCLocker event(UNTIMED);
if (event.should_commit()) {
event.set_starttime(_needs_gc_start_timestamp);
// NOTE(review): endtime is set to the start timestamp, so the event
// duration is always zero — confirm this is intended for UNTIMED.
event.set_endtime(_needs_gc_start_timestamp);
event.set_lockCount(_jni_lock_count);
event.set_stallCount(_stall_count);
event.commit();
}
// reset
_needs_gc_start_timestamp = Ticks();
_jni_lock_count = 0;
_stall_count = 0;
assert(!is_started(), "sanity");
}
}
#endif

View File

@ -85,19 +85,11 @@ void VM_GC_Operation::notify_gc_end() {
// Allocations may fail in several threads at about the same time,
// resulting in multiple gc requests. We only want to do one of them.
// In case a GC locker is active and the need for a GC is already signaled,
// we want to skip this GC attempt altogether, without doing a futile
// safepoint operation.
// Decide whether this queued GC operation is redundant and should be
// dropped without safepointing.
bool VM_GC_Operation::skip_operation() const {
// Another collection already completed since this request was created.
bool skip = (_gc_count_before != Universe::heap()->total_collections());
if (_full && skip) {
// For a full GC request, only a completed *full* collection counts.
skip = (_full_gc_count_before != Universe::heap()->total_full_collections());
}
if (!skip && GCLocker::is_active_and_needs_gc()) {
// GC locker is active and will trigger a GC on release: skip only if
// the heap cannot be expanded, i.e. a GC attempt now would be futile.
skip = Universe::heap()->is_maximal_no_gc();
assert(!(skip && (_gc_cause == GCCause::_gc_locker)),
"GCLocker cannot be active when initiating GC");
}
return skip;
}
@ -122,6 +114,9 @@ bool VM_GC_Operation::doit_prologue() {
Heap_lock->unlock();
_prologue_succeeded = false;
} else {
if (UseSerialGC || UseParallelGC) {
GCLocker::block();
}
_prologue_succeeded = true;
}
return _prologue_succeeded;
@ -129,6 +124,9 @@ bool VM_GC_Operation::doit_prologue() {
void VM_GC_Operation::doit_epilogue() {
if (UseSerialGC || UseParallelGC) {
GCLocker::unblock();
}
// GC thread root traversal likely used OopMapCache a lot, which
// might have created lots of old entries. Trigger the cleanup now.
OopMapCache::try_trigger_cleanup();
@ -259,10 +257,6 @@ void VM_CollectForMetadataAllocation::doit() {
}
log_debug(gc)("After Metaspace GC failed to allocate size %zu", _size);
if (GCLocker::is_active_and_needs_gc()) {
set_gc_locked();
}
}
VM_CollectForAllocation::VM_CollectForAllocation(size_t word_size, uint gc_count_before, GCCause::Cause cause)

View File

@ -108,7 +108,6 @@ class VM_GC_Operation: public VM_GC_Sync_Operation {
bool _full; // whether a "full" collection
bool _prologue_succeeded; // whether doit_prologue succeeded
GCCause::Cause _gc_cause; // the putative cause for this gc op
bool _gc_locked; // will be set if gc was locked
virtual bool skip_operation() const;
@ -123,8 +122,6 @@ class VM_GC_Operation: public VM_GC_Sync_Operation {
_gc_cause = _cause;
_gc_locked = false;
_full_gc_count_before = full_gc_count_before;
// In ParallelScavengeHeap::mem_allocate() collections can be
// executed within a loop and _all_soft_refs_clear can be set
@ -148,9 +145,6 @@ class VM_GC_Operation: public VM_GC_Sync_Operation {
virtual bool allow_nested_vm_operations() const { return true; }
bool prologue_succeeded() const { return _prologue_succeeded; }
void set_gc_locked() { _gc_locked = true; }
bool gc_locked() const { return _gc_locked; }
static void notify_gc_begin(bool full = false);
static void notify_gc_end();
};

View File

@ -155,11 +155,6 @@
"A System.gc() request invokes a concurrent collection; " \
"(effective only when using concurrent collectors)") \
\
product(uintx, GCLockerRetryAllocationCount, 2, DIAGNOSTIC, \
"Number of times to retry allocations when " \
"blocked by the GC locker") \
range(0, max_uintx) \
\
product(uint, ParallelGCBufferWastePct, 10, \
"Wasted fraction of parallel allocation buffer") \
range(0, 100) \

View File

@ -1241,11 +1241,6 @@
<Field type="int" name="compression" label="Compression Level" description="Compression level of the dump, if larger than 0 we use gzip compression with this level" />
</Event>
<Event name="GCLocker" category="Java Virtual Machine, GC, Detailed" label="GC Locker" startTime="true" thread="true" stackTrace="true">
<Field type="uint" name="lockCount" label="Lock Count" description="The number of Java threads in a critical section when the GC locker is started" />
<Field type="uint" name="stallCount" label="Stall Count" description="The number of Java threads stalled by the GC locker" />
</Event>
<Event name="FinalizerStatistics" category="Java Application, Statistics" label="Finalizer Statistics" description="Per class statistics about finalizers" thread="false" startTime="false" period="endChunk">
<Field type="Class" name="finalizableClass" label="Class Overriding Finalize" />
<Field type="Symbol" name="codeSource" label="Code Source" description="URL from where the class was loaded" />

View File

@ -22,7 +22,6 @@
*/
#include "downcallLinker.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include <cerrno>

View File

@ -40,7 +40,6 @@
#include "classfile/vmSymbols.hpp"
#include "compiler/compiler_globals.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/stringdedup/stringDedup.hpp"
#include "interpreter/linkResolver.hpp"
#include "jni.h"

View File

@ -45,7 +45,6 @@
#include "compiler/methodMatcher.hpp"
#include "gc/shared/concurrentGCBreakpoints.hpp"
#include "gc/shared/gcConfig.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "gc/shared/genArguments.hpp"
#include "jvm.h"
#include "jvmtifiles/jvmtiEnv.hpp"
@ -2649,14 +2648,6 @@ WB_ENTRY(jstring, WB_GetLibcName(JNIEnv* env, jobject o))
return info_string;
WB_END
WB_ENTRY(void, WB_LockCritical(JNIEnv* env, jobject wb))
GCLocker::lock_critical(thread);
WB_END
WB_ENTRY(void, WB_UnlockCritical(JNIEnv* env, jobject wb))
GCLocker::unlock_critical(thread);
WB_END
WB_ENTRY(void, WB_PinObject(JNIEnv* env, jobject wb, jobject o))
#if INCLUDE_G1GC
if (!UseG1GC) {
@ -2997,8 +2988,6 @@ static JNINativeMethod methods[] = {
{CC"waitUnsafe", CC"(I)V", (void*)&WB_WaitUnsafe},
{CC"getLibcName", CC"()Ljava/lang/String;", (void*)&WB_GetLibcName},
{CC"lockCritical", CC"()V", (void*)&WB_LockCritical},
{CC"unlockCritical", CC"()V", (void*)&WB_UnlockCritical},
{CC"pinObject", CC"(Ljava/lang/Object;)V", (void*)&WB_PinObject},
{CC"unpinObject", CC"(Ljava/lang/Object;)V", (void*)&WB_UnpinObject},
{CC"setVirtualThreadsNotifyJvmtiMode", CC"(Z)Z", (void*)&WB_SetVirtualThreadsNotifyJvmtiMode},

View File

@ -935,6 +935,9 @@ private:
assert(_jni_active_critical >= 0, "JNI critical nesting problem?");
}
// Atomic version; invoked by a thread other than the owning thread.
bool in_critical_atomic() { return Atomic::load(&_jni_active_critical) > 0; }
// Checked JNI: is the programmer required to check for exceptions, if so specify
// which function name. Returning to a Java frame should implicitly clear the
// pending check, this is done for Native->Java transitions (i.e. user JNI code).

View File

@ -46,7 +46,6 @@ Mutex* CompiledIC_lock = nullptr;
Mutex* VMStatistic_lock = nullptr;
Mutex* JmethodIdCreation_lock = nullptr;
Mutex* JfieldIdCreation_lock = nullptr;
Monitor* JNICritical_lock = nullptr;
Mutex* JvmtiThreadState_lock = nullptr;
Monitor* EscapeBarrier_lock = nullptr;
Monitor* JvmtiVTMSTransition_lock = nullptr;
@ -339,7 +338,6 @@ void mutex_init() {
#endif
MUTEX_DEFL(Module_lock , PaddedMutex , ClassLoaderDataGraph_lock);
MUTEX_DEFL(SystemDictionary_lock , PaddedMonitor, Module_lock);
MUTEX_DEFL(JNICritical_lock , PaddedMonitor, AdapterHandlerLibrary_lock); // used for JNI critical regions
#if INCLUDE_JVMCI
// JVMCIRuntime_lock must be acquired before JVMCI_lock to avoid deadlock
MUTEX_DEFL(JVMCI_lock , PaddedMonitor, JVMCIRuntime_lock);

View File

@ -44,7 +44,6 @@ extern Mutex* CompiledIC_lock; // a lock used to guard compile
extern Mutex* VMStatistic_lock; // a lock used to guard statistics count increment
extern Mutex* JmethodIdCreation_lock; // a lock on creating JNI method identifiers
extern Mutex* JfieldIdCreation_lock; // a lock on creating JNI static field identifiers
extern Monitor* JNICritical_lock; // a lock used while entering and exiting JNI critical regions, allows GC to sometimes get in
extern Mutex* JvmtiThreadState_lock; // a lock on modification of JVMTI thread data
extern Monitor* EscapeBarrier_lock; // a lock to sync reallocating and relocking objects because of JVMTI access
extern Monitor* JvmtiVTMSTransition_lock; // a lock for Virtual Thread Mount State transition (VTMS transition) management

View File

@ -408,9 +408,6 @@ void SafepointSynchronize::begin() {
}
#endif // ASSERT
// Update the count of active JNI critical regions
GCLocker::set_jni_lock_count(_current_jni_active_count);
post_safepoint_synchronize_event(sync_event,
_safepoint_id,
initial_running,

View File

@ -37,7 +37,6 @@
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "jvm.h"

View File

@ -32,7 +32,6 @@ public enum GCCause {
_scavenge_alot ("ScavengeAlot"),
_allocation_profiler ("Allocation Profiler"),
_jvmti_force_gc ("JvmtiEnv ForceGarbageCollection"),
_gc_locker ("GCLocker Initiated GC"),
_heap_inspection ("Heap Inspection Initiated GC"),
_heap_dump ("Heap Dump Initiated GC"),
_wb_young_gc ("WhiteBox Initiated Young GC"),

View File

@ -898,12 +898,6 @@
<setting name="period">5 s</setting>
</event>
<event name="jdk.GCLocker">
<setting name="enabled">true</setting>
<setting name="threshold">1 s</setting>
<setting name="stackTrace">true</setting>
</event>
<event name="jdk.FinalizerStatistics">
<setting name="enabled">true</setting>
<setting name="period">endChunk</setting>

View File

@ -898,12 +898,6 @@
<setting name="period">5 s</setting>
</event>
<event name="jdk.GCLocker">
<setting name="enabled">true</setting>
<setting name="threshold">100 ms</setting>
<setting name="stackTrace">true</setting>
</event>
<event name="jdk.FinalizerStatistics">
<setting name="enabled">true</setting>
<setting name="period">endChunk</setting>

View File

@ -96,7 +96,6 @@ gc/TestAlwaysPreTouchBehavior.java#Shenandoah 8334513 generic-all
gc/TestAlwaysPreTouchBehavior.java#G1 8334513 generic-all
gc/TestAlwaysPreTouchBehavior.java#Z 8334513 generic-all
gc/TestAlwaysPreTouchBehavior.java#Epsilon 8334513 generic-all
gc/stress/gclocker/TestExcessGCLockerCollections.java 8229120 generic-all
gc/shenandoah/oom/TestAllocOutOfMemory.java#large 8344312 linux-ppc64le
gc/shenandoah/TestEvilSyncBug.java#generational 8345501 generic-all
@ -170,8 +169,6 @@ vmTestbase/nsk/jvmti/AttachOnDemand/attach045/TestDescription.java 8202971 gener
vmTestbase/nsk/jvmti/scenarios/capability/CM03/cm03t001/TestDescription.java 8073470 linux-all
vmTestbase/nsk/jvmti/InterruptThread/intrpthrd003/TestDescription.java 8288911 macosx-all
vmTestbase/gc/lock/jni/jnilock002/TestDescription.java 8192647 generic-all
vmTestbase/jit/escape/LockCoarsening/LockCoarsening001.java 8148743 generic-all
vmTestbase/jit/escape/LockCoarsening/LockCoarsening002.java 8208259 generic-all

View File

@ -1,180 +0,0 @@
/*
* Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package gc.stress.gclocker;
/*
* @test TestExcessGCLockerCollections
* @bug 8048556
* @summary Check for GC Locker initiated GCs that immediately follow another
* GC and so have very little needing to be collected.
* @requires vm.gc != "Z"
* @requires vm.gc != "Epsilon"
* @requires vm.gc != "Shenandoah"
* @requires vm.gc != "G1"
* @requires vm.gc != null
* @library /test/lib
* @modules java.base/jdk.internal.misc
* @run driver/timeout=1000 gc.stress.gclocker.TestExcessGCLockerCollections 300 4 2
*/
import java.util.HashMap;
import java.util.Map;
import java.util.zip.Deflater;
import java.util.ArrayList;
import java.util.Arrays;
import jdk.test.lib.process.ProcessTools;
import jdk.test.lib.process.OutputAnalyzer;
// Workload driven by TestExcessGCLockerCollections: runs plain allocating
// threads alongside threads that repeatedly enter JNI critical sections
// (via Deflater) for a fixed duration, to provoke GCLocker-initiated GCs.
class TestExcessGCLockerCollectionsAux {
static private final int LARGE_MAP_SIZE = 64 * 1024;
static private final int MAP_ARRAY_LENGTH = 4;
static private final int MAP_SIZE = 1024;
static private final int BYTE_ARRAY_LENGTH = 128 * 1024;
static private void println(String str) { System.out.println(str); }
// volatile: written by main, read by all worker threads as the stop flag.
static private volatile boolean keepRunning = true;
// Builds a map of `size` Integer->String entries; used both for the
// long-lived largeMap and for short-lived garbage in AllocatingWorker.
static Map<Integer,String> populateMap(int size) {
Map<Integer,String> map = new HashMap<Integer,String>();
for (int i = 0; i < size; i += 1) {
Integer keyInt = Integer.valueOf(i);
String valStr = "value is [" + i + "]";
map.put(keyInt,valStr);
}
return map;
}
// Continuously allocates maps, keeping only the last MAP_ARRAY_LENGTH
// alive so most of the allocation becomes collectable garbage.
static private class AllocatingWorker implements Runnable {
private final Object[] array = new Object[MAP_ARRAY_LENGTH];
private int arrayIndex = 0;
private void doStep() {
Map<Integer,String> map = populateMap(MAP_SIZE);
array[arrayIndex] = map;
arrayIndex = (arrayIndex + 1) % MAP_ARRAY_LENGTH;
}
public void run() {
while (keepRunning) {
doStep();
}
}
}
// Repeatedly deflates a byte array; Deflater.deflate presumably enters a
// JNI critical section internally, exercising the GC locker — confirm
// against the JDK's zip implementation.
static private class JNICriticalWorker implements Runnable {
private int count;
private void doStep() {
byte[] inputArray = new byte[BYTE_ARRAY_LENGTH];
for (int i = 0; i < inputArray.length; i += 1) {
inputArray[i] = (byte) (count + i);
}
Deflater deflater = new Deflater();
deflater.setInput(inputArray);
deflater.finish();
byte[] outputArray = new byte[2 * inputArray.length];
deflater.deflate(outputArray);
count += 1;
}
public void run() {
while (keepRunning) {
doStep();
}
}
}
// Long-lived heap ballast, kept reachable via this static field.
static public Map<Integer,String> largeMap;
// args: [0] duration in seconds, [1] allocating-thread count,
// [2] JNI-critical-thread count. Starts the workers, sleeps for the
// duration, then signals them to stop (threads are not joined).
static public void main(String args[]) {
long durationSec = Long.parseLong(args[0]);
int allocThreadNum = Integer.parseInt(args[1]);
int jniCriticalThreadNum = Integer.parseInt(args[2]);
println("Running for " + durationSec + " secs");
largeMap = populateMap(LARGE_MAP_SIZE);
println("Starting " + allocThreadNum + " allocating threads");
for (int i = 0; i < allocThreadNum; i += 1) {
new Thread(new AllocatingWorker()).start();
}
println("Starting " + jniCriticalThreadNum + " jni critical threads");
for (int i = 0; i < jniCriticalThreadNum; i += 1) {
new Thread(new JNICriticalWorker()).start();
}
try {
Thread.sleep(durationSec * 1000L);
} catch (InterruptedException e) {
throw new RuntimeException("Test Failure, did not expect an InterruptedException", e);
}
println("Done.");
keepRunning = false;
}
}
// Driver: launches TestExcessGCLockerCollectionsAux in a child JVM and
// scans its stdout for GCLocker-initiated GC log lines. Any such GC is
// expected to reclaim a non-trivial amount (>= 100M); a single-digit or
// two-digit megabyte value (BAD_LOCKER) indicates an excessive, nearly
// empty GCLocker collection and fails the test.
public class TestExcessGCLockerCollections {
    private static final String locker =
        "\\[gc\\s*\\] .* \\(GCLocker Initiated GC\\)";
    // Matches any GCLocker GC line with a size in megabytes.
    private static final String ANY_LOCKER = locker + " [1-9][0-9]*M";
    // Matches the undesirable case: only 1-99M involved.
    private static final String BAD_LOCKER = locker + " [1-9][0-9]?M";

    private static final String[] COMMON_OPTIONS = new String[] {
        "-Xmx1G", "-Xms1G", "-Xmn256M", "-Xlog:gc,gc+ergo*=debug,gc+ergo+cset=trace:x.log", "-XX:+UnlockDiagnosticVMOptions", "-XX:+VerifyAfterGC"};

    public static void main(String args[]) throws Exception {
        if (args.length < 3) {
            System.out.println("usage: TestExcessGCLockerCollectionsAux <duration sec> <alloc threads> <jni critical threads>");
            throw new RuntimeException("Invalid arguments");
        }

        // Child JVM command line: common options, then the workload main
        // class, then the pass-through workload arguments.
        ArrayList<String> childArgs = new ArrayList<String>(Arrays.asList(COMMON_OPTIONS));
        childArgs.add(TestExcessGCLockerCollectionsAux.class.getName());
        for (String arg : args) {
            childArgs.add(arg);
        }

        // GC and other options obtained from test framework.
        OutputAnalyzer output = ProcessTools.executeTestJava(childArgs);
        output.shouldHaveExitValue(0);
        output.stdoutShouldMatch(ANY_LOCKER);
        output.stdoutShouldNotMatch(BAD_LOCKER);
    }
}

View File

@ -39,8 +39,7 @@ public class TestGCCauseWithParallelOld {
String testID = "ParallelOld";
String[] vmFlags = {"-XX:+UseParallelGC"};
String[] gcNames = {GCHelper.gcParallelScavenge, GCHelper.gcParallelOld};
String[] gcCauses = {"Allocation Failure", "System.gc()", "GCLocker Initiated GC",
"CodeCache GC Threshold"};
String[] gcCauses = {"Allocation Failure", "System.gc()", "CodeCache GC Threshold"};
GCGarbageCollectionUtil.test(testID, vmFlags, gcNames, gcCauses);
}
}

View File

@ -39,8 +39,7 @@ public class TestGCCauseWithSerial {
String testID = "Serial";
String[] vmFlags = {"-XX:+UseSerialGC"};
String[] gcNames = {GCHelper.gcDefNew, GCHelper.gcSerialOld};
String[] gcCauses = {"Allocation Failure", "System.gc()", "GCLocker Initiated GC",
"CodeCache GC Threshold"};
String[] gcCauses = {"Allocation Failure", "System.gc()", "CodeCache GC Threshold"};
GCGarbageCollectionUtil.test(testID, vmFlags, gcNames, gcCauses);
}
}

View File

@ -1,133 +0,0 @@
/*
* Copyright (c) 2021, Alibaba Group Holding Limited. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test TestGCLockerEvent
* @requires vm.flagless
* @requires vm.hasJFR
* @requires vm.gc.Serial | vm.gc.Parallel
* @requires vm.gc != null
* @library /test/lib
* @build jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xmx32m -Xms32m -Xmn12m jdk.jfr.event.gc.detailed.TestGCLockerEvent
*/
package jdk.jfr.event.gc.detailed;
import static jdk.test.lib.Asserts.assertTrue;
import java.util.concurrent.CountDownLatch;
import jdk.jfr.Recording;
import jdk.jfr.consumer.RecordedEvent;
import jdk.test.lib.jfr.EventNames;
import jdk.test.lib.jfr.Events;
import jdk.test.whitebox.WhiteBox;
// Verifies the JFR jdk.GCLocker event: some threads hold the GC locker via
// WhiteBox.lockCritical(), other threads allocate until they stall, then
// the locker is released and the recorded event's lockCount/stallCount
// fields are checked.
public class TestGCLockerEvent {
private static final String EVENT_NAME = EventNames.GCLocker;
private static final int CRITICAL_THREAD_COUNT = 4;
// Counted down once per critical thread after it has taken the lock.
private static final CountDownLatch LOCK_COUNT_SIGNAL = new CountDownLatch(CRITICAL_THREAD_COUNT);
// Released by main to let all critical threads unlock together.
private static final CountDownLatch UNLOCK_SIGNAL = new CountDownLatch(1);
private static final CountDownLatch UNLOCK_COUNT_SIGNAL = new CountDownLatch(CRITICAL_THREAD_COUNT);
private static final String CRITICAL_THREAD_NAME_PREFIX = "Critical Thread ";
private static final int STALL_THREAD_COUNT = 8;
private static final CountDownLatch STALL_COUNT_SIGNAL = new CountDownLatch(STALL_THREAD_COUNT);
private static final int LOOP = 32;
private static final int M = 1024 * 1024;
public static void main(String[] args) throws Exception {
var recording = new Recording();
recording.enable(EVENT_NAME);
recording.start();
startCriticalThreads();
LOCK_COUNT_SIGNAL.await();
startStallThreads();
STALL_COUNT_SIGNAL.await();
// Wait for the allocating threads to become stalled by the GC locker.
// NOTE(review): a fixed 1.5s sleep assumes stalls happen by then.
Thread.sleep(1500);
UNLOCK_SIGNAL.countDown();
UNLOCK_COUNT_SIGNAL.await();
recording.stop();
// Verify recording: exactly the configured lock count, and at least as
// many stalls as stall threads; the event thread must be a critical one.
var all = Events.fromRecording(recording);
Events.hasEvents(all);
var event = all.getFirst();
assertTrue(Events.isEventType(event, EVENT_NAME));
Events.assertField(event, "lockCount").equal(CRITICAL_THREAD_COUNT);
Events.assertField(event, "stallCount").atLeast(STALL_THREAD_COUNT);
assertTrue(event.getThread().getJavaName().startsWith(CRITICAL_THREAD_NAME_PREFIX));
recording.close();
}
// Starts CRITICAL_THREAD_COUNT threads that enter a WhiteBox critical
// section and hold it until UNLOCK_SIGNAL fires.
private static void startCriticalThreads() {
for (var i = 0; i < CRITICAL_THREAD_COUNT; i++) {
new Thread(() -> {
try {
WhiteBox.getWhiteBox().lockCritical();
LOCK_COUNT_SIGNAL.countDown();
UNLOCK_SIGNAL.await();
WhiteBox.getWhiteBox().unlockCritical();
UNLOCK_COUNT_SIGNAL.countDown();
// NOTE(review): InterruptedException is silently swallowed here;
// an interrupt before unlockCritical() would leave the lock held.
} catch (InterruptedException ex) {
}
}, CRITICAL_THREAD_NAME_PREFIX + i).start();
}
}
// Starts STALL_THREAD_COUNT threads that allocate 1M arrays in a loop so
// they stall on allocation while the GC locker is held.
private static void startStallThreads() {
var ts = new Thread[STALL_THREAD_COUNT];
for (var i = 0; i < STALL_THREAD_COUNT; i++) {
ts[i] = new Thread(() -> {
STALL_COUNT_SIGNAL.countDown();
for (int j = 0; j < LOOP; j++) {
byte[] bytes = new byte[M];
}
});
}
for (Thread t : ts) {
t.start();
}
}
}

View File

@ -151,7 +151,6 @@ public class EventNames {
public static final String ZRelocationSetGroup = PREFIX + "ZRelocationSetGroup";
public static final String ZUncommit = PREFIX + "ZUncommit";
public static final String ZUnmap = PREFIX + "ZUnmap";
public static final String GCLocker = PREFIX + "GCLocker";
public static final String SystemGC = PREFIX + "SystemGC";
public static final String GCCPUTime = PREFIX + "GCCPUTime";

View File

@ -786,10 +786,6 @@ public class WhiteBox {
public native void waitUnsafe(int time_ms);
public native void lockCritical();
public native void unlockCritical();
public native void pinObject(Object o);
public native void unpinObject(Object o);