8357306: G1: Remove _gc_succeeded from VM_G1CollectForAllocation because it is always true

Reviewed-by: ayang, sjohanss
This commit is contained in:
Thomas Schatzl 2025-05-23 12:01:45 +00:00
parent f5e6d2c93c
commit 48df41b699
4 changed files with 33 additions and 53 deletions

View File

@ -840,7 +840,7 @@ void G1CollectedHeap::verify_after_full_collection() {
_ref_processor_cm->verify_no_references_recorded();
}
bool G1CollectedHeap::do_full_collection(bool clear_all_soft_refs,
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs,
bool do_maximal_compaction,
size_t allocation_word_size) {
assert_at_safepoint_on_vm_thread();
@ -855,9 +855,6 @@ bool G1CollectedHeap::do_full_collection(bool clear_all_soft_refs,
collector.prepare_collection();
collector.collect();
collector.complete_collection(allocation_word_size);
// Full collection was successfully completed.
return true;
}
void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
@ -870,16 +867,12 @@ void G1CollectedHeap::do_full_collection(bool clear_all_soft_refs) {
size_t(0) /* allocation_word_size */);
}
bool G1CollectedHeap::upgrade_to_full_collection() {
void G1CollectedHeap::upgrade_to_full_collection() {
GCCauseSetter compaction(this, GCCause::_g1_compaction_pause);
log_info(gc, ergo)("Attempting full compaction clearing soft references");
bool success = do_full_collection(true /* clear_all_soft_refs */,
false /* do_maximal_compaction */,
size_t(0) /* allocation_word_size */);
// do_full_collection only fails if blocked by GC locker and that can't
// be the case here since we only call this when already completed one gc.
assert(success, "invariant");
return success;
do_full_collection(true /* clear_all_soft_refs */,
false /* do_maximal_compaction */,
size_t(0) /* allocation_word_size */);
}
void G1CollectedHeap::resize_heap_if_necessary(size_t allocation_word_size) {
@ -900,9 +893,7 @@ void G1CollectedHeap::resize_heap_if_necessary(size_t allocation_word_size) {
HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
bool do_gc,
bool maximal_compaction,
bool expect_null_mutator_alloc_region,
bool* gc_succeeded) {
*gc_succeeded = true;
bool expect_null_mutator_alloc_region) {
// Let's attempt the allocation first.
HeapWord* result =
attempt_allocation_at_safepoint(word_size,
@ -930,16 +921,15 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
} else {
log_info(gc, ergo)("Attempting full compaction");
}
*gc_succeeded = do_full_collection(maximal_compaction /* clear_all_soft_refs */,
maximal_compaction /* do_maximal_compaction */,
word_size /* allocation_word_size */);
do_full_collection(maximal_compaction /* clear_all_soft_refs */,
maximal_compaction /* do_maximal_compaction */,
word_size /* allocation_word_size */);
}
return nullptr;
}
HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
bool* succeeded) {
HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size) {
assert_at_safepoint_on_vm_thread();
// Attempts to allocate followed by Full GC.
@ -947,10 +937,9 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
satisfy_failed_allocation_helper(word_size,
true, /* do_gc */
false, /* maximal_compaction */
false, /* expect_null_mutator_alloc_region */
succeeded);
false /* expect_null_mutator_alloc_region */);
if (result != nullptr || !*succeeded) {
if (result != nullptr) {
return result;
}
@ -958,10 +947,9 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
result = satisfy_failed_allocation_helper(word_size,
true, /* do_gc */
true, /* maximal_compaction */
true, /* expect_null_mutator_alloc_region */
succeeded);
true /* expect_null_mutator_alloc_region */);
if (result != nullptr || !*succeeded) {
if (result != nullptr) {
return result;
}
@ -969,8 +957,7 @@ HeapWord* G1CollectedHeap::satisfy_failed_allocation(size_t word_size,
result = satisfy_failed_allocation_helper(word_size,
false, /* do_gc */
false, /* maximal_compaction */
true, /* expect_null_mutator_alloc_region */
succeeded);
true /* expect_null_mutator_alloc_region */);
if (result != nullptr) {
return result;
@ -2285,10 +2272,9 @@ HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
VMThread::execute(&op);
HeapWord* result = op.result();
bool ret_succeeded = op.prologue_succeeded() && op.gc_succeeded();
assert(result == nullptr || ret_succeeded,
*succeeded = op.gc_succeeded();
assert(result == nullptr || *succeeded,
"the result should be null if the VM did not succeed");
*succeeded = ret_succeeded;
assert_heap_not_locked();
return result;
@ -2415,12 +2401,11 @@ void G1CollectedHeap::expand_heap_after_young_collection(){
}
}
bool G1CollectedHeap::do_collection_pause_at_safepoint() {
void G1CollectedHeap::do_collection_pause_at_safepoint() {
assert_at_safepoint_on_vm_thread();
guarantee(!is_stw_gc_active(), "collection is not reentrant");
do_collection_pause_at_safepoint_helper();
return true;
}
G1HeapPrinterMark::G1HeapPrinterMark(G1CollectedHeap* g1h) : _g1h(g1h), _heap_transition(g1h) {

View File

@ -481,7 +481,7 @@ private:
// be accounted for in case shrinking of the heap happens.
// - it always completes: with the GC locker gone there is no longer any
// condition that can prevent the full collection from running.
bool do_full_collection(bool clear_all_soft_refs,
void do_full_collection(bool clear_all_soft_refs,
bool do_maximal_compaction,
size_t allocation_word_size);
@ -489,13 +489,12 @@ private:
void do_full_collection(bool clear_all_soft_refs) override;
// Helper to do a full collection that clears soft references.
bool upgrade_to_full_collection();
void upgrade_to_full_collection();
// Callback from VM_G1CollectForAllocation operation.
// This function does everything necessary/possible to satisfy a
// failed allocation request (including collection, expansion, etc.)
HeapWord* satisfy_failed_allocation(size_t word_size,
bool* succeeded);
HeapWord* satisfy_failed_allocation(size_t word_size);
// Internal helpers used during full GC to split it up to
// increase readability.
bool abort_concurrent_cycle();
@ -510,8 +509,7 @@ private:
HeapWord* satisfy_failed_allocation_helper(size_t word_size,
bool do_gc,
bool maximal_compaction,
bool expect_null_mutator_alloc_region,
bool* gc_succeeded);
bool expect_null_mutator_alloc_region);
// Attempting to expand the heap sufficiently
// to support an allocation of the given "word_size". If
@ -742,12 +740,10 @@ private:
GCCause::Cause gc_cause);
// Perform an incremental collection at a safepoint, possibly
// followed by a by-policy upgrade to a full collection. Returns
// false if unable to do the collection due to the GC locker being
// active, true otherwise.
// followed by a by-policy upgrade to a full collection.
// precondition: at safepoint on VM thread
// precondition: !is_stw_gc_active()
bool do_collection_pause_at_safepoint();
void do_collection_pause_at_safepoint();
// Helper for do_collection_pause_at_safepoint, containing the guts
// of the incremental collection pause, executed by the vm thread.

View File

@ -102,34 +102,32 @@ void VM_G1TryInitiateConcMark::doit() {
// we've rejected this request.
_whitebox_attached = true;
} else {
_gc_succeeded = g1h->do_collection_pause_at_safepoint();
assert(_gc_succeeded, "No reason to fail");
g1h->do_collection_pause_at_safepoint();
_gc_succeeded = true;
}
}
VM_G1CollectForAllocation::VM_G1CollectForAllocation(size_t word_size,
uint gc_count_before,
GCCause::Cause gc_cause) :
VM_CollectForAllocation(word_size, gc_count_before, gc_cause),
_gc_succeeded(false) {}
VM_CollectForAllocation(word_size, gc_count_before, gc_cause) {}
void VM_G1CollectForAllocation::doit() {
G1CollectedHeap* g1h = G1CollectedHeap::heap();
GCCauseSetter x(g1h, _gc_cause);
// Try a partial collection of some kind.
_gc_succeeded = g1h->do_collection_pause_at_safepoint();
assert(_gc_succeeded, "no reason to fail");
g1h->do_collection_pause_at_safepoint();
if (_word_size > 0) {
// An allocation had been requested. Do it, eventually trying a stronger
// kind of GC.
_result = g1h->satisfy_failed_allocation(_word_size, &_gc_succeeded);
_result = g1h->satisfy_failed_allocation(_word_size);
} else if (g1h->should_upgrade_to_full_gc()) {
// There has been a request to perform a GC to free some space. We have no
// information on how much memory has been asked for. In case there are
// absolutely no regions left to allocate into, do a full compaction.
_gc_succeeded = g1h->upgrade_to_full_collection();
g1h->upgrade_to_full_collection();
}
}

View File

@ -49,6 +49,8 @@ class VM_G1TryInitiateConcMark : public VM_GC_Operation {
bool _cycle_already_in_progress;
bool _whitebox_attached;
bool _terminating;
// The concurrent start pause may be cancelled for various reasons. Keep track
// of this.
bool _gc_succeeded;
public:
@ -65,7 +67,6 @@ public:
};
class VM_G1CollectForAllocation : public VM_CollectForAllocation {
bool _gc_succeeded;
public:
VM_G1CollectForAllocation(size_t word_size,
@ -73,7 +74,7 @@ public:
GCCause::Cause gc_cause);
virtual VMOp_Type type() const { return VMOp_G1CollectForAllocation; }
virtual void doit();
bool gc_succeeded() const { return _gc_succeeded; }
bool gc_succeeded() const { return prologue_succeeded(); }
};
// Concurrent G1 stop-the-world operations such as remark and cleanup.