diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index 45c364ab35a..3b530895eb1 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -279,17 +279,23 @@ HeapWord* ParallelScavengeHeap::mem_allocate(size_t size,
 HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size,
                                                   bool is_tlab,
                                                   bool* gc_overhead_limit_was_exceeded) {
-  {
+  for (uint loop_count = 0; /* empty */; ++loop_count) {
+    // Try young-gen first.
     HeapWord* result = young_gen()->allocate(size);
     if (result != nullptr) {
       return result;
     }
-  }
 
-  uint loop_count = 0;
-  uint gc_count = 0;
+    // Try allocating from the old gen for non-TLAB in certain scenarios.
+    if (!is_tlab) {
+      if (!should_alloc_in_eden(size) || _is_heap_almost_full) {
+        result = old_gen()->cas_allocate_noexpand(size);
+        if (result != nullptr) {
+          return result;
+        }
+      }
+    }
 
-  while (true) {
     // We don't want to have multiple collections for a single filled generation.
     // To prevent this, each thread tracks the total_collections() value, and if
     // the count has changed, does not do a new collection.
@@ -301,49 +307,31 @@ HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size,
     // the collection count has already changed. To prevent duplicate collections,
     // The policy MUST attempt allocations during the same period it reads the
     // total_collections() value!
+    uint gc_count;
     {
       MutexLocker ml(Heap_lock);
       gc_count = total_collections();
-
-      HeapWord* result = young_gen()->allocate(size);
-      if (result != nullptr) {
-        return result;
-      }
-
-      // Try allocating from the old gen for non-TLAB in certain scenarios.
-      if (!is_tlab) {
-        if (!should_alloc_in_eden(size) || _is_heap_almost_full) {
-          result = old_gen()->cas_allocate_noexpand(size);
-          if (result != nullptr) {
-            return result;
-          }
-        }
-      }
     }
 
     {
       VM_ParallelCollectForAllocation op(size, is_tlab, gc_count);
       VMThread::execute(&op);
 
-      // Did the VM operation execute? If so, return the result directly.
-      // This prevents us from looping until time out on requests that can
-      // not be satisfied.
       if (op.gc_succeeded()) {
         assert(is_in_or_null(op.result()), "result not in heap");
-
         return op.result();
       }
-      // Was the gc-overhead reached inside the safepoint? If so, this mutator should return null as well for global consistency.
-      if (_gc_overhead_counter >= GCOverheadLimitThreshold) {
-        return nullptr;
-      }
     }
 
-    loop_count++;
+    // Was the gc-overhead reached inside the safepoint? If so, this mutator
+    // should return null as well for global consistency.
+    if (_gc_overhead_counter >= GCOverheadLimitThreshold) {
+      return nullptr;
+    }
+
     if ((QueuedAllocationWarningCount > 0) &&
         (loop_count % QueuedAllocationWarningCount == 0)) {
-      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
-      log_warning(gc)("\tsize=%zu", size);
+      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times, size=%zu", loop_count, size);
     }
   }
 }
diff --git a/src/hotspot/share/gc/parallel/psOldGen.hpp b/src/hotspot/share/gc/parallel/psOldGen.hpp
index 23fde1f2fe0..77ae5510b31 100644
--- a/src/hotspot/share/gc/parallel/psOldGen.hpp
+++ b/src/hotspot/share/gc/parallel/psOldGen.hpp
@@ -108,7 +108,7 @@ class PSOldGen : public CHeapObj<mtGC> {
 
   void shrink(size_t bytes);
 
-  // Invoked by mutators and GC-workers.
+  // Used by GC-workers during GC or for CDS at startup.
   HeapWord* allocate(size_t word_size) {
     HeapWord* res;
     do {
@@ -120,7 +120,6 @@ class PSOldGen : public CHeapObj<mtGC> {
 
   // Invoked by mutators before attempting GC.
   HeapWord* cas_allocate_noexpand(size_t word_size) {
-    assert_locked_or_safepoint(Heap_lock);
     HeapWord* res = object_space()->cas_allocate(word_size);
     if (res != nullptr) {
       _start_array->update_for_block(res, res + word_size);