diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
index 81412e2f614..213e8f95d63 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.cpp
@@ -275,38 +275,46 @@ HeapWord* ParallelScavengeHeap::mem_allocate(size_t size) {
   return mem_allocate_work(size, is_tlab);
 }
 
+HeapWord* ParallelScavengeHeap::mem_allocate_cas_noexpand(size_t size, bool is_tlab) {
+  // Try young-gen first.
+  HeapWord* result = young_gen()->allocate(size);
+  if (result != nullptr) {
+    return result;
+  }
+
+  // Try allocating from the old gen for non-TLAB in certain scenarios.
+  if (!is_tlab) {
+    if (!should_alloc_in_eden(size) || _is_heap_almost_full) {
+      result = old_gen()->cas_allocate_noexpand(size);
+      if (result != nullptr) {
+        return result;
+      }
+    }
+  }
+
+  return nullptr;
+}
+
 HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size, bool is_tlab) {
   for (uint loop_count = 0; /* empty */; ++loop_count) {
-    // Try young-gen first.
-    HeapWord* result = young_gen()->allocate(size);
+    HeapWord* result = mem_allocate_cas_noexpand(size, is_tlab);
     if (result != nullptr) {
       return result;
     }
 
-    // Try allocating from the old gen for non-TLAB in certain scenarios.
-    if (!is_tlab) {
-      if (!should_alloc_in_eden(size) || _is_heap_almost_full) {
-        result = old_gen()->cas_allocate_noexpand(size);
-        if (result != nullptr) {
-          return result;
-        }
-      }
-    }
-
-    // We don't want to have multiple collections for a single filled generation.
-    // To prevent this, each thread tracks the total_collections() value, and if
-    // the count has changed, does not do a new collection.
-    //
-    // The collection count must be read only while holding the heap lock. VM
-    // operations also hold the heap lock during collections. There is a lock
-    // contention case where thread A blocks waiting on the Heap_lock, while
-    // thread B is holding it doing a collection. When thread A gets the lock,
-    // the collection count has already changed. To prevent duplicate collections,
-    // The policy MUST attempt allocations during the same period it reads the
-    // total_collections() value!
+    // Read total_collections() under the lock so that multiple
+    // allocation-failures result in one GC.
     uint gc_count;
     {
       MutexLocker ml(Heap_lock);
+
+      // Re-try after acquiring the lock, because a GC might have occurred
+      // while waiting for this lock.
+      result = mem_allocate_cas_noexpand(size, is_tlab);
+      if (result != nullptr) {
+        return result;
+      }
+
       gc_count = total_collections();
     }
 
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index b1176a1637b..fea827430ca 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -100,6 +100,7 @@ class ParallelScavengeHeap : public CollectedHeap {
 
   inline bool should_alloc_in_eden(size_t size) const;
 
+  HeapWord* mem_allocate_cas_noexpand(size_t size, bool is_tlab);
   HeapWord* mem_allocate_work(size_t size, bool is_tlab);
   HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
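
For context, the handshake this patch refactors can be sketched outside HotSpot. The standalone C++ below is not JDK code: heap_lock, collections, free_words, and the helper names are hypothetical stand-ins for Heap_lock, total_collections(), and the generations' CAS allocators. It shows the same discipline the patch enforces: a lock-free allocation fast path, a retry of that same path under the heap lock, and a collection count read in the same critical section as the failed attempt.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <mutex>

static std::mutex heap_lock;                   // stands in for Heap_lock
static std::atomic<unsigned> collections{0};   // stands in for total_collections()
static std::atomic<std::size_t> free_words{16};

// Stand-in for mem_allocate_cas_noexpand(): CAS on a free counter,
// never expands, returns false on failure.
static bool cas_allocate_noexpand(std::size_t words) {
  std::size_t cur = free_words.load();
  while (cur >= words) {
    if (free_words.compare_exchange_weak(cur, cur - words)) {
      return true;
    }
  }
  return false;
}

// Stand-in for the GC VM operation: collects only if no other thread has
// already collected since `observed` was read (the stale-count check).
static void collect_if_count_unchanged(unsigned observed) {
  std::lock_guard<std::mutex> g(heap_lock);
  if (collections.load() == observed) {
    free_words.store(16);              // "reclaim" space
    collections.fetch_add(1);
  }
}

static bool allocate(std::size_t words) {
  for (int attempts = 0; attempts < 8; ++attempts) {
    if (cas_allocate_noexpand(words)) {
      return true;                     // lock-free fast path
    }

    unsigned observed;
    {
      std::lock_guard<std::mutex> g(heap_lock);
      // Retry under the lock: a GC may have run while we waited for it.
      if (cas_allocate_noexpand(words)) {
        return true;
      }
      // Read the count in the same critical section as the failed attempt.
      observed = collections.load();
    }
    collect_if_count_unchanged(observed);
  }
  return false;
}

int main() {
  std::printf("first:  %s\n", allocate(10) ? "ok" : "fail");
  std::printf("second: %s (collections=%u)\n",
              allocate(10) ? "ok" : "fail", collections.load());
}

The two load-bearing details mirror the patch: the retry under the lock catches space freed by a collection that completed while the thread was blocked on the lock, and reading the count in the same critical section as the failed attempt lets a racing thread's GC request be recognized as stale and dropped instead of collecting twice for one allocation failure.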