mirror of https://github.com/openjdk/jdk.git
8367737: Parallel: Retry allocation after lock acquire in mem_allocate_work
Reviewed-by: fandreuzzi, tschatzl, iwalulya
commit c28142e7c1 (parent 6df01178c0)
@@ -275,38 +275,46 @@ HeapWord* ParallelScavengeHeap::mem_allocate(size_t size) {
   return mem_allocate_work(size, is_tlab);
 }
 
+HeapWord* ParallelScavengeHeap::mem_allocate_cas_noexpand(size_t size, bool is_tlab) {
+  // Try young-gen first.
+  HeapWord* result = young_gen()->allocate(size);
+  if (result != nullptr) {
+    return result;
+  }
+
+  // Try allocating from the old gen for non-TLAB in certain scenarios.
+  if (!is_tlab) {
+    if (!should_alloc_in_eden(size) || _is_heap_almost_full) {
+      result = old_gen()->cas_allocate_noexpand(size);
+      if (result != nullptr) {
+        return result;
+      }
+    }
+  }
+
+  return nullptr;
+}
+
 HeapWord* ParallelScavengeHeap::mem_allocate_work(size_t size, bool is_tlab) {
   for (uint loop_count = 0; /* empty */; ++loop_count) {
-    // Try young-gen first.
-    HeapWord* result = young_gen()->allocate(size);
+    HeapWord* result = mem_allocate_cas_noexpand(size, is_tlab);
     if (result != nullptr) {
       return result;
     }
 
-    // Try allocating from the old gen for non-TLAB in certain scenarios.
-    if (!is_tlab) {
-      if (!should_alloc_in_eden(size) || _is_heap_almost_full) {
-        result = old_gen()->cas_allocate_noexpand(size);
-        if (result != nullptr) {
-          return result;
-        }
-      }
-    }
-
-    // We don't want to have multiple collections for a single filled generation.
-    // To prevent this, each thread tracks the total_collections() value, and if
-    // the count has changed, does not do a new collection.
-    //
-    // The collection count must be read only while holding the heap lock. VM
-    // operations also hold the heap lock during collections. There is a lock
-    // contention case where thread A blocks waiting on the Heap_lock, while
-    // thread B is holding it doing a collection. When thread A gets the lock,
-    // the collection count has already changed. To prevent duplicate collections,
-    // The policy MUST attempt allocations during the same period it reads the
-    // total_collections() value!
+    // Read total_collections() under the lock so that multiple
+    // allocation-failures result in one GC.
     uint gc_count;
     {
       MutexLocker ml(Heap_lock);
+      // Re-try after acquiring the lock, because a GC might have occurred
+      // while waiting for this lock.
+      result = mem_allocate_cas_noexpand(size, is_tlab);
+      if (result != nullptr) {
+        return result;
+      }
+
       gc_count = total_collections();
     }
 
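The core of the fix is the second mem_allocate_cas_noexpand call inside the MutexLocker block: a thread that blocked on Heap_lock may wake up to find that another thread's collection has already freed space, so it can allocate and return without scheduling a redundant GC. The snapshot of total_collections(), taken under the same lock, then lets the threads that still fail collapse their requests into a single collection.

Below is a minimal, self-contained C++ sketch of the same retry-under-lock pattern. Every name in it (ToyHeap, fast_alloc, collect, CAPACITY) is an invented stand-in for illustration, not HotSpot code, and the pretend GC stands in for the VM operation that the real heap schedules.

#include <atomic>
#include <cstddef>
#include <mutex>

// All names here are invented stand-ins; this sketches the pattern,
// not the HotSpot implementation.
class ToyHeap {
  static constexpr size_t CAPACITY = 1024;
  std::mutex _heap_lock;                 // plays the role of Heap_lock
  std::atomic<size_t> _free{CAPACITY};   // space available to the fast path
  unsigned _total_collections = 0;       // read and written under _heap_lock
  int _token = 0;                        // dummy object to hand out

  // Lock-free fast path: CAS the free counter down; never expands the heap
  // and never collects. Stands in for mem_allocate_cas_noexpand().
  void* fast_alloc(size_t size) {
    size_t cur = _free.load();
    while (cur >= size) {
      if (_free.compare_exchange_weak(cur, cur - size)) {
        return &_token;                  // any non-null result will do here
      }
    }
    return nullptr;                      // fast path exhausted
  }

  // Pretend GC; the real code schedules a VM operation instead.
  void collect() {
    _free.store(CAPACITY);
    ++_total_collections;
  }

public:
  void* allocate(size_t size) {
    for (;;) {
      if (void* p = fast_alloc(size)) {
        return p;
      }
      unsigned gc_count;
      {
        std::lock_guard<std::mutex> ml(_heap_lock);
        // Re-try after acquiring the lock: another thread may have
        // collected while this thread was blocked on it.
        if (void* p = fast_alloc(size)) {
          return p;
        }
        gc_count = _total_collections;   // snapshot under the lock
      }
      {
        std::lock_guard<std::mutex> ml(_heap_lock);
        // Collect only if nobody has collected since the snapshot, so
        // N threads failing at once trigger one GC rather than N.
        if (_total_collections == gc_count) {
          collect();
        }
      }
      // Loop and retry the whole allocation.
    }
  }
};

With several threads hammering allocate() at once, only the first one through the second critical section performs the pretend GC; the rest observe the changed counter, skip collect(), and succeed on their next fast-path attempt.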
@@ -100,6 +100,7 @@ class ParallelScavengeHeap : public CollectedHeap {
 
   inline bool should_alloc_in_eden(size_t size) const;
 
+  HeapWord* mem_allocate_cas_noexpand(size_t size, bool is_tlab);
   HeapWord* mem_allocate_work(size_t size, bool is_tlab);
   HeapWord* expand_heap_and_allocate(size_t size, bool is_tlab);
 
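The header hunk shows the design choice that makes the retry cheap to express: the young-gen and old-gen fast paths are factored into the new mem_allocate_cas_noexpand helper, so exactly the same attempt runs once before taking Heap_lock and once immediately after, and the two call sites cannot drift apart as the allocation policy evolves.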