8371131: Cleanup Thread parameter in CollectedHeap TLAB methods
Reviewed-by: ayang, tschatzl
Commit: 19cca0a2a8 (parent: 21f41c5f49)
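The change is mechanical: the TLAB accounting queries on CollectedHeap drop a Thread* parameter that no collector ever used, and callers stop threading the current thread through. A minimal, self-contained C++ sketch of that shape (ToyHeap and the numbers below are invented for illustration, not the HotSpot sources):

    #include <cstddef>
    #include <iostream>

    // Stand-in for the CollectedHeap TLAB accounting interface after the cleanup:
    // the queries are heap-wide, so they no longer take a Thread*.
    class ToyHeap {
    public:
      virtual ~ToyHeap() = default;
      virtual size_t tlab_capacity() const = 0;          // was: tlab_capacity(Thread*)
      virtual size_t tlab_used() const = 0;               // was: tlab_used(Thread*)
      virtual size_t unsafe_max_tlab_alloc() const = 0;   // was: unsafe_max_tlab_alloc(Thread*)
    };

    // A trivial implementation, roughly in the spirit of the eden-backed heaps below.
    class ToyEdenHeap : public ToyHeap {
      size_t _capacity = 64u * 1024 * 1024;
      size_t _used     = 16u * 1024 * 1024;
    public:
      size_t tlab_capacity() const override         { return _capacity; }
      size_t tlab_used() const override             { return _used; }
      size_t unsafe_max_tlab_alloc() const override { return _capacity - _used; }
    };

    int main() {
      ToyEdenHeap heap;
      // Call sites simply drop the thread argument.
      std::cout << "capacity: "         << heap.tlab_capacity()
                << ", used: "           << heap.tlab_used()
                << ", max TLAB alloc: " << heap.unsafe_max_tlab_alloc() << "\n";
    }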
@@ -91,7 +91,7 @@ GrowableArray<MemoryPool*> EpsilonHeap::memory_pools() {
   return memory_pools;
 }
 
-size_t EpsilonHeap::unsafe_max_tlab_alloc(Thread* thr) const {
+size_t EpsilonHeap::unsafe_max_tlab_alloc() const {
   // Return max allocatable TLAB size, and let allocation path figure out
   // the actual allocation size. Note: result should be in bytes.
   return _max_tlab_size * HeapWordSize;
@@ -90,10 +90,10 @@ public:
                               size_t* actual_size) override;
 
   // TLAB allocation
-  size_t tlab_capacity(Thread* thr) const override { return capacity(); }
-  size_t tlab_used(Thread* thr) const override { return used(); }
-  size_t max_tlab_size() const override { return _max_tlab_size; }
-  size_t unsafe_max_tlab_alloc(Thread* thr) const override;
+  size_t tlab_capacity() const override { return capacity(); }
+  size_t tlab_used() const override { return used(); }
+  size_t max_tlab_size() const override { return _max_tlab_size; }
+  size_t unsafe_max_tlab_alloc() const override;
 
   void collect(GCCause::Cause cause) override;
   void do_full_collection(bool clear_all_soft_refs) override;
@@ -2268,11 +2268,11 @@ bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
   return hr->block_is_obj(addr, hr->parsable_bottom_acquire());
 }
 
-size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
+size_t G1CollectedHeap::tlab_capacity() const {
   return eden_target_length() * G1HeapRegion::GrainBytes;
 }
 
-size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
+size_t G1CollectedHeap::tlab_used() const {
   return _eden.length() * G1HeapRegion::GrainBytes;
 }
 
@@ -2282,7 +2282,7 @@ size_t G1CollectedHeap::max_tlab_size() const {
   return align_down(_humongous_object_threshold_in_words, MinObjAlignment);
 }
 
-size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
+size_t G1CollectedHeap::unsafe_max_tlab_alloc() const {
   return _allocator->unsafe_max_tlab_alloc();
 }
 
@@ -1202,10 +1202,10 @@ public:
   // Section on thread-local allocation buffers (TLABs)
   // See CollectedHeap for semantics.
 
-  size_t tlab_capacity(Thread* ignored) const override;
-  size_t tlab_used(Thread* ignored) const override;
+  size_t tlab_capacity() const override;
+  size_t tlab_used() const override;
   size_t max_tlab_size() const override;
-  size_t unsafe_max_tlab_alloc(Thread* ignored) const override;
+  size_t unsafe_max_tlab_alloc() const override;
 
   inline bool is_in_young(const oop obj) const;
   inline bool requires_barriers(stackChunkOop obj) const override;
@@ -110,7 +110,7 @@ size_t MutableNUMASpace::free_in_words() const {
   return s;
 }
 
-size_t MutableNUMASpace::tlab_capacity(Thread *ignored) const {
+size_t MutableNUMASpace::tlab_capacity() const {
   size_t s = 0;
   for (LGRPSpace* ls : *lgrp_spaces()) {
     s += ls->space()->capacity_in_bytes();
@@ -118,7 +118,7 @@ size_t MutableNUMASpace::tlab_capacity(Thread *ignored) const {
   return s / (size_t)lgrp_spaces()->length();
 }
 
-size_t MutableNUMASpace::tlab_used(Thread *ignored) const {
+size_t MutableNUMASpace::tlab_used() const {
   size_t s = 0;
   for (LGRPSpace* ls : *lgrp_spaces()) {
     s += ls->space()->used_in_bytes();
@@ -126,7 +126,7 @@ size_t MutableNUMASpace::tlab_used(Thread *ignored) const {
   return s / (size_t)lgrp_spaces()->length();
 }
 
-size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *ignored) const {
+size_t MutableNUMASpace::unsafe_max_tlab_alloc() const {
   size_t s = 0;
   for (LGRPSpace* ls : *lgrp_spaces()) {
     s += ls->space()->free_in_bytes();
@@ -166,9 +166,9 @@ public:
   virtual size_t used_in_words() const;
   virtual size_t free_in_words() const;
 
-  virtual size_t tlab_capacity(Thread* ignored) const;
-  virtual size_t tlab_used(Thread* ignored) const;
-  virtual size_t unsafe_max_tlab_alloc(Thread* ignored) const;
+  virtual size_t tlab_capacity() const;
+  virtual size_t tlab_used() const;
+  virtual size_t unsafe_max_tlab_alloc() const;
 
   // Allocation (return null if full)
   virtual HeapWord* cas_allocate(size_t word_size);
@@ -117,11 +117,11 @@ public:
   size_t free_in_bytes() const { return free_in_words() * HeapWordSize; }
 
   // Size computations. Sizes are in heapwords.
-  virtual size_t used_in_words() const { return pointer_delta(top(), bottom()); }
-  virtual size_t free_in_words() const { return pointer_delta(end(), top()); }
-  virtual size_t tlab_capacity(Thread* thr) const { return capacity_in_bytes(); }
-  virtual size_t tlab_used(Thread* thr) const { return used_in_bytes(); }
-  virtual size_t unsafe_max_tlab_alloc(Thread* thr) const { return free_in_bytes(); }
+  virtual size_t used_in_words() const { return pointer_delta(top(), bottom()); }
+  virtual size_t free_in_words() const { return pointer_delta(end(), top()); }
+  virtual size_t tlab_capacity() const { return capacity_in_bytes(); }
+  virtual size_t tlab_used() const { return used_in_bytes(); }
+  virtual size_t unsafe_max_tlab_alloc() const { return free_in_bytes(); }
 
   // Allocation (return null if full)
   virtual HeapWord* cas_allocate(size_t word_size);
@@ -456,16 +456,16 @@ void ParallelScavengeHeap::ensure_parsability(bool retire_tlabs) {
   young_gen()->eden_space()->ensure_parsability();
 }
 
-size_t ParallelScavengeHeap::tlab_capacity(Thread* thr) const {
-  return young_gen()->eden_space()->tlab_capacity(thr);
+size_t ParallelScavengeHeap::tlab_capacity() const {
+  return young_gen()->eden_space()->tlab_capacity();
 }
 
-size_t ParallelScavengeHeap::tlab_used(Thread* thr) const {
-  return young_gen()->eden_space()->tlab_used(thr);
+size_t ParallelScavengeHeap::tlab_used() const {
+  return young_gen()->eden_space()->tlab_used();
 }
 
-size_t ParallelScavengeHeap::unsafe_max_tlab_alloc(Thread* thr) const {
-  return young_gen()->eden_space()->unsafe_max_tlab_alloc(thr);
+size_t ParallelScavengeHeap::unsafe_max_tlab_alloc() const {
+  return young_gen()->eden_space()->unsafe_max_tlab_alloc();
 }
 
 HeapWord* ParallelScavengeHeap::allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) {
@@ -214,9 +214,9 @@ public:
   void ensure_parsability(bool retire_tlabs) override;
   void resize_all_tlabs() override;
 
-  size_t tlab_capacity(Thread* thr) const override;
-  size_t tlab_used(Thread* thr) const override;
-  size_t unsafe_max_tlab_alloc(Thread* thr) const override;
+  size_t tlab_capacity() const override;
+  size_t tlab_used() const override;
+  size_t unsafe_max_tlab_alloc() const override;
 
   void object_iterate(ObjectClosure* cl) override;
   void object_iterate_parallel(ObjectClosure* cl, HeapBlockClaimer* claimer);
@@ -660,16 +660,16 @@ bool SerialHeap::block_is_obj(const HeapWord* addr) const {
   return addr < _old_gen->space()->top();
 }
 
-size_t SerialHeap::tlab_capacity(Thread* thr) const {
+size_t SerialHeap::tlab_capacity() const {
   // Only young-gen supports tlab allocation.
   return _young_gen->tlab_capacity();
 }
 
-size_t SerialHeap::tlab_used(Thread* thr) const {
+size_t SerialHeap::tlab_used() const {
   return _young_gen->tlab_used();
 }
 
-size_t SerialHeap::unsafe_max_tlab_alloc(Thread* thr) const {
+size_t SerialHeap::unsafe_max_tlab_alloc() const {
   return _young_gen->unsafe_max_tlab_alloc();
 }
 
@@ -189,9 +189,9 @@ public:
   bool block_is_obj(const HeapWord* addr) const;
 
   // Section on TLAB's.
-  size_t tlab_capacity(Thread* thr) const override;
-  size_t tlab_used(Thread* thr) const override;
-  size_t unsafe_max_tlab_alloc(Thread* thr) const override;
+  size_t tlab_capacity() const override;
+  size_t tlab_used() const override;
+  size_t unsafe_max_tlab_alloc() const override;
   HeapWord* allocate_new_tlab(size_t min_size,
                               size_t requested_size,
                               size_t* actual_size) override;
@@ -341,17 +341,17 @@ protected:
   virtual void ensure_parsability(bool retire_tlabs);
 
   // The amount of space available for thread-local allocation buffers.
-  virtual size_t tlab_capacity(Thread *thr) const = 0;
+  virtual size_t tlab_capacity() const = 0;
 
-  // The amount of used space for thread-local allocation buffers for the given thread.
-  virtual size_t tlab_used(Thread *thr) const = 0;
+  // The amount of space used for thread-local allocation buffers.
+  virtual size_t tlab_used() const = 0;
 
   virtual size_t max_tlab_size() const;
 
   // An estimate of the maximum allocation that could be performed
   // for thread-local allocation buffers without triggering any
   // collection or expansion activity.
-  virtual size_t unsafe_max_tlab_alloc(Thread *thr) const = 0;
+  virtual size_t unsafe_max_tlab_alloc() const = 0;
 
   // Perform a collection of the heap; intended for use in implementing
   // "System.gc". This probably implies as full a collection as the
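The three pure virtuals above are the whole accounting contract behind TLAB sizing: tlab_capacity() and tlab_used() are heap-wide figures, and unsafe_max_tlab_alloc() is only an optimistic estimate that must not trigger a collection. A tiny illustration of how the quantities relate, with invented numbers:

    #include <cassert>
    #include <cstddef>

    int main() {
      // Invented snapshot of a TLAB-capable space, all values in bytes.
      const size_t capacity = 256u * 1024 * 1024; // tlab_capacity(): space that may hold TLABs
      const size_t used     =  96u * 1024 * 1024; // tlab_used(): space already handed out as TLABs
      const size_t estimate = capacity - used;    // unsafe_max_tlab_alloc(): a hint, not a promise

      // The allocation path may still hand out less than the estimate,
      // but the estimate itself never exceeds what is nominally free.
      assert(used <= capacity);
      assert(estimate <= capacity - used);
      return 0;
    }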
@@ -72,12 +72,11 @@ size_t ThreadLocalAllocBuffer::remaining() {
 }
 
 void ThreadLocalAllocBuffer::accumulate_and_reset_statistics(ThreadLocalAllocStats* stats) {
-  Thread* thr = thread();
-  size_t capacity = Universe::heap()->tlab_capacity(thr);
-  size_t used = Universe::heap()->tlab_used(thr);
+  size_t capacity = Universe::heap()->tlab_capacity();
+  size_t used = Universe::heap()->tlab_used();
 
   _gc_waste += (unsigned)remaining();
-  size_t total_allocated = thr->allocated_bytes();
+  size_t total_allocated = (size_t)thread()->allocated_bytes();
   size_t allocated_since_last_gc = total_allocated - _allocated_before_last_gc;
   _allocated_before_last_gc = total_allocated;
 
@@ -148,7 +147,7 @@ void ThreadLocalAllocBuffer::resize() {
   // Compute the next tlab size using expected allocation amount
   assert(ResizeTLAB, "Should not call this otherwise");
   size_t alloc = (size_t)(_allocation_fraction.average() *
-                          (Universe::heap()->tlab_capacity(thread()) / HeapWordSize));
+                          (Universe::heap()->tlab_capacity() / HeapWordSize));
   size_t new_size = alloc / _target_refills;
 
   new_size = clamp(new_size, min_size(), max_size());
@@ -204,7 +203,7 @@ void ThreadLocalAllocBuffer::initialize() {
 
   set_desired_size(initial_desired_size());
 
-  size_t capacity = Universe::heap()->tlab_capacity(thread()) / HeapWordSize;
+  size_t capacity = Universe::heap()->tlab_capacity() / HeapWordSize;
   if (capacity > 0) {
     // Keep alloc_frac as float and not double to avoid the double to float conversion
     float alloc_frac = desired_size() * target_refills() / (float)capacity;
@@ -268,7 +267,7 @@ size_t ThreadLocalAllocBuffer::initial_desired_size() {
     // Initial size is a function of the average number of allocating threads.
     unsigned int nof_threads = ThreadLocalAllocStats::allocating_threads_avg();
 
-    init_sz = (Universe::heap()->tlab_capacity(thread()) / HeapWordSize) /
+    init_sz = (Universe::heap()->tlab_capacity() / HeapWordSize) /
               (nof_threads * target_refills());
     init_sz = align_object_size(init_sz);
   }
@@ -289,7 +288,7 @@ void ThreadLocalAllocBuffer::print_stats(const char* tag) {
   Thread* thrd = thread();
   size_t waste = _gc_waste + _refill_waste;
   double waste_percent = percent_of(waste, _allocated_size);
-  size_t tlab_used = Universe::heap()->tlab_used(thrd);
+  size_t tlab_used = Universe::heap()->tlab_used();
   log.trace("TLAB: %s thread: " PTR_FORMAT " [id: %2d]"
             " desired_size: %zuKB"
             " slow allocs: %d refill waste: %zuB"
@@ -54,8 +54,7 @@ inline HeapWord* ThreadLocalAllocBuffer::allocate(size_t size) {
 inline size_t ThreadLocalAllocBuffer::compute_size(size_t obj_size) {
   // Compute the size for the new TLAB.
   // The "last" tlab may be smaller to reduce fragmentation.
-  // unsafe_max_tlab_alloc is just a hint.
-  const size_t available_size = Universe::heap()->unsafe_max_tlab_alloc(thread()) / HeapWordSize;
+  const size_t available_size = Universe::heap()->unsafe_max_tlab_alloc() / HeapWordSize;
   size_t new_tlab_size = MIN3(available_size, desired_size() + align_object_size(obj_size), max_size());
 
   // Make sure there's enough room for object and filler int[].
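Taken together, the two sizing paths above work like this: resize() spreads the thread's expected allocation over a target number of refills, and compute_size() clamps the desired size against the heap's (now parameterless) availability estimate. A standalone sketch of that arithmetic with made-up numbers; std::clamp and std::min stand in for HotSpot's clamp and MIN3, and object-size alignment is omitted:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>

    int main() {
      // Illustrative inputs; HotSpot derives these from runtime statistics.
      const size_t heap_word_size  = 8;                  // stand-in for HeapWordSize (bytes per word)
      const size_t tlab_capacity   = 512u * 1024 * 1024; // bytes, as reported by tlab_capacity()
      const double allocation_frac = 0.02;               // this thread's share of TLAB allocation
      const size_t target_refills  = 50;                 // desired refills per GC cycle
      const size_t min_words       = 2u * 1024;          // clamp bounds, made up
      const size_t max_words       = 4u * 1024 * 1024;

      // resize(): expected allocation in words, spread over the target refill count.
      const size_t alloc_words = (size_t)(allocation_frac * (tlab_capacity / heap_word_size));
      const size_t new_size    = std::clamp(alloc_words / target_refills, min_words, max_words);

      // compute_size(): clamp the desired size against what the heap says is available.
      const size_t available_words = (96u * 1024 * 1024) / heap_word_size; // from unsafe_max_tlab_alloc()
      const size_t obj_words       = 64;                                   // the allocation that missed the TLAB
      const size_t new_tlab_size   = std::min({available_words, new_size + obj_words, max_words});

      std::cout << "resize target (words): " << new_size
                << ", next TLAB (words): " << new_tlab_size << "\n";
    }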
@@ -74,7 +74,7 @@ size_t ShenandoahGenerationalHeap::calculate_max_plab() {
 }
 
 // Returns size in bytes
-size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc(Thread *thread) const {
+size_t ShenandoahGenerationalHeap::unsafe_max_tlab_alloc() const {
   return MIN2(ShenandoahHeapRegion::max_tlab_size_bytes(), young_generation()->available());
 }
 
@@ -56,7 +56,7 @@ public:
 
   void print_init_logger() const override;
 
-  size_t unsafe_max_tlab_alloc(Thread *thread) const override;
+  size_t unsafe_max_tlab_alloc() const override;
 
 private:
   // ---------- Evacuations and Promotions
@@ -1507,7 +1507,7 @@ void ShenandoahHeap::gclabs_retire(bool resize) {
 }
 
 // Returns size in bytes
-size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
+size_t ShenandoahHeap::unsafe_max_tlab_alloc() const {
   // Return the max allowed size, and let the allocation path
   // figure out the safe size for current allocation.
   return ShenandoahHeapRegion::max_tlab_size_bytes();
@@ -1649,7 +1649,7 @@ void ShenandoahHeap::verify(VerifyOption vo) {
     }
   }
 }
-size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
+size_t ShenandoahHeap::tlab_capacity() const {
   return _free_set->capacity();
 }
 
@@ -2125,7 +2125,7 @@ GCTracer* ShenandoahHeap::tracer() {
   return shenandoah_policy()->tracer();
 }
 
-size_t ShenandoahHeap::tlab_used(Thread* thread) const {
+size_t ShenandoahHeap::tlab_used() const {
   return _free_set->used();
 }
 
@@ -692,10 +692,10 @@ public:
                                                Metaspace::MetadataType mdtype) override;
 
   HeapWord* allocate_new_tlab(size_t min_size, size_t requested_size, size_t* actual_size) override;
-  size_t tlab_capacity(Thread *thr) const override;
-  size_t unsafe_max_tlab_alloc(Thread *thread) const override;
+  size_t tlab_capacity() const override;
+  size_t unsafe_max_tlab_alloc() const override;
   size_t max_tlab_size() const override;
-  size_t tlab_used(Thread* ignored) const override;
+  size_t tlab_used() const override;
 
   void ensure_parsability(bool retire_labs) override;
 
@@ -222,11 +222,11 @@ void ZCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
   ShouldNotReachHere();
 }
 
-size_t ZCollectedHeap::tlab_capacity(Thread* ignored) const {
+size_t ZCollectedHeap::tlab_capacity() const {
   return _heap.tlab_capacity();
 }
 
-size_t ZCollectedHeap::tlab_used(Thread* ignored) const {
+size_t ZCollectedHeap::tlab_used() const {
   return _heap.tlab_used();
 }
 
@@ -234,7 +234,7 @@ size_t ZCollectedHeap::max_tlab_size() const {
   return _heap.max_tlab_size() / HeapWordSize;
 }
 
-size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
+size_t ZCollectedHeap::unsafe_max_tlab_alloc() const {
   return _heap.unsafe_max_tlab_alloc();
 }
 
@@ -83,10 +83,10 @@ public:
   void collect_as_vm_thread(GCCause::Cause cause) override;
   void do_full_collection(bool clear_all_soft_refs) override;
 
-  size_t tlab_capacity(Thread* thr) const override;
-  size_t tlab_used(Thread* thr) const override;
+  size_t tlab_capacity() const override;
+  size_t tlab_used() const override;
   size_t max_tlab_size() const override;
-  size_t unsafe_max_tlab_alloc(Thread* thr) const override;
+  size_t unsafe_max_tlab_alloc() const override;
 
   MemoryUsage memory_usage() override;
   GrowableArray<GCMemoryManager*> memory_managers() override;