8368213: ZGC: Cleanup dead code, unimplemented declarations, unused private fields

Reviewed-by: stefank, jsikstro
Author: Axel Boldt-Christmas
Date:   2025-09-23 07:15:06 +00:00
parent  7ed72d943b
commit  47ed1a8d17
9 changed files with 10 additions and 45 deletions

src/hotspot/share/gc/shared/gcThreadLocalData.hpp

@@ -40,6 +40,6 @@
 // should consider placing frequently accessed fields first in
 // T, so that field offsets relative to Thread are small, which
 // often allows for a more compact instruction encoding.
-typedef uint64_t GCThreadLocalData[43]; // 344 bytes
+typedef uint64_t GCThreadLocalData[40]; // 320 bytes

 #endif // SHARE_GC_SHARED_GCTHREADLOCALDATA_HPP
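
The shrink from 43 to 40 uint64_t slots (344 to 320 bytes) tracks thread-local fields removed from ZGC's data; one of them, _uncolor_mask, appears in the last file below. The typedef reserves opaque per-thread storage that each collector overlays with its own type under a size assertion. A minimal sketch of that invariant, with ExampleTLD as a hypothetical stand-in layout:

    #include <cstdint>

    // Opaque per-thread storage reserved for the GC, as in the patch.
    typedef uint64_t GCThreadLocalData[40]; // 320 bytes

    // Hypothetical collector-specific layout overlaid on that storage.
    struct ExampleTLD {
      uintptr_t masks[8];
      void*     buffers[4];
    };

    // The overlay must fit in the reserved storage.
    static_assert(sizeof(ExampleTLD) <= sizeof(GCThreadLocalData),
                  "GC thread-local data must fit in GCThreadLocalData");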

src/hotspot/share/gc/z/zBarrier.hpp

@@ -71,8 +71,6 @@ typedef zpointer (*ZBarrierColor)(zaddress, zpointer);
 class ZGeneration;

-void z_assert_is_barrier_safe();
-
 class ZBarrier : public AllStatic {
   friend class ZContinuation;
   friend class ZStoreBarrierBuffer;

src/hotspot/share/gc/z/zDirector.cpp

@@ -306,8 +306,7 @@ static bool is_young_small(const ZDirectorStats& stats) {
   return young_used_percent <= 5.0;
 }

-template <typename PrintFn = void(*)(size_t, double)>
-static bool is_high_usage(const ZDirectorStats& stats, PrintFn* print_function = nullptr) {
+static bool is_high_usage(const ZDirectorStats& stats, bool log = false) {
   // Calculate amount of free memory available. Note that we take the
   // relocation headroom into account to avoid in-place relocation.
   const size_t soft_max_capacity = stats._heap._soft_max_heap_size;
@@ -316,8 +315,9 @@ static bool is_high_usage(const ZDirectorStats& stats, PrintFn* print_function =
   const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom());
   const double free_percent = percent_of(free, soft_max_capacity);

-  if (print_function != nullptr) {
-    (*print_function)(free, free_percent);
+  if (log) {
+    log_debug(gc, director)("Rule Minor: High Usage, Free: %zuMB(%.1f%%)",
+                            free / M, free_percent);
   }

   // The heap has high usage if there is less than 5% free memory left
@@ -377,19 +377,7 @@ static bool rule_minor_high_usage(const ZDirectorStats& stats) {
   // such that the allocation rate rule doesn't trigger, but the amount of free
   // memory is still slowly but surely heading towards zero. In this situation,
   // we start a GC cycle to avoid a potential allocation stall later.
-  const size_t soft_max_capacity = stats._heap._soft_max_heap_size;
-  const size_t used = stats._heap._used;
-  const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
-  const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom());
-  const double free_percent = percent_of(free, soft_max_capacity);
-
-  auto print_function = [&](size_t free, double free_percent) {
-    log_debug(gc, director)("Rule Minor: High Usage, Free: %zuMB(%.1f%%)",
-                            free / M, free_percent);
-  };
-
-  return is_high_usage(stats, &print_function);
+  return is_high_usage(stats, true /* log */);
 }

 // Major GC rules
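
The logging hook above changes from a caller-supplied print function to a plain bool, and the rule body collapses to a single call. The underlying test computes free memory net of the relocation headroom and flags high usage when less than 5% of the soft max capacity is free; a standalone sketch of that arithmetic, with MIN2 and percent_of replaced by standard-library equivalents and the headroom passed in as a parameter:

    #include <algorithm>
    #include <cstddef>

    // Sketch of ZDirector's high-usage test (simplified, not the real API).
    bool is_high_usage(size_t soft_max_capacity, size_t used, size_t relocation_headroom) {
      // Clamp with std::min so the unsigned subtractions cannot underflow.
      const size_t free_including_headroom =
          soft_max_capacity - std::min(soft_max_capacity, used);
      const size_t free =
          free_including_headroom - std::min(free_including_headroom, relocation_headroom);
      const double free_percent = 100.0 * double(free) / double(soft_max_capacity);

      // High usage means less than 5% of the soft max capacity is free.
      return free_percent <= 5.0;
    }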

src/hotspot/share/gc/z/zGeneration.cpp

@@ -701,13 +701,11 @@ uint ZGenerationYoung::compute_tenuring_threshold(ZRelocationSetSelectorStats st
   double young_life_expectancy_sum = 0.0;
   uint young_life_expectancy_samples = 0;
   uint last_populated_age = 0;
-  size_t last_populated_live = 0;

   for (ZPageAge age : ZPageAgeRangeAll) {
     const size_t young_live = stats.small(age).live() + stats.medium(age).live() + stats.large(age).live();
     if (young_live > 0) {
       last_populated_age = untype(age);
-      last_populated_live = young_live;
       if (young_live_last > 0) {
         young_life_expectancy_sum += double(young_live) / double(young_live_last);
         young_life_expectancy_samples++;
@@ -721,7 +719,6 @@ uint ZGenerationYoung::compute_tenuring_threshold(ZRelocationSetSelectorStats st
     return 0;
   }

   const size_t young_used_at_mark_start = ZGeneration::young()->stat_heap()->used_generation_at_mark_start();
-  const size_t young_garbage = ZGeneration::young()->stat_heap()->garbage_at_mark_end();
   const size_t young_allocated = ZGeneration::young()->stat_heap()->allocated_at_mark_end();
   const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
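
The surviving loop estimates young "life expectancy" as the average ratio of live bytes between consecutive populated ages; the deleted last_populated_live was assigned on every iteration but never read. A simplified sketch of the computation, using a plain per-age array in place of the relocation set selector stats:

    #include <cstddef>

    // Average survival ratio across adjacent populated ages (sketch of the
    // logic in ZGenerationYoung::compute_tenuring_threshold; live[] is a
    // hypothetical stand-in for the per-age selector stats).
    double young_life_expectancy(const size_t* live, int num_ages) {
      double sum = 0.0;
      int samples = 0;
      size_t live_last = 0;

      for (int age = 0; age < num_ages; age++) {
        if (live[age] > 0) {
          if (live_last > 0) {
            // Fraction of the previous age's live bytes still live at this age.
            sum += double(live[age]) / double(live_last);
            samples++;
          }
          live_last = live[age];
        }
      }

      // No adjacent populated ages: arbitrary default for the sketch.
      return samples > 0 ? sum / samples : 1.0;
    }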

src/hotspot/share/gc/z/zGeneration.hpp

@@ -87,7 +87,6 @@ protected:
   void free_empty_pages(ZRelocationSetSelector* selector, int bulk);
   void flip_age_pages(const ZRelocationSetSelector* selector);
   void flip_age_pages(const ZArray<ZPage*>* pages);
-  void mark_free();

src/hotspot/share/gc/z/zMark.cpp

@@ -789,7 +789,6 @@ typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> ZMarkOldCLDClosu
 class ZMarkOldRootsTask : public ZTask {
 private:
-  ZMark* const _mark;
   ZRootsIteratorStrongColored _roots_colored;
   ZRootsIteratorStrongUncolored _roots_uncolored;
@@ -800,9 +799,8 @@ private:
   ZMarkNMethodClosure _nm_cl;

 public:
-  ZMarkOldRootsTask(ZMark* mark)
+  ZMarkOldRootsTask()
     : ZTask("ZMarkOldRootsTask"),
-      _mark(mark),
       _roots_colored(ZGenerationIdOptional::old),
       _roots_uncolored(ZGenerationIdOptional::old),
       _cl_colored(),
@@ -847,7 +845,6 @@ public:
 class ZMarkYoungRootsTask : public ZTask {
 private:
-  ZMark* const _mark;
   ZRootsIteratorAllColored _roots_colored;
   ZRootsIteratorAllUncolored _roots_uncolored;
@@ -858,9 +855,8 @@ private:
   ZMarkYoungNMethodClosure _nm_cl;

 public:
-  ZMarkYoungRootsTask(ZMark* mark)
+  ZMarkYoungRootsTask()
     : ZTask("ZMarkYoungRootsTask"),
-      _mark(mark),
       _roots_colored(ZGenerationIdOptional::young),
       _roots_uncolored(ZGenerationIdOptional::young),
       _cl_colored(),
@@ -928,13 +924,13 @@ void ZMark::resize_workers(uint nworkers) {
 void ZMark::mark_young_roots() {
   SuspendibleThreadSetJoiner sts_joiner;
-  ZMarkYoungRootsTask task(this);
+  ZMarkYoungRootsTask task;
   workers()->run(&task);
 }

 void ZMark::mark_old_roots() {
   SuspendibleThreadSetJoiner sts_joiner;
-  ZMarkOldRootsTask task(this);
+  ZMarkOldRootsTask task;
   workers()->run(&task);
 }
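
The deleted _mark members were initialized in the constructors but never read afterwards, exactly the pattern Clang's -Wunused-private-field diagnostic exists to catch; a minimal reproduction (hypothetical code, not part of the patch):

    // clang++ -Wunused-private-field -c example.cpp
    class ZMark;

    class ZMarkOldRootsTask {
    private:
      ZMark* const _mark;  // warning: private field '_mark' is not used

    public:
      explicit ZMarkOldRootsTask(ZMark* mark)
        : _mark(mark) {}
    };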

src/hotspot/share/gc/z/zNMethodTable.hpp

@@ -43,9 +43,6 @@ private:
   static ZNMethodTableIteration _iteration_secondary;
   static ZSafeDelete<ZNMethodTableEntry[]> _safe_delete;

-  static ZNMethodTableEntry* create(size_t size);
-  static void destroy(ZNMethodTableEntry* table);
-
   static size_t first_index(const nmethod* nm, size_t size);
   static size_t next_index(size_t prev_index, size_t size);
@@ -67,9 +64,6 @@ public:
   static void nmethods_do_begin(bool secondary);
   static void nmethods_do_end(bool secondary);
   static void nmethods_do(bool secondary, NMethodClosure* cl);
-
-  static void unlink(ZWorkers* workers, bool unloading_occurred);
-  static void purge(ZWorkers* workers);
 };

 #endif // SHARE_GC_Z_ZNMETHODTABLE_HPP
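
Member declarations with no definition, like create/destroy and unlink/purge here, compile cleanly and only fail at link time if something actually calls them, which is how they tend to outlive their implementations. A small illustration with hypothetical names:

    #include <cstddef>

    class Table {
    private:
      static int* create(size_t size);  // declared but never defined

    public:
      static void example();
    };

    void Table::example() {
      // Compiles as-is. Uncommenting the call below would still compile,
      // but linking would fail with an undefined reference to Table::create.
      // int* entries = create(16);
    }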

src/hotspot/share/gc/z/zPageAllocator.cpp

@@ -1060,7 +1060,6 @@ void ZPartition::commit_increased_capacity(ZMemoryAllocation* allocation, const
   const size_t already_committed = allocation->harvested();
-  const ZVirtualMemory already_committed_vmem = vmem.first_part(already_committed);
   const ZVirtualMemory to_be_committed_vmem = vmem.last_part(already_committed);

   // Try to commit the uncommitted physical memory
@@ -1422,7 +1421,6 @@ ZPage* ZPageAllocator::alloc_page(ZPageType type, size_t size, ZAllocationFlags
   const ZPageAllocationStats stats = allocation.stats();
   const int num_harvested_vmems = stats._num_harvested_vmems;
   const size_t harvested = stats._total_harvested;
-  const size_t committed = stats._total_committed_capacity;

   if (harvested > 0) {
     ZStatInc(ZCounterMappedCacheHarvest, harvested);
@@ -1963,9 +1961,6 @@ void ZPageAllocator::cleanup_failed_commit_multi_partition(ZMultiPartitionAlloca
       continue;
     }

-    // Remove the harvested part
-    const ZVirtualMemory non_harvest_vmem = partial_vmem.last_part(allocation->harvested());
-
     ZArray<ZVirtualMemory>* const partial_vmems = allocation->partial_vmems();

     // Keep track of the start index
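
Several of the deleted locals came from splitting a virtual memory range at the harvested (already committed) boundary with first_part/last_part; a sketch of that range arithmetic over a simplified stand-in for ZVirtualMemory:

    #include <cstddef>
    #include <cstdint>

    // Simplified stand-in for ZVirtualMemory: the range [start, start + size).
    struct Range {
      uintptr_t start;
      size_t    size;

      Range first_part(size_t n) const { return {start, n}; }            // [start, start + n)
      Range last_part(size_t n) const  { return {start + n, size - n}; } // [start + n, start + size)
    };

    // If the first 'harvested' bytes are already committed, only the tail
    // of the range still needs committing.
    Range to_be_committed(const Range& vmem, size_t harvested) {
      return vmem.last_part(harvested);
    }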

src/hotspot/share/gc/z/zThreadLocalData.hpp

@@ -39,7 +39,6 @@ private:
   uintptr_t _mark_bad_mask;
   uintptr_t _store_good_mask;
   uintptr_t _store_bad_mask;
-  uintptr_t _uncolor_mask;
   uintptr_t _nmethod_disarmed;
   ZStoreBarrierBuffer* _store_barrier_buffer;
   ZMarkThreadLocalStacks _mark_stacks[2];
@@ -51,7 +50,6 @@ private:
       _mark_bad_mask(0),
       _store_good_mask(0),
       _store_bad_mask(0),
-      _uncolor_mask(0),
       _nmethod_disarmed(0),
       _store_barrier_buffer(new ZStoreBarrierBuffer()),
       _mark_stacks(),