diff --git a/src/hotspot/share/gc/shared/gcThreadLocalData.hpp b/src/hotspot/share/gc/shared/gcThreadLocalData.hpp index ba0e6d8fb1a..2632e806bab 100644 --- a/src/hotspot/share/gc/shared/gcThreadLocalData.hpp +++ b/src/hotspot/share/gc/shared/gcThreadLocalData.hpp @@ -40,6 +40,6 @@ // should consider placing frequently accessed fields first in // T, so that field offsets relative to Thread are small, which // often allows for a more compact instruction encoding. -typedef uint64_t GCThreadLocalData[43]; // 344 bytes +typedef uint64_t GCThreadLocalData[40]; // 320 bytes #endif // SHARE_GC_SHARED_GCTHREADLOCALDATA_HPP diff --git a/src/hotspot/share/gc/z/zBarrier.hpp b/src/hotspot/share/gc/z/zBarrier.hpp index 3f9e6c78b04..3071061c997 100644 --- a/src/hotspot/share/gc/z/zBarrier.hpp +++ b/src/hotspot/share/gc/z/zBarrier.hpp @@ -71,8 +71,6 @@ typedef zpointer (*ZBarrierColor)(zaddress, zpointer); class ZGeneration; -void z_assert_is_barrier_safe(); - class ZBarrier : public AllStatic { friend class ZContinuation; friend class ZStoreBarrierBuffer; diff --git a/src/hotspot/share/gc/z/zDirector.cpp b/src/hotspot/share/gc/z/zDirector.cpp index 7c5bb907edf..cd5474b22ba 100644 --- a/src/hotspot/share/gc/z/zDirector.cpp +++ b/src/hotspot/share/gc/z/zDirector.cpp @@ -306,8 +306,7 @@ static bool is_young_small(const ZDirectorStats& stats) { return young_used_percent <= 5.0; } -template <typename PrintFn> -static bool is_high_usage(const ZDirectorStats& stats, PrintFn* print_function = nullptr) { +static bool is_high_usage(const ZDirectorStats& stats, bool log = false) { // Calculate amount of free memory available. Note that we take the // relocation headroom into account to avoid in-place relocation. 
const size_t soft_max_capacity = stats._heap._soft_max_heap_size; @@ -316,8 +315,9 @@ static bool is_high_usage(const ZDirectorStats& stats, PrintFn* print_function = const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom()); const double free_percent = percent_of(free, soft_max_capacity); - if (print_function != nullptr) { - (*print_function)(free, free_percent); + if (log) { + log_debug(gc, director)("Rule Minor: High Usage, Free: %zuMB(%.1f%%)", + free / M, free_percent); } // The heap has high usage if there is less than 5% free memory left @@ -377,19 +377,7 @@ static bool rule_minor_high_usage(const ZDirectorStats& stats) { // such that the allocation rate rule doesn't trigger, but the amount of free // memory is still slowly but surely heading towards zero. In this situation, // we start a GC cycle to avoid a potential allocation stall later. - - const size_t soft_max_capacity = stats._heap._soft_max_heap_size; - const size_t used = stats._heap._used; - const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used); - const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom()); - const double free_percent = percent_of(free, soft_max_capacity); - - auto print_function = [&](size_t free, double free_percent) { - log_debug(gc, director)("Rule Minor: High Usage, Free: %zuMB(%.1f%%)", - free / M, free_percent); - }; - - return is_high_usage(stats, &print_function); + return is_high_usage(stats, true /* log */); } // Major GC rules diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp index 2a7d0af966a..d1680b6c336 100644 --- a/src/hotspot/share/gc/z/zGeneration.cpp +++ b/src/hotspot/share/gc/z/zGeneration.cpp @@ -701,13 +701,11 @@ uint ZGenerationYoung::compute_tenuring_threshold(ZRelocationSetSelectorStats st double young_life_expectancy_sum = 0.0; uint young_life_expectancy_samples = 0; uint 
last_populated_age = 0; - size_t last_populated_live = 0; for (ZPageAge age : ZPageAgeRangeAll) { const size_t young_live = stats.small(age).live() + stats.medium(age).live() + stats.large(age).live(); if (young_live > 0) { last_populated_age = untype(age); - last_populated_live = young_live; if (young_live_last > 0) { young_life_expectancy_sum += double(young_live) / double(young_live_last); young_life_expectancy_samples++; @@ -721,7 +719,6 @@ uint ZGenerationYoung::compute_tenuring_threshold(ZRelocationSetSelectorStats st return 0; } - const size_t young_used_at_mark_start = ZGeneration::young()->stat_heap()->used_generation_at_mark_start(); const size_t young_garbage = ZGeneration::young()->stat_heap()->garbage_at_mark_end(); const size_t young_allocated = ZGeneration::young()->stat_heap()->allocated_at_mark_end(); const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity(); diff --git a/src/hotspot/share/gc/z/zGeneration.hpp b/src/hotspot/share/gc/z/zGeneration.hpp index 13adc06b123..7ce096de6db 100644 --- a/src/hotspot/share/gc/z/zGeneration.hpp +++ b/src/hotspot/share/gc/z/zGeneration.hpp @@ -87,7 +87,6 @@ protected: void free_empty_pages(ZRelocationSetSelector* selector, int bulk); void flip_age_pages(const ZRelocationSetSelector* selector); - void flip_age_pages(const ZArray<ZPage*>* pages); void mark_free(); diff --git a/src/hotspot/share/gc/z/zMark.cpp b/src/hotspot/share/gc/z/zMark.cpp index 8ddab4c9c3d..3b247fdd35e 100644 --- a/src/hotspot/share/gc/z/zMark.cpp +++ b/src/hotspot/share/gc/z/zMark.cpp @@ -789,7 +789,6 @@ typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> ZMarkOldCLDClosu class ZMarkOldRootsTask : public ZTask { private: - ZMark* const _mark; ZRootsIteratorStrongColored _roots_colored; ZRootsIteratorStrongUncolored _roots_uncolored; @@ -800,9 +799,8 @@ private: ZMarkNMethodClosure _nm_cl; public: - ZMarkOldRootsTask(ZMark* mark) + ZMarkOldRootsTask() : ZTask("ZMarkOldRootsTask"), - _mark(mark), _roots_colored(ZGenerationIdOptional::old), 
_roots_uncolored(ZGenerationIdOptional::old), _cl_colored(), @@ -847,7 +845,6 @@ public: class ZMarkYoungRootsTask : public ZTask { private: - ZMark* const _mark; ZRootsIteratorAllColored _roots_colored; ZRootsIteratorAllUncolored _roots_uncolored; @@ -858,9 +855,8 @@ private: ZMarkYoungNMethodClosure _nm_cl; public: - ZMarkYoungRootsTask(ZMark* mark) + ZMarkYoungRootsTask() : ZTask("ZMarkYoungRootsTask"), - _mark(mark), _roots_colored(ZGenerationIdOptional::young), _roots_uncolored(ZGenerationIdOptional::young), _cl_colored(), @@ -928,13 +924,13 @@ void ZMark::resize_workers(uint nworkers) { void ZMark::mark_young_roots() { SuspendibleThreadSetJoiner sts_joiner; - ZMarkYoungRootsTask task(this); + ZMarkYoungRootsTask task; workers()->run(&task); } void ZMark::mark_old_roots() { SuspendibleThreadSetJoiner sts_joiner; - ZMarkOldRootsTask task(this); + ZMarkOldRootsTask task; workers()->run(&task); } diff --git a/src/hotspot/share/gc/z/zNMethodTable.hpp b/src/hotspot/share/gc/z/zNMethodTable.hpp index a8b9029caeb..3626a67312c 100644 --- a/src/hotspot/share/gc/z/zNMethodTable.hpp +++ b/src/hotspot/share/gc/z/zNMethodTable.hpp @@ -43,9 +43,6 @@ private: static ZNMethodTableIteration _iteration_secondary; static ZSafeDelete<ZNMethodTableEntry[]> _safe_delete; - static ZNMethodTableEntry* create(size_t size); - static void destroy(ZNMethodTableEntry* table); - static size_t first_index(const nmethod* nm, size_t size); static size_t next_index(size_t prev_index, size_t size); @@ -67,9 +64,6 @@ public: static void nmethods_do_begin(bool secondary); static void nmethods_do_end(bool secondary); static void nmethods_do(bool secondary, NMethodClosure* cl); - - static void unlink(ZWorkers* workers, bool unloading_occurred); - static void purge(ZWorkers* workers); }; #endif // SHARE_GC_Z_ZNMETHODTABLE_HPP diff --git a/src/hotspot/share/gc/z/zPageAllocator.cpp b/src/hotspot/share/gc/z/zPageAllocator.cpp index 1eb0ed67a86..dbda9e9e9a2 100644 --- a/src/hotspot/share/gc/z/zPageAllocator.cpp +++ 
b/src/hotspot/share/gc/z/zPageAllocator.cpp @@ -1060,7 +1060,6 @@ void ZPartition::commit_increased_capacity(ZMemoryAllocation* allocation, const const size_t already_committed = allocation->harvested(); - const ZVirtualMemory already_committed_vmem = vmem.first_part(already_committed); const ZVirtualMemory to_be_committed_vmem = vmem.last_part(already_committed); // Try to commit the uncommitted physical memory @@ -1422,7 +1421,6 @@ ZPage* ZPageAllocator::alloc_page(ZPageType type, size_t size, ZAllocationFlags const ZPageAllocationStats stats = allocation.stats(); const int num_harvested_vmems = stats._num_harvested_vmems; const size_t harvested = stats._total_harvested; - const size_t committed = stats._total_committed_capacity; if (harvested > 0) { ZStatInc(ZCounterMappedCacheHarvest, harvested); @@ -1963,9 +1961,6 @@ void ZPageAllocator::cleanup_failed_commit_multi_partition(ZMultiPartitionAlloca continue; } - // Remove the harvested part - const ZVirtualMemory non_harvest_vmem = partial_vmem.last_part(allocation->harvested()); - ZArray<ZVirtualMemory>* const partial_vmems = allocation->partial_vmems(); // Keep track of the start index diff --git a/src/hotspot/share/gc/z/zThreadLocalData.hpp b/src/hotspot/share/gc/z/zThreadLocalData.hpp index 8ff8196fbe2..111a64e978e 100644 --- a/src/hotspot/share/gc/z/zThreadLocalData.hpp +++ b/src/hotspot/share/gc/z/zThreadLocalData.hpp @@ -39,7 +39,6 @@ private: uintptr_t _mark_bad_mask; uintptr_t _store_good_mask; uintptr_t _store_bad_mask; - uintptr_t _uncolor_mask; uintptr_t _nmethod_disarmed; ZStoreBarrierBuffer* _store_barrier_buffer; ZMarkThreadLocalStacks _mark_stacks[2]; @@ -51,7 +50,6 @@ private: _mark_bad_mask(0), _store_good_mask(0), _store_bad_mask(0), - _uncolor_mask(0), _nmethod_disarmed(0), _store_barrier_buffer(new ZStoreBarrierBuffer()), _mark_stacks(),