diff --git a/src/hotspot/share/gc/z/zArray.hpp b/src/hotspot/share/gc/z/zArray.hpp
index 1b7e99b3ace..9ef911bb1b5 100644
--- a/src/hotspot/share/gc/z/zArray.hpp
+++ b/src/hotspot/share/gc/z/zArray.hpp
@@ -91,6 +91,10 @@ public:
   ZArrayIteratorImpl(const ZArray<T>* array);
 
   bool next(T* elem);
+
+  template <typename Function, typename... Args>
+  bool next_if(T* elem, Function predicate, Args&&... args);
+
   bool next_index(size_t* index);
   T index_to_elem(size_t index);
 
diff --git a/src/hotspot/share/gc/z/zArray.inline.hpp b/src/hotspot/share/gc/z/zArray.inline.hpp
index 547a73ffc0d..bf606a88e68 100644
--- a/src/hotspot/share/gc/z/zArray.inline.hpp
+++ b/src/hotspot/share/gc/z/zArray.inline.hpp
@@ -161,6 +161,20 @@ inline bool ZArrayIteratorImpl<T, Parallel>::next(T* elem) {
   return false;
 }
 
+template <typename T, bool Parallel>
+template <typename Function, typename... Args>
+inline bool ZArrayIteratorImpl<T, Parallel>::next_if(T* elem, Function predicate, Args&&... args) {
+  size_t index;
+  while (next_index(&index)) {
+    if (predicate(index_to_elem(index), args...)) {
+      *elem = index_to_elem(index);
+      return true;
+    }
+  }
+
+  return false;
+}
+
 template <typename T, bool Parallel>
 inline bool ZArrayIteratorImpl<T, Parallel>::next_index(size_t* index) {
   if (Parallel) {
diff --git a/src/hotspot/share/gc/z/zForwarding.hpp b/src/hotspot/share/gc/z/zForwarding.hpp
index c58c479984f..29b5cf4aabe 100644
--- a/src/hotspot/share/gc/z/zForwarding.hpp
+++ b/src/hotspot/share/gc/z/zForwarding.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -59,6 +59,7 @@ private:
   const size_t _object_alignment_shift;
   const AttachedArray _entries;
   ZPage* const _page;
+  const uint32_t _partition_id;
   const ZPageAge _from_age;
   const ZPageAge _to_age;
   volatile bool _claimed;
@@ -108,6 +109,8 @@ public:
   size_t size() const;
   size_t object_alignment_shift() const;
 
+  uint32_t partition_id() const;
+
   bool is_promotion() const;
 
   // Visit from-objects
diff --git a/src/hotspot/share/gc/z/zForwarding.inline.hpp b/src/hotspot/share/gc/z/zForwarding.inline.hpp
index eb5f4a36161..43558018793 100644
--- a/src/hotspot/share/gc/z/zForwarding.inline.hpp
+++ b/src/hotspot/share/gc/z/zForwarding.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -61,6 +61,7 @@ inline ZForwarding::ZForwarding(ZPage* page, ZPageAge to_age, size_t nentries)
     _object_alignment_shift(page->object_alignment_shift()),
     _entries(nentries),
     _page(page),
+    _partition_id(page->single_partition_id()),
     _from_age(page->age()),
     _to_age(to_age),
     _claimed(false),
@@ -102,6 +103,10 @@ inline size_t ZForwarding::object_alignment_shift() const {
   return _object_alignment_shift;
 }
 
+inline uint32_t ZForwarding::partition_id() const {
+  return _partition_id;
+}
+
 inline bool ZForwarding::is_promotion() const {
   return _from_age != ZPageAge::old && _to_age == ZPageAge::old;
diff --git a/src/hotspot/share/gc/z/zHeap.cpp b/src/hotspot/share/gc/z/zHeap.cpp
index e43336c8ea3..44e5974993e 100644
--- a/src/hotspot/share/gc/z/zHeap.cpp
+++ b/src/hotspot/share/gc/z/zHeap.cpp
@@ -250,8 +250,8 @@ void ZHeap::account_undo_alloc_page(ZPage* page) {
             p2i(Thread::current()), ZUtils::thread_name(), p2i(page), page->size());
 }
 
-ZPage* ZHeap::alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age) {
-  ZPage* const page = _page_allocator.alloc_page(type, size, flags, age);
+ZPage* ZHeap::alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age, uint32_t preferred_partition) {
+  ZPage* const page = _page_allocator.alloc_page(type, size, flags, age, preferred_partition);
   if (page != nullptr) {
     // Insert page table entry
     _page_table.insert(page);
diff --git a/src/hotspot/share/gc/z/zHeap.hpp b/src/hotspot/share/gc/z/zHeap.hpp
index f7d606fc260..64ebc253b26 100644
--- a/src/hotspot/share/gc/z/zHeap.hpp
+++ b/src/hotspot/share/gc/z/zHeap.hpp
@@ -108,7 +108,7 @@ public:
   void mark_flush(Thread* thread);
 
   // Page allocation
-  ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age);
+  ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age, uint32_t preferred_partition);
   void undo_alloc_page(ZPage* page);
   void free_page(ZPage* page);
   size_t free_empty_pages(ZGenerationId id, const ZArray<ZPage*>* pages);
diff --git a/src/hotspot/share/gc/z/zHeuristics.cpp b/src/hotspot/share/gc/z/zHeuristics.cpp
index fac5fca080d..ccd31ee1749 100644
--- a/src/hotspot/share/gc/z/zHeuristics.cpp
+++ b/src/hotspot/share/gc/z/zHeuristics.cpp
@@ -26,6 +26,7 @@
 #include "gc/z/zCPU.inline.hpp"
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zHeuristics.hpp"
+#include "gc/z/zNUMA.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/os.hpp"
 #include "utilities/align.hpp"
@@ -58,9 +59,11 @@ void ZHeuristics::set_medium_page_size() {
 }
 
 size_t ZHeuristics::relocation_headroom() {
-  // Calculate headroom needed to avoid in-place relocation. Each worker will try
-  // to allocate a small page, and all workers will share a single medium page.
-  return (ConcGCThreads * ZPageSizeSmall) + ZPageSizeMediumMax;
+  // Calculate headroom needed to avoid in-place relocation. For each NUMA node,
+  // each worker will try to allocate a small page, and all workers will share a
+  // single medium page.
+  const size_t per_numa_headroom = (ConcGCThreads * ZPageSizeSmall) + ZPageSizeMediumMax;
+  return per_numa_headroom * ZNUMA::count();
 }
 
 bool ZHeuristics::use_per_cpu_shared_small_pages() {
diff --git a/src/hotspot/share/gc/z/zObjectAllocator.cpp b/src/hotspot/share/gc/z/zObjectAllocator.cpp
index c6dd0a1a152..63e7f2b4ae9 100644
--- a/src/hotspot/share/gc/z/zObjectAllocator.cpp
+++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp
@@ -53,7 +53,7 @@ ZPage* const* ZObjectAllocator::PerAge::shared_small_page_addr() const {
 }
 
 ZPage* ZObjectAllocator::PerAge::alloc_page(ZPageType type, size_t size, ZAllocationFlags flags) {
-  return ZHeap::heap()->alloc_page(type, size, flags, _age);
+  return ZHeap::heap()->alloc_page(type, size, flags, _age, ZNUMA::id());
 }
 
 void ZObjectAllocator::PerAge::undo_alloc_page(ZPage* page) {
diff --git a/src/hotspot/share/gc/z/zPage.inline.hpp b/src/hotspot/share/gc/z/zPage.inline.hpp
index f6c2029ac06..f9a0dbf328d 100644
--- a/src/hotspot/share/gc/z/zPage.inline.hpp
+++ b/src/hotspot/share/gc/z/zPage.inline.hpp
@@ -157,6 +157,7 @@ inline const ZVirtualMemory& ZPage::virtual_memory() const {
 }
 
 inline uint32_t ZPage::single_partition_id() const {
+  assert(!is_multi_partition(), "Don't fetch single partition id if page is multi-partition");
   return _single_partition_id;
 }
 
diff --git a/src/hotspot/share/gc/z/zPageAllocator.cpp b/src/hotspot/share/gc/z/zPageAllocator.cpp
index 52d5d775757..ba0ab923e11 100644
--- a/src/hotspot/share/gc/z/zPageAllocator.cpp
+++ b/src/hotspot/share/gc/z/zPageAllocator.cpp
@@ -412,7 +412,7 @@ private:
   const Ticks _start_timestamp;
   const uint32_t _young_seqnum;
   const uint32_t _old_seqnum;
-  const uint32_t _initiating_numa_id;
+  const uint32_t _preferred_partition;
   bool _is_multi_partition;
   ZSinglePartitionAllocation _single_partition_allocation;
   ZMultiPartitionAllocation _multi_partition_allocation;
@@ -420,7 +420,7 @@ private:
   ZFuture<ZPageAllocationStall> _stall_result;
 
 public:
-  ZPageAllocation(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age)
+  ZPageAllocation(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age, uint32_t preferred_partition)
     : _type(type),
       _requested_size(size),
       _flags(flags),
@@ -428,12 +428,14 @@ public:
       _start_timestamp(Ticks::now()),
       _young_seqnum(ZGeneration::young()->seqnum()),
      _old_seqnum(ZGeneration::old()->seqnum()),
-      _initiating_numa_id(ZNUMA::id()),
+      _preferred_partition(preferred_partition),
       _is_multi_partition(false),
       _single_partition_allocation(size),
       _multi_partition_allocation(size),
       _node(),
-      _stall_result() {}
+      _stall_result() {
+    assert(_preferred_partition < ZNUMA::count(), "Preferred partition out-of-bounds (0 <= %d < %d)", _preferred_partition, ZNUMA::count());
+  }
 
   void reset_for_retry() {
     _is_multi_partition = false;
@@ -474,8 +476,8 @@ public:
     return _old_seqnum;
   }
 
-  uint32_t initiating_numa_id() const {
-    return _initiating_numa_id;
+  uint32_t preferred_partition() const {
+    return _preferred_partition;
   }
 
   bool is_multi_partition() const {
@@ -1397,10 +1399,10 @@ static void check_out_of_memory_during_initialization() {
   }
 }
 
-ZPage* ZPageAllocator::alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age) {
+ZPage* ZPageAllocator::alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age, uint32_t preferred_partition) {
   EventZPageAllocation event;
 
-  ZPageAllocation allocation(type, size, flags, age);
+  ZPageAllocation allocation(type, size, flags, age, preferred_partition);
 
   // Allocate the page
   ZPage* const page = alloc_page_inner(&allocation);
@@ -1548,7 +1550,7 @@ bool ZPageAllocator::claim_capacity(ZPageAllocation* allocation) {
   }
 
   // Round robin single-partition claiming
-  const uint32_t start_numa_id = allocation->initiating_numa_id();
+  const uint32_t start_numa_id = allocation->preferred_partition();
   const uint32_t start_partition = start_numa_id;
   const uint32_t num_partitions = _partitions.count();
 
@@ -1560,7 +1562,7 @@ bool ZPageAllocator::claim_capacity(ZPageAllocation* allocation) {
     }
   }
 
-  if (!is_multi_partition_enabled() || sum_available() < allocation->size()) {
+  if (!is_multi_partition_allowed(allocation)) {
    // Multi-partition claiming is not possible
     return false;
   }
@@ -1578,7 +1580,7 @@ bool ZPageAllocator::claim_capacity(ZPageAllocation* allocation) {
 }
 
 bool ZPageAllocator::claim_capacity_fast_medium(ZPageAllocation* allocation) {
-  const uint32_t start_node = allocation->initiating_numa_id();
+  const uint32_t start_node = allocation->preferred_partition();
   const uint32_t numa_nodes = ZNUMA::count();
 
   for (uint32_t i = 0; i < numa_nodes; ++i) {
@@ -2191,6 +2193,12 @@ bool ZPageAllocator::is_multi_partition_enabled() const {
   return _virtual.is_multi_partition_enabled();
 }
 
+bool ZPageAllocator::is_multi_partition_allowed(const ZPageAllocation* allocation) const {
+  return is_multi_partition_enabled() &&
+         allocation->type() == ZPageType::large &&
+         allocation->size() <= sum_available();
+}
+
 const ZPartition& ZPageAllocator::partition_from_partition_id(uint32_t numa_id) const {
   return _partitions.get(numa_id);
 }
diff --git a/src/hotspot/share/gc/z/zPageAllocator.hpp b/src/hotspot/share/gc/z/zPageAllocator.hpp
index c5d1bedd863..a2f4c94e8d4 100644
--- a/src/hotspot/share/gc/z/zPageAllocator.hpp
+++ b/src/hotspot/share/gc/z/zPageAllocator.hpp
@@ -220,6 +220,7 @@ private:
   void satisfy_stalled();
 
   bool is_multi_partition_enabled() const;
+  bool is_multi_partition_allowed(const ZPageAllocation* allocation) const;
 
   const ZPartition& partition_from_partition_id(uint32_t partition_id) const;
   ZPartition& partition_from_partition_id(uint32_t partition_id);
@@ -263,7 +264,7 @@ public:
   ZPageAllocatorStats stats(ZGeneration* generation) const;
   ZPageAllocatorStats update_and_stats(ZGeneration* generation);
 
-  ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age);
+  ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age, uint32_t preferred_partition);
   void safe_destroy_page(ZPage* page);
   void free_page(ZPage* page);
   void free_pages(ZGenerationId id, const ZArray<ZPage*>* pages);
diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp
index 95e22cf4c69..556f413348b 100644
--- a/src/hotspot/share/gc/z/zRelocate.cpp
+++ b/src/hotspot/share/gc/z/zRelocate.cpp
@@ -32,6 +32,7 @@
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zIndexDistributor.inline.hpp"
 #include "gc/z/zIterator.inline.hpp"
+#include "gc/z/zNUMA.inline.hpp"
 #include "gc/z/zObjectAllocator.hpp"
 #include "gc/z/zPage.inline.hpp"
 #include "gc/z/zPageAge.inline.hpp"
@@ -43,6 +44,7 @@
 #include "gc/z/zStringDedup.inline.hpp"
 #include "gc/z/zTask.hpp"
 #include "gc/z/zUncoloredRoot.inline.hpp"
+#include "gc/z/zValue.inline.hpp"
 #include "gc/z/zVerify.hpp"
 #include "gc/z/zWorkers.hpp"
 #include "prims/jvmtiTagMap.hpp"
@@ -304,9 +306,38 @@ void ZRelocateQueue::desynchronize() {
   _lock.notify_all();
 }
 
+ZRelocationTargets::ZRelocationTargets()
+  : _targets() {}
+
+ZPage* ZRelocationTargets::get(uint32_t partition_id, ZPageAge age) {
+  return _targets.get(partition_id)[untype(age) - 1];
+}
+
+void ZRelocationTargets::set(uint32_t partition_id, ZPageAge age, ZPage* page) {
+  _targets.get(partition_id)[untype(age) - 1] = page;
+}
+
+template <typename Function>
+void ZRelocationTargets::apply_and_clear_targets(Function function) {
+  ZPerNUMAIterator<TargetArray> iter(&_targets);
+  for (TargetArray* targets; iter.next(&targets);) {
+    for (size_t i = 0; i < ZNumRelocationAges; i++) {
+      // Apply function
+      function((*targets)[i]);
+
+      // Clear target
+      (*targets)[i] = nullptr;
+    }
+  }
+}
+
 ZRelocate::ZRelocate(ZGeneration* generation)
   : _generation(generation),
-    _queue() {}
+    _queue(),
+    _iters(),
+    _small_targets(),
+    _medium_targets(),
+    _shared_medium_targets() {}
 
 ZWorkers* ZRelocate::workers() const {
   return _generation->workers();
@@ -394,12 +425,13 @@ static ZPage* alloc_page(ZForwarding* forwarding) {
   const ZPageType type = forwarding->type();
   const size_t size = forwarding->size();
   const ZPageAge age = forwarding->to_age();
+  const uint32_t preferred_partition = forwarding->partition_id();
 
   ZAllocationFlags flags;
   flags.set_non_blocking();
   flags.set_gc_relocation();
 
-  return ZHeap::heap()->alloc_page(type, size, flags, age);
+  return ZHeap::heap()->alloc_page(type, size, flags, age, preferred_partition);
 }
 
 static void retire_target_page(ZGeneration* generation, ZPage* page) {
@@ -442,7 +474,7 @@ public:
     return page;
   }
 
-  void share_target_page(ZPage* page) {
+  void share_target_page(ZPage* page, uint32_t partition_id) {
     // Does nothing
   }
 
@@ -467,34 +499,26 @@ public:
 
 class ZRelocateMediumAllocator {
 private:
-  ZGeneration* const _generation;
-  ZConditionLock     _lock;
-  ZPage*             _shared[ZNumRelocationAges];
-  bool               _in_place;
-  volatile size_t    _in_place_count;
+  ZGeneration* const  _generation;
+  ZConditionLock      _lock;
+  ZRelocationTargets* _shared_targets;
+  bool                _in_place;
+  volatile size_t     _in_place_count;
 
 public:
-  ZRelocateMediumAllocator(ZGeneration* generation)
+  ZRelocateMediumAllocator(ZGeneration* generation, ZRelocationTargets* shared_targets)
    : _generation(generation),
       _lock(),
-      _shared(),
+      _shared_targets(shared_targets),
       _in_place(false),
       _in_place_count(0) {}
 
   ~ZRelocateMediumAllocator() {
-    for (uint i = 0; i < ZNumRelocationAges; ++i) {
-      if (_shared[i] != nullptr) {
-        retire_target_page(_generation, _shared[i]);
+    _shared_targets->apply_and_clear_targets([&](ZPage* page) {
+      if (page != nullptr) {
+        retire_target_page(_generation, page);
       }
-    }
-  }
-
-  ZPage* shared(ZPageAge age) {
-    return _shared[untype(age - 1)];
-  }
-
-  void set_shared(ZPageAge age, ZPage* page) {
-    _shared[untype(age - 1)] = page;
+    });
   }
 
   ZPage* alloc_and_retire_target_page(ZForwarding* forwarding, ZPage* target) {
@@ -510,9 +534,10 @@ public:
     // current target page if another thread shared a page, or allocated
     // a new page.
     const ZPageAge to_age = forwarding->to_age();
-    if (shared(to_age) == target) {
+    const uint32_t partition_id = forwarding->partition_id();
+    if (_shared_targets->get(partition_id, to_age) == target) {
       ZPage* const to_page = alloc_page(forwarding);
-      set_shared(to_age, to_page);
+      _shared_targets->set(partition_id, to_age, to_page);
       if (to_page == nullptr) {
         Atomic::inc(&_in_place_count);
         _in_place = true;
@@ -524,18 +549,18 @@
       }
     }
 
-    return shared(to_age);
+    return _shared_targets->get(partition_id, to_age);
   }
 
-  void share_target_page(ZPage* page) {
+  void share_target_page(ZPage* page, uint32_t partition_id) {
     const ZPageAge age = page->age();
 
     ZLocker<ZConditionLock> locker(&_lock);
     assert(_in_place, "Invalid state");
-    assert(shared(age) == nullptr, "Invalid state");
+    assert(_shared_targets->get(partition_id, age) == nullptr, "Invalid state");
     assert(page != nullptr, "Invalid page");
 
-    set_shared(age, page);
+    _shared_targets->set(partition_id, age, page);
     _in_place = false;
 
     _lock.notify_all();
@@ -563,21 +588,12 @@ class ZRelocateWork : public StackObj {
 private:
   Allocator* const    _allocator;
   ZForwarding*        _forwarding;
-  ZPage*              _target[ZNumRelocationAges];
+  ZRelocationTargets* _targets;
   ZGeneration* const  _generation;
   size_t              _other_promoted;
   size_t              _other_compacted;
   ZStringDedupContext _string_dedup_context;
-
-  ZPage* target(ZPageAge age) {
-    return _target[untype(age - 1)];
-  }
-
-  void set_target(ZPageAge age, ZPage* page) {
-    _target[untype(age - 1)] = page;
-  }
-
   size_t object_alignment() const {
     return (size_t)1 << _forwarding->object_alignment_shift();
   }
@@ -591,11 +607,11 @@ private:
     }
   }
 
-  zaddress try_relocate_object_inner(zaddress from_addr) {
+  zaddress try_relocate_object_inner(zaddress from_addr, uint32_t partition_id) {
     ZForwardingCursor cursor;
 
     const size_t size = ZUtils::object_size(from_addr);
-    ZPage* const to_page = target(_forwarding->to_age());
+    ZPage* const to_page = _targets->get(partition_id, _forwarding->to_age());
 
     // Lookup forwarding
     {
@@ -806,8 +822,8 @@ private:
     }
   }
 
-  bool try_relocate_object(zaddress from_addr) {
-    const zaddress to_addr = try_relocate_object_inner(from_addr);
+  bool try_relocate_object(zaddress from_addr, uint32_t partition_id) {
+    const zaddress to_addr = try_relocate_object_inner(from_addr, partition_id);
 
     if (is_null(to_addr)) {
       return false;
@@ -888,13 +904,18 @@ private:
     const zaddress addr = to_zaddress(obj);
     assert(ZHeap::heap()->is_object_live(addr), "Should be live");
 
-    while (!try_relocate_object(addr)) {
-      // Allocate a new target page, or if that fails, use the page being
-      // relocated as the new target, which will cause it to be relocated
-      // in-place.
-      const ZPageAge to_age = _forwarding->to_age();
-      ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target(to_age));
-      set_target(to_age, to_page);
+    const ZPageAge to_age = _forwarding->to_age();
+    const uint32_t partition_id = _forwarding->partition_id();
+
+    while (!try_relocate_object(addr, partition_id)) {
+      // Failed to relocate object, try to allocate a new target page,
+      // or if that fails, use the page being relocated as the new target,
+      // which will cause it to be relocated in-place.
+      ZPage* const target_page = _targets->get(partition_id, to_age);
+      ZPage* to_page = _allocator->alloc_and_retire_target_page(_forwarding, target_page);
+      _targets->set(partition_id, to_age, to_page);
+
+      // We got a new page, retry relocation
       if (to_page != nullptr) {
         continue;
       }
@@ -903,23 +924,24 @@ private:
       // the page, or its forwarding table, until it has been released
       // (relocation completed).
       to_page = start_in_place_relocation(ZAddress::offset(addr));
-      set_target(to_age, to_page);
+      _targets->set(partition_id, to_age, to_page);
     }
   }
 
 public:
-  ZRelocateWork(Allocator* allocator, ZGeneration* generation)
+  ZRelocateWork(Allocator* allocator, ZRelocationTargets* targets, ZGeneration* generation)
     : _allocator(allocator),
       _forwarding(nullptr),
-      _target(),
+      _targets(targets),
       _generation(generation),
       _other_promoted(0),
       _other_compacted(0) {}
 
   ~ZRelocateWork() {
-    for (uint i = 0; i < ZNumRelocationAges; ++i) {
-      _allocator->free_target_page(_target[i]);
-    }
+    _targets->apply_and_clear_targets([&](ZPage* page) {
+      _allocator->free_target_page(page);
+    });
+
     // Report statistics on-behalf of non-worker threads
     _generation->increase_promoted(_other_promoted);
     _generation->increase_compacted(_other_compacted);
@@ -1012,8 +1034,9 @@ public:
       page->log_msg(" (relocate page done in-place)");
 
      // Different pages when promoting
-      ZPage* const target_page = target(_forwarding->to_age());
-      _allocator->share_target_page(target_page);
+      const uint32_t target_partition = _forwarding->partition_id();
+      ZPage* const target_page = _targets->get(target_partition, _forwarding->to_age());
+      _allocator->share_target_page(target_page, target_partition);
     } else {
       // Wait for all other threads to call release_page
@@ -1057,31 +1080,63 @@ public:
 
 class ZRelocateTask : public ZRestartableTask {
 private:
-  ZRelocationSetParallelIterator _iter;
-  ZGeneration* const             _generation;
-  ZRelocateQueue* const          _queue;
-  ZRelocateSmallAllocator        _small_allocator;
-  ZRelocateMediumAllocator       _medium_allocator;
+  ZGeneration* const                        _generation;
+  ZRelocateQueue* const                     _queue;
+  ZPerNUMA<ZRelocationSetParallelIterator>* _iters;
+  ZPerWorker<ZRelocationTargets>*           _small_targets;
+  ZPerWorker<ZRelocationTargets>*           _medium_targets;
+  ZRelocateSmallAllocator                   _small_allocator;
+  ZRelocateMediumAllocator                  _medium_allocator;
+  const size_t                              _total_forwardings;
+  volatile size_t                           _numa_local_forwardings;
 
 public:
-  ZRelocateTask(ZRelocationSet* relocation_set, ZRelocateQueue* queue)
+  ZRelocateTask(ZRelocationSet* relocation_set,
+                ZRelocateQueue* queue,
+                ZPerNUMA<ZRelocationSetParallelIterator>* iters,
+                ZPerWorker<ZRelocationTargets>* small_targets,
+                ZPerWorker<ZRelocationTargets>* medium_targets,
+                ZRelocationTargets* shared_medium_targets)
     : ZRestartableTask("ZRelocateTask"),
-      _iter(relocation_set),
      _generation(relocation_set->generation()),
       _queue(queue),
+      _iters(iters),
+      _small_targets(small_targets),
+      _medium_targets(medium_targets),
       _small_allocator(_generation),
-      _medium_allocator(_generation) {}
+      _medium_allocator(_generation, shared_medium_targets),
+      _total_forwardings(relocation_set->nforwardings()),
+      _numa_local_forwardings(0) {
+
+    for (uint32_t i = 0; i < ZNUMA::count(); i++) {
+      ZRelocationSetParallelIterator* const iter = _iters->addr(i);
+
+      // Destruct the iterator from the previous GC-cycle, which is a temporary
+      // iterator if this is the first GC-cycle.
+      iter->~ZRelocationSetParallelIterator();
+
+      // In-place construct the iterator with the current relocation set
+      ::new (iter) ZRelocationSetParallelIterator(relocation_set);
+    }
+  }
 
   ~ZRelocateTask() {
     _generation->stat_relocation()->at_relocate_end(_small_allocator.in_place_count(), _medium_allocator.in_place_count());
 
     // Signal that we're not using the queue anymore. Used mostly for asserts.
     _queue->deactivate();
+
+    if (ZNUMA::is_enabled()) {
+      log_debug(gc, reloc, numa)("Forwardings relocated NUMA-locally: %zu / %zu (%.0f%%)",
+                                 _numa_local_forwardings, _total_forwardings, percent_of(_numa_local_forwardings, _total_forwardings));
+    }
   }
 
   virtual void work() {
-    ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _generation);
-    ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _generation);
+    ZRelocateWork<ZRelocateSmallAllocator> small(&_small_allocator, _small_targets->addr(), _generation);
+    ZRelocateWork<ZRelocateMediumAllocator> medium(&_medium_allocator, _medium_targets->addr(), _generation);
+    const uint32_t num_nodes = ZNUMA::count();
+    uint32_t numa_local_forwardings_worker = 0;
 
     const auto do_forwarding = [&](ZForwarding* forwarding) {
       ZPage* const page = forwarding->page();
@@ -1107,12 +1162,29 @@ public:
       }
     };
 
+    const auto check_numa_local = [&](ZForwarding* forwarding, uint32_t numa_id) {
+      return forwarding->partition_id() == numa_id;
+    };
+
     const auto do_forwarding_one_from_iter = [&]() {
       ZForwarding* forwarding;
+      const uint32_t start_node = ZNUMA::id();
+      uint32_t current_node = start_node;
 
-      if (_iter.next(&forwarding)) {
-        claim_and_do_forwarding(forwarding);
-        return true;
+      for (uint32_t i = 0; i < num_nodes; i++) {
+        if (_iters->get(current_node).next_if(&forwarding, check_numa_local, current_node)) {
+          claim_and_do_forwarding(forwarding);
+
+          if (current_node == start_node) {
+            // Track if this forwarding was relocated on the local NUMA node
+            numa_local_forwardings_worker++;
+          }
+
+          return true;
+        }
+
+        // Check next node.
+        current_node = (current_node + 1) % num_nodes;
       }
 
       return false;
@@ -1138,6 +1210,10 @@ public:
      }
     }
 
+    if (ZNUMA::is_enabled()) {
+      Atomic::add(&_numa_local_forwardings, numa_local_forwardings_worker, memory_order_relaxed);
+    }
+
     _queue->leave();
   }
 
@@ -1218,7 +1294,7 @@ void ZRelocate::relocate(ZRelocationSet* relocation_set) {
   }
 
   {
-    ZRelocateTask relocate_task(relocation_set, &_queue);
+    ZRelocateTask relocate_task(relocation_set, &_queue, &_iters, &_small_targets, &_medium_targets, &_shared_medium_targets);
     workers()->run(&relocate_task);
   }
 
diff --git a/src/hotspot/share/gc/z/zRelocate.hpp b/src/hotspot/share/gc/z/zRelocate.hpp
index 400fc61b055..d0ddf7deecf 100644
--- a/src/hotspot/share/gc/z/zRelocate.hpp
+++ b/src/hotspot/share/gc/z/zRelocate.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -27,6 +27,7 @@
 #include "gc/z/zAddress.hpp"
 #include "gc/z/zPageAge.hpp"
 #include "gc/z/zRelocationSet.hpp"
+#include "gc/z/zValue.hpp"
 
 class ZForwarding;
 class ZGeneration;
@@ -74,15 +75,34 @@ public:
   void desynchronize();
 };
 
+class ZRelocationTargets {
+private:
+  using TargetArray = ZPage*[ZNumRelocationAges];
+
+  ZPerNUMA<TargetArray> _targets;
+
+public:
+  ZRelocationTargets();
+
+  ZPage* get(uint32_t partition_id, ZPageAge age);
+  void set(uint32_t partition_id, ZPageAge age, ZPage* page);
+
+  template <typename Function>
+  void apply_and_clear_targets(Function function);
+};
+
 class ZRelocate {
   friend class ZRelocateTask;
 
 private:
-  ZGeneration* const _generation;
-  ZRelocateQueue     _queue;
+  ZGeneration* const                       _generation;
+  ZRelocateQueue                           _queue;
+  ZPerNUMA<ZRelocationSetParallelIterator> _iters;
+  ZPerWorker<ZRelocationTargets>           _small_targets;
+  ZPerWorker<ZRelocationTargets>           _medium_targets;
+  ZRelocationTargets                       _shared_medium_targets;
 
   ZWorkers* workers() const;
-  void work(ZRelocationSetParallelIterator* iter);
 
 public:
   ZRelocate(ZGeneration* generation);
diff --git a/src/hotspot/share/gc/z/zRelocationSet.hpp b/src/hotspot/share/gc/z/zRelocationSet.hpp
index 2052f3c7bf1..ee1a9447617 100644
--- a/src/hotspot/share/gc/z/zRelocationSet.hpp
+++ b/src/hotspot/share/gc/z/zRelocationSet.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,6 +52,8 @@ private:
 public:
   ZRelocationSet(ZGeneration* generation);
 
+  size_t nforwardings() const;
+
   void install(const ZRelocationSetSelector* selector);
   void reset(ZPageAllocator* page_allocator);
   ZGeneration* generation() const;
@@ -64,6 +66,7 @@ public:
 template <bool Parallel>
 class ZRelocationSetIteratorImpl : public ZArrayIteratorImpl<ZForwarding*, Parallel> {
 public:
+  ZRelocationSetIteratorImpl();
   ZRelocationSetIteratorImpl(ZRelocationSet* relocation_set);
 };
 
diff --git a/src/hotspot/share/gc/z/zRelocationSet.inline.hpp b/src/hotspot/share/gc/z/zRelocationSet.inline.hpp
index 9c093936d4c..b501021bc26 100644
--- a/src/hotspot/share/gc/z/zRelocationSet.inline.hpp
+++ b/src/hotspot/share/gc/z/zRelocationSet.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,6 +28,14 @@
 
 #include "gc/z/zArray.inline.hpp"
 
+inline size_t ZRelocationSet::nforwardings() const {
+  return _nforwardings;
+}
+
+template <bool Parallel>
+inline ZRelocationSetIteratorImpl<Parallel>::ZRelocationSetIteratorImpl()
+  : ZArrayIteratorImpl<ZForwarding*, Parallel>(nullptr, 0) {}
+
 template <bool Parallel>
 inline ZRelocationSetIteratorImpl<Parallel>::ZRelocationSetIteratorImpl(ZRelocationSet* relocation_set)
   : ZArrayIteratorImpl<ZForwarding*, Parallel>(relocation_set->_forwardings, relocation_set->_nforwardings) {}
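
Note on the relocation changes above: ZRelocateTask::work() now keeps one ZRelocationSetParallelIterator per NUMA node and uses the new ZArrayIteratorImpl::next_if() with the check_numa_local predicate, so each worker first drains forwardings whose pages live on its own partition and only then steals from the other nodes round-robin. The following is a minimal, self-contained sketch of that claiming pattern in plain C++ (not HotSpot code); the names Forwarding, PartitionedIterator and claim_one() are illustrative stand-ins, and the sketch is single-threaded where the real iterator claims indices atomically when Parallel is true.

#include <cstdint>
#include <cstdio>
#include <vector>

struct Forwarding {
  uint32_t partition_id;   // partition (NUMA node) of the from-page
  bool     relocated = false;
};

// Simplified stand-in for ZRelocationSetParallelIterator.
class PartitionedIterator {
private:
  const std::vector<Forwarding*>* _array;
  size_t                          _next = 0;

public:
  explicit PartitionedIterator(const std::vector<Forwarding*>* array)
    : _array(array) {}

  // Analogue of next_if(): hand out the next element that satisfies the
  // predicate; non-matching elements are consumed and skipped by this iterator.
  template <typename Function, typename... Args>
  bool next_if(Forwarding** elem, Function predicate, Args&&... args) {
    while (_next < _array->size()) {
      Forwarding* const f = (*_array)[_next++];
      if (predicate(f, args...)) {
        *elem = f;
        return true;
      }
    }
    return false;
  }
};

// One iterator per NUMA node, all over the same relocation set; iterator i only
// hands out forwardings whose pages live on partition i. A worker starts with
// its own node's iterator and falls back to the other nodes round-robin.
bool claim_one(std::vector<PartitionedIterator>& iters, uint32_t start_node) {
  const uint32_t num_nodes = static_cast<uint32_t>(iters.size());
  const auto is_on_node = [](Forwarding* f, uint32_t node) {
    return f->partition_id == node;
  };

  uint32_t current_node = start_node;
  for (uint32_t i = 0; i < num_nodes; i++) {
    Forwarding* forwarding;
    if (iters[current_node].next_if(&forwarding, is_on_node, current_node)) {
      forwarding->relocated = true;   // stand-in for claim_and_do_forwarding()
      return true;
    }
    current_node = (current_node + 1) % num_nodes;  // steal from the next node
  }
  return false;
}

int main() {
  std::vector<Forwarding> storage = {{0}, {1}, {0}, {1}, {1}};
  std::vector<Forwarding*> relocation_set;
  for (Forwarding& f : storage) {
    relocation_set.push_back(&f);
  }

  // Two "NUMA nodes", each with its own view of the same relocation set.
  std::vector<PartitionedIterator> iters(2, PartitionedIterator(&relocation_set));

  size_t relocated = 0;
  while (claim_one(iters, /*start_node=*/0)) {
    relocated++;
  }
  std::printf("relocated %zu of %zu forwardings\n", relocated, relocation_set.size());
  return 0;
}

Because a node's iterator only ever yields that node's forwardings, a worker touches remote partitions only after its local partition is exhausted; the fraction of work that stayed local is what the "Forwardings relocated NUMA-locally" log line in ~ZRelocateTask reports.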