diff --git a/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp b/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp index a58c91a6a41..20e37528c04 100644 --- a/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp +++ b/src/hotspot/cpu/aarch64/gc/z/zAddress_aarch64.cpp @@ -95,7 +95,7 @@ size_t ZPlatformAddressOffsetBits() { static const size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; const size_t min_address_offset_bits = max_address_offset_bits - 2; - const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset = ZGlobalsPointers::min_address_offset_request(); const size_t address_offset_bits = log2i_exact(address_offset); return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); } diff --git a/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.cpp b/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.cpp index 28a57b2dc29..2e3eed8ec60 100644 --- a/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.cpp +++ b/src/hotspot/cpu/ppc/gc/z/zAddress_ppc.cpp @@ -92,7 +92,7 @@ size_t ZPlatformAddressOffsetBits() { static const size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; const size_t min_address_offset_bits = max_address_offset_bits - 2; - const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset = ZGlobalsPointers::min_address_offset_request(); const size_t address_offset_bits = log2i_exact(address_offset); return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); } diff --git a/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.cpp b/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.cpp index 683d892915f..1f2f0146f04 100644 --- a/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.cpp +++ b/src/hotspot/cpu/riscv/gc/z/zAddress_riscv.cpp @@ -94,7 +94,7 @@ size_t ZPlatformAddressOffsetBits() { static const size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1; const size_t max_address_offset_bits = valid_max_address_offset_bits - 3; const size_t min_address_offset_bits = max_address_offset_bits - 2; - const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset = ZGlobalsPointers::min_address_offset_request(); const size_t address_offset_bits = log2i_exact(address_offset); return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); } diff --git a/src/hotspot/cpu/x86/gc/z/zAddress_x86.cpp b/src/hotspot/cpu/x86/gc/z/zAddress_x86.cpp index 3667a52050c..6b5b64d3036 100644 --- a/src/hotspot/cpu/x86/gc/z/zAddress_x86.cpp +++ b/src/hotspot/cpu/x86/gc/z/zAddress_x86.cpp @@ -32,7 +32,7 @@ size_t ZPointerLoadShift; size_t ZPlatformAddressOffsetBits() { const size_t min_address_offset_bits = 42; // 4TB const size_t max_address_offset_bits = 44; // 16TB - const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio); + const size_t address_offset = ZGlobalsPointers::min_address_offset_request(); const size_t address_offset_bits = log2i_exact(address_offset); return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits); } diff --git a/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp b/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp index ac723483637..d0c06e2ebf1 100644 --- a/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp +++ b/src/hotspot/os/bsd/gc/z/zNUMA_bsd.cpp @@ -21,15 +21,24 @@ * 
questions. */ -#include "gc/z/zNUMA.hpp" -#include "utilities/globalDefinitions.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zCPU.inline.hpp" +#include "gc/z/zNUMA.inline.hpp" +#include "runtime/globals_extension.hpp" void ZNUMA::pd_initialize() { _enabled = false; - _count = 1; + _count = !FLAG_IS_DEFAULT(ZFakeNUMA) + ? ZFakeNUMA + : 1; } uint32_t ZNUMA::id() { + if (is_faked()) { + // ZFakeNUMA testing, ignores _enabled + return ZCPU::id() % ZFakeNUMA; + } + return 0; } diff --git a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp index 37c855c2e2b..86549e878cb 100644 --- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp +++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.cpp @@ -26,7 +26,6 @@ #include "gc/z/zGlobals.hpp" #include "gc/z/zInitialize.hpp" #include "gc/z/zLargePages.inline.hpp" -#include "gc/z/zPhysicalMemory.inline.hpp" #include "gc/z/zPhysicalMemoryBacking_bsd.hpp" #include "logging/log.hpp" #include "runtime/globals.hpp" @@ -97,12 +96,12 @@ void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { // Does nothing } -bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const { +bool ZPhysicalMemoryBacking::commit_inner(zbacking_offset offset, size_t length) const { assert(is_aligned(untype(offset), os::vm_page_size()), "Invalid offset"); assert(is_aligned(length, os::vm_page_size()), "Invalid length"); log_trace(gc, heap)("Committing memory: %zuM-%zuM (%zuM)", - untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); + untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M); const uintptr_t addr = _base + untype(offset); const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); @@ -116,7 +115,7 @@ bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const { return true; } -size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const { +size_t ZPhysicalMemoryBacking::commit(zbacking_offset offset, size_t length, uint32_t /* numa_id - ignored */) const { // Try to commit the whole region if (commit_inner(offset, length)) { // Success @@ -124,8 +123,8 @@ size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const { } // Failed, try to commit as much as possible - zoffset start = offset; - zoffset end = offset + length; + zbacking_offset start = offset; + zbacking_offset end = offset + length; for (;;) { length = align_down((end - start) / 2, ZGranuleSize); @@ -144,12 +143,12 @@ size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const { } } -size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const { +size_t ZPhysicalMemoryBacking::uncommit(zbacking_offset offset, size_t length) const { assert(is_aligned(untype(offset), os::vm_page_size()), "Invalid offset"); assert(is_aligned(length, os::vm_page_size()), "Invalid length"); log_trace(gc, heap)("Uncommitting memory: %zuM-%zuM (%zuM)", - untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); + untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M); const uintptr_t start = _base + untype(offset); const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0); @@ -162,7 +161,7 @@ size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const { return length; } -void 
ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zoffset offset) const { +void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const { const ZErrno err = mremap(_base + untype(offset), untype(addr), size); if (err) { fatal("Failed to remap memory (%s)", err.to_string()); diff --git a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp index d74de5375ee..9fa64fb51bd 100644 --- a/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp +++ b/src/hotspot/os/bsd/gc/z/zPhysicalMemoryBacking_bsd.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -31,7 +31,7 @@ private: uintptr_t _base; bool _initialized; - bool commit_inner(zoffset offset, size_t length) const; + bool commit_inner(zbacking_offset offset, size_t length) const; public: ZPhysicalMemoryBacking(size_t max_capacity); @@ -40,10 +40,10 @@ public: void warn_commit_limits(size_t max_capacity) const; - size_t commit(zoffset offset, size_t length) const; - size_t uncommit(zoffset offset, size_t length) const; + size_t commit(zbacking_offset offset, size_t length, uint32_t numa_id) const; + size_t uncommit(zbacking_offset offset, size_t length) const; - void map(zaddress_unsafe addr, size_t size, zoffset offset) const; + void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const; void unmap(zaddress_unsafe addr, size_t size) const; }; diff --git a/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp b/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp index 5a5db428548..74e69655940 100644 --- a/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp +++ b/src/hotspot/os/linux/gc/z/zNUMA_linux.cpp @@ -21,23 +21,34 @@ * questions. */ +#include "gc/shared/gc_globals.hpp" #include "gc/z/zCPU.inline.hpp" #include "gc/z/zErrno.hpp" -#include "gc/z/zNUMA.hpp" +#include "gc/z/zNUMA.inline.hpp" #include "gc/z/zSyscall_linux.hpp" #include "os_linux.hpp" #include "runtime/globals.hpp" +#include "runtime/globals_extension.hpp" #include "runtime/os.hpp" #include "utilities/debug.hpp" void ZNUMA::pd_initialize() { _enabled = UseNUMA; + + // UseNUMA and is_faked() are mutually excluded in zArguments.cpp. _count = UseNUMA ? os::Linux::numa_max_node() + 1 - : 1; + : !FLAG_IS_DEFAULT(ZFakeNUMA) + ? 
ZFakeNUMA + : 1; // No NUMA nodes } uint32_t ZNUMA::id() { + if (is_faked()) { + // ZFakeNUMA testing, ignores _enabled + return ZCPU::id() % ZFakeNUMA; + } + if (!_enabled) { // NUMA support not enabled return 0; diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp index 1ae4e18fcf1..c33e49b57f9 100644 --- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp +++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.cpp @@ -388,7 +388,7 @@ bool ZPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const { return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0; } -ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(zoffset offset, size_t length, bool touch) const { +ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(zbacking_offset offset, size_t length, bool touch) const { // On hugetlbfs, mapping a file segment will fail immediately, without // the need to touch the mapped pages first, if there aren't enough huge // pages available to back the mapping. @@ -439,7 +439,7 @@ static bool safe_touch_mapping(void* addr, size_t length, size_t page_size) { return true; } -ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(zoffset offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(zbacking_offset offset, size_t length) const { // On tmpfs, we need to touch the mapped pages to figure out // if there are enough pages available to back the mapping. void* const addr = mmap(nullptr, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, untype(offset)); @@ -468,11 +468,11 @@ ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(zoffset offset, size_ return backed ? 0 : ENOMEM; } -ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(zoffset offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(zbacking_offset offset, size_t length) const { uint8_t data = 0; // Allocate backing memory by writing to each block - for (zoffset pos = offset; pos < offset + length; pos += _block_size) { + for (zbacking_offset pos = offset; pos < offset + length; pos += _block_size) { if (pwrite(_fd, &data, sizeof(data), untype(pos)) == -1) { // Failed return errno; @@ -483,7 +483,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(zoffset offset, size_t le return 0; } -ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(zoffset offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(zbacking_offset offset, size_t length) const { // fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs // since Linux 4.3. 
When fallocate(2) is not supported we emulate it using // mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite @@ -497,7 +497,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(zoffset offset, size_t } } -ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(zoffset offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(zbacking_offset offset, size_t length) const { const int mode = 0; // Allocate const int res = ZSyscall::fallocate(_fd, mode, untype(offset), length); if (res == -1) { @@ -509,7 +509,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(zoffset offset, size_ return 0; } -ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(zoffset offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(zbacking_offset offset, size_t length) const { // Using compat mode is more efficient when allocating space on hugetlbfs. // Note that allocating huge pages this way will only reserve them, and not // associate them with segments of the file. We must guarantee that we at @@ -536,7 +536,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(zoffset offset, size_t length return fallocate_fill_hole_compat(offset, length); } -ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(zoffset offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(zbacking_offset offset, size_t length) const { if (ZLargePages::is_explicit()) { // We can only punch hole in pages that have been touched. Non-touched // pages are only reserved, and not associated with any specific file @@ -559,9 +559,9 @@ ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(zoffset offset, size_t lengt return 0; } -ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, zoffset offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, zbacking_offset offset, size_t length) const { // Try first half - const zoffset offset0 = offset; + const zbacking_offset offset0 = offset; const size_t length0 = align_up(length / 2, _block_size); const ZErrno err0 = fallocate(punch_hole, offset0, length0); if (err0) { @@ -569,7 +569,7 @@ ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, zoffset offs } // Try second half - const zoffset offset1 = offset0 + length0; + const zbacking_offset offset1 = offset0 + length0; const size_t length1 = length - length0; const ZErrno err1 = fallocate(punch_hole, offset1, length1); if (err1) { @@ -580,7 +580,7 @@ ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, zoffset offs return 0; } -ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, zoffset offset, size_t length) const { +ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, zbacking_offset offset, size_t length) const { assert(is_aligned(untype(offset), _block_size), "Invalid offset"); assert(is_aligned(length, _block_size), "Invalid length"); @@ -596,9 +596,9 @@ ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, zoffset offset, size_t return err; } -bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const { +bool ZPhysicalMemoryBacking::commit_inner(zbacking_offset offset, size_t length) const { log_trace(gc, heap)("Committing memory: %zuM-%zuM (%zuM)", - untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); + untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M); retry: const ZErrno err = fallocate(false /* punch_hole */, offset, length); @@ -627,30 
+627,11 @@ retry: return true; } -static int offset_to_node(zoffset offset) { - const GrowableArray* mapping = os::Linux::numa_nindex_to_node(); - const size_t nindex = (untype(offset) >> ZGranuleSizeShift) % mapping->length(); - return mapping->at((int)nindex); -} +size_t ZPhysicalMemoryBacking::commit_numa_preferred(zbacking_offset offset, size_t length, uint32_t numa_id) const { + // Setup NUMA policy to allocate memory from a preferred node + os::Linux::numa_set_preferred((int)numa_id); -size_t ZPhysicalMemoryBacking::commit_numa_interleaved(zoffset offset, size_t length) const { - size_t committed = 0; - - // Commit one granule at a time, so that each granule - // can be allocated from a different preferred node. - while (committed < length) { - const zoffset granule_offset = offset + committed; - - // Setup NUMA policy to allocate memory from a preferred node - os::Linux::numa_set_preferred(offset_to_node(granule_offset)); - - if (!commit_inner(granule_offset, ZGranuleSize)) { - // Failed - break; - } - - committed += ZGranuleSize; - } + const size_t committed = commit_default(offset, length); // Restore NUMA policy os::Linux::numa_set_preferred(-1); @@ -658,7 +639,7 @@ size_t ZPhysicalMemoryBacking::commit_numa_interleaved(zoffset offset, size_t le return committed; } -size_t ZPhysicalMemoryBacking::commit_default(zoffset offset, size_t length) const { +size_t ZPhysicalMemoryBacking::commit_default(zbacking_offset offset, size_t length) const { // Try to commit the whole region if (commit_inner(offset, length)) { // Success @@ -666,8 +647,8 @@ size_t ZPhysicalMemoryBacking::commit_default(zoffset offset, size_t length) con } // Failed, try to commit as much as possible - zoffset start = offset; - zoffset end = offset + length; + zbacking_offset start = offset; + zbacking_offset_end end = to_zbacking_offset_end(offset, length); for (;;) { length = align_down((end - start) / 2, ZGranuleSize); @@ -686,19 +667,19 @@ size_t ZPhysicalMemoryBacking::commit_default(zoffset offset, size_t length) con } } -size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const { +size_t ZPhysicalMemoryBacking::commit(zbacking_offset offset, size_t length, uint32_t numa_id) const { if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) { - // To get granule-level NUMA interleaving when using non-large pages, - // we must explicitly interleave the memory at commit/fallocate time. - return commit_numa_interleaved(offset, length); + // The memory is required to be preferred at the time it is paged in. As a + // consequence we must prefer the memory when committing non-large pages. 
+ return commit_numa_preferred(offset, length, numa_id); } return commit_default(offset, length); } -size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const { +size_t ZPhysicalMemoryBacking::uncommit(zbacking_offset offset, size_t length) const { log_trace(gc, heap)("Uncommitting memory: %zuM-%zuM (%zuM)", - untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); + untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M); const ZErrno err = fallocate(true /* punch_hole */, offset, length); if (err) { @@ -709,7 +690,7 @@ size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const { return length; } -void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zoffset offset) const { +void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const { const void* const res = mmap((void*)untype(addr), size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, untype(offset)); if (res == MAP_FAILED) { ZErrno err; diff --git a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp index 59c00ad01bf..4b083d1c79c 100644 --- a/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp +++ b/src/hotspot/os/linux/gc/z/zPhysicalMemoryBacking_linux.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -48,19 +48,19 @@ private: bool is_hugetlbfs() const; bool tmpfs_supports_transparent_huge_pages() const; - ZErrno fallocate_compat_mmap_hugetlbfs(zoffset offset, size_t length, bool touch) const; - ZErrno fallocate_compat_mmap_tmpfs(zoffset offset, size_t length) const; - ZErrno fallocate_compat_pwrite(zoffset offset, size_t length) const; - ZErrno fallocate_fill_hole_compat(zoffset offset, size_t length) const; - ZErrno fallocate_fill_hole_syscall(zoffset offset, size_t length) const; - ZErrno fallocate_fill_hole(zoffset offset, size_t length) const; - ZErrno fallocate_punch_hole(zoffset offset, size_t length) const; - ZErrno split_and_fallocate(bool punch_hole, zoffset offset, size_t length) const; - ZErrno fallocate(bool punch_hole, zoffset offset, size_t length) const; + ZErrno fallocate_compat_mmap_hugetlbfs(zbacking_offset offset, size_t length, bool touch) const; + ZErrno fallocate_compat_mmap_tmpfs(zbacking_offset offset, size_t length) const; + ZErrno fallocate_compat_pwrite(zbacking_offset offset, size_t length) const; + ZErrno fallocate_fill_hole_compat(zbacking_offset offset, size_t length) const; + ZErrno fallocate_fill_hole_syscall(zbacking_offset offset, size_t length) const; + ZErrno fallocate_fill_hole(zbacking_offset offset, size_t length) const; + ZErrno fallocate_punch_hole(zbacking_offset offset, size_t length) const; + ZErrno split_and_fallocate(bool punch_hole, zbacking_offset offset, size_t length) const; + ZErrno fallocate(bool punch_hole, zbacking_offset offset, size_t length) const; - bool commit_inner(zoffset offset, size_t length) const; - size_t commit_numa_interleaved(zoffset offset, size_t length) const; - size_t commit_default(zoffset offset, size_t length) const; + bool commit_inner(zbacking_offset offset, size_t length) const; + size_t commit_numa_preferred(zbacking_offset offset, size_t length, uint32_t numa_id) const; + 
size_t commit_default(zbacking_offset offset, size_t length) const; public: ZPhysicalMemoryBacking(size_t max_capacity); @@ -69,10 +69,10 @@ public: void warn_commit_limits(size_t max_capacity) const; - size_t commit(zoffset offset, size_t length) const; - size_t uncommit(zoffset offset, size_t length) const; + size_t commit(zbacking_offset offset, size_t length, uint32_t numa_id) const; + size_t uncommit(zbacking_offset offset, size_t length) const; - void map(zaddress_unsafe addr, size_t size, zoffset offset) const; + void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const; void unmap(zaddress_unsafe addr, size_t size) const; }; diff --git a/src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp b/src/hotspot/os/posix/gc/z/zVirtualMemoryManager_posix.cpp similarity index 81% rename from src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp rename to src/hotspot/os/posix/gc/z/zVirtualMemoryManager_posix.cpp index a103f764c98..4eea35c8a2e 100644 --- a/src/hotspot/os/posix/gc/z/zVirtualMemory_posix.cpp +++ b/src/hotspot/os/posix/gc/z/zVirtualMemoryManager_posix.cpp @@ -22,21 +22,16 @@ */ #include "gc/z/zAddress.inline.hpp" -#include "gc/z/zVirtualMemory.hpp" +#include "gc/z/zVirtualMemoryManager.hpp" #include "logging/log.hpp" #include -#include -void ZVirtualMemoryManager::pd_initialize_before_reserve() { +void ZVirtualMemoryReserver::pd_register_callbacks(ZVirtualMemoryRegistry* registry) { // Does nothing } -void ZVirtualMemoryManager::pd_register_callbacks(ZMemoryManager* manager) { - // Does nothing -} - -bool ZVirtualMemoryManager::pd_reserve(zaddress_unsafe addr, size_t size) { +bool ZVirtualMemoryReserver::pd_reserve(zaddress_unsafe addr, size_t size) { void* const res = mmap((void*)untype(addr), size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0); if (res == MAP_FAILED) { // Failed to reserve memory @@ -53,7 +48,7 @@ bool ZVirtualMemoryManager::pd_reserve(zaddress_unsafe addr, size_t size) { return true; } -void ZVirtualMemoryManager::pd_unreserve(zaddress_unsafe addr, size_t size) { +void ZVirtualMemoryReserver::pd_unreserve(zaddress_unsafe addr, size_t size) { const int res = munmap((void*)untype(addr), size); assert(res == 0, "Failed to unmap memory"); } diff --git a/src/hotspot/os/windows/gc/z/zInitialize_windows.cpp b/src/hotspot/os/windows/gc/z/zInitialize_windows.cpp index a4751617ce7..e0e0b3be82c 100644 --- a/src/hotspot/os/windows/gc/z/zInitialize_windows.cpp +++ b/src/hotspot/os/windows/gc/z/zInitialize_windows.cpp @@ -24,6 +24,9 @@ #include "gc/z/zInitialize.hpp" #include "gc/z/zSyscall_windows.hpp" +void ZVirtualMemoryReserverImpl_initialize(); + void ZInitialize::pd_initialize() { ZSyscall::initialize(); + ZVirtualMemoryReserverImpl_initialize(); } diff --git a/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp b/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp index afe8f18c392..dc7521dde56 100644 --- a/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp +++ b/src/hotspot/os/windows/gc/z/zNUMA_windows.cpp @@ -21,14 +21,24 @@ * questions. */ -#include "gc/z/zNUMA.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zCPU.inline.hpp" +#include "gc/z/zNUMA.inline.hpp" +#include "runtime/globals_extension.hpp" void ZNUMA::pd_initialize() { _enabled = false; - _count = 1; + _count = !FLAG_IS_DEFAULT(ZFakeNUMA) + ? 
ZFakeNUMA + : 1; } uint32_t ZNUMA::id() { + if (is_faked()) { + // ZFakeNUMA testing + return ZCPU::id() % ZFakeNUMA; + } + return 0; } diff --git a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp index 2764f51c13b..b18abd7e979 100644 --- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp +++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.cpp @@ -33,9 +33,9 @@ class ZPhysicalMemoryBackingImpl : public CHeapObj { public: - virtual size_t commit(zoffset offset, size_t size) = 0; - virtual size_t uncommit(zoffset offset, size_t size) = 0; - virtual void map(zaddress_unsafe addr, size_t size, zoffset offset) const = 0; + virtual size_t commit(zbacking_offset offset, size_t size) = 0; + virtual size_t uncommit(zbacking_offset offset, size_t size) = 0; + virtual void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const = 0; virtual void unmap(zaddress_unsafe addr, size_t size) const = 0; }; @@ -50,21 +50,29 @@ class ZPhysicalMemoryBackingSmallPages : public ZPhysicalMemoryBackingImpl { private: ZGranuleMap _handles; - HANDLE get_handle(zoffset offset) const { - HANDLE const handle = _handles.get(offset); + static zoffset to_zoffset(zbacking_offset offset) { + // A zbacking_offset is always a valid zoffset + return zoffset(untype(offset)); + } + + HANDLE get_handle(zbacking_offset offset) const { + const zoffset z_offset = to_zoffset(offset); + HANDLE const handle = _handles.get(z_offset); assert(handle != 0, "Should be set"); return handle; } - void put_handle(zoffset offset, HANDLE handle) { + void put_handle(zbacking_offset offset, HANDLE handle) { + const zoffset z_offset = to_zoffset(offset); assert(handle != INVALID_HANDLE_VALUE, "Invalid handle"); - assert(_handles.get(offset) == 0, "Should be cleared"); - _handles.put(offset, handle); + assert(_handles.get(z_offset) == 0, "Should be cleared"); + _handles.put(z_offset, handle); } - void clear_handle(zoffset offset) { - assert(_handles.get(offset) != 0, "Should be set"); - _handles.put(offset, 0); + void clear_handle(zbacking_offset offset) { + const zoffset z_offset = to_zoffset(offset); + assert(_handles.get(z_offset) != 0, "Should be set"); + _handles.put(z_offset, 0); } public: @@ -72,7 +80,7 @@ public: : ZPhysicalMemoryBackingImpl(), _handles(max_capacity) {} - size_t commit(zoffset offset, size_t size) { + size_t commit(zbacking_offset offset, size_t size) { for (size_t i = 0; i < size; i += ZGranuleSize) { HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize); if (handle == 0) { @@ -85,7 +93,7 @@ public: return size; } - size_t uncommit(zoffset offset, size_t size) { + size_t uncommit(zbacking_offset offset, size_t size) { for (size_t i = 0; i < size; i += ZGranuleSize) { HANDLE const handle = get_handle(offset + i); clear_handle(offset + i); @@ -95,7 +103,7 @@ public: return size; } - void map(zaddress_unsafe addr, size_t size, zoffset offset) const { + void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const { assert(is_aligned(untype(offset), ZGranuleSize), "Misaligned"); assert(is_aligned(untype(addr), ZGranuleSize), "Misaligned"); assert(is_aligned(size, ZGranuleSize), "Misaligned"); @@ -149,7 +157,7 @@ public: : ZPhysicalMemoryBackingImpl(), _page_array(alloc_page_array(max_capacity)) {} - size_t commit(zoffset offset, size_t size) { + size_t commit(zbacking_offset offset, size_t size) { const size_t index = untype(offset) >> ZGranuleSizeShift; const size_t 
npages = size >> ZGranuleSizeShift; @@ -167,7 +175,7 @@ public: return npages_res << ZGranuleSizeShift; } - size_t uncommit(zoffset offset, size_t size) { + size_t uncommit(zbacking_offset offset, size_t size) { const size_t index = untype(offset) >> ZGranuleSizeShift; const size_t npages = size >> ZGranuleSizeShift; @@ -181,7 +189,7 @@ public: return npages_res << ZGranuleSizeShift; } - void map(zaddress_unsafe addr, size_t size, zoffset offset) const { + void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const { const size_t npages = size >> ZGranuleSizeShift; const size_t index = untype(offset) >> ZGranuleSizeShift; @@ -222,21 +230,21 @@ void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const { // Does nothing } -size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) { +size_t ZPhysicalMemoryBacking::commit(zbacking_offset offset, size_t length, uint32_t /* numa_id - ignored */) { log_trace(gc, heap)("Committing memory: %zuM-%zuM (%zuM)", - untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); + untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M); return _impl->commit(offset, length); } -size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) { +size_t ZPhysicalMemoryBacking::uncommit(zbacking_offset offset, size_t length) { log_trace(gc, heap)("Uncommitting memory: %zuM-%zuM (%zuM)", - untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M); + untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M); return _impl->uncommit(offset, length); } -void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zoffset offset) const { +void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const { assert(is_aligned(untype(offset), ZGranuleSize), "Misaligned: " PTR_FORMAT, untype(offset)); assert(is_aligned(untype(addr), ZGranuleSize), "Misaligned: " PTR_FORMAT, addr); assert(is_aligned(size, ZGranuleSize), "Misaligned: " PTR_FORMAT, size); diff --git a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp index b8b73519ab5..91d59f49609 100644 --- a/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp +++ b/src/hotspot/os/windows/gc/z/zPhysicalMemoryBacking_windows.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -42,10 +42,10 @@ public: void warn_commit_limits(size_t max_capacity) const; - size_t commit(zoffset offset, size_t length); - size_t uncommit(zoffset offset, size_t length); + size_t commit(zbacking_offset offset, size_t length, uint32_t numa_id); + size_t uncommit(zbacking_offset offset, size_t length); - void map(zaddress_unsafe addr, size_t size, zoffset offset) const; + void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const; void unmap(zaddress_unsafe addr, size_t size) const; }; diff --git a/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp b/src/hotspot/os/windows/gc/z/zVirtualMemoryManager_windows.cpp similarity index 81% rename from src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp rename to src/hotspot/os/windows/gc/z/zVirtualMemoryManager_windows.cpp index ac5be56a0c0..48a32157f59 100644 --- a/src/hotspot/os/windows/gc/z/zVirtualMemory_windows.cpp +++ b/src/hotspot/os/windows/gc/z/zVirtualMemoryManager_windows.cpp @@ -26,25 +26,26 @@ #include "gc/z/zLargePages.inline.hpp" #include "gc/z/zMapper_windows.hpp" #include "gc/z/zSyscall_windows.hpp" +#include "gc/z/zValue.inline.hpp" #include "gc/z/zVirtualMemory.inline.hpp" +#include "gc/z/zVirtualMemoryManager.inline.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" -class ZVirtualMemoryManagerImpl : public CHeapObj { +class ZVirtualMemoryReserverImpl : public CHeapObj { public: - virtual void initialize_before_reserve() {} - virtual void register_callbacks(ZMemoryManager* manager) {} + virtual void register_callbacks(ZVirtualMemoryRegistry* registry) {} virtual bool reserve(zaddress_unsafe addr, size_t size) = 0; virtual void unreserve(zaddress_unsafe addr, size_t size) = 0; }; // Implements small pages (paged) support using placeholder reservation. // -// When a memory area is free (kept by the virtual memory manager) a +// When a memory area is available (kept by the virtual memory manager) a // single placeholder is covering that memory area. When memory is -// allocated from the manager the placeholder is split into granule +// removed from the registry the placeholder is split into granule // sized placeholders to allow mapping operations on that granularity. -class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl { +class ZVirtualMemoryReserverSmallPages : public ZVirtualMemoryReserverImpl { private: class PlaceholderCallbacks : public AllStatic { private: @@ -84,7 +85,7 @@ private: // Called when a memory area is going to be handed out to be used. // // Splits the memory area into granule-sized placeholders. - static void prepare_for_hand_out_callback(const ZMemory& area) { + static void prepare_for_hand_out_callback(const ZVirtualMemory& area) { assert(is_aligned(area.size(), ZGranuleSize), "Must be granule aligned"); split_into_granule_sized_placeholders(area.start(), area.size()); @@ -93,7 +94,7 @@ private: // Called when a memory area is handed back to the memory manager. // // Combines the granule-sized placeholders into one placeholder. - static void prepare_for_hand_back_callback(const ZMemory& area) { + static void prepare_for_hand_back_callback(const ZVirtualMemory& area) { assert(is_aligned(area.size(), ZGranuleSize), "Must be granule aligned"); coalesce_into_one_placeholder(area.start(), area.size()); @@ -103,7 +104,7 @@ private: // existing, adjacent memory area. // // Coalesces the underlying placeholders into one. 
- static void grow_callback(const ZMemory& from, const ZMemory& to) { + static void grow_callback(const ZVirtualMemory& from, const ZVirtualMemory& to) { assert(is_aligned(from.size(), ZGranuleSize), "Must be granule aligned"); assert(is_aligned(to.size(), ZGranuleSize), "Must be granule aligned"); assert(from != to, "Must have grown"); @@ -116,7 +117,7 @@ private: // memory area. // // Splits the memory into two placeholders. - static void shrink_callback(const ZMemory& from, const ZMemory& to) { + static void shrink_callback(const ZVirtualMemory& from, const ZVirtualMemory& to) { assert(is_aligned(from.size(), ZGranuleSize), "Must be granule aligned"); assert(is_aligned(to.size(), ZGranuleSize), "Must be granule aligned"); assert(from != to, "Must have shrunk"); @@ -129,7 +130,7 @@ private: } public: - static ZMemoryManager::Callbacks callbacks() { + static ZVirtualMemoryRegistry::Callbacks callbacks() { // Each reserved virtual memory address area registered in _manager is // exactly covered by a single placeholder. Callbacks are installed so // that whenever a memory area changes, the corresponding placeholder @@ -153,7 +154,7 @@ private: // See comment in zMapper_windows.cpp explaining why placeholders are // split into ZGranuleSize sized placeholders. - ZMemoryManager::Callbacks callbacks; + ZVirtualMemoryRegistry::Callbacks callbacks; callbacks._prepare_for_hand_out = &prepare_for_hand_out_callback; callbacks._prepare_for_hand_back = &prepare_for_hand_back_callback; @@ -164,8 +165,8 @@ private: } }; - virtual void register_callbacks(ZMemoryManager* manager) { - manager->register_callbacks(PlaceholderCallbacks::callbacks()); + virtual void register_callbacks(ZVirtualMemoryRegistry* registry) { + registry->register_callbacks(PlaceholderCallbacks::callbacks()); } virtual bool reserve(zaddress_unsafe addr, size_t size) { @@ -185,12 +186,8 @@ private: // ZPhysicalMemory layer needs access to the section HANDLE ZAWESection; -class ZVirtualMemoryManagerLargePages : public ZVirtualMemoryManagerImpl { +class ZVirtualMemoryReserverLargePages : public ZVirtualMemoryReserverImpl { private: - virtual void initialize_before_reserve() { - ZAWESection = ZMapper::create_shared_awe_section(); - } - virtual bool reserve(zaddress_unsafe addr, size_t size) { const zaddress_unsafe res = ZMapper::reserve_for_shared_awe(ZAWESection, addr, size); @@ -201,27 +198,33 @@ private: virtual void unreserve(zaddress_unsafe addr, size_t size) { ZMapper::unreserve_for_shared_awe(addr, size); } + +public: + ZVirtualMemoryReserverLargePages() { + ZAWESection = ZMapper::create_shared_awe_section(); + } }; -static ZVirtualMemoryManagerImpl* _impl = nullptr; +static ZVirtualMemoryReserverImpl* _impl = nullptr; + +void ZVirtualMemoryReserverImpl_initialize() { + assert(_impl == nullptr, "Should only initialize once"); -void ZVirtualMemoryManager::pd_initialize_before_reserve() { if (ZLargePages::is_enabled()) { - _impl = new ZVirtualMemoryManagerLargePages(); + _impl = new ZVirtualMemoryReserverLargePages(); } else { - _impl = new ZVirtualMemoryManagerSmallPages(); + _impl = new ZVirtualMemoryReserverSmallPages(); } - _impl->initialize_before_reserve(); } -void ZVirtualMemoryManager::pd_register_callbacks(ZMemoryManager* manager) { - _impl->register_callbacks(manager); +void ZVirtualMemoryReserver::pd_register_callbacks(ZVirtualMemoryRegistry* registry) { + _impl->register_callbacks(registry); } -bool ZVirtualMemoryManager::pd_reserve(zaddress_unsafe addr, size_t size) { +bool 
ZVirtualMemoryReserver::pd_reserve(zaddress_unsafe addr, size_t size) { return _impl->reserve(addr, size); } -void ZVirtualMemoryManager::pd_unreserve(zaddress_unsafe addr, size_t size) { +void ZVirtualMemoryReserver::pd_unreserve(zaddress_unsafe addr, size_t size) { _impl->unreserve(addr, size); } diff --git a/src/hotspot/share/gc/z/vmStructs_z.hpp b/src/hotspot/share/gc/z/vmStructs_z.hpp index 47fa6ac3021..e1eb2a661f5 100644 --- a/src/hotspot/share/gc/z/vmStructs_z.hpp +++ b/src/hotspot/share/gc/z/vmStructs_z.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,8 +29,11 @@ #include "gc/z/zForwarding.hpp" #include "gc/z/zGranuleMap.hpp" #include "gc/z/zHeap.hpp" +#include "gc/z/zNUMA.hpp" #include "gc/z/zPageAllocator.hpp" #include "gc/z/zPageType.hpp" +#include "gc/z/zValue.hpp" +#include "gc/z/zVirtualMemory.hpp" #include "utilities/macros.hpp" // Expose some ZGC globals to the SA agent. @@ -61,6 +64,7 @@ public: typedef ZGranuleMap ZGranuleMapForPageTable; typedef ZGranuleMap ZGranuleMapForForwarding; typedef ZAttachedArray ZAttachedArrayForForwarding; +typedef ZValue ZPerNUMAZPartition; #define VM_STRUCTS_Z(nonstatic_field, volatile_nonstatic_field, static_field) \ static_field(ZGlobalsForVMStructs, _instance_p, ZGlobalsForVMStructs*) \ @@ -87,8 +91,13 @@ typedef ZAttachedArray ZAttachedArrayForForwardin volatile_nonstatic_field(ZPage, _top, zoffset_end) \ \ nonstatic_field(ZPageAllocator, _max_capacity, const size_t) \ - volatile_nonstatic_field(ZPageAllocator, _capacity, size_t) \ - volatile_nonstatic_field(ZPageAllocator, _used, size_t) \ + nonstatic_field(ZPageAllocator, _partitions, ZPerNUMAZPartition) \ + \ + static_field(ZNUMA, _count, uint32_t) \ + nonstatic_field(ZPerNUMAZPartition, _addr, const uintptr_t) \ + \ + volatile_nonstatic_field(ZPartition, _capacity, size_t) \ + nonstatic_field(ZPartition, _used, size_t) \ \ nonstatic_field(ZPageTable, _map, ZGranuleMapForPageTable) \ \ @@ -97,8 +106,8 @@ typedef ZAttachedArray ZAttachedArrayForForwardin \ nonstatic_field(ZForwardingTable, _map, ZGranuleMapForForwarding) \ \ - nonstatic_field(ZVirtualMemory, _start, const zoffset) \ - nonstatic_field(ZVirtualMemory, _end, const zoffset_end) \ + nonstatic_field(ZVirtualMemory, _start, const zoffset_end) \ + nonstatic_field(ZVirtualMemory, _size, const size_t) \ \ nonstatic_field(ZForwarding, _virtual, const ZVirtualMemory) \ nonstatic_field(ZForwarding, _object_alignment_shift, const size_t) \ @@ -134,6 +143,9 @@ typedef ZAttachedArray ZAttachedArrayForForwardin declare_toplevel_type(ZPageType) \ declare_toplevel_type(ZPageAllocator) \ declare_toplevel_type(ZPageTable) \ + declare_toplevel_type(ZPartition) \ + declare_toplevel_type(ZNUMA) \ + declare_toplevel_type(ZPerNUMAZPartition) \ declare_toplevel_type(ZAttachedArrayForForwarding) \ declare_toplevel_type(ZGranuleMapForPageTable) \ declare_toplevel_type(ZGranuleMapForForwarding) \ diff --git a/src/hotspot/share/gc/z/zAddress.cpp b/src/hotspot/share/gc/z/zAddress.cpp index 59489f62372..98779419219 100644 --- a/src/hotspot/share/gc/z/zAddress.cpp +++ b/src/hotspot/share/gc/z/zAddress.cpp @@ -24,6 +24,7 @@ #include "gc/shared/barrierSet.hpp" #include "gc/shared/gc_globals.hpp" #include "gc/z/zAddress.inline.hpp" +#include "gc/z/zNUMA.inline.hpp" #include 
"gc/z/zVerify.hpp" #include "oops/oopsHierarchy.hpp" #include "runtime/java.hpp" @@ -36,6 +37,10 @@ size_t ZAddressOffsetBits; uintptr_t ZAddressOffsetMask; size_t ZAddressOffsetMax; +size_t ZBackingOffsetMax; + +uint32_t ZBackingIndexMax; + uintptr_t ZPointerRemapped; uintptr_t ZPointerRemappedYoungMask; uintptr_t ZPointerRemappedOldMask; @@ -145,3 +150,10 @@ void ZGlobalsPointers::flip_old_relocate_start() { ZPointerRemappedOldMask ^= ZPointerRemappedMask; set_good_masks(); } + +size_t ZGlobalsPointers::min_address_offset_request() { + // See ZVirtualMemoryReserver for logic around setting up the heap for NUMA + const size_t desired_for_heap = MaxHeapSize * ZVirtualToPhysicalRatio; + const size_t desired_for_numa_multiplier = ZNUMA::count() > 1 ? 2 : 1; + return round_up_power_of_2(desired_for_heap * desired_for_numa_multiplier); +} diff --git a/src/hotspot/share/gc/z/zAddress.hpp b/src/hotspot/share/gc/z/zAddress.hpp index 6b3c887062e..de97c17d089 100644 --- a/src/hotspot/share/gc/z/zAddress.hpp +++ b/src/hotspot/share/gc/z/zAddress.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,12 @@ const size_t ZAddressOffsetShift = 0; extern uintptr_t ZAddressOffsetMask; extern size_t ZAddressOffsetMax; +// Describes the maximal offset inside the backing storage. +extern size_t ZBackingOffsetMax; + +// Describes the maximal granule index inside the backing storage. +extern uint32_t ZBackingIndexMax; + // Layout of metadata bits in colored pointer / zpointer. // // A zpointer is a combination of the address bits (heap base bit + offset) @@ -223,16 +229,25 @@ const int ZPointerStoreGoodMaskLowOrderBitsOffset = LITTLE_ENDIAN_ONLY(0 // Offsets // - Virtual address range offsets -// - Physical memory offsets -enum class zoffset : uintptr_t {}; +enum class zoffset : uintptr_t { invalid = UINTPTR_MAX }; // Offsets including end of offset range -enum class zoffset_end : uintptr_t {}; +enum class zoffset_end : uintptr_t { invalid = UINTPTR_MAX }; + +// - Physical memory segment offsets +enum class zbacking_offset : uintptr_t {}; +// Offsets including end of offset range +enum class zbacking_offset_end : uintptr_t {}; + +// - Physical memory segment indicies +enum class zbacking_index : uint32_t { zero = 0, invalid = UINT32_MAX }; +// Offsets including end of indicies range +enum class zbacking_index_end : uint32_t { zero = 0, invalid = UINT32_MAX }; // Colored oop -enum class zpointer : uintptr_t { null = 0 }; +enum class zpointer : uintptr_t { null = 0 }; // Uncolored oop - safe to dereference -enum class zaddress : uintptr_t { null = 0 }; +enum class zaddress : uintptr_t { null = 0 }; // Uncolored oop - not safe to dereference, could point uncommitted memory enum class zaddress_unsafe : uintptr_t { null = 0 }; @@ -307,6 +322,8 @@ public: static void flip_young_relocate_start(); static void flip_old_mark_start(); static void flip_old_relocate_start(); + + static size_t min_address_offset_request(); }; #endif // SHARE_GC_Z_ZADDRESS_HPP diff --git a/src/hotspot/share/gc/z/zAddress.inline.hpp b/src/hotspot/share/gc/z/zAddress.inline.hpp index bbc92a7e2aa..98f6900cbf0 100644 --- a/src/hotspot/share/gc/z/zAddress.inline.hpp +++ b/src/hotspot/share/gc/z/zAddress.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle 
and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,14 +27,124 @@ #include "gc/z/zAddress.hpp" #include "gc/shared/gc_globals.hpp" +#include "gc/z/zGlobals.hpp" #include "oops/oop.hpp" #include "oops/oopsHierarchy.hpp" #include "runtime/atomic.hpp" +#include "utilities/align.hpp" +#include "utilities/checkedCast.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #include "utilities/powerOfTwo.hpp" #include CPU_HEADER_INLINE(gc/z/zAddress) +#include + +// Offset Operator Macro +// Creates operators for the offset, offset_end style types + +#define CREATE_ZOFFSET_OPERATORS(offset_type) \ + \ + /* Arithmetic operators for offset_type */ \ + \ +inline offset_type operator+(offset_type offset, size_t size) { \ + const auto size_value = checked_cast>(size); \ + return to_##offset_type(untype(offset) + size_value); \ +} \ + \ +inline offset_type& operator+=(offset_type& offset, size_t size) { \ + const auto size_value = checked_cast>(size); \ + offset = to_##offset_type(untype(offset) + size_value); \ + return offset; \ +} \ + \ +inline offset_type operator-(offset_type offset, size_t size) { \ + const auto size_value = checked_cast>(size); \ + return to_##offset_type(untype(offset) - size_value); \ +} \ + \ +inline size_t operator-(offset_type first, offset_type second) { \ + return untype(first - untype(second)); \ +} \ + \ +inline offset_type& operator-=(offset_type& offset, size_t size) { \ + const auto size_value = checked_cast>(size); \ + offset = to_##offset_type(untype(offset) - size_value); \ + return offset; \ +} \ + \ + /* Arithmetic operators for offset_type##_end */ \ + \ +inline offset_type##_end operator+(offset_type##_end offset, size_t size) { \ + const auto size_value = checked_cast>(size); \ + return to_##offset_type##_end(untype(offset) + size_value); \ +} \ + \ +inline offset_type##_end& operator+=(offset_type##_end& offset, size_t size) { \ + const auto size_value = checked_cast>(size); \ + offset = to_##offset_type##_end(untype(offset) + size_value); \ + return offset; \ +} \ + \ +inline offset_type##_end operator-(offset_type##_end first, size_t size) { \ + const auto size_value = checked_cast>(size); \ + return to_##offset_type##_end(untype(first) - size_value); \ +} \ + \ +inline size_t operator-(offset_type##_end first, offset_type##_end second) { \ + return untype(first - untype(second)); \ +} \ + \ +inline offset_type##_end& operator-=(offset_type##_end& offset, size_t size) { \ + const auto size_value = checked_cast>(size); \ + offset = to_##offset_type##_end(untype(offset) - size_value); \ + return offset; \ +} \ + \ + /* Arithmetic operators for offset_type cross offset_type##_end */ \ + \ +inline size_t operator-(offset_type##_end first, offset_type second) { \ + return untype(first - untype(second)); \ +} \ + \ + /* Logical operators for offset_type cross offset_type##_end */ \ + \ +inline bool operator!=(offset_type first, offset_type##_end second) { \ + return untype(first) != untype(second); \ +} \ + \ +inline bool operator!=(offset_type##_end first, offset_type second) { \ + return untype(first) != untype(second); \ +} \ + \ +inline bool operator==(offset_type first, offset_type##_end second) { \ + return untype(first) == untype(second); \ +} \ + \ +inline bool operator==(offset_type##_end first, offset_type second) { 
\ + return untype(first) == untype(second); \ +} \ + \ +inline bool operator<(offset_type##_end first, offset_type second) { \ + return untype(first) < untype(second); \ +} \ + \ +inline bool operator<(offset_type first, offset_type##_end second) { \ + return untype(first) < untype(second); \ +} \ + \ +inline bool operator<=(offset_type##_end first, offset_type second) { \ + return untype(first) <= untype(second); \ +} \ + \ +inline bool operator>(offset_type first, offset_type##_end second) { \ + return untype(first) > untype(second); \ +} \ + \ +inline bool operator>=(offset_type first, offset_type##_end second) { \ + return untype(first) >= untype(second); \ +} \ + // zoffset functions inline uintptr_t untype(zoffset offset) { @@ -59,31 +169,6 @@ inline zoffset to_zoffset(zoffset_end offset) { return to_zoffset(value); } -inline zoffset operator+(zoffset offset, size_t size) { - return to_zoffset(untype(offset) + size); -} - -inline zoffset& operator+=(zoffset& offset, size_t size) { - offset = to_zoffset(untype(offset) + size); - return offset; -} - -inline zoffset operator-(zoffset offset, size_t size) { - const uintptr_t value = untype(offset) - size; - return to_zoffset(value); -} - -inline size_t operator-(zoffset left, zoffset right) { - const size_t diff = untype(left) - untype(right); - assert(diff < ZAddressOffsetMax, "Underflow"); - return diff; -} - -inline zoffset& operator-=(zoffset& offset, size_t size) { - offset = to_zoffset(untype(offset) - size); - return offset; -} - inline bool to_zoffset_end(zoffset_end* result, zoffset_end start, size_t size) { const uintptr_t value = untype(start) + size; if (value <= ZAddressOffsetMax) { @@ -109,62 +194,124 @@ inline zoffset_end to_zoffset_end(zoffset offset) { return zoffset_end(untype(offset)); } -inline bool operator!=(zoffset first, zoffset_end second) { - return untype(first) != untype(second); +CREATE_ZOFFSET_OPERATORS(zoffset) + +// zbacking_offset functions + +inline uintptr_t untype(zbacking_offset offset) { + const uintptr_t value = static_cast(offset); + assert(value < ZBackingOffsetMax, "must have no other bits"); + return value; } -inline bool operator!=(zoffset_end first, zoffset second) { - return untype(first) != untype(second); +inline uintptr_t untype(zbacking_offset_end offset) { + const uintptr_t value = static_cast(offset); + assert(value <= ZBackingOffsetMax, "must have no other bits"); + return value; } -inline bool operator==(zoffset first, zoffset_end second) { - return untype(first) == untype(second); +inline zbacking_offset to_zbacking_offset(uintptr_t value) { + assert(value < ZBackingOffsetMax, "must have no other bits"); + return zbacking_offset(value); } -inline bool operator==(zoffset_end first, zoffset second) { - return untype(first) == untype(second); +inline zbacking_offset to_zbacking_offset(zbacking_offset_end offset) { + const uintptr_t value = untype(offset); + return to_zbacking_offset(value); } -inline bool operator<(zoffset_end first, zoffset second) { - return untype(first) < untype(second); +inline zbacking_offset_end to_zbacking_offset_end(zbacking_offset start, size_t size) { + const uintptr_t value = untype(start) + size; + assert(value <= ZBackingOffsetMax, "Overflow start: " PTR_FORMAT " size: " PTR_FORMAT " value: " PTR_FORMAT, + untype(start), size, value); + return zbacking_offset_end(value); } -inline bool operator<(zoffset first, zoffset_end second) { - return untype(first) < untype(second); +inline zbacking_offset_end to_zbacking_offset_end(uintptr_t value) { + assert(value 
<= ZBackingOffsetMax, "must have no other bits"); + return zbacking_offset_end(value); } -inline bool operator<=(zoffset_end first, zoffset second) { - return untype(first) <= untype(second); +inline zbacking_offset_end to_zbacking_offset_end(zbacking_offset offset) { + return zbacking_offset_end(untype(offset)); } -inline bool operator>(zoffset first, zoffset_end second) { - return untype(first) > untype(second); +CREATE_ZOFFSET_OPERATORS(zbacking_offset) + +// zbacking_index functions + +inline uint32_t untype(zbacking_index index) { + const uint32_t value = static_cast(index); + assert(value < ZBackingIndexMax, "must have no other bits"); + return value; } -inline bool operator>=(zoffset first, zoffset_end second) { - return untype(first) >= untype(second); +inline uint32_t untype(zbacking_index_end index) { + const uint32_t value = static_cast(index); + assert(value <= ZBackingIndexMax, "must have no other bits"); + return value; } -inline size_t operator-(zoffset_end first, zoffset second) { - return untype(first) - untype(second); +inline zbacking_index to_zbacking_index(uint32_t value) { + assert(value < ZBackingIndexMax, "must have no other bits"); + return zbacking_index(value); } -inline zoffset_end operator-(zoffset_end first, size_t second) { - return to_zoffset_end(untype(first) - second); +inline zbacking_index to_zbacking_index(zbacking_index_end index) { + const uint32_t value = untype(index); + return to_zbacking_index(value); } -inline size_t operator-(zoffset_end first, zoffset_end second) { - return untype(first) - untype(second); +inline zbacking_index_end to_zbacking_index_end(zbacking_index start, size_t size) { + const uint32_t start_value = untype(start); + const uint32_t value = start_value + checked_cast(size); + assert(value <= ZBackingIndexMax && start_value <= value, + "Overflow start: %x size: %zu value: %x", start_value, size, value); + return zbacking_index_end(value); } -inline zoffset_end& operator-=(zoffset_end& offset, size_t size) { - offset = to_zoffset_end(untype(offset) - size); - return offset; +inline zbacking_index_end to_zbacking_index_end(uint32_t value) { + assert(value <= ZBackingIndexMax, "must have no other bits"); + return zbacking_index_end(value); } -inline zoffset_end& operator+=(zoffset_end& offset, size_t size) { - offset = to_zoffset_end(untype(offset) + size); - return offset; +inline zbacking_index_end to_zbacking_index_end(zbacking_index index) { + return zbacking_index_end(untype(index)); +} + +CREATE_ZOFFSET_OPERATORS(zbacking_index) + +#undef CREATE_ZOFFSET_OPERATORS + +// zbacking_offset <-> zbacking_index conversion functions + +inline zbacking_index to_zbacking_index(zbacking_offset offset) { + const uintptr_t value = untype(offset); + assert(is_aligned(value, ZGranuleSize), "must be granule aligned"); + return to_zbacking_index((uint32_t)(value >> ZGranuleSizeShift)); +} + +inline zbacking_offset to_zbacking_offset(zbacking_index index) { + const uintptr_t value = untype(index); + return to_zbacking_offset(value << ZGranuleSizeShift); +} + +// ZRange helper functions + +inline zoffset to_start_type(zoffset_end offset) { + return to_zoffset(offset); +} + +inline zbacking_index to_start_type(zbacking_index_end offset) { + return to_zbacking_index(offset); +} + +inline zoffset_end to_end_type(zoffset start, size_t size) { + return to_zoffset_end(start, size); +} + +inline zbacking_index_end to_end_type(zbacking_index start, size_t size) { + return to_zbacking_index_end(start, size); } // zpointer functions diff --git 
a/src/hotspot/share/gc/z/zAddressSpaceLimit.cpp b/src/hotspot/share/gc/z/zAddressSpaceLimit.cpp index fc42d9f3db1..9a6fb12779a 100644 --- a/src/hotspot/share/gc/z/zAddressSpaceLimit.cpp +++ b/src/hotspot/share/gc/z/zAddressSpaceLimit.cpp @@ -22,11 +22,13 @@ */ #include "gc/shared/gc_globals.hpp" +#include "gc/shared/gcLogPrecious.hpp" #include "gc/z/zAddressSpaceLimit.hpp" #include "gc/z/zGlobals.hpp" #include "runtime/globals.hpp" #include "runtime/os.hpp" #include "utilities/align.hpp" +#include "utilities/ostream.hpp" static size_t address_space_limit() { size_t limit = 0; @@ -44,3 +46,13 @@ size_t ZAddressSpaceLimit::heap() { const size_t limit = address_space_limit() / MaxVirtMemFraction; return align_up(limit, ZGranuleSize); } + +void ZAddressSpaceLimit::print_limits() { + const size_t limit = address_space_limit(); + + if (limit == SIZE_MAX) { + log_info_p(gc, init)("Address Space Size: unlimited"); + } else { + log_info_p(gc, init)("Address Space Size: limited (" EXACTFMT ")", EXACTFMTARGS(limit)); + } +} diff --git a/src/hotspot/share/gc/z/zAddressSpaceLimit.hpp b/src/hotspot/share/gc/z/zAddressSpaceLimit.hpp index 66e01f0ebb0..1c9c65c3255 100644 --- a/src/hotspot/share/gc/z/zAddressSpaceLimit.hpp +++ b/src/hotspot/share/gc/z/zAddressSpaceLimit.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,6 +30,8 @@ class ZAddressSpaceLimit : public AllStatic { public: static size_t heap(); + + static void print_limits(); }; #endif // SHARE_GC_Z_ZADDRESSSPACELIMIT_HPP diff --git a/src/hotspot/share/gc/z/zAllocationFlags.hpp b/src/hotspot/share/gc/z/zAllocationFlags.hpp index c6998bac6c1..d94badae929 100644 --- a/src/hotspot/share/gc/z/zAllocationFlags.hpp +++ b/src/hotspot/share/gc/z/zAllocationFlags.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -31,25 +31,22 @@ // Allocation flags layout // ----------------------- // -// 7 2 1 0 -// +-----+-+-+-+ -// |00000|1|1|1| -// +-----+-+-+-+ -// | | | | -// | | | * 0-0 Non-Blocking Flag (1-bit) -// | | | -// | | * 1-1 GC Relocation Flag (1-bit) -// | | -// | * 2-2 Low Address Flag (1-bit) +// 7 1 0 +// +------+-+-+ +// |000000|1|1| +// +------+-+-+ +// | | | +// | | * 0-0 Non-Blocking Flag (1-bit) +// | | +// | * 1-1 GC Relocation Flag (1-bit) // | -// * 7-3 Unused (5-bits) +// * 7-2 Unused (6-bits) // class ZAllocationFlags { private: typedef ZBitField field_non_blocking; typedef ZBitField field_gc_relocation; - typedef ZBitField field_low_address; uint8_t _flags; @@ -65,10 +62,6 @@ public: _flags |= field_gc_relocation::encode(true); } - void set_low_address() { - _flags |= field_low_address::encode(true); - } - bool non_blocking() const { return field_non_blocking::decode(_flags); } @@ -76,10 +69,6 @@ public: bool gc_relocation() const { return field_gc_relocation::decode(_flags); } - - bool low_address() const { - return field_low_address::decode(_flags); - } }; #endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP diff --git a/src/hotspot/share/gc/z/zArguments.cpp b/src/hotspot/share/gc/z/zArguments.cpp index f972e0718b4..67b9f6f0bb9 100644 --- a/src/hotspot/share/gc/z/zArguments.cpp +++ b/src/hotspot/share/gc/z/zArguments.cpp @@ -122,7 +122,7 @@ void ZArguments::initialize() { GCArguments::initialize(); // Enable NUMA by default - if (FLAG_IS_DEFAULT(UseNUMA)) { + if (FLAG_IS_DEFAULT(UseNUMA) && FLAG_IS_DEFAULT(ZFakeNUMA)) { FLAG_SET_DEFAULT(UseNUMA, true); } diff --git a/src/hotspot/share/gc/z/zArray.hpp b/src/hotspot/share/gc/z/zArray.hpp index 7bcd4f59eeb..1b7e99b3ace 100644 --- a/src/hotspot/share/gc/z/zArray.hpp +++ b/src/hotspot/share/gc/z/zArray.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -32,9 +32,49 @@ #include +template class ZArray; class ZLock; -template using ZArray = GrowableArrayCHeap; +template +class ZArraySlice : public GrowableArrayView { + friend class ZArray; + friend class ZArray>; + friend class ZArraySlice>; + friend class ZArraySlice; + +private: + ZArraySlice(T* data, int len); + +public: + ZArraySlice slice_front(int end); + ZArraySlice slice_front(int end) const; + + ZArraySlice slice_back(int start); + ZArraySlice slice_back(int start) const; + + ZArraySlice slice(int start, int end); + ZArraySlice slice(int start, int end) const; + + operator ZArraySlice() const; +}; + +template +class ZArray : public GrowableArrayCHeap { +public: + using GrowableArrayCHeap::GrowableArrayCHeap; + + ZArraySlice slice_front(int end); + ZArraySlice slice_front(int end) const; + + ZArraySlice slice_back(int start); + ZArraySlice slice_back(int start) const; + + ZArraySlice slice(int start, int end); + ZArraySlice slice(int start, int end) const; + + operator ZArraySlice(); + operator ZArraySlice() const; +}; template class ZArrayIteratorImpl : public StackObj { diff --git a/src/hotspot/share/gc/z/zArray.inline.hpp b/src/hotspot/share/gc/z/zArray.inline.hpp index ec7feda8d63..547a73ffc0d 100644 --- a/src/hotspot/share/gc/z/zArray.inline.hpp +++ b/src/hotspot/share/gc/z/zArray.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,6 +29,93 @@ #include "gc/z/zLock.inline.hpp" #include "runtime/atomic.hpp" +template +ZArraySlice::ZArraySlice(T* data, int len) + : GrowableArrayView(data, len, len) {} + +template +ZArraySlice ZArraySlice::slice_front(int end) { + return slice(0, end); +} + +template +ZArraySlice ZArraySlice::slice_front(int end) const { + return slice(0, end); +} + +template +ZArraySlice ZArraySlice::slice_back(int start) { + return slice(start, this->_len); +} + +template +ZArraySlice ZArraySlice::slice_back(int start) const { + return slice(start, this->_len); +} + +template +ZArraySlice ZArraySlice::slice(int start, int end) { + assert(0 <= start && start <= end && end <= this->_len, + "slice called with invalid range (%d, %d) for length %d", start, end, this->_len); + return ZArraySlice(this->_data + start, end - start); +} + +template +ZArraySlice ZArraySlice::slice(int start, int end) const { + assert(0 <= start && start <= end && end <= this->_len, + "slice called with invalid range (%d, %d) for length %d", start, end, this->_len); + return ZArraySlice(this->_data + start, end - start); +} + +template +ZArraySlice::operator ZArraySlice() const { + return slice(0, this->_len); +} + +template +ZArraySlice ZArray::slice_front(int end) { + return slice(0, end); +} + +template +ZArraySlice ZArray::slice_front(int end) const { + return slice(0, end); +} + +template +ZArraySlice ZArray::slice_back(int start) { + return slice(start, this->_len); +} + +template +ZArraySlice ZArray::slice_back(int start) const { + return slice(start, this->_len); +} + +template +ZArraySlice ZArray::slice(int start, int end) { + assert(0 <= start && start <= end && end <= this->_len, + "slice called with invalid range (%d, %d) for length %d", start, end, this->_len); + return ZArraySlice(this->_data + start, end - start); +} + 
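// Illustrative usage sketch (not part of the patch): a ZArray<T> shares its
// storage with any ZArraySlice<T> taken from it, so sub-ranges can be passed
// around without copying. Assuming the slice API declared in zArray.hpp above:
//
//   ZArray<int> array;
//   array.append(10);
//   array.append(20);
//   array.append(30);
//   array.append(40);
//
//   ZArraySlice<int> head = array.slice_front(2);  // refers to indices 0..1
//   ZArraySlice<int> tail = array.slice_back(2);   // refers to indices 2..3
//   ZArraySlice<const int> all = array;            // read-only view of all elements
//
// A slice remains valid only as long as the originating ZArray is neither
// resized nor destroyed, since no elements are copied.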
+template +ZArraySlice ZArray::slice(int start, int end) const { + assert(0 <= start && start <= end && end <= this->_len, + "slice called with invalid range (%d, %d) for length %d", start, end, this->_len); + return ZArraySlice(this->_data + start, end - start); +} + +template +ZArray::operator ZArraySlice() { + return slice(0, this->_len); +} + +template +ZArray::operator ZArraySlice() const { + return slice(0, this->_len); +} + template inline bool ZArrayIteratorImpl::next_serial(size_t* index) { if (_next == _end) { diff --git a/src/hotspot/share/gc/z/zCollectedHeap.cpp b/src/hotspot/share/gc/z/zCollectedHeap.cpp index 828e3c9d033..642ad42a1d7 100644 --- a/src/hotspot/share/gc/z/zCollectedHeap.cpp +++ b/src/hotspot/share/gc/z/zCollectedHeap.cpp @@ -53,6 +53,7 @@ #include "runtime/stackWatermarkSet.hpp" #include "services/memoryUsage.hpp" #include "utilities/align.hpp" +#include "utilities/ostream.hpp" ZCollectedHeap* ZCollectedHeap::heap() { return named_heap(CollectedHeap::Z); @@ -245,7 +246,7 @@ size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const { } MemoryUsage ZCollectedHeap::memory_usage() { - const size_t initial_size = ZHeap::heap()->initial_capacity(); + const size_t initial_size = InitialHeapSize; const size_t committed = ZHeap::heap()->capacity(); const size_t used = MIN2(ZHeap::heap()->used(), committed); const size_t max_size = ZHeap::heap()->max_capacity(); @@ -355,10 +356,14 @@ void ZCollectedHeap::prepare_for_verify() { } void ZCollectedHeap::print_on(outputStream* st) const { + StreamAutoIndentor auto_indentor(st); + _heap.print_on(st); } void ZCollectedHeap::print_on_error(outputStream* st) const { + StreamAutoIndentor auto_indentor(st); + _heap.print_on_error(st); } diff --git a/src/hotspot/share/gc/z/zGeneration.cpp b/src/hotspot/share/gc/z/zGeneration.cpp index f8475256733..f3cd3393fc5 100644 --- a/src/hotspot/share/gc/z/zGeneration.cpp +++ b/src/hotspot/share/gc/z/zGeneration.cpp @@ -160,7 +160,7 @@ void ZGeneration::free_empty_pages(ZRelocationSetSelector* selector, int bulk) { // the page allocator lock, and trying to satisfy stalled allocations // too frequently. if (selector->should_free_empty_pages(bulk)) { - const size_t freed = ZHeap::heap()->free_empty_pages(selector->empty_pages()); + const size_t freed = ZHeap::heap()->free_empty_pages(_id, selector->empty_pages()); increase_freed(freed); selector->clear_empty_pages(); } @@ -190,17 +190,6 @@ void ZGeneration::select_relocation_set(bool promote_all) { for (ZPage* page; pt_iter.next(&page);) { if (!page->is_relocatable()) { // Not relocatable, don't register - // Note that the seqnum can change under our feet here as the page - // can be concurrently freed and recycled by a concurrent generation - // collection. However this property is stable across such transitions. - // If it was not relocatable before recycling, then it won't be - // relocatable after it gets recycled either, as the seqnum atomically - // becomes allocating for the given generation. The opposite property - // also holds: if the page is relocatable, then it can't have been - // concurrently freed; if it was re-allocated it would not be - // relocatable, and if it was not re-allocated we know that it was - // allocated earlier than mark start of the current generation - // collection. continue; } @@ -213,15 +202,14 @@ void ZGeneration::select_relocation_set(bool promote_all) { // Reclaim empty pages in bulk - // An active iterator blocks immediate recycle and delete of pages. 
- // The intent it to allow the code that iterates over the pages to - // safely read the properties of the pages without them being changed - // by another thread. However, this function both iterates over the - // pages AND frees/recycles them. We "yield" the iterator, so that we - // can perform immediate recycling (as long as no other thread is - // iterating over the pages). The contract is that the pages that are - // about to be freed are "owned" by this thread, and no other thread - // will change their states. + // An active iterator blocks immediate deletion of pages. The intent is + // to allow the code that iterates over pages to safely read properties + // of the pages without them being freed/deleted. However, this + // function both iterates over the pages AND frees them. We "yield" the + // iterator, so that we can perform immediate deletion (as long as no + // other thread is iterating over the pages). The contract is that the + // pages that are about to be freed are "owned" by this thread, and no + // other thread will change their states. pt_iter.yield([&]() { free_empty_pages(&selector, 64 /* bulk */); }); @@ -934,7 +922,7 @@ void ZGenerationYoung::flip_promote(ZPage* from_page, ZPage* to_page) { _page_table->replace(from_page, to_page); // Update statistics - _page_allocator->promote_used(from_page->size()); + _page_allocator->promote_used(from_page, to_page); increase_freed(from_page->size()); increase_promoted(from_page->live_bytes()); } @@ -943,7 +931,7 @@ void ZGenerationYoung::in_place_relocate_promote(ZPage* from_page, ZPage* to_pag _page_table->replace(from_page, to_page); // Update statistics - _page_allocator->promote_used(from_page->size()); + _page_allocator->promote_used(from_page, to_page); } void ZGenerationYoung::register_flip_promoted(const ZArray& pages) { diff --git a/src/hotspot/share/gc/z/zGranuleMap.hpp b/src/hotspot/share/gc/z/zGranuleMap.hpp index 58c95e331b6..4cde26847be 100644 --- a/src/hotspot/share/gc/z/zGranuleMap.hpp +++ b/src/hotspot/share/gc/z/zGranuleMap.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -55,6 +55,9 @@ public: T get_acquire(zoffset offset) const; void release_put(zoffset offset, T value); void release_put(zoffset offset, size_t size, T value); + + const T* addr(zoffset offset) const; + T* addr(zoffset offset); }; template diff --git a/src/hotspot/share/gc/z/zGranuleMap.inline.hpp b/src/hotspot/share/gc/z/zGranuleMap.inline.hpp index 6cdc31f8270..21be6a3d80a 100644 --- a/src/hotspot/share/gc/z/zGranuleMap.inline.hpp +++ b/src/hotspot/share/gc/z/zGranuleMap.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -101,6 +101,17 @@ inline void ZGranuleMap::release_put(zoffset offset, size_t size, T value) { put(offset, size, value); } +template +inline const T* ZGranuleMap::addr(zoffset offset) const { + const size_t index = index_for_offset(offset); + return _map + index; +} + +template +inline T* ZGranuleMap::addr(zoffset offset) { + return const_cast(const_cast*>(this)->addr(offset)); +} + template inline ZGranuleMapIterator::ZGranuleMapIterator(const ZGranuleMap* granule_map) : ZArrayIteratorImpl(granule_map->_map, granule_map->_size) {} diff --git a/src/hotspot/share/gc/z/zHeap.cpp b/src/hotspot/share/gc/z/zHeap.cpp index e0f4cb65303..90f8a867135 100644 --- a/src/hotspot/share/gc/z/zHeap.cpp +++ b/src/hotspot/share/gc/z/zHeap.cpp @@ -59,7 +59,7 @@ ZHeap::ZHeap() _page_table(), _allocator_eden(), _allocator_relocation(), - _serviceability(initial_capacity(), min_capacity(), max_capacity()), + _serviceability(InitialHeapSize, min_capacity(), max_capacity()), _old(&_page_table, &_page_allocator), _young(&_page_table, _old.forwarding_table(), &_page_allocator), _initialized(false) { @@ -94,10 +94,6 @@ bool ZHeap::is_initialized() const { return _initialized; } -size_t ZHeap::initial_capacity() const { - return _page_allocator.initial_capacity(); -} - size_t ZHeap::min_capacity() const { return _page_allocator.min_capacity(); } @@ -240,18 +236,18 @@ void ZHeap::undo_alloc_page(ZPage* page) { log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: %zu", p2i(Thread::current()), ZUtils::thread_name(), p2i(page), page->size()); - free_page(page, false /* allow_defragment */); + free_page(page); } -void ZHeap::free_page(ZPage* page, bool allow_defragment) { +void ZHeap::free_page(ZPage* page) { // Remove page table entry _page_table.remove(page); // Free page - _page_allocator.free_page(page, allow_defragment); + _page_allocator.free_page(page); } -size_t ZHeap::free_empty_pages(const ZArray* pages) { +size_t ZHeap::free_empty_pages(ZGenerationId id, const ZArray* pages) { size_t freed = 0; // Remove page table entries ZArrayIterator iter(pages); @@ -261,7 +257,7 @@ size_t ZHeap::free_empty_pages(const ZArray* pages) { } // Free pages - _page_allocator.free_pages(pages); + _page_allocator.free_pages(id, pages); return freed; } @@ -319,21 +315,32 @@ ZServiceabilityCounters* ZHeap::serviceability_counters() { } void ZHeap::print_on(outputStream* st) const { - st->print_cr(" ZHeap used %zuM, capacity %zuM, max capacity %zuM", - used() / M, - capacity() / M, - max_capacity() / M); + streamIndentor indentor(st, 1); + _page_allocator.print_on(st); + + // Metaspace printing prepends spaces instead of using outputStream indentation + streamIndentor indentor_back(st, -1); MetaspaceUtils::print_on(st); } void ZHeap::print_on_error(outputStream* st) const { - print_on(st); + { + streamIndentor indentor(st, 1); + _page_allocator.print_on_error(st); + + // Metaspace printing prepends spaces instead of using outputStream indentation + streamIndentor indentor_back(st, -1); + MetaspaceUtils::print_on(st); + } st->cr(); print_globals_on(st); st->cr(); print_page_table_on(st); + st->cr(); + + _page_allocator.print_extended_on_error(st); } void ZHeap::print_globals_on(outputStream* st) const { @@ -366,9 +373,12 @@ void ZHeap::print_page_table_on(outputStream* st) const { // Print all pages st->print_cr("ZGC Page Table:"); - ZPageTableIterator iter(&_page_table); - for (ZPage* page; iter.next(&page);) { - 
page->print_on(st); + { + streamIndentor indentor(st, 1); + ZPageTableIterator iter(&_page_table); + for (ZPage* page; iter.next(&page);) { + page->print_on(st); + } } // Allow pages to be deleted diff --git a/src/hotspot/share/gc/z/zHeap.hpp b/src/hotspot/share/gc/z/zHeap.hpp index 25cd2209003..823fc009b2c 100644 --- a/src/hotspot/share/gc/z/zHeap.hpp +++ b/src/hotspot/share/gc/z/zHeap.hpp @@ -67,7 +67,6 @@ public: void out_of_memory(); // Heap metrics - size_t initial_capacity() const; size_t min_capacity() const; size_t max_capacity() const; size_t soft_max_capacity() const; @@ -104,8 +103,8 @@ public: // Page allocation ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age); void undo_alloc_page(ZPage* page); - void free_page(ZPage* page, bool allow_defragment); - size_t free_empty_pages(const ZArray* pages); + void free_page(ZPage* page); + size_t free_empty_pages(ZGenerationId id, const ZArray* pages); // Object allocation bool is_alloc_stalling() const; diff --git a/src/hotspot/share/gc/z/zInitialize.cpp b/src/hotspot/share/gc/z/zInitialize.cpp index 125231355ac..b8efa8bcd69 100644 --- a/src/hotspot/share/gc/z/zInitialize.cpp +++ b/src/hotspot/share/gc/z/zInitialize.cpp @@ -57,8 +57,8 @@ void ZInitialize::initialize(ZBarrierSet* barrier_set) { // Early initialization ZNMT::initialize(); - ZGlobalsPointers::initialize(); ZNUMA::initialize(); + ZGlobalsPointers::initialize(); ZCPU::initialize(); ZStatValue::initialize(); ZThreadLocalAllocBuffer::initialize(); diff --git a/src/hotspot/share/gc/z/zIntrusiveRBTree.hpp b/src/hotspot/share/gc/z/zIntrusiveRBTree.hpp new file mode 100644 index 00000000000..ff204d86462 --- /dev/null +++ b/src/hotspot/share/gc/z/zIntrusiveRBTree.hpp @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZINTRUSIVERBTREE_HPP +#define SHARE_GC_Z_ZINTRUSIVERBTREE_HPP + +#include "metaprogramming/enableIf.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/macros.hpp" + +enum class ZIntrusiveRBTreeDirection { LEFT, RIGHT }; + +class ZIntrusiveRBTreeNode { + template + friend class ZIntrusiveRBTree; + +public: + enum Color { RED = 0b0, BLACK = 0b1 }; + +private: + class ColoredNodePtr { + private: + static constexpr uintptr_t COLOR_MASK = 0b1; + static constexpr uintptr_t NODE_MASK = ~COLOR_MASK; + + uintptr_t _value; + + public: + ColoredNodePtr(ZIntrusiveRBTreeNode* node = nullptr, Color color = RED); + + constexpr Color color() const; + constexpr bool is_black() const; + constexpr bool is_red() const; + + ZIntrusiveRBTreeNode* node() const; + ZIntrusiveRBTreeNode* red_node() const; + ZIntrusiveRBTreeNode* black_node() const; + }; + +private: + ColoredNodePtr _colored_parent; + ZIntrusiveRBTreeNode* _left; + ZIntrusiveRBTreeNode* _right; + + template + const ZIntrusiveRBTreeNode* find_next_node() const; + + template + const ZIntrusiveRBTreeNode* child() const; + template + ZIntrusiveRBTreeNode* child(); + + template + ZIntrusiveRBTreeNode* const* child_addr() const; + + template + bool has_child() const; + + template + void update_child(ZIntrusiveRBTreeNode* new_child); + + void link_node(ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode** insert_location); + + void copy_parent_and_color(ZIntrusiveRBTreeNode* other); + void update_parent_and_color(ZIntrusiveRBTreeNode* parent, Color color); + + void update_parent(ZIntrusiveRBTreeNode* parent); + void update_color(Color color); + + void update_left_child(ZIntrusiveRBTreeNode* new_child); + void update_right_child(ZIntrusiveRBTreeNode* new_child); + + const ZIntrusiveRBTreeNode* parent() const; + ZIntrusiveRBTreeNode* parent(); + const ZIntrusiveRBTreeNode* red_parent() const; + ZIntrusiveRBTreeNode* red_parent(); + const ZIntrusiveRBTreeNode* black_parent() const; + ZIntrusiveRBTreeNode* black_parent(); + + bool has_parent() const; + + Color color() const; + bool is_black() const; + bool is_red() const; + static bool is_black(ZIntrusiveRBTreeNode* node); + + ZIntrusiveRBTreeNode* const* left_child_addr() const; + ZIntrusiveRBTreeNode* const* right_child_addr() const; + + const ZIntrusiveRBTreeNode* left_child() const; + ZIntrusiveRBTreeNode* left_child(); + const ZIntrusiveRBTreeNode* right_child() const; + ZIntrusiveRBTreeNode* right_child(); + + bool has_left_child() const; + bool has_right_child() const; + +public: + ZIntrusiveRBTreeNode(); + + const ZIntrusiveRBTreeNode* prev() const; + ZIntrusiveRBTreeNode* prev(); + const ZIntrusiveRBTreeNode* next() const; + ZIntrusiveRBTreeNode* next(); +}; + +template +class ZIntrusiveRBTree { +public: + class FindCursor { + friend class ZIntrusiveRBTree; + + private: + ZIntrusiveRBTreeNode** _insert_location; + ZIntrusiveRBTreeNode* _parent; + bool _left_most; + bool _right_most; + DEBUG_ONLY(uintptr_t _sequence_number;) + + FindCursor(ZIntrusiveRBTreeNode** insert_location, ZIntrusiveRBTreeNode* parent, bool left_most, bool right_most DEBUG_ONLY(COMMA uintptr_t sequence_number)); + FindCursor(); + +#ifdef ASSERT + bool is_valid(uintptr_t sequence_number) const; +#endif + + public: + FindCursor(const FindCursor&) = default; + FindCursor& operator=(const FindCursor&) = default; + + bool is_valid() const; + bool found() const; + ZIntrusiveRBTreeNode* node() const; + bool is_left_most() const; + bool is_right_most() const; + ZIntrusiveRBTreeNode* parent() 
const; + ZIntrusiveRBTreeNode** insert_location() const; + }; + +private: + ZIntrusiveRBTreeNode* _root_node; + ZIntrusiveRBTreeNode* _left_most; + ZIntrusiveRBTreeNode* _right_most; + DEBUG_ONLY(uintptr_t _sequence_number;) + + NONCOPYABLE(ZIntrusiveRBTree); + +#ifdef ASSERT + template + bool verify_node(ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode* left_child, ZIntrusiveRBTreeNode* right_child); + template + bool verify_node(ZIntrusiveRBTreeNode* parent); + template + bool verify_node(ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode* left_child); + struct any_t {}; + template + bool verify_node(ZIntrusiveRBTreeNode* parent, any_t, ZIntrusiveRBTreeNode* right_child); +#endif // ASSERT + + ZIntrusiveRBTreeNode* const* root_node_addr() const; + + void update_child_or_root(ZIntrusiveRBTreeNode* old_node, ZIntrusiveRBTreeNode* new_node, ZIntrusiveRBTreeNode* parent); + void rotate_and_update_child_or_root(ZIntrusiveRBTreeNode* old_node, ZIntrusiveRBTreeNode* new_node, ZIntrusiveRBTreeNode::Color color); + + template + void rebalance_insert_with_sibling(ZIntrusiveRBTreeNode* node, ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode* grand_parent); + template + bool rebalance_insert_with_parent_sibling(ZIntrusiveRBTreeNode** node_addr, ZIntrusiveRBTreeNode** parent_addr, ZIntrusiveRBTreeNode* grand_parent); + void rebalance_insert(ZIntrusiveRBTreeNode* new_node); + + template + bool rebalance_remove_with_sibling(ZIntrusiveRBTreeNode** node_addr, ZIntrusiveRBTreeNode** parent_addr); + void rebalance_remove(ZIntrusiveRBTreeNode* rebalance_from); + + FindCursor make_cursor(ZIntrusiveRBTreeNode* const* insert_location, ZIntrusiveRBTreeNode* parent, bool left_most, bool right_most) const; + template + FindCursor find_next(const FindCursor& cursor) const; + +public: + ZIntrusiveRBTree(); + + ZIntrusiveRBTreeNode* first() const; + ZIntrusiveRBTreeNode* last() const; + + FindCursor root_cursor() const; + FindCursor get_cursor(const ZIntrusiveRBTreeNode* node) const; + FindCursor prev_cursor(const ZIntrusiveRBTreeNode* node) const; + FindCursor next_cursor(const ZIntrusiveRBTreeNode* node) const; + FindCursor prev(const FindCursor& cursor) const; + FindCursor next(const FindCursor& cursor) const; + FindCursor find(const Key& key) const; + + void insert(ZIntrusiveRBTreeNode* new_node, const FindCursor& find_cursor); + void replace(ZIntrusiveRBTreeNode* new_node, const FindCursor& find_cursor); + void remove(const FindCursor& find_cursor); + + void verify_tree(); + +public: + template + class IteratorImplementation; + + using Iterator = IteratorImplementation; + using ConstIterator = IteratorImplementation; + using ReverseIterator = IteratorImplementation; + using ConstReverseIterator = IteratorImplementation; + + // remove and replace invalidate the iterators + // however the iterators provide a remove and replace + // function which does not invalidate that iterator nor + // any end iterator + Iterator begin(); + Iterator end(); + ConstIterator begin() const; + ConstIterator end() const; + ConstIterator cbegin() const; + ConstIterator cend() const; + ReverseIterator rbegin(); + ReverseIterator rend(); + ConstReverseIterator rbegin() const; + ConstReverseIterator rend() const; + ConstReverseIterator crbegin() const; + ConstReverseIterator crend() const; +}; + +template +template +class ZIntrusiveRBTree::IteratorImplementation { + friend IteratorImplementation; + +public: + using difference_type = std::ptrdiff_t; + using value_type = const ZIntrusiveRBTreeNode; + using pointer = value_type*; + 
using reference = value_type&; + +private: + ZIntrusiveRBTree* _tree; + const ZIntrusiveRBTreeNode* _node; + bool _removed; + + bool at_end() const; + +public: + IteratorImplementation(ZIntrusiveRBTree& tree, pointer node); + IteratorImplementation(const IteratorImplementation&) = default; + template + IteratorImplementation(const IteratorImplementation& other); + + reference operator*() const; + pointer operator->(); + IteratorImplementation& operator--(); + IteratorImplementation operator--(int); + IteratorImplementation& operator++(); + IteratorImplementation operator++(int); + + template + void replace(ZIntrusiveRBTreeNode * new_node); + template + void remove(); + + // Note: friend operator overloads defined inside class declaration because of problems with ADL + friend bool operator==(const IteratorImplementation& a, const IteratorImplementation& b) { + precond(a._tree == b._tree); + return a._node == b._node; + } + friend bool operator!=(const IteratorImplementation& a, const IteratorImplementation& b) { + precond(a._tree == b._tree); + return a._node != b._node; + } +}; + +#endif // SHARE_GC_Z_ZINTRUSIVERBTREE_HPP diff --git a/src/hotspot/share/gc/z/zIntrusiveRBTree.inline.hpp b/src/hotspot/share/gc/z/zIntrusiveRBTree.inline.hpp new file mode 100644 index 00000000000..b62db5166dd --- /dev/null +++ b/src/hotspot/share/gc/z/zIntrusiveRBTree.inline.hpp @@ -0,0 +1,1351 @@ +/* + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZINTRUSIVERBTREE_INLINE_HPP +#define SHARE_GC_Z_ZINTRUSIVERBTREE_INLINE_HPP + +#include "gc/z/zIntrusiveRBTree.hpp" + +#include "metaprogramming/enableIf.hpp" +#include "utilities/debug.hpp" + +static constexpr ZIntrusiveRBTreeDirection other(const ZIntrusiveRBTreeDirection& direction) { + return direction == ZIntrusiveRBTreeDirection::LEFT ? 
ZIntrusiveRBTreeDirection::RIGHT : ZIntrusiveRBTreeDirection::LEFT; +} + +inline ZIntrusiveRBTreeNode::ColoredNodePtr::ColoredNodePtr(ZIntrusiveRBTreeNode* node, Color color) + : _value(reinterpret_cast(node) | color) {} + +inline constexpr ZIntrusiveRBTreeNode::Color ZIntrusiveRBTreeNode::ColoredNodePtr::color() const { + return static_cast(_value & COLOR_MASK); +} + +inline constexpr bool ZIntrusiveRBTreeNode::ColoredNodePtr::is_black() const { + return color() == BLACK; +} + +inline constexpr bool ZIntrusiveRBTreeNode::ColoredNodePtr::is_red() const { + return color() == RED; +} + +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::ColoredNodePtr::node() const { + return reinterpret_cast(_value & NODE_MASK); +} + +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::ColoredNodePtr::red_node() const { + precond(is_red()); + return reinterpret_cast(_value); +} +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::ColoredNodePtr::black_node() const { + precond(is_black()); + return reinterpret_cast(_value ^ BLACK); +} + +template +inline const ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::find_next_node() const { + constexpr ZIntrusiveRBTreeDirection OTHER_DIRECTION = other(DIRECTION); + const ZIntrusiveRBTreeNode* node = this; + + // Down the tree + if (node->has_child()) { + node = node->child(); + while (node->has_child()) { + node = node->child(); + } + return node; + } + + // Up the tree + const ZIntrusiveRBTreeNode* parent = node->parent(); + while (parent != nullptr && node == parent->child()) { + node = parent; + parent = node->parent(); + } + return parent; +} + +template +inline const ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::child() const { + if (DIRECTION == ZIntrusiveRBTreeDirection::LEFT) { + return _left; + } + assert(DIRECTION == ZIntrusiveRBTreeDirection::RIGHT, "must be"); + return _right; +} + +template +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::child() { + return const_cast(const_cast(this)->template child()); +} + +template +inline ZIntrusiveRBTreeNode* const* ZIntrusiveRBTreeNode::child_addr() const { + if (DIRECTION == ZIntrusiveRBTreeDirection::LEFT) { + return &_left; + } + assert(DIRECTION == ZIntrusiveRBTreeDirection::RIGHT, "must be"); + return &_right; +} + +template +inline bool ZIntrusiveRBTreeNode::has_child() const { + if (DIRECTION == ZIntrusiveRBTreeDirection::LEFT) { + return _left != nullptr; + } + assert(DIRECTION == ZIntrusiveRBTreeDirection::RIGHT, "must be"); + return _right != nullptr; +} + +template +inline void ZIntrusiveRBTreeNode::update_child(ZIntrusiveRBTreeNode* new_child) { + if (DIRECTION == ZIntrusiveRBTreeDirection::LEFT) { + _left = new_child; + return; + } + assert(DIRECTION == ZIntrusiveRBTreeDirection::RIGHT, "must be"); + _right = new_child; +} + +inline void ZIntrusiveRBTreeNode::link_node(ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode** insert_location) { + // Newly linked node is always red + _colored_parent = ColoredNodePtr(parent, RED); + _left = nullptr; + _right = nullptr; + + // Link into location + *insert_location = this; +} + +inline void ZIntrusiveRBTreeNode::copy_parent_and_color(ZIntrusiveRBTreeNode* other) { + _colored_parent = other->_colored_parent; +} + +inline void ZIntrusiveRBTreeNode::update_parent_and_color(ZIntrusiveRBTreeNode* parent, Color color) { + _colored_parent = ColoredNodePtr(parent, color); +} + +inline void ZIntrusiveRBTreeNode::update_parent(ZIntrusiveRBTreeNode* parent) { + _colored_parent = ColoredNodePtr(parent, color()); +} + +inline void ZIntrusiveRBTreeNode::update_color(Color color) 
{ + _colored_parent = ColoredNodePtr(parent(), color); +} + +inline void ZIntrusiveRBTreeNode::update_left_child(ZIntrusiveRBTreeNode* new_child) { + update_child(new_child); +} + +inline void ZIntrusiveRBTreeNode::update_right_child(ZIntrusiveRBTreeNode* new_child) { + update_child(new_child); +} + +inline const ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::parent() const { + return _colored_parent.node(); +} + +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::parent() { + return const_cast(const_cast(this)->parent()); +} + +inline const ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::red_parent() const { + return _colored_parent.red_node(); +} +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::red_parent() { + return const_cast(const_cast(this)->red_parent()); +} + +inline const ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::black_parent() const { + return _colored_parent.black_node(); +} +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::black_parent() { + return const_cast(const_cast(this)->black_parent()); +} + +inline bool ZIntrusiveRBTreeNode::has_parent() const { + return _colored_parent.node() != nullptr; +} + +inline ZIntrusiveRBTreeNode::Color ZIntrusiveRBTreeNode::color() const { + return _colored_parent.color(); +} + +inline bool ZIntrusiveRBTreeNode::is_black() const { + return _colored_parent.is_black(); +} + +inline bool ZIntrusiveRBTreeNode::is_red() const { + return _colored_parent.is_red(); +} + +inline bool ZIntrusiveRBTreeNode::is_black(ZIntrusiveRBTreeNode* node) { + return node == nullptr || node->is_black(); +} + +inline ZIntrusiveRBTreeNode* const* ZIntrusiveRBTreeNode::left_child_addr() const { + return child_addr(); +} + +inline ZIntrusiveRBTreeNode* const* ZIntrusiveRBTreeNode::right_child_addr() const { + return child_addr(); +} + +inline const ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::left_child() const { + return child(); +} + +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::left_child() { + return const_cast(const_cast(this)->left_child()); +} + +inline const ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::right_child() const { + return child(); +} + +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::right_child() { + return const_cast(const_cast(this)->right_child()); +} + +inline bool ZIntrusiveRBTreeNode::has_left_child() const { + return has_child(); +} + +inline bool ZIntrusiveRBTreeNode::has_right_child() const { + return has_child(); +} + +inline ZIntrusiveRBTreeNode::ZIntrusiveRBTreeNode() {} + +inline const ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::prev() const { + return find_next_node(); +} + +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::prev() { + return const_cast(const_cast(this)->prev()); +} + +inline const ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::next() const { + return find_next_node(); +} + +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTreeNode::next() { + return const_cast(const_cast(this)->next()); +} + +#ifdef ASSERT +template +template +inline bool ZIntrusiveRBTree::verify_node(ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode* left_child, ZIntrusiveRBTreeNode* right_child) { + if (swap_left_right) { + ::swap(left_child, right_child); + } + assert(parent->left_child() == left_child, swap_left_right ? "Bad child Swapped" : "Bad child"); + assert(parent->right_child() == right_child, swap_left_right ? "Bad child Swapped" : "Bad child"); + if (left_child != nullptr) { + assert(left_child->parent() == parent, swap_left_right ? 
"Bad parent Swapped" : "Bad parent"); + } + if (right_child != nullptr) { + assert(right_child->parent() == parent, swap_left_right ? "Bad parent Swapped" : "Bad parent"); + } + return true; +} + +template +template +inline bool ZIntrusiveRBTree::verify_node(ZIntrusiveRBTreeNode* parent) { + if (parent == nullptr) { + return true; + } + if (swap_left_right) { + return verify_node(parent, parent->right_child()); + } + return verify_node(parent, parent->left_child()); +} + +template +template +inline bool ZIntrusiveRBTree::verify_node(ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode* left_child) { + if (swap_left_right) { + return verify_node(parent, left_child, parent->left_child()); + } + return verify_node(parent, left_child, parent->right_child()); +} + +template +template +inline bool ZIntrusiveRBTree::verify_node(ZIntrusiveRBTreeNode* parent, any_t, ZIntrusiveRBTreeNode* right_child) { + if (swap_left_right) { + return verify_node(parent, parent->right_child(), right_child); + } + return verify_node(parent, parent->left_child(), right_child); +} +#endif // ASSERT + +template +inline ZIntrusiveRBTreeNode* const* ZIntrusiveRBTree::root_node_addr() const { + return &_root_node; +} + +template +void ZIntrusiveRBTree::update_child_or_root(ZIntrusiveRBTreeNode* old_node, ZIntrusiveRBTreeNode* new_node, ZIntrusiveRBTreeNode* parent) { + if (parent == nullptr) { + // Update root + _root_node = new_node; + return; + } + if (old_node == parent->left_child()) { + parent->update_left_child(new_node); + return; + } + assert(old_node == parent->right_child(), "must be"); + parent->update_right_child(new_node); +} + +template +inline void ZIntrusiveRBTree::rotate_and_update_child_or_root(ZIntrusiveRBTreeNode* old_node, ZIntrusiveRBTreeNode* new_node, ZIntrusiveRBTreeNode::Color color) { + ZIntrusiveRBTreeNode* const parent = old_node->parent(); + new_node->copy_parent_and_color(old_node); + old_node->update_parent_and_color(new_node, color); + update_child_or_root(old_node, new_node, parent); +} + +template +template +inline void ZIntrusiveRBTree::rebalance_insert_with_sibling(ZIntrusiveRBTreeNode* node, ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode* grand_parent) { + DEBUG_ONLY(const bool swap_left_right = PARENT_SIBLING_DIRECTION == ZIntrusiveRBTreeDirection::LEFT;) + constexpr ZIntrusiveRBTreeDirection OTHER_DIRECTION = other(PARENT_SIBLING_DIRECTION); + ZIntrusiveRBTreeNode* sibling = parent->template child(); + DEBUG_ONLY(bool rotated_parent = false;) + if (node == sibling) { + DEBUG_ONLY(rotated_parent = true;) + // Rotate up node through parent + ZIntrusiveRBTreeNode* child = node->template child(); + + //// PRE + // + // G G + // / \ + // p or p + // \ / + // n n + // / \ + // (c) (c) + // + //// + precond(grand_parent->is_black()); + precond(parent->is_red()); + precond(node->is_red()); + precond(verify_node(grand_parent, parent)); + precond(verify_node(parent, any_t{}, node)); + precond(verify_node(node, child)); + precond(verify_node(child)); + + // Fix children + parent->template update_child(child); + node->template update_child(parent); + + // Fix parents and colors + if (child != nullptr) { + child->update_parent_and_color(parent, ZIntrusiveRBTreeNode::BLACK); + } + parent->update_parent_and_color(node, ZIntrusiveRBTreeNode::RED); + + //// POST + // + // G G + // / \ + // n or n + // / \ + // p p + // \ / + // (C) (C) + // + //// + postcond(grand_parent->is_black()); + postcond(parent->is_red()); + postcond(node->is_red()); + postcond(ZIntrusiveRBTreeNode::is_black(child)); + // 
The grand_parent is updated in the next rotation + // postcond(verify_node(grand_parent, node)); + postcond(verify_node(node, parent)); + postcond(verify_node(parent, any_t{}, child)); + postcond(verify_node(child)); + + parent = node; + sibling = parent->template child(); + DEBUG_ONLY(node = parent->template child();) + } + + //// PRE + // + // G G + // / \ + // p or p + // / \ / \ + // n (s) (s) n + // + //// + precond(grand_parent->is_black()); + precond(parent->is_red()); + precond(node->is_red()); + precond(rotated_parent || verify_node(grand_parent, parent)); + precond(verify_node(parent, node, sibling)); + precond(verify_node(node)); + precond(verify_node(sibling)); + + // Rotate up parent through grand-parent + + // Fix children + grand_parent->template update_child(sibling); + parent->template update_child(grand_parent); + + // Fix parents and colors + if (sibling != nullptr) { + sibling->update_parent_and_color(grand_parent, ZIntrusiveRBTreeNode::BLACK); + } + rotate_and_update_child_or_root(grand_parent, parent, ZIntrusiveRBTreeNode::RED); + + //// POST + // + // P P + // / \ / \ + // n g or g n + // / \ + // (S) (S) + // + //// + postcond(parent->is_black()); + postcond(grand_parent->is_red()); + postcond(node->is_red()); + postcond(ZIntrusiveRBTreeNode::is_black(sibling)); + postcond(verify_node(parent, node, grand_parent)); + postcond(verify_node(node)); + postcond(verify_node(grand_parent, sibling)); + postcond(verify_node(sibling)); +} + +template +template +inline bool ZIntrusiveRBTree::rebalance_insert_with_parent_sibling(ZIntrusiveRBTreeNode** node_addr, ZIntrusiveRBTreeNode** parent_addr, ZIntrusiveRBTreeNode* grand_parent) { + DEBUG_ONLY(const bool swap_left_right = PARENT_SIBLING_DIRECTION == ZIntrusiveRBTreeDirection::LEFT;) + constexpr ZIntrusiveRBTreeDirection OTHER_DIRECTION = other(PARENT_SIBLING_DIRECTION); + ZIntrusiveRBTreeNode* const parent_sibling = grand_parent->template child(); + ZIntrusiveRBTreeNode*& node = *node_addr; + ZIntrusiveRBTreeNode*& parent = *parent_addr; + if (parent_sibling != nullptr && parent_sibling->is_red()) { + //// PRE + // + // G G + // / \ / \ + // p u or u p + // / \ / \ + // n | n n | n + // + //// + precond(grand_parent->is_black()); + precond(parent_sibling->is_red()); + precond(parent->is_red()); + precond(node->is_red()); + precond(verify_node(grand_parent, parent, parent_sibling)); + precond(parent->left_child() == node || parent->right_child() == node); + precond(verify_node(parent)); + precond(verify_node(parent_sibling)); + precond(verify_node(node)); + + // Flip colors of parent, parent sibling and grand parent + parent_sibling->update_parent_and_color(grand_parent, ZIntrusiveRBTreeNode::BLACK); + parent->update_parent_and_color(grand_parent, ZIntrusiveRBTreeNode::BLACK); + ZIntrusiveRBTreeNode* grand_grand_parent = grand_parent->black_parent(); + grand_parent->update_parent_and_color(grand_grand_parent, ZIntrusiveRBTreeNode::RED); + + //// POST + // + // g g + // / \ / \ + // P U or U P + // / \ / \ + // n | n n | n + // + //// + postcond(grand_parent->is_red()); + postcond(parent_sibling->is_black()); + postcond(parent->is_black()); + postcond(node->is_red()); + postcond(verify_node(grand_parent, parent, parent_sibling)); + postcond(parent->left_child() == node || parent->right_child() == node); + postcond(verify_node(parent)); + postcond(verify_node(parent_sibling)); + postcond(verify_node(node)); + + // Recurse up the tree + node = grand_parent; + parent = grand_grand_parent; + return false; // Not finished + } + + 
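// At this point the parent's sibling is either a leaf (nullptr) or BLACK, so
// the remaining RED-RED violation between parent and node can be repaired
// locally: rebalance_insert_with_sibling() performs at most two rotations
// around the grand-parent, after which the tree is balanced and the insert
// rebalancing loop terminates.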
rebalance_insert_with_sibling(node, parent, grand_parent); + return true; // Finished +} + +template +inline void ZIntrusiveRBTree::rebalance_insert(ZIntrusiveRBTreeNode* new_node) { + ZIntrusiveRBTreeNode* node = new_node; + ZIntrusiveRBTreeNode* parent = node->red_parent(); + for (;;) { + precond(node->is_red()); + if (parent == nullptr) { + // Recursive (or root) case + node->update_parent_and_color(parent, ZIntrusiveRBTreeNode::BLACK); + break; + } + if (parent->is_black()) { + // Tree is balanced + break; + } + ZIntrusiveRBTreeNode* grand_parent = parent->red_parent(); + if (parent == grand_parent->left_child() ? rebalance_insert_with_parent_sibling(&node, &parent, grand_parent) + : rebalance_insert_with_parent_sibling(&node, &parent, grand_parent)) { + break; + } + } +} + +template +template +inline bool ZIntrusiveRBTree::rebalance_remove_with_sibling(ZIntrusiveRBTreeNode** node_addr, ZIntrusiveRBTreeNode** parent_addr) { + DEBUG_ONLY(const bool swap_left_right = SIBLING_DIRECTION == ZIntrusiveRBTreeDirection::LEFT;) + constexpr ZIntrusiveRBTreeDirection OTHER_DIRECTION = other(SIBLING_DIRECTION); + ZIntrusiveRBTreeNode*& node = *node_addr; + ZIntrusiveRBTreeNode*& parent = *parent_addr; + ZIntrusiveRBTreeNode* sibling = parent->template child(); + if (sibling->is_red()) { + ZIntrusiveRBTreeNode* sibling_child = sibling->template child(); + //// PRE + // + // P P + // / \ / \ + // N s or s N + // / \ + // SC SC + // + //// + precond(parent->is_black()); + precond(ZIntrusiveRBTreeNode::is_black(node)); + precond(sibling->is_red()); + precond(ZIntrusiveRBTreeNode::is_black(sibling_child)); + precond(verify_node(parent, node, sibling)); + precond(verify_node(node)); + precond(verify_node(sibling, sibling_child)); + precond(verify_node(sibling_child)); + + // Rotate sibling up through parent + + // Fix children + parent->template update_child(sibling_child); + sibling->template update_child(parent); + + // Fix parents and colors + sibling_child->update_parent_and_color(parent, ZIntrusiveRBTreeNode::BLACK); + rotate_and_update_child_or_root(parent, sibling, ZIntrusiveRBTreeNode::RED); + + //// POST + // + // S S + // / \ + // p p + // / \ / \ + // N SC SC N + // + //// + postcond(sibling->is_black()); + postcond(parent->is_red()); + postcond(ZIntrusiveRBTreeNode::is_black(node)); + postcond(ZIntrusiveRBTreeNode::is_black(sibling_child)); + postcond(verify_node(sibling, parent)); + postcond(verify_node(parent, node, sibling_child)); + postcond(verify_node(node)); + postcond(verify_node(sibling_child)); + + // node has a new sibling + sibling = sibling_child; + } + + ZIntrusiveRBTreeNode* sibling_child = sibling->template child(); + DEBUG_ONLY(bool rotated_parent = false;) + if (ZIntrusiveRBTreeNode::is_black(sibling_child)) { + DEBUG_ONLY(rotated_parent = true;) + ZIntrusiveRBTreeNode* sibling_other_child = sibling->template child(); + if (ZIntrusiveRBTreeNode::is_black(sibling_other_child)) { + //// PRE + // + // (p) (p) + // / \ / \ + // N S or S N + // + //// + precond(ZIntrusiveRBTreeNode::is_black(node)); + precond(sibling->is_black()); + precond(verify_node(parent, node, sibling)); + + // Flip sibling color to RED + sibling->update_parent_and_color(parent, ZIntrusiveRBTreeNode::RED); + + //// POST + // + // (p) (p) + // / \ / \ + // N s or s N + // + //// + postcond(ZIntrusiveRBTreeNode::is_black(node)); + postcond(sibling->is_red()); + postcond(verify_node(parent, node, sibling)); + + if (parent->is_black()) { + // We did not introduce a RED-RED edge, if parent is + // the root we 
are done, else recurse up the tree + if (parent->parent() != nullptr) { + node = parent; + parent = node->parent(); + return false; + } + return true; + } + // Change RED-RED edge to BLACK-RED edge + parent->update_color(ZIntrusiveRBTreeNode::BLACK); + return true; + } + + ZIntrusiveRBTreeNode* sibling_grand_child = sibling_other_child->template child(); + //// PRE + // + // (p) (p) + // / \ / \ + // N S S N + // / or \ + // soc soc + // \ / + // (sgc) (sgc) + // + //// + precond(ZIntrusiveRBTreeNode::is_black(node)); + precond(sibling->is_black()); + precond(sibling_other_child->is_red()); + precond(verify_node(parent, node, sibling)); + precond(verify_node(node)); + precond(verify_node(sibling, sibling_other_child, sibling_child)); + precond(verify_node(sibling_other_child, any_t{}, sibling_grand_child)); + precond(verify_node(sibling_grand_child)); + + // Rotate sibling other child through the sibling + + // Fix children + sibling->template update_child(sibling_grand_child); + sibling_other_child->template update_child(sibling); + parent->template update_child(sibling_other_child); + + // Fix parents and colors + if (sibling_grand_child != nullptr) { + sibling_grand_child->update_parent_and_color(sibling, ZIntrusiveRBTreeNode::BLACK); + } + // Defer updating the sibling and sibling other child parents until + // after we rotate below. This will also fix the any potential RED-RED + // edge between parent and sibling_other_child + + //// POST + // + // (p) (p) + // / \ / \ + // N soc or soc N + // / \ / \ + // SGC S S SGC + // + //// + postcond(ZIntrusiveRBTreeNode::is_black(node)); + postcond(sibling->is_black()); + postcond(sibling_other_child->is_red()); + postcond(ZIntrusiveRBTreeNode::is_black(sibling_grand_child)); + // Deferred + // postcond(verify_node(parent, node, sibling_other_child)); + postcond(verify_node(node)); + // postcond(verify_node(sibling_other_child, sibling_grand_child, sibling)); + postcond(verify_node(sibling_grand_child)); + postcond(verify_node(sibling)); + + // node has a new sibling + sibling_child = sibling; + sibling = sibling_other_child; + } + + ZIntrusiveRBTreeNode* sibling_other_child = sibling->template child(); + //// PRE + // + // (p) (p) + // / \ / \ + // N S or S N + // / \ / \ + // (soc)(sc) (sc)(soc) + // + //// + DEBUG_ONLY(ZIntrusiveRBTreeNode::Color parent_color = parent->color();) + precond(ZIntrusiveRBTreeNode::is_black(node)); + precond(rotated_parent || sibling->is_black()); + DEBUG_ONLY(bool sibling_other_child_is_black = ZIntrusiveRBTreeNode::is_black(sibling_other_child);) + precond(rotated_parent || verify_node(parent, node, sibling)); + precond(verify_node(node)); + precond(rotated_parent || verify_node(sibling, sibling_other_child, sibling_child)); + postcond(verify_node(sibling_other_child)); + postcond(verify_node(sibling_child)); + + // Rotate sibling through parent and fix colors + + // Fix children + parent->template update_child(sibling_other_child); + sibling->template update_child(parent); + + // Fix parents and colors + sibling_child->update_parent_and_color(sibling, ZIntrusiveRBTreeNode::BLACK); + if (sibling_other_child != nullptr) { + sibling_other_child->update_parent(parent); + } + rotate_and_update_child_or_root(parent, sibling, ZIntrusiveRBTreeNode::BLACK); + + //// POST + // + // (s) (s) + // / \ / \ + // P SC or SC P + // / \ / \ + // N (soc) (soc) N + // + //// + postcond(sibling->color() == parent_color); + postcond(parent->is_black()); + postcond(sibling_child->is_black()); + 
postcond(ZIntrusiveRBTreeNode::is_black(node)); + postcond(sibling_other_child_is_black == ZIntrusiveRBTreeNode::is_black(sibling_other_child)); + postcond(verify_node(sibling, parent, sibling_child)); + postcond(verify_node(parent, node, sibling_other_child)); + postcond(verify_node(sibling_child)); + postcond(verify_node(node)); + postcond(verify_node(sibling_other_child)); + return true; +} + +template +inline void ZIntrusiveRBTree::rebalance_remove(ZIntrusiveRBTreeNode* rebalance_from) { + ZIntrusiveRBTreeNode* node = nullptr; + ZIntrusiveRBTreeNode* parent = rebalance_from; + + for (;;) { + precond(ZIntrusiveRBTreeNode::is_black(node)); + precond(parent != nullptr); + if (node == parent->left_child() ? rebalance_remove_with_sibling(&node, &parent) + : rebalance_remove_with_sibling(&node, &parent)) { + break; + } + } +} + +template +inline ZIntrusiveRBTree::FindCursor::FindCursor(ZIntrusiveRBTreeNode** insert_location, ZIntrusiveRBTreeNode* parent, bool left_most, bool right_most DEBUG_ONLY(COMMA uintptr_t sequence_number)) + : _insert_location(insert_location), + _parent(parent), + _left_most(left_most), + _right_most(right_most) + DEBUG_ONLY(COMMA _sequence_number(sequence_number)) {} + +template +inline ZIntrusiveRBTree::FindCursor::FindCursor() + : _insert_location(nullptr), + _parent(nullptr), + _left_most(), + _right_most() + DEBUG_ONLY(COMMA _sequence_number()) {} + +#ifdef ASSERT +template +inline bool ZIntrusiveRBTree::FindCursor::is_valid(uintptr_t sequence_number) const { + return is_valid() && _sequence_number == sequence_number; +} +#endif // ASSERT + +template +inline bool ZIntrusiveRBTree::FindCursor::is_valid() const { + return insert_location() != nullptr; +} + +template +inline bool ZIntrusiveRBTree::FindCursor::found() const { + return node() != nullptr; +} + +template +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTree::FindCursor::node() const { + precond(is_valid()); + return *_insert_location == nullptr ? 
nullptr : *_insert_location; +} + +template +inline bool ZIntrusiveRBTree::FindCursor::is_left_most() const { + precond(is_valid()); + return _left_most; +} + +template +inline bool ZIntrusiveRBTree::FindCursor::is_right_most() const { + precond(is_valid()); + return _right_most; +} + +template +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTree::FindCursor::parent() const { + precond(is_valid()); + return _parent; +} + +template +inline ZIntrusiveRBTreeNode** ZIntrusiveRBTree::FindCursor::insert_location() const { + return _insert_location; +} + +template +inline typename ZIntrusiveRBTree::FindCursor ZIntrusiveRBTree::make_cursor(ZIntrusiveRBTreeNode* const* insert_location, ZIntrusiveRBTreeNode* parent, bool left_most, bool right_most) const { + return FindCursor(const_cast(insert_location), parent, left_most, right_most DEBUG_ONLY(COMMA _sequence_number)); +} + +template +template +inline typename ZIntrusiveRBTree::FindCursor ZIntrusiveRBTree::find_next(const FindCursor& cursor) const { + constexpr ZIntrusiveRBTreeDirection OTHER_DIRECTION = other(DIRECTION); + if (cursor.found()) { + ZIntrusiveRBTreeNode* const node = cursor.node(); + const ZIntrusiveRBTreeNode* const next_node = node->template find_next_node(); + if (next_node != nullptr) { + return get_cursor(next_node); + } + const bool is_right_most = DIRECTION == ZIntrusiveRBTreeDirection::RIGHT && node == _right_most; + const bool is_left_most = DIRECTION == ZIntrusiveRBTreeDirection::LEFT && node == _left_most; + return make_cursor(node->template child_addr(), node, is_left_most, is_right_most); + } + ZIntrusiveRBTreeNode* const parent = cursor.parent(); + if (parent == nullptr) { + assert(&_root_node == cursor.insert_location(), "must be"); + // tree is empty + return FindCursor(); + } + if (parent->template child_addr() == cursor.insert_location()) { + // Cursor at leaf in other direction, parent is next in direction + return get_cursor(parent); + } + assert(parent->template child_addr() == cursor.insert_location(), "must be"); + // Cursor at leaf in direction, parent->next in direction is also cursors next in direction + return get_cursor(parent->template find_next_node()); +} + +template +inline ZIntrusiveRBTree::ZIntrusiveRBTree() + : _root_node(nullptr), + _left_most(nullptr), + _right_most(nullptr) + DEBUG_ONLY(COMMA _sequence_number()) {} + +template +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTree::first() const { + return _left_most; +} + +template +inline ZIntrusiveRBTreeNode* ZIntrusiveRBTree::last() const { + return _right_most; +} + +template +inline typename ZIntrusiveRBTree::FindCursor ZIntrusiveRBTree::root_cursor() const { + const bool is_left_most = _root_node == _left_most; + const bool is_right_most = _root_node == _right_most; + return make_cursor(&_root_node, nullptr, is_left_most, is_right_most); +} + +template +inline typename ZIntrusiveRBTree::FindCursor ZIntrusiveRBTree::get_cursor(const ZIntrusiveRBTreeNode* node) const { + if (node == nullptr) { + // Return a invalid cursor + return FindCursor(); + } + const bool is_left_most = node == _left_most; + const bool is_right_most = node == _right_most; + if (node->has_parent()) { + const ZIntrusiveRBTreeNode* const parent = node->parent(); + if (parent->left_child() == node) { + return make_cursor(parent->left_child_addr(), nullptr, is_left_most, is_right_most); + } + assert(parent->right_child() == node, "must be"); + return make_cursor(parent->right_child_addr(), nullptr, is_left_most, is_right_most); + } + // No parent, root node + return 
make_cursor(&_root_node, nullptr, is_left_most, is_right_most); +} + +template +inline typename ZIntrusiveRBTree::FindCursor ZIntrusiveRBTree::prev_cursor(const ZIntrusiveRBTreeNode* node) const { + return prev(get_cursor(node)); +} + +template +inline typename ZIntrusiveRBTree::FindCursor ZIntrusiveRBTree::next_cursor(const ZIntrusiveRBTreeNode* node) const { + return next(get_cursor(node)); +} + +template +inline typename ZIntrusiveRBTree::FindCursor ZIntrusiveRBTree::prev(const FindCursor& cursor) const { + return find_next(cursor); +} + +template +inline typename ZIntrusiveRBTree::FindCursor ZIntrusiveRBTree::next(const FindCursor& cursor) const { + return find_next(cursor); +} + +template +inline typename ZIntrusiveRBTree::FindCursor ZIntrusiveRBTree::find(const Key& key) const { + Compare compare_fn; + ZIntrusiveRBTreeNode* const* insert_location = root_node_addr(); + ZIntrusiveRBTreeNode* parent = nullptr; + bool left_most = true; + bool right_most = true; + while (*insert_location != nullptr) { + int result = compare_fn(key, *insert_location); + if (result == 0) { + assert(*insert_location != _left_most || left_most, "must be"); + assert(*insert_location != _right_most || right_most, "must be"); + return make_cursor(insert_location, parent, *insert_location == _left_most, *insert_location == _right_most); + } + parent = *insert_location; + if (result < 0) { + insert_location = parent->left_child_addr(); + // We took one step to the left, cannot be right_most. + right_most = false; + } else { + insert_location = parent->right_child_addr(); + // We took one step to the right, cannot be left_most. + left_most = false; + } + } + return make_cursor(insert_location, parent, left_most, right_most); +} + +template +inline void ZIntrusiveRBTree::insert(ZIntrusiveRBTreeNode* new_node, const FindCursor& find_cursor) { + precond(find_cursor.is_valid(_sequence_number)); + precond(!find_cursor.found()); + DEBUG_ONLY(_sequence_number++;) + + // Link in the new node + new_node->link_node(find_cursor.parent(), find_cursor.insert_location()); + + // Keep track of first and last node(s) + if (find_cursor.is_left_most()) { + _left_most = new_node; + } + if (find_cursor.is_right_most()) { + _right_most = new_node; + } + + rebalance_insert(new_node); +} + +template +inline void ZIntrusiveRBTree::replace(ZIntrusiveRBTreeNode* new_node, const FindCursor& find_cursor) { + precond(find_cursor.is_valid(_sequence_number)); + precond(find_cursor.found()); + DEBUG_ONLY(_sequence_number++;) + + const ZIntrusiveRBTreeNode* const node = find_cursor.node(); + + if (new_node != node) { + // Node has changed + + // Copy the node to new location + *new_node = *node; + + // Update insert location + *find_cursor.insert_location() = new_node; + + // Update children's parent + if (new_node->has_left_child()) { + new_node->left_child()->update_parent(new_node); + } + if (new_node->has_right_child()) { + new_node->right_child()->update_parent(new_node); + } + + // Keep track of first and last node(s) + if (find_cursor.is_left_most()) { + assert(_left_most == node, "must be"); + _left_most = new_node; + } + if (find_cursor.is_right_most()) { + assert(_right_most == node, "must be"); + _right_most = new_node; + } + } +} + +template +inline void ZIntrusiveRBTree::remove(const FindCursor& find_cursor) { + precond(find_cursor.is_valid(_sequence_number)); + precond(find_cursor.found()); + DEBUG_ONLY(_sequence_number++;) + + ZIntrusiveRBTreeNode* const node = find_cursor.node(); + ZIntrusiveRBTreeNode* const parent = 
node->parent(); + + // Keep track of first and last node(s) + if (find_cursor.is_left_most()) { + assert(_left_most == node, "must be"); + _left_most = _left_most->next(); + } + if (find_cursor.is_right_most()) { + assert(_right_most == node, "must be"); + _right_most = _right_most->prev(); + } + + ZIntrusiveRBTreeNode* rebalance_from = nullptr; + + if (!node->has_left_child() && !node->has_right_child()) { + // No children + + // Remove node + update_child_or_root(node, nullptr, parent); + if (node->is_black()) { + // We unbalanced the tree + rebalance_from = parent; + } + } else if (!node->has_left_child() || !node->has_right_child()) { + assert(node->has_right_child() || node->has_left_child(), "must be"); + // Only one child + ZIntrusiveRBTreeNode* child = node->has_left_child() ? node->left_child() : node->right_child(); + + // Let child take nodes places + update_child_or_root(node, child, parent); + + // And update parent and color + child->copy_parent_and_color(node); + } else { + assert(node->has_left_child() && node->has_right_child(), "must be"); + // Find next node and let it take the nodes place + // This asymmetry always swap next instead of prev, + // I wonder how this behaves w.r.t. our mapped cache + // strategy of mostly removing from the left side of + // the tree + + // This will never walk up the tree, hope the compiler sees this. + ZIntrusiveRBTreeNode* next_node = node->next(); + + ZIntrusiveRBTreeNode* next_node_parent = next_node->parent(); + ZIntrusiveRBTreeNode* next_node_child = next_node->right_child(); + if (next_node_parent != node) { + // Not the direct descendant, adopt node's child + ZIntrusiveRBTreeNode* node_child = node->right_child(); + next_node->update_right_child(node_child); + node_child->update_parent(next_node); + + // And let parent adopt their grand child + next_node_parent->update_left_child(next_node_child); + } else { + next_node_parent = next_node; + } + // Adopt node's other child + ZIntrusiveRBTreeNode* node_child = node->left_child(); + next_node->update_left_child(node_child); + node_child->update_parent(next_node); + + update_child_or_root(node, next_node, parent); + + // Update parent(s) and colors + if (next_node_child != nullptr) { + next_node_child->update_parent_and_color(next_node_parent, ZIntrusiveRBTreeNode::BLACK); + } else if (next_node->is_black()) { + rebalance_from = next_node_parent; + } + next_node->copy_parent_and_color(node); + } + + if (rebalance_from == nullptr) { + // Removal did not unbalance the tree + return; + } + + rebalance_remove(rebalance_from); +} + +template +inline void ZIntrusiveRBTree::verify_tree() { + // Properties: + // (a) Node's are either BLACK or RED + // (b) All nullptr children are counted as BLACK + // (c) Compare::operator(Node*, Node*) <=> 0 is transitive + // Invariants: + // (1) Root node is BLACK + // (2) All RED nodes only have BLACK children + // (3) Every simple path from the root to a leaf + // contains the same amount of BLACK nodes + // (4) A node's children must have that node as + // its parent + // (5) Each node N in the sub-tree formed from a + // node A's child must: + // if left child: Compare::operator(A, N) < 0 + // if right child: Compare::operator(A, N) > 0 + // + // Note: 1-4 may not hold during a call to insert + // and remove. 
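  // As a small worked example of (1)-(3) (illustrative only, the key values
  // are arbitrary): in the tree
  //
  //           8B
  //          /  \
  //        4R    12B
  //       /  \
  //     2B    6B
  //
  // the root is BLACK, the only RED node (4R) has two BLACK children, and
  // every simple path from 8B down to a nullptr leaf passes exactly three
  // BLACK nodes (counting the nullptr leaves as BLACK per (b)), so the
  // BLACK-node count is the same on all paths.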
+ + // Helpers + const auto is_leaf = [](ZIntrusiveRBTreeNode* node) { + return node == nullptr; + }; + const auto is_black = [&](ZIntrusiveRBTreeNode* node) { + return is_leaf(node) || node->is_black(); + }; + const auto is_red = [&](ZIntrusiveRBTreeNode* node) { + return !is_black(node); + }; + + // Verify (1) + ZIntrusiveRBTreeNode* const root_node = _root_node; + guarantee(is_black(root_node), "Invariant (1)"); + + // Verify (2) + const auto verify_2 = [&](ZIntrusiveRBTreeNode* node) { + guarantee(!is_red(node) || is_black(node->left_child()), "Invariant (2)"); + guarantee(!is_red(node) || is_black(node->right_child()), "Invariant (2)"); + }; + + // Verify (3) + size_t first_simple_path_black_nodes_traversed = 0; + const auto verify_3 = [&](ZIntrusiveRBTreeNode* node, size_t black_nodes_traversed) { + if (!is_leaf(node)) { return; } + if (first_simple_path_black_nodes_traversed == 0) { + first_simple_path_black_nodes_traversed = black_nodes_traversed; + } + guarantee(first_simple_path_black_nodes_traversed == black_nodes_traversed, "Invariant (3)"); + }; + + // Verify (4) + const auto verify_4 = [&](ZIntrusiveRBTreeNode* node) { + if (is_leaf(node)) { return; } + guarantee(!node->has_left_child() || node->left_child()->parent() == node, "Invariant (4)"); + guarantee(!node->has_right_child() || node->right_child()->parent() == node, "Invariant (4)"); + }; + guarantee(root_node == nullptr || root_node->parent() == nullptr, "Invariant (4)"); + + // Verify (5) + const auto verify_5 = [&](ZIntrusiveRBTreeNode* node) { + // Because of the transitive property of Compare (c) we simply check + // this that (5) hold for each parent child pair. + if (is_leaf(node)) { return; } + Compare compare_fn; + guarantee(!node->has_left_child() || compare_fn(node->left_child(), node) < 0, "Invariant (5)"); + guarantee(!node->has_right_child() || compare_fn(node->right_child(), node) > 0, "Invariant (5)"); + }; + + // Walk every simple path by recursively descending the tree from the root + const auto recursive_walk = [&](auto&& recurse, ZIntrusiveRBTreeNode* node, size_t black_nodes_traversed) { + if (is_black(node)) { black_nodes_traversed++; } + verify_2(node); + verify_3(node, black_nodes_traversed); + verify_4(node); + verify_5(node); + if (is_leaf(node)) { return; } + recurse(recurse, node->left_child(), black_nodes_traversed); + recurse(recurse, node->right_child(), black_nodes_traversed); + }; + recursive_walk(recursive_walk, root_node, 0); +} + +template +inline typename ZIntrusiveRBTree::Iterator ZIntrusiveRBTree::begin() { + return Iterator(*this, first()); +} + +template +inline typename ZIntrusiveRBTree::Iterator ZIntrusiveRBTree::end() { + return Iterator(*this, nullptr); +} + +template +inline typename ZIntrusiveRBTree::ConstIterator ZIntrusiveRBTree::begin() const { + return cbegin(); +} + +template +inline typename ZIntrusiveRBTree::ConstIterator ZIntrusiveRBTree::end() const { + return cend(); +} + +template +inline typename ZIntrusiveRBTree::ConstIterator ZIntrusiveRBTree::cbegin() const { + return const_cast*>(this)->begin(); +} + +template +inline typename ZIntrusiveRBTree::ConstIterator ZIntrusiveRBTree::cend() const { + return const_cast*>(this)->end(); +} + +template +inline typename ZIntrusiveRBTree::ReverseIterator ZIntrusiveRBTree::rbegin() { + return ReverseIterator(*this, last()); +} + +template +inline typename ZIntrusiveRBTree::ReverseIterator ZIntrusiveRBTree::rend() { + return ReverseIterator(*this, nullptr); +} + +template +inline typename 
ZIntrusiveRBTree::ConstReverseIterator ZIntrusiveRBTree::rbegin() const { + return crbegin(); +} + +template +inline typename ZIntrusiveRBTree::ConstReverseIterator ZIntrusiveRBTree::rend() const { + return crend(); +} + +template +inline typename ZIntrusiveRBTree::ConstReverseIterator ZIntrusiveRBTree::crbegin() const { + return const_cast*>(this)->rbegin(); +} + +template +inline typename ZIntrusiveRBTree::ConstReverseIterator ZIntrusiveRBTree::crend() const { + return const_cast*>(this)->rend(); +} + +template +template +inline bool ZIntrusiveRBTree::IteratorImplementation::at_end() const { + return _node == nullptr; +} + +template +template +inline ZIntrusiveRBTree::IteratorImplementation::IteratorImplementation(ZIntrusiveRBTree& tree, pointer node) +: _tree(&tree), + _node(node), + _removed(false) {} + +template +template +template +inline ZIntrusiveRBTree::IteratorImplementation::IteratorImplementation(const IteratorImplementation& other) +: _tree(other._tree), + _node(other._node), + _removed(false) {} + +template +template +inline typename ZIntrusiveRBTree::template IteratorImplementation::reference ZIntrusiveRBTree::IteratorImplementation::operator*() const { + precond(!_removed); + return *_node; +} + +template +template +inline typename ZIntrusiveRBTree::template IteratorImplementation::pointer ZIntrusiveRBTree::IteratorImplementation::operator->() { + precond(!_removed); + return _node; +} + +template +template +inline typename ZIntrusiveRBTree::template IteratorImplementation& ZIntrusiveRBTree::IteratorImplementation::operator--() { + if (_removed) { + _removed = false; + } else if (Reverse) { + precond(_node != _tree->last()); + _node = at_end() ? _tree->first() : _node->next(); + } else { + precond(_node != _tree->first()); + _node = at_end() ? _tree->last() : _node->prev(); + } + return *this; +} + +template +template +inline typename ZIntrusiveRBTree::template IteratorImplementation ZIntrusiveRBTree::IteratorImplementation::operator--(int) { + IteratorImplementation tmp = *this; + --(*this); + return tmp; +} + +template +template +inline typename ZIntrusiveRBTree::template IteratorImplementation& ZIntrusiveRBTree::IteratorImplementation::operator++() { + if (_removed) { + _removed = false; + } else if (Reverse) { + precond(!at_end()); + _node = _node->prev(); + } else { + precond(!at_end()); + _node = _node->next(); + } + return *this; +} + +template +template +inline typename ZIntrusiveRBTree::template IteratorImplementation ZIntrusiveRBTree::IteratorImplementation::operator++(int) { + IteratorImplementation tmp = *this; + ++(*this); + return tmp; +} + +template +template +template +void ZIntrusiveRBTree::IteratorImplementation::replace(ZIntrusiveRBTreeNode* new_node) { + precond(!_removed); + precond(!at_end()); + FindCursor cursor = _tree->get_cursor(_node); + _node = new_node; + _tree->replace(new_node, cursor); +} + +template +template +template +void ZIntrusiveRBTree::IteratorImplementation::remove() { + precond(!_removed); + precond(!at_end()); + FindCursor cursor = _tree->get_cursor(_node); + ++(*this); + _removed = true; + _tree->remove(cursor); +} + +#endif // SHARE_GC_Z_ZINTRUSIVERBTREE_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zList.hpp b/src/hotspot/share/gc/z/zList.hpp index 0da2f823864..36cefbbeef7 100644 --- a/src/hotspot/share/gc/z/zList.hpp +++ b/src/hotspot/share/gc/z/zList.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. 
All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,6 +25,7 @@ #define SHARE_GC_Z_ZLIST_HPP #include "memory/allocation.hpp" +#include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" template class ZList; @@ -46,7 +47,12 @@ private: public: ZListNode(); - ~ZListNode(); + ~ZListNode() { + // Implementation placed here to make it easier easier to embed ZListNode + // instances without having to include zListNode.inline.hpp. + assert(_next == this, "Should not be in a list"); + assert(_prev == this, "Should not be in a list"); + } }; // Doubly linked list @@ -59,6 +65,7 @@ private: NONCOPYABLE(ZList); void verify_head() const; + void verify_head_error_reporter_safe() const; void insert(ZListNode* before, ZListNode* node); @@ -68,6 +75,9 @@ private: public: ZList(); + size_t size_error_reporter_safe() const; + bool is_empty_error_reporter_safe() const; + size_t size() const; bool is_empty() const; diff --git a/src/hotspot/share/gc/z/zList.inline.hpp b/src/hotspot/share/gc/z/zList.inline.hpp index 9c4f2b8fbf2..20edfaa620b 100644 --- a/src/hotspot/share/gc/z/zList.inline.hpp +++ b/src/hotspot/share/gc/z/zList.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,17 +27,13 @@ #include "gc/z/zList.hpp" #include "utilities/debug.hpp" +#include "utilities/vmError.hpp" template inline ZListNode::ZListNode() : _next(this), _prev(this) {} -template -inline ZListNode::~ZListNode() { - verify_links_unlinked(); -} - template inline void ZListNode::verify_links() const { assert(_next->_prev == this, "Corrupt list node"); @@ -62,6 +58,16 @@ inline void ZList::verify_head() const { _head.verify_links(); } +template +inline void ZList::verify_head_error_reporter_safe() const { + if (VMError::is_error_reported() && VMError::is_error_reported_in_current_thread()) { + // Do not verify if this thread is in the process of reporting an error. + return; + } + + verify_head(); +} + template inline void ZList::insert(ZListNode* before, ZListNode* node) { verify_head(); @@ -97,6 +103,17 @@ inline ZList::ZList() verify_head(); } +template +inline size_t ZList::size_error_reporter_safe() const { + verify_head_error_reporter_safe(); + return _size; +} + +template +inline bool ZList::is_empty_error_reporter_safe() const { + return size_error_reporter_safe() == 0; +} + template inline size_t ZList::size() const { verify_head(); diff --git a/src/hotspot/share/gc/z/zLiveMap.cpp b/src/hotspot/share/gc/z/zLiveMap.cpp index ef125af9c2e..5b9e0a932c3 100644 --- a/src/hotspot/share/gc/z/zLiveMap.cpp +++ b/src/hotspot/share/gc/z/zLiveMap.cpp @@ -34,24 +34,19 @@ static const ZStatCounter ZCounterMarkSeqNumResetContention("Contention", "Mark SeqNum Reset Contention", ZStatUnitOpsPerSecond); static const ZStatCounter ZCounterMarkSegmentResetContention("Contention", "Mark Segment Reset Contention", ZStatUnitOpsPerSecond); -static size_t bitmap_size(uint32_t size, size_t NumSegments) { - // We need at least one bit per segment - return MAX2(size, NumSegments) * 2; -} - -ZLiveMap::ZLiveMap(uint32_t size) - : _seqnum(0), +ZLiveMap::ZLiveMap(uint32_t object_max_count) + : _segment_size((object_max_count == 1 ? 
1u : (object_max_count / NumSegments)) * BitsPerObject), + _segment_shift(log2i_exact(_segment_size)), + _seqnum(0), _live_objects(0), _live_bytes(0), _segment_live_bits(0), _segment_claim_bits(0), - _bitmap_size(bitmap_size(size, NumSegments)), - _bitmap(0), - _segment_shift(log2i_exact(segment_size())) {} + _bitmap(0) {} -void ZLiveMap::allocate_bitmap() { - if (_bitmap.size() != _bitmap_size) { - _bitmap.initialize(_bitmap_size, false /* clear */); +void ZLiveMap::initialize_bitmap() { + if (_bitmap.size() == 0) { + _bitmap.initialize(size_t(_segment_size) * size_t(NumSegments), false /* clear */); } } @@ -71,14 +66,14 @@ void ZLiveMap::reset(ZGenerationId id) { _live_bytes = 0; _live_objects = 0; - // We lazily initialize the bitmap the first time the page is - // marked, i.e. a bit is about to be set for the first time. - allocate_bitmap(); - // Clear segment claimed/live bits segment_live_bits().clear(); segment_claim_bits().clear(); + // We lazily initialize the bitmap the first time the page is marked, i.e. + // a bit is about to be set for the first time. + initialize_bitmap(); + assert(_seqnum == seqnum_initializing, "Invalid"); // Make sure the newly reset marking information is ordered @@ -125,7 +120,7 @@ void ZLiveMap::reset_segment(BitMap::idx_t segment) { // Segment claimed, clear it const BitMap::idx_t start_index = segment_start(segment); const BitMap::idx_t end_index = segment_end(segment); - if (segment_size() / BitsPerWord >= 32) { + if (_segment_size / BitsPerWord >= 32) { _bitmap.clear_large_range(start_index, end_index); } else { _bitmap.clear_range(start_index, end_index); @@ -135,13 +130,3 @@ void ZLiveMap::reset_segment(BitMap::idx_t segment) { const bool success = set_segment_live(segment); assert(success, "Should never fail"); } - -void ZLiveMap::resize(uint32_t size) { - const size_t new_bitmap_size = bitmap_size(size, NumSegments); - _bitmap_size = new_bitmap_size; - _segment_shift = log2i_exact(segment_size()); - - if (_bitmap.size() != 0 && _bitmap.size() != new_bitmap_size) { - _bitmap.reinitialize(new_bitmap_size, false /* clear */); - } -} diff --git a/src/hotspot/share/gc/z/zLiveMap.hpp b/src/hotspot/share/gc/z/zLiveMap.hpp index 9f6514b574f..71457d05a41 100644 --- a/src/hotspot/share/gc/z/zLiveMap.hpp +++ b/src/hotspot/share/gc/z/zLiveMap.hpp @@ -35,16 +35,18 @@ class ZLiveMap { friend class ZLiveMapTest; private: - static const size_t NumSegments = 64; + static const uint32_t NumSegments = 64; + static const uint32_t BitsPerObject = 2; + + const uint32_t _segment_size; + const int _segment_shift; volatile uint32_t _seqnum; volatile uint32_t _live_objects; volatile size_t _live_bytes; BitMap::bm_word_t _segment_live_bits; BitMap::bm_word_t _segment_claim_bits; - size_t _bitmap_size; ZBitMap _bitmap; - int _segment_shift; const BitMapView segment_live_bits() const; const BitMapView segment_claim_bits() const; @@ -52,8 +54,6 @@ private: BitMapView segment_live_bits(); BitMapView segment_claim_bits(); - BitMap::idx_t segment_size() const; - BitMap::idx_t segment_start(BitMap::idx_t segment) const; BitMap::idx_t segment_end(BitMap::idx_t segment) const; @@ -66,7 +66,7 @@ private: bool claim_segment(BitMap::idx_t segment); - void allocate_bitmap(); + void initialize_bitmap(); void reset(ZGenerationId id); void reset_segment(BitMap::idx_t segment); @@ -77,11 +77,10 @@ private: void iterate_segment(BitMap::idx_t segment, Function function); public: - ZLiveMap(uint32_t size); + ZLiveMap(uint32_t object_max_count); ZLiveMap(const ZLiveMap& other) = delete; void 
reset(); - void resize(uint32_t size); bool is_marked(ZGenerationId id) const; diff --git a/src/hotspot/share/gc/z/zLiveMap.inline.hpp b/src/hotspot/share/gc/z/zLiveMap.inline.hpp index fdbbdfaba0e..a7f836a8559 100644 --- a/src/hotspot/share/gc/z/zLiveMap.inline.hpp +++ b/src/hotspot/share/gc/z/zLiveMap.inline.hpp @@ -87,10 +87,6 @@ inline BitMap::idx_t ZLiveMap::next_live_segment(BitMap::idx_t segment) const { return segment_live_bits().find_first_set_bit(segment + 1, NumSegments); } -inline BitMap::idx_t ZLiveMap::segment_size() const { - return _bitmap_size / NumSegments; -} - inline BitMap::idx_t ZLiveMap::index_to_segment(BitMap::idx_t index) const { return index >> _segment_shift; } @@ -125,11 +121,11 @@ inline void ZLiveMap::inc_live(uint32_t objects, size_t bytes) { } inline BitMap::idx_t ZLiveMap::segment_start(BitMap::idx_t segment) const { - return segment_size() * segment; + return segment * _segment_size; } inline BitMap::idx_t ZLiveMap::segment_end(BitMap::idx_t segment) const { - return segment_start(segment) + segment_size(); + return segment_start(segment) + _segment_size; } inline size_t ZLiveMap::do_object(ObjectClosure* cl, zaddress addr) const { diff --git a/src/hotspot/share/gc/z/zMappedCache.cpp b/src/hotspot/share/gc/z/zMappedCache.cpp new file mode 100644 index 00000000000..c1c6e9edee9 --- /dev/null +++ b/src/hotspot/share/gc/z/zMappedCache.cpp @@ -0,0 +1,629 @@ +/* + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zGlobals.hpp" +#include "gc/z/zIntrusiveRBTree.inline.hpp" +#include "gc/z/zList.inline.hpp" +#include "gc/z/zMappedCache.hpp" +#include "gc/z/zVirtualMemory.inline.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" + +class ZMappedCacheEntry { +private: + ZVirtualMemory _vmem; + ZMappedCache::TreeNode _tree_node; + ZMappedCache::SizeClassListNode _size_class_list_node; + +public: + ZMappedCacheEntry(ZVirtualMemory vmem) + : _vmem(vmem), + _tree_node(), + _size_class_list_node() {} + + static ZMappedCacheEntry* cast_to_entry(ZMappedCache::TreeNode* tree_node); + static const ZMappedCacheEntry* cast_to_entry(const ZMappedCache::TreeNode* tree_node); + static ZMappedCacheEntry* cast_to_entry(ZMappedCache::SizeClassListNode* list_node); + + zoffset start() const { + return _vmem.start(); + } + + zoffset_end end() const { + return _vmem.end(); + } + + ZVirtualMemory vmem() const { + return _vmem; + } + + ZMappedCache::TreeNode* node_addr() { + return &_tree_node; + } + + void update_start(ZVirtualMemory vmem) { + precond(vmem.end() == end()); + + _vmem = vmem; + } + + ZMappedCache::ZSizeClassListNode* size_class_node() { + return &_size_class_list_node; + } +}; + +ZMappedCacheEntry* ZMappedCacheEntry::cast_to_entry(ZMappedCache::TreeNode* tree_node) { + return const_cast(ZMappedCacheEntry::cast_to_entry(const_cast(tree_node))); +} + +const ZMappedCacheEntry* ZMappedCacheEntry::cast_to_entry(const ZMappedCache::TreeNode* tree_node) { + return (const ZMappedCacheEntry*)((uintptr_t)tree_node - offset_of(ZMappedCacheEntry, _tree_node)); +} + +ZMappedCacheEntry* ZMappedCacheEntry::cast_to_entry(ZMappedCache::SizeClassListNode* list_node) { + const size_t offset = offset_of(ZMappedCacheEntry, _size_class_list_node); + return (ZMappedCacheEntry*)((uintptr_t)list_node - offset); +} + +static void* entry_address_for_zoffset_end(zoffset_end offset) { + STATIC_ASSERT(is_aligned(ZCacheLineSize, alignof(ZMappedCacheEntry)));; + + // This spreads out the location of the entries in an effort to combat hyper alignment. + // Verify if this is an efficient and worthwhile optimization. 
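  // As a rough sketch of the arithmetic (assuming, purely for illustration,
  // a 2M ZGranuleSize, a 64-byte ZCacheLineSize and an entry size that
  // rounds up to 128 bytes): number_of_locations below becomes
  // 2M / 128 - 1 = 16383, and an entry is placed in one of 16383 slots
  // spaced aligned_entry_size apart just below the end of its range, chosen
  // by the range's granule index modulo 16383. Ranges whose ends are one
  // granule apart therefore land in different slots rather than all sharing
  // the same offset within their respective granules.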
+ + constexpr size_t aligned_entry_size = align_up(sizeof(ZMappedCacheEntry), ZCacheLineSize); + + // Do not use the last location + constexpr size_t number_of_locations = ZGranuleSize / aligned_entry_size - 1; + const size_t granule_index = untype(offset) >> ZGranuleSizeShift; + const size_t index = granule_index % number_of_locations; + const uintptr_t end_addr = untype(offset) + ZAddressHeapBase; + + return reinterpret_cast(end_addr - aligned_entry_size * (index + 1)); +} + +static ZMappedCacheEntry* create_entry(const ZVirtualMemory& vmem) { + precond(vmem.size() >= ZGranuleSize); + + void* placement_addr = entry_address_for_zoffset_end(vmem.end()); + ZMappedCacheEntry* entry = new (placement_addr) ZMappedCacheEntry(vmem); + + postcond(entry->start() == vmem.start()); + postcond(entry->end() == vmem.end()); + + return entry; +} + +int ZMappedCache::EntryCompare::operator()(ZMappedCache::TreeNode* a, ZMappedCache::TreeNode* b) { + const ZVirtualMemory vmem_a = ZMappedCacheEntry::cast_to_entry(a)->vmem(); + const ZVirtualMemory vmem_b = ZMappedCacheEntry::cast_to_entry(b)->vmem(); + + if (vmem_a.end() < vmem_b.start()) { return -1; } + if (vmem_b.end() < vmem_a.start()) { return 1; } + + return 0; // Overlapping +} + +int ZMappedCache::EntryCompare::operator()(zoffset key, ZMappedCache::TreeNode* node) { + const ZVirtualMemory vmem = ZMappedCacheEntry::cast_to_entry(node)->vmem(); + + if (key < vmem.start()) { return -1; } + if (key > vmem.end()) { return 1; } + + return 0; // Containing +} + +int ZMappedCache::size_class_index(size_t size) { + // Returns the size class index of for size, or -1 if smaller than the smallest size class. + const int size_class_power = log2i_graceful(size) - (int)ZGranuleSizeShift; + + if (size_class_power < MinSizeClassShift) { + // Allocation is smaller than the smallest size class minimum size. + return -1; + } + + return MIN2(size_class_power, MaxSizeClassShift) - MinSizeClassShift; +} + +int ZMappedCache::guaranteed_size_class_index(size_t size) { + // Returns the size class index of the smallest size class which can always + // accommodate a size allocation, or -1 otherwise. + const int size_class_power = log2i_ceil(size) - (int)ZGranuleSizeShift; + + if (size_class_power > MaxSizeClassShift) { + // Allocation is larger than the largest size class minimum size. 
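    // (With the size class constants in zMappedCache.hpp this means no size
    //  class is guaranteed to accommodate the allocation; assuming a 2M
    //  granule for illustration, that is any allocation larger than 16G.)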
+ return -1; + } + + return MAX2(size_class_power, MinSizeClassShift) - MinSizeClassShift; +} + +void ZMappedCache::tree_insert(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem) { + ZMappedCacheEntry* const entry = create_entry(vmem); + + // Insert creates a new entry + _entry_count += 1; + + // Insert in tree + _tree.insert(entry->node_addr(), cursor); + + // Insert in size-class lists + const size_t size = vmem.size(); + const int index = size_class_index(size); + if (index != -1) { + _size_class_lists[index].insert_first(entry->size_class_node()); + } +} + +void ZMappedCache::tree_remove(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem) { + ZMappedCacheEntry* entry = ZMappedCacheEntry::cast_to_entry(cursor.node()); + + // Remove destroys an old entry + _entry_count -= 1; + + // Remove from tree + _tree.remove(cursor); + + // Insert in size-class lists + const size_t size = vmem.size(); + const int index = size_class_index(size); + if (index != -1) { + _size_class_lists[index].remove(entry->size_class_node()); + } + + // Destroy entry + entry->~ZMappedCacheEntry(); +} + +void ZMappedCache::tree_replace(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem) { + ZMappedCacheEntry* const entry = create_entry(vmem); + + ZMappedCache::TreeNode* const node = cursor.node(); + ZMappedCacheEntry* const old_entry = ZMappedCacheEntry::cast_to_entry(node); + assert(old_entry->end() != vmem.end(), "should not replace, use update"); + + // Replace in tree + _tree.replace(entry->node_addr(), cursor); + + // Replace in size-class lists + + // Remove old + const size_t old_size = old_entry->vmem().size(); + const int old_index = size_class_index(old_size); + if (old_index != -1) { + _size_class_lists[old_index].remove(old_entry->size_class_node()); + } + + // Insert new + const size_t new_size = vmem.size(); + const int new_index = size_class_index(new_size); + if (new_index != -1) { + _size_class_lists[new_index].insert_first(entry->size_class_node()); + } + + // Destroy old entry + old_entry->~ZMappedCacheEntry(); +} + +void ZMappedCache::tree_update(ZMappedCacheEntry* entry, const ZVirtualMemory& vmem) { + assert(entry->end() == vmem.end(), "must be"); + + // Remove or add to size-class lists if required + + const size_t old_size = entry->vmem().size(); + const size_t new_size = vmem.size(); + const int old_index = size_class_index(old_size); + const int new_index = size_class_index(new_size); + + if (old_index != new_index) { + // Size class changed + + // Remove old + if (old_index != -1) { + _size_class_lists[old_index].remove(entry->size_class_node()); + } + + // Insert new + if (new_index != -1) { + _size_class_lists[new_index].insert_first(entry->size_class_node()); + } + } + + // And update entry + entry->update_start(vmem); +} + +template +ZVirtualMemory ZMappedCache::remove_vmem(ZMappedCacheEntry* const entry, size_t min_size, SelectFunction select) { + ZVirtualMemory vmem = entry->vmem(); + const size_t size = vmem.size(); + + if (size < min_size) { + // Do not select this, smaller than min_size + return ZVirtualMemory(); + } + + // Query how much to remove + const size_t to_remove = select(size); + assert(to_remove <= size, "must not remove more than size"); + + if (to_remove == 0) { + // Nothing to remove + return ZVirtualMemory(); + } + + if (to_remove != size) { + // Partial removal + if (strategy == RemovalStrategy::LowestAddress) { + const size_t unused_size = size - to_remove; + const ZVirtualMemory unused_vmem = vmem.shrink_from_back(unused_size); + 
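      // The low part of the range is handed out here and the part kept in
      // the cache retains its original end. Because the entry object itself
      // is placed at an address derived from that end, it can be updated in
      // place. The HighestAddress branch below hands out the high part
      // instead, which changes the remaining range's end, so a new entry
      // must be created and the old one replaced (tree_replace).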
tree_update(entry, unused_vmem); + + } else { + assert(strategy == RemovalStrategy::HighestAddress, "must be LowestAddress or HighestAddress"); + + const size_t unused_size = size - to_remove; + const ZVirtualMemory unused_vmem = vmem.shrink_from_front(unused_size); + + auto cursor = _tree.get_cursor(entry->node_addr()); + assert(cursor.is_valid(), "must be"); + tree_replace(cursor, unused_vmem); + } + + } else { + // Whole removal + auto cursor = _tree.get_cursor(entry->node_addr()); + assert(cursor.is_valid(), "must be"); + tree_remove(cursor, vmem); + } + + // Update statistics + _size -= to_remove; + _min = MIN2(_size, _min); + + postcond(to_remove == vmem.size()); + return vmem; +} + +template +bool ZMappedCache::try_remove_vmem_size_class(size_t min_size, SelectFunction select, ConsumeFunction consume) { +new_max_size: + // Query the max select size possible given the size of the cache + const size_t max_size = select(_size); + + if (max_size < min_size) { + // Never select less than min_size + return false; + } + + // Start scanning from max_size guaranteed size class to the largest size class + const int guaranteed_index = guaranteed_size_class_index(max_size); + for (int index = guaranteed_index; index != -1 && index < NumSizeClasses; ++index) { + ZList& list = _size_class_lists[index]; + if (!list.is_empty()) { + ZMappedCacheEntry* const entry = ZMappedCacheEntry::cast_to_entry(list.first()); + + // Because this is guaranteed, select should always succeed + const ZVirtualMemory vmem = remove_vmem(entry, min_size, select); + assert(!vmem.is_null(), "select must succeed"); + + if (consume(vmem)) { + // consume is satisfied + return true; + } + + // Continue with a new max_size + goto new_max_size; + } + } + + // Consume the rest starting at max_size's size class to min_size's size class + const int max_size_index = size_class_index(max_size); + const int min_size_index = size_class_index(min_size); + const int lowest_index = MAX2(min_size_index, 0); + + for (int index = max_size_index; index >= lowest_index; --index) { + ZListIterator iter(&_size_class_lists[index]); + for (ZSizeClassListNode* list_node; iter.next(&list_node);) { + ZMappedCacheEntry* const entry = ZMappedCacheEntry::cast_to_entry(list_node); + + // Try remove + const ZVirtualMemory vmem = remove_vmem(entry, min_size, select); + + if (!vmem.is_null() && consume(vmem)) { + // Found a vmem and consume is satisfied + return true; + } + } + } + + // consume was not satisfied + return false; +} + +template +void ZMappedCache::scan_remove_vmem(size_t min_size, SelectFunction select, ConsumeFunction consume) { + if (strategy == RemovalStrategy::SizeClasses) { + if (try_remove_vmem_size_class(min_size, select, consume)) { + // Satisfied using size classes + return; + } + + if (size_class_index(min_size) != -1) { + // There exists a size class for our min size. All possibilities must have + // been exhausted, do not scan the tree. + return; + } + + // Fallthrough to tree scan + } + + if (strategy == RemovalStrategy::HighestAddress) { + // Scan whole tree starting at the highest address + for (ZMappedCache::TreeNode* node = _tree.last(); node != nullptr; node = node->prev()) { + ZMappedCacheEntry* const entry = ZMappedCacheEntry::cast_to_entry(node); + + const ZVirtualMemory vmem = remove_vmem(entry, min_size, select); + + if (!vmem.is_null() && consume(vmem)) { + // Found a vmem and consume is satisfied. 
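        // Note that this scan starts at _tree.last(), so a caller that is
        // satisfied by a single range receives the highest-addressed range
        // in the cache that meets min_size and the select function.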
+ return; + } + } + + } else { + assert(strategy == RemovalStrategy::SizeClasses || strategy == RemovalStrategy::LowestAddress, "unknown strategy"); + + // Scan whole tree starting at the lowest address + for (ZMappedCache::TreeNode* node = _tree.first(); node != nullptr; node = node->next()) { + ZMappedCacheEntry* const entry = ZMappedCacheEntry::cast_to_entry(node); + + const ZVirtualMemory vmem = remove_vmem(entry, min_size, select); + + if (!vmem.is_null() && consume(vmem)) { + // Found a vmem and consume is satisfied. + return; + } + } + } +} + +template +void ZMappedCache::scan_remove_vmem(SelectFunction select, ConsumeFunction consume) { + // Scan without a min_size + scan_remove_vmem(0, select, consume); +} + +template +size_t ZMappedCache::remove_discontiguous_with_strategy(size_t size, ZArray* out) { + precond(size > 0); + precond(is_aligned(size, ZGranuleSize)); + + size_t remaining = size; + + const auto select_size_fn = [&](size_t vmem_size) { + // Select at most remaining + return MIN2(remaining, vmem_size); + }; + + const auto consume_vmem_fn = [&](ZVirtualMemory vmem) { + const size_t vmem_size = vmem.size(); + out->append(vmem); + + assert(vmem_size <= remaining, "consumed to much"); + + // Track remaining, and stop when it reaches zero + remaining -= vmem_size; + + return remaining == 0; + }; + + scan_remove_vmem(select_size_fn, consume_vmem_fn); + + return size - remaining; +} + +ZMappedCache::ZMappedCache() + : _tree(), + _entry_count(0), + _size_class_lists{}, + _size(0), + _min(_size) {} + +void ZMappedCache::insert(const ZVirtualMemory& vmem) { + _size += vmem.size(); + + Tree::FindCursor current_cursor = _tree.find(vmem.start()); + Tree::FindCursor next_cursor = _tree.next(current_cursor); + + const bool extends_left = current_cursor.found(); + const bool extends_right = next_cursor.is_valid() && next_cursor.found() && + ZMappedCacheEntry::cast_to_entry(next_cursor.node())->start() == vmem.end(); + + if (extends_left && extends_right) { + ZMappedCacheEntry* next_entry = ZMappedCacheEntry::cast_to_entry(next_cursor.node()); + + const ZVirtualMemory left_vmem = ZMappedCacheEntry::cast_to_entry(current_cursor.node())->vmem(); + const ZVirtualMemory right_vmem = next_entry->vmem(); + assert(left_vmem.adjacent_to(vmem), "must be"); + assert(vmem.adjacent_to(right_vmem), "must be"); + + ZVirtualMemory new_vmem = left_vmem; + new_vmem.grow_from_back(vmem.size()); + new_vmem.grow_from_back(right_vmem.size()); + + // Remove current (left vmem) + tree_remove(current_cursor, left_vmem); + + // And update next's start + tree_update(next_entry, new_vmem); + + return; + } + + if (extends_left) { + const ZVirtualMemory left_vmem = ZMappedCacheEntry::cast_to_entry(current_cursor.node())->vmem(); + assert(left_vmem.adjacent_to(vmem), "must be"); + + ZVirtualMemory new_vmem = left_vmem; + new_vmem.grow_from_back(vmem.size()); + + tree_replace(current_cursor, new_vmem); + + return; + } + + if (extends_right) { + ZMappedCacheEntry* next_entry = ZMappedCacheEntry::cast_to_entry(next_cursor.node()); + + const ZVirtualMemory right_vmem = next_entry->vmem(); + assert(vmem.adjacent_to(right_vmem), "must be"); + + ZVirtualMemory new_vmem = vmem; + new_vmem.grow_from_back(right_vmem.size()); + + // Update next's start + tree_update(next_entry, new_vmem); + + return; + } + + tree_insert(current_cursor, vmem); +} + +ZVirtualMemory ZMappedCache::remove_contiguous(size_t size) { + precond(size > 0); + precond(is_aligned(size, ZGranuleSize)); + + ZVirtualMemory result; + + const auto 
select_size_fn = [&](size_t) { + // We always select the size + return size; + }; + + const auto consume_vmem_fn = [&](ZVirtualMemory vmem) { + assert(result.is_null(), "only consume once"); + assert(vmem.size() == size, "wrong size consumed"); + + result = vmem; + + // Only require one vmem + return true; + }; + + if (size == ZPageSizeSmall) { + // Small page allocations allocate at the lowest possible address + scan_remove_vmem(size, select_size_fn, consume_vmem_fn); + } else { + // Other sizes uses approximate best fit size classes first + scan_remove_vmem(size, select_size_fn, consume_vmem_fn); + } + + return result; +} + +size_t ZMappedCache::remove_discontiguous(size_t size, ZArray* out) { + return remove_discontiguous_with_strategy(size, out); +} + +size_t ZMappedCache::reset_min() { + const size_t old_min = _min; + + _min = _size; + + return old_min; +} + +size_t ZMappedCache::remove_from_min(size_t max_size, ZArray* out) { + const size_t size = MIN2(_min, max_size); + + if (size == 0) { + return 0; + } + + return remove_discontiguous_with_strategy(size, out); +} + +void ZMappedCache::print_on(outputStream* st) const { + // This may be called from error printing where we may not hold the lock, so + // values may be inconsistent. As such we read the _entry_count only once. And + // use is_empty_error_reporter_safe and size_error_reporter_safe on the size + // class lists. + const size_t entry_count = Atomic::load(&_entry_count); + + st->print("Cache"); + st->fill_to(17); + st->print_cr("%zuM (%zu)", _size / M, entry_count); + + if (entry_count == 0) { + // Empty cache, skip printing size classes + return; + } + + // Aggregate the number of size class entries + size_t size_class_entry_count = 0; + for (int index = 0; index < NumSizeClasses; ++index) { + size_class_entry_count += _size_class_lists[index].size_error_reporter_safe(); + } + + // Print information on size classes + streamIndentor indentor(st, 1); + + st->print("size classes"); + st->fill_to(17); + + // Print the number of entries smaller than the min size class's size + const size_t small_entry_size_count = entry_count - size_class_entry_count; + bool first = true; + if (small_entry_size_count != 0) { + st->print(EXACTFMT " (%zu)", EXACTFMTARGS(ZGranuleSize), small_entry_size_count); + first = false; + } + + for (int index = 0; index < NumSizeClasses; ++index) { + const ZList& list = _size_class_lists[index]; + if (!list.is_empty_error_reporter_safe()) { + const int shift = index + MinSizeClassShift + (int)ZGranuleSizeShift; + const size_t size = (size_t)1 << shift; + + st->print("%s" EXACTFMT " (%zu)", first ? "" : ", ", EXACTFMTARGS(size), list.size_error_reporter_safe()); + first = false; + } + } + + st->cr(); +} + +void ZMappedCache::print_extended_on(outputStream* st) const { + // Print the ranges and size of all nodes in the tree + for (ZMappedCache::TreeNode* node = _tree.first(); node != nullptr; node = node->next()) { + const ZVirtualMemory vmem = ZMappedCacheEntry::cast_to_entry(node)->vmem(); + + st->print_cr(PTR_FORMAT " " PTR_FORMAT " " EXACTFMT, + untype(vmem.start()), untype(vmem.end()), EXACTFMTARGS(vmem.size())); + } +} diff --git a/src/hotspot/share/gc/z/zMappedCache.hpp b/src/hotspot/share/gc/z/zMappedCache.hpp new file mode 100644 index 00000000000..8d1c90fd003 --- /dev/null +++ b/src/hotspot/share/gc/z/zMappedCache.hpp @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZMAPPEDCACHE_HPP +#define SHARE_GC_Z_ZMAPPEDCACHE_HPP + +#include "gc/z/zAddress.hpp" +#include "gc/z/zArray.hpp" +#include "gc/z/zGlobals.hpp" +#include "gc/z/zIntrusiveRBTree.hpp" +#include "gc/z/zList.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/ostream.hpp" + +class ZMappedCacheEntry; +class ZVirtualMemory; + +class ZMappedCache { + friend class ZMappedCacheEntry; + +private: + struct EntryCompare { + int operator()(ZIntrusiveRBTreeNode* a, ZIntrusiveRBTreeNode* b); + int operator()(zoffset key, ZIntrusiveRBTreeNode* node); + }; + + struct ZSizeClassListNode { + ZListNode _node; + }; + + using Tree = ZIntrusiveRBTree; + using TreeNode = ZIntrusiveRBTreeNode; + using SizeClassList = ZList; + using SizeClassListNode = ZSizeClassListNode; + + // Maintain size class lists from 4MB to 16GB + static constexpr int MaxLongArraySizeClassShift = 3 /* 8 byte */ + 31 /* max length */; + static constexpr int MinSizeClassShift = 1; + static constexpr int MaxSizeClassShift = MaxLongArraySizeClassShift - ZGranuleSizeShift; + static constexpr int NumSizeClasses = MaxSizeClassShift - MinSizeClassShift + 1; + + Tree _tree; + size_t _entry_count; + SizeClassList _size_class_lists[NumSizeClasses]; + size_t _size; + size_t _min; + + static int size_class_index(size_t size); + static int guaranteed_size_class_index(size_t size); + + void tree_insert(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem); + void tree_remove(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem); + void tree_replace(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem); + void tree_update(ZMappedCacheEntry* entry, const ZVirtualMemory& vmem); + + enum class RemovalStrategy { + LowestAddress, + HighestAddress, + SizeClasses, + }; + + template + ZVirtualMemory remove_vmem(ZMappedCacheEntry* const entry, size_t min_size, SelectFunction select); + + template + bool try_remove_vmem_size_class(size_t min_size, SelectFunction select, ConsumeFunction consume); + + template + void scan_remove_vmem(size_t min_size, SelectFunction select, ConsumeFunction consume); + + template + void scan_remove_vmem(SelectFunction select, ConsumeFunction consume); + + template + size_t remove_discontiguous_with_strategy(size_t size, ZArray* out); + +public: + ZMappedCache(); + + void insert(const ZVirtualMemory& vmem); + + ZVirtualMemory remove_contiguous(size_t size); + size_t remove_discontiguous(size_t size, ZArray* out); + + size_t reset_min(); + size_t remove_from_min(size_t max_size, ZArray* out); + + void print_on(outputStream* st) const; + void 
print_extended_on(outputStream* st) const; +}; + +#endif // SHARE_GC_Z_ZMAPPEDCACHE_HPP diff --git a/src/hotspot/share/gc/z/zMemory.cpp b/src/hotspot/share/gc/z/zMemory.cpp deleted file mode 100644 index 35e95888d4d..00000000000 --- a/src/hotspot/share/gc/z/zMemory.cpp +++ /dev/null @@ -1,284 +0,0 @@ -/* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "gc/z/zList.inline.hpp" -#include "gc/z/zLock.inline.hpp" -#include "gc/z/zMemory.inline.hpp" - -void ZMemoryManager::shrink_from_front(ZMemory* area, size_t size) { - if (_callbacks._shrink != nullptr) { - const ZMemory* from = area; - const ZMemory to(area->start() + size, area->size() - size); - _callbacks._shrink(*from, to); - } - area->shrink_from_front(size); -} - -void ZMemoryManager::shrink_from_back(ZMemory* area, size_t size) { - if (_callbacks._shrink != nullptr) { - const ZMemory* from = area; - const ZMemory to(area->start(), area->size() - size); - _callbacks._shrink(*from, to); - } - area->shrink_from_back(size); -} - -void ZMemoryManager::grow_from_front(ZMemory* area, size_t size) { - if (_callbacks._grow != nullptr) { - const ZMemory* from = area; - const ZMemory to(area->start() - size, area->size() + size); - _callbacks._grow(*from, to); - } - area->grow_from_front(size); -} - -void ZMemoryManager::grow_from_back(ZMemory* area, size_t size) { - if (_callbacks._grow != nullptr) { - const ZMemory* from = area; - const ZMemory to(area->start(), area->size() + size); - _callbacks._grow(*from, to); - } - area->grow_from_back(size); -} - -ZMemoryManager::Callbacks::Callbacks() - : _prepare_for_hand_out(nullptr), - _prepare_for_hand_back(nullptr), - _grow(nullptr), - _shrink(nullptr) {} - -ZMemoryManager::ZMemoryManager() - : _freelist(), - _callbacks() {} - -bool ZMemoryManager::free_is_contiguous() const { - return _freelist.size() == 1; -} - -void ZMemoryManager::register_callbacks(const Callbacks& callbacks) { - _callbacks = callbacks; -} - -zoffset ZMemoryManager::peek_low_address() const { - ZLocker locker(&_lock); - - const ZMemory* const area = _freelist.first(); - if (area != nullptr) { - return area->start(); - } - - // Out of memory - return zoffset(UINTPTR_MAX); -} - -zoffset_end ZMemoryManager::peak_high_address_end() const { - ZLocker locker(&_lock); - - const ZMemory* const area = _freelist.last(); - if (area != nullptr) { - return area->end(); - } - - // Out of memory - return zoffset_end(UINTPTR_MAX); -} - -zoffset ZMemoryManager::alloc_low_address(size_t size) { - ZLocker locker(&_lock); - - 
ZListIterator iter(&_freelist); - for (ZMemory* area; iter.next(&area);) { - if (area->size() >= size) { - zoffset start; - - if (area->size() == size) { - // Exact match, remove area - start = area->start(); - _freelist.remove(area); - delete area; - } else { - // Larger than requested, shrink area - start = area->start(); - shrink_from_front(area, size); - } - - if (_callbacks._prepare_for_hand_out != nullptr) { - _callbacks._prepare_for_hand_out(ZMemory(start, size)); - } - - return start; - } - } - - // Out of memory - return zoffset(UINTPTR_MAX); -} - -zoffset ZMemoryManager::alloc_low_address_at_most(size_t size, size_t* allocated) { - ZLocker locker(&_lock); - - ZMemory* const area = _freelist.first(); - if (area != nullptr) { - const zoffset start = area->start(); - - if (area->size() <= size) { - // Smaller than or equal to requested, remove area - _freelist.remove(area); - *allocated = area->size(); - delete area; - } else { - // Larger than requested, shrink area - shrink_from_front(area, size); - *allocated = size; - } - - if (_callbacks._prepare_for_hand_out != nullptr) { - _callbacks._prepare_for_hand_out(ZMemory(start, *allocated)); - } - - return start; - } - - // Out of memory - *allocated = 0; - return zoffset(UINTPTR_MAX); -} - -zoffset ZMemoryManager::alloc_high_address(size_t size) { - ZLocker locker(&_lock); - - ZListReverseIterator iter(&_freelist); - for (ZMemory* area; iter.next(&area);) { - if (area->size() >= size) { - zoffset start; - - if (area->size() == size) { - // Exact match, remove area - start = area->start(); - _freelist.remove(area); - delete area; - } else { - // Larger than requested, shrink area - shrink_from_back(area, size); - start = to_zoffset(area->end()); - } - - if (_callbacks._prepare_for_hand_out != nullptr) { - _callbacks._prepare_for_hand_out(ZMemory(start, size)); - } - - return start; - } - } - - // Out of memory - return zoffset(UINTPTR_MAX); -} - -void ZMemoryManager::move_into(zoffset start, size_t size) { - assert(start != zoffset(UINTPTR_MAX), "Invalid address"); - const zoffset_end end = to_zoffset_end(start, size); - - ZListIterator iter(&_freelist); - for (ZMemory* area; iter.next(&area);) { - if (start < area->start()) { - ZMemory* const prev = _freelist.prev(area); - if (prev != nullptr && start == prev->end()) { - if (end == area->start()) { - // Merge with prev and current area - grow_from_back(prev, size + area->size()); - _freelist.remove(area); - delete area; - } else { - // Merge with prev area - grow_from_back(prev, size); - } - } else if (end == area->start()) { - // Merge with current area - grow_from_front(area, size); - } else { - // Insert new area before current area - assert(end < area->start(), "Areas must not overlap"); - ZMemory* const new_area = new ZMemory(start, size); - _freelist.insert_before(area, new_area); - } - - // Done - return; - } - } - - // Insert last - ZMemory* const last = _freelist.last(); - if (last != nullptr && start == last->end()) { - // Merge with last area - grow_from_back(last, size); - } else { - // Insert new area last - ZMemory* const new_area = new ZMemory(start, size); - _freelist.insert_last(new_area); - } -} - -void ZMemoryManager::free(zoffset start, size_t size) { - ZLocker locker(&_lock); - - if (_callbacks._prepare_for_hand_back != nullptr) { - _callbacks._prepare_for_hand_back(ZMemory(start, size)); - } - - move_into(start, size); -} - -void ZMemoryManager::register_range(zoffset start, size_t size) { - // Note that there's no need to call the _prepare_for_hand_back when 
memory - // is added the first time. We don't have to undo the effects of a previous - // _prepare_for_hand_out callback. - - // No need to lock during initialization. - - move_into(start, size); -} - -bool ZMemoryManager::unregister_first(zoffset* start_out, size_t* size_out) { - // Note that this doesn't hand out memory to be used, so we don't call the - // _prepare_for_hand_out callback. - - ZLocker locker(&_lock); - - if (_freelist.is_empty()) { - return false; - } - - // Don't invoke the _prepare_for_hand_out callback - - ZMemory* const area = _freelist.remove_first(); - - // Return the range - *start_out = area->start(); - *size_out = area->size(); - - delete area; - - return true; -} diff --git a/src/hotspot/share/gc/z/zMemory.hpp b/src/hotspot/share/gc/z/zMemory.hpp deleted file mode 100644 index da37596c1c7..00000000000 --- a/src/hotspot/share/gc/z/zMemory.hpp +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_Z_ZMEMORY_HPP -#define SHARE_GC_Z_ZMEMORY_HPP - -#include "gc/z/zAddress.hpp" -#include "gc/z/zList.hpp" -#include "gc/z/zLock.hpp" -#include "memory/allocation.hpp" - -class ZMemory : public CHeapObj { - friend class ZList; - -private: - zoffset _start; - zoffset_end _end; - ZListNode _node; - -public: - ZMemory(zoffset start, size_t size); - - zoffset start() const; - zoffset_end end() const; - size_t size() const; - - bool operator==(const ZMemory& other) const; - bool operator!=(const ZMemory& other) const; - - bool contains(const ZMemory& other) const; - - void shrink_from_front(size_t size); - void shrink_from_back(size_t size); - void grow_from_front(size_t size); - void grow_from_back(size_t size); -}; - -class ZMemoryManager { - friend class ZVirtualMemoryManagerTest; - -public: - typedef void (*CallbackPrepare)(const ZMemory& area); - typedef void (*CallbackResize)(const ZMemory& from, const ZMemory& to); - - struct Callbacks { - CallbackPrepare _prepare_for_hand_out; - CallbackPrepare _prepare_for_hand_back; - CallbackResize _grow; - CallbackResize _shrink; - - Callbacks(); - }; - -private: - mutable ZLock _lock; - ZList _freelist; - Callbacks _callbacks; - - void shrink_from_front(ZMemory* area, size_t size); - void shrink_from_back(ZMemory* area, size_t size); - void grow_from_front(ZMemory* area, size_t size); - void grow_from_back(ZMemory* area, size_t size); - - void move_into(zoffset start, size_t size); - -public: - ZMemoryManager(); - - bool free_is_contiguous() const; - - void register_callbacks(const Callbacks& callbacks); - - zoffset peek_low_address() const; - zoffset_end peak_high_address_end() const; - zoffset alloc_low_address(size_t size); - zoffset alloc_low_address_at_most(size_t size, size_t* allocated); - zoffset alloc_high_address(size_t size); - - void free(zoffset start, size_t size); - void register_range(zoffset start, size_t size); - bool unregister_first(zoffset* start_out, size_t* size_out); -}; - -#endif // SHARE_GC_Z_ZMEMORY_HPP diff --git a/src/hotspot/share/gc/z/zMemory.inline.hpp b/src/hotspot/share/gc/z/zMemory.inline.hpp deleted file mode 100644 index 39e5b26d856..00000000000 --- a/src/hotspot/share/gc/z/zMemory.inline.hpp +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
- */ - -#ifndef SHARE_GC_Z_ZMEMORY_INLINE_HPP -#define SHARE_GC_Z_ZMEMORY_INLINE_HPP - -#include "gc/z/zMemory.hpp" - -#include "gc/z/zAddress.inline.hpp" -#include "gc/z/zList.inline.hpp" -#include "utilities/debug.hpp" - -inline ZMemory::ZMemory(zoffset start, size_t size) - : _start(start), - _end(to_zoffset_end(start, size)) {} - -inline zoffset ZMemory::start() const { - return _start; -} - -inline zoffset_end ZMemory::end() const { - return _end; -} - -inline size_t ZMemory::size() const { - return end() - start(); -} - -inline bool ZMemory::operator==(const ZMemory& other) const { - return _start == other._start && _end == other._end; -} - -inline bool ZMemory::operator!=(const ZMemory& other) const { - return !operator==(other); -} - -inline bool ZMemory::contains(const ZMemory& other) const { - return _start <= other._start && other.end() <= end(); -} - -inline void ZMemory::shrink_from_front(size_t size) { - assert(this->size() > size, "Too small"); - _start += size; -} - -inline void ZMemory::shrink_from_back(size_t size) { - assert(this->size() > size, "Too small"); - _end -= size; -} - -inline void ZMemory::grow_from_front(size_t size) { - assert(size_t(start()) >= size, "Too big"); - _start -= size; -} - -inline void ZMemory::grow_from_back(size_t size) { - _end += size; -} - -#endif // SHARE_GC_Z_ZMEMORY_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zNMT.cpp b/src/hotspot/share/gc/z/zNMT.cpp index 4e1efbf9caf..76e164308dd 100644 --- a/src/hotspot/share/gc/z/zNMT.cpp +++ b/src/hotspot/share/gc/z/zNMT.cpp @@ -24,7 +24,6 @@ #include "gc/z/zAddress.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zNMT.hpp" -#include "gc/z/zVirtualMemory.hpp" #include "nmt/memTag.hpp" #include "nmt/memTracker.hpp" #include "nmt/memoryFileTracker.hpp" @@ -60,15 +59,15 @@ void ZNMT::unreserve(zaddress_unsafe start, size_t size) { } } -void ZNMT::commit(zoffset offset, size_t size) { +void ZNMT::commit(zbacking_offset offset, size_t size) { MemTracker::allocate_memory_in(ZNMT::_device, untype(offset), size, CALLER_PC, mtJavaHeap); } -void ZNMT::uncommit(zoffset offset, size_t size) { +void ZNMT::uncommit(zbacking_offset offset, size_t size) { MemTracker::free_memory_in(ZNMT::_device, untype(offset), size); } -void ZNMT::map(zaddress_unsafe addr, size_t size, zoffset offset) { +void ZNMT::map(zaddress_unsafe addr, size_t size, zbacking_offset offset) { // NMT doesn't track mappings at the moment. } diff --git a/src/hotspot/share/gc/z/zNMT.hpp b/src/hotspot/share/gc/z/zNMT.hpp index bea6a6b62ac..b5b1aa07870 100644 --- a/src/hotspot/share/gc/z/zNMT.hpp +++ b/src/hotspot/share/gc/z/zNMT.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,13 +26,10 @@ #include "gc/z/zAddress.hpp" #include "gc/z/zGlobals.hpp" -#include "gc/z/zMemory.hpp" -#include "gc/z/zVirtualMemory.hpp" #include "memory/allStatic.hpp" #include "nmt/memTracker.hpp" #include "nmt/memoryFileTracker.hpp" #include "utilities/globalDefinitions.hpp" -#include "utilities/nativeCallStack.hpp" class ZNMT : public AllStatic { private: @@ -44,10 +41,10 @@ public: static void reserve(zaddress_unsafe start, size_t size); static void unreserve(zaddress_unsafe start, size_t size); - static void commit(zoffset offset, size_t size); - static void uncommit(zoffset offset, size_t size); + static void commit(zbacking_offset offset, size_t size); + static void uncommit(zbacking_offset offset, size_t size); - static void map(zaddress_unsafe addr, size_t size, zoffset offset); + static void map(zaddress_unsafe addr, size_t size, zbacking_offset offset); static void unmap(zaddress_unsafe addr, size_t size); }; diff --git a/src/hotspot/share/gc/z/zNUMA.cpp b/src/hotspot/share/gc/z/zNUMA.cpp index a302a1843bb..cf2d88a90e5 100644 --- a/src/hotspot/share/gc/z/zNUMA.cpp +++ b/src/hotspot/share/gc/z/zNUMA.cpp @@ -21,8 +21,10 @@ * questions. */ +#include "gc/shared/gc_globals.hpp" #include "gc/shared/gcLogPrecious.hpp" -#include "gc/z/zNUMA.hpp" +#include "gc/z/zNUMA.inline.hpp" +#include "utilities/macros.hpp" bool ZNUMA::_enabled; uint32_t ZNUMA::_count; @@ -31,11 +33,20 @@ void ZNUMA::initialize() { pd_initialize(); log_info_p(gc, init)("NUMA Support: %s", to_string()); + if (_enabled) { + assert(!is_faked(), "Currently not supported"); log_info_p(gc, init)("NUMA Nodes: %u", _count); + + } else if (is_faked()) { + log_info_p(gc, init)("Fake NUMA Nodes: %u", count()); } } const char* ZNUMA::to_string() { + if (is_faked()) { + return "Faked"; + } + return _enabled ? 
"Enabled" : "Disabled"; } diff --git a/src/hotspot/share/gc/z/zNUMA.hpp b/src/hotspot/share/gc/z/zNUMA.hpp index ac614247522..de74086b10a 100644 --- a/src/hotspot/share/gc/z/zNUMA.hpp +++ b/src/hotspot/share/gc/z/zNUMA.hpp @@ -24,10 +24,15 @@ #ifndef SHARE_GC_Z_ZNUMA_HPP #define SHARE_GC_Z_ZNUMA_HPP +#include "gc/z/zGlobals.hpp" #include "memory/allStatic.hpp" #include "utilities/globalDefinitions.hpp" class ZNUMA : public AllStatic { + friend class VMStructs; + friend class ZNUMATest; + friend class ZTest; + private: static bool _enabled; static uint32_t _count; @@ -36,13 +41,17 @@ private: public: static void initialize(); + static bool is_enabled(); + static bool is_faked(); static uint32_t count(); static uint32_t id(); static uint32_t memory_id(uintptr_t addr); + static size_t calculate_share(uint32_t numa_id, size_t total, size_t granule = ZGranuleSize, uint32_t ignore_count = 0); + static const char* to_string(); }; diff --git a/src/hotspot/share/gc/z/zNUMA.inline.hpp b/src/hotspot/share/gc/z/zNUMA.inline.hpp index 4596c8f090b..d90b44e905a 100644 --- a/src/hotspot/share/gc/z/zNUMA.inline.hpp +++ b/src/hotspot/share/gc/z/zNUMA.inline.hpp @@ -26,12 +26,36 @@ #include "gc/z/zNUMA.hpp" +#include "gc/shared/gc_globals.hpp" +#include "gc/z/zGlobals.hpp" +#include "utilities/align.hpp" + inline bool ZNUMA::is_enabled() { return _enabled; } +inline bool ZNUMA::is_faked() { + return ZFakeNUMA > 1; +} + inline uint32_t ZNUMA::count() { return _count; } +inline size_t ZNUMA::calculate_share(uint32_t numa_id, size_t total, size_t granule, uint32_t ignore_count) { + assert(total % granule == 0, "total must be divisible by granule"); + assert(ignore_count < count(), "must not ignore all nodes"); + assert(numa_id < count() - ignore_count, "numa_id must be in bounds"); + + const uint32_t num_nodes = count() - ignore_count; + const size_t base_share = ((total / num_nodes) / granule) * granule; + + const size_t extra_share_nodes = (total - base_share * num_nodes) / granule; + if (numa_id < extra_share_nodes) { + return base_share + granule; + } + + return base_share; +} + #endif // SHARE_GC_Z_ZNUMA_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zObjectAllocator.cpp b/src/hotspot/share/gc/z/zObjectAllocator.cpp index 81a92aa6cc6..54724c1b48e 100644 --- a/src/hotspot/share/gc/z/zObjectAllocator.cpp +++ b/src/hotspot/share/gc/z/zObjectAllocator.cpp @@ -138,10 +138,10 @@ zaddress ZObjectAllocator::alloc_object_in_medium_page(size_t size, } if (is_null(addr)) { - // When a new medium page is required, we synchronize the allocation - // of the new page using a lock. This is to avoid having multiple - // threads requesting a medium page from the page cache when we know - // only one of the will succeed in installing the page at this layer. + // When a new medium page is required, we synchronize the allocation of the + // new page using a lock. This is to avoid having multiple threads allocate + // medium pages when we know only one of them will succeed in installing + // the page at this layer. 
ZLocker locker(&_medium_page_alloc_lock); // When holding the lock we can't allow the page allocator to stall, diff --git a/src/hotspot/share/gc/z/zPage.cpp b/src/hotspot/share/gc/z/zPage.cpp index 5264076c8a9..f4cc7542052 100644 --- a/src/hotspot/share/gc/z/zPage.cpp +++ b/src/hotspot/share/gc/z/zPage.cpp @@ -23,42 +23,47 @@ #include "gc/shared/gc_globals.hpp" #include "gc/z/zGeneration.inline.hpp" -#include "gc/z/zList.inline.hpp" #include "gc/z/zPage.inline.hpp" -#include "gc/z/zPhysicalMemory.inline.hpp" +#include "gc/z/zPageAge.hpp" #include "gc/z/zRememberedSet.inline.hpp" -#include "gc/z/zVirtualMemory.inline.hpp" #include "utilities/align.hpp" #include "utilities/debug.hpp" -#include "utilities/growableArray.hpp" -ZPage::ZPage(ZPageType type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem) +ZPage::ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, ZMultiPartitionTracker* multi_partition_tracker, uint32_t partition_id) : _type(type), - _generation_id(ZGenerationId::young), - _age(ZPageAge::eden), - _numa_id((uint8_t)-1), - _seqnum(0), - _seqnum_other(0), + _generation_id(/* set in reset */), + _age(/* set in reset */), + _seqnum(/* set in reset */), + _seqnum_other(/* set in reset */), + _single_partition_id(partition_id), _virtual(vmem), _top(to_zoffset_end(start())), _livemap(object_max_count()), _remembered_set(), - _last_used(0), - _physical(pmem), - _node() { + _multi_partition_tracker(multi_partition_tracker) { assert(!_virtual.is_null(), "Should not be null"); - assert(!_physical.is_null(), "Should not be null"); - assert(_virtual.size() == _physical.size(), "Virtual/Physical size mismatch"); assert((_type == ZPageType::small && size() == ZPageSizeSmall) || (_type == ZPageType::medium && size() == ZPageSizeMedium) || (_type == ZPageType::large && is_aligned(size(), ZGranuleSize)), "Page type/size mismatch"); + reset(age); + + if (is_old()) { + remset_alloc(); + } } -ZPage* ZPage::clone_limited() const { +ZPage::ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, uint32_t partition_id) + : ZPage(type, age, vmem, nullptr /* multi_partition_tracker */, partition_id) {} + +ZPage::ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, ZMultiPartitionTracker* multi_partition_tracker) + : ZPage(type, age, vmem, multi_partition_tracker, -1u /* partition_id */) {} + +ZPage* ZPage::clone_for_promotion() const { + assert(_age != ZPageAge::old, "must be used for promotion"); // Only copy type and memory layouts, and also update _top. Let the rest be // lazily reconstructed when needed. - ZPage* const page = new ZPage(_type, _virtual, _physical); + ZPage* const page = new ZPage(_type, ZPageAge::old, _virtual, _multi_partition_tracker, _single_partition_id); page->_top = _top; return page; @@ -85,19 +90,16 @@ void ZPage::remset_alloc() { _remembered_set.initialize(size()); } -void ZPage::remset_delete() { - _remembered_set.delete_all(); -} - -void ZPage::reset(ZPageAge age) { +ZPage* ZPage::reset(ZPageAge age) { _age = age; - _last_used = 0; _generation_id = age == ZPageAge::old ? 
ZGenerationId::old : ZGenerationId::young; reset_seqnum(); + + return this; } void ZPage::reset_livemap() { @@ -108,59 +110,6 @@ void ZPage::reset_top_for_allocation() { _top = to_zoffset_end(start()); } -void ZPage::reset_type_and_size(ZPageType type) { - _type = type; - _livemap.resize(object_max_count()); -} - -ZPage* ZPage::retype(ZPageType type) { - assert(_type != type, "Invalid retype"); - reset_type_and_size(type); - return this; -} - -ZPage* ZPage::split(size_t split_of_size) { - return split(type_from_size(split_of_size), split_of_size); -} - -ZPage* ZPage::split_with_pmem(ZPageType type, const ZPhysicalMemory& pmem) { - // Resize this page - const ZVirtualMemory vmem = _virtual.split(pmem.size()); - assert(vmem.end() == _virtual.start(), "Should be consecutive"); - - reset_type_and_size(type_from_size(_virtual.size())); - - log_trace(gc, page)("Split page [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT "]", - untype(vmem.start()), - untype(vmem.end()), - untype(_virtual.end())); - - // Create new page - return new ZPage(type, vmem, pmem); -} - -ZPage* ZPage::split(ZPageType type, size_t split_of_size) { - assert(_virtual.size() > split_of_size, "Invalid split"); - - const ZPhysicalMemory pmem = _physical.split(split_of_size); - - return split_with_pmem(type, pmem); -} - -ZPage* ZPage::split_committed() { - // Split any committed part of this page into a separate page, - // leaving this page with only uncommitted physical memory. - const ZPhysicalMemory pmem = _physical.split_committed(); - if (pmem.is_null()) { - // Nothing committed - return nullptr; - } - - assert(!_physical.is_null(), "Should not be null"); - - return split_with_pmem(type_from_size(pmem.size()), pmem); -} - class ZFindBaseOopClosure : public ObjectClosure { private: volatile zpointer* _p; @@ -215,18 +164,19 @@ void* ZPage::remset_current() { return _remembered_set.current(); } -void ZPage::print_on_msg(outputStream* out, const char* msg) const { - out->print_cr(" %-6s " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s/%-4u %s%s%s", +void ZPage::print_on_msg(outputStream* st, const char* msg) const { + st->print_cr("%-6s " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s/%-4u %s%s%s%s", type_to_string(), untype(start()), untype(top()), untype(end()), is_young() ? "Y" : "O", seqnum(), - is_allocating() ? " Allocating " : "", is_relocatable() ? " Relocatable" : "", - msg == nullptr ? "" : msg); + is_allocating() ? " Allocating" : "", + is_allocating() && msg != nullptr ? " " : "", + msg != nullptr ? msg : ""); } -void ZPage::print_on(outputStream* out) const { - print_on_msg(out, nullptr); +void ZPage::print_on(outputStream* st) const { + print_on_msg(st, nullptr); } void ZPage::print() const { diff --git a/src/hotspot/share/gc/z/zPage.hpp b/src/hotspot/share/gc/z/zPage.hpp index 9b6c155f77d..96900a37680 100644 --- a/src/hotspot/share/gc/z/zPage.hpp +++ b/src/hotspot/share/gc/z/zPage.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,38 +25,34 @@ #define SHARE_GC_Z_ZPAGE_HPP #include "gc/z/zGenerationId.hpp" -#include "gc/z/zList.hpp" #include "gc/z/zLiveMap.hpp" #include "gc/z/zPageAge.hpp" #include "gc/z/zPageType.hpp" -#include "gc/z/zPhysicalMemory.hpp" #include "gc/z/zRememberedSet.hpp" #include "gc/z/zVirtualMemory.hpp" #include "memory/allocation.hpp" +#include "oops/oopsHierarchy.hpp" class ZGeneration; +class ZMultiPartitionTracker; class ZPage : public CHeapObj { friend class VMStructs; - friend class ZList; friend class ZForwardingTest; private: - ZPageType _type; - ZGenerationId _generation_id; - ZPageAge _age; - uint8_t _numa_id; - uint32_t _seqnum; - uint32_t _seqnum_other; - ZVirtualMemory _virtual; - volatile zoffset_end _top; - ZLiveMap _livemap; - ZRememberedSet _remembered_set; - uint64_t _last_used; - ZPhysicalMemory _physical; - ZListNode _node; + const ZPageType _type; + ZGenerationId _generation_id; + ZPageAge _age; + uint32_t _seqnum; + uint32_t _seqnum_other; + const uint32_t _single_partition_id; + const ZVirtualMemory _virtual; + volatile zoffset_end _top; + ZLiveMap _livemap; + ZRememberedSet _remembered_set; + ZMultiPartitionTracker* const _multi_partition_tracker; - ZPageType type_from_size(size_t size) const; const char* type_to_string() const; BitMap::idx_t bit_index(zaddress addr) const; @@ -71,12 +67,13 @@ private: void reset_seqnum(); - ZPage* split_with_pmem(ZPageType type, const ZPhysicalMemory& pmem); + ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, ZMultiPartitionTracker* multi_partition_tracker, uint32_t partition_id); public: - ZPage(ZPageType type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem); + ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, uint32_t partition_id); + ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, ZMultiPartitionTracker* multi_partition_tracker); - ZPage* clone_limited() const; + ZPage* clone_for_promotion() const; uint32_t object_max_count() const; size_t object_alignment_shift() const; @@ -99,28 +96,20 @@ public: size_t used() const; const ZVirtualMemory& virtual_memory() const; - const ZPhysicalMemory& physical_memory() const; - ZPhysicalMemory& physical_memory(); - uint8_t numa_id(); + uint32_t single_partition_id() const; + bool is_multi_partition() const; + ZMultiPartitionTracker* multi_partition_tracker() const; + ZPageAge age() const; uint32_t seqnum() const; bool is_allocating() const; bool is_relocatable() const; - uint64_t last_used() const; - void set_last_used(); - - void reset(ZPageAge age); + ZPage* reset(ZPageAge age); void reset_livemap(); void reset_top_for_allocation(); - void reset_type_and_size(ZPageType type); - - ZPage* retype(ZPageType type); - ZPage* split(size_t split_of_size); - ZPage* split(ZPageType type, size_t split_of_size); - ZPage* split_committed(); bool is_in(zoffset offset) const; bool is_in(zaddress addr) const; @@ -156,7 +145,6 @@ public: void swap_remset_bitmaps(); void remset_alloc(); - void remset_delete(); ZBitMap::ReverseIterator remset_reverse_iterator_previous(); BitMap::Iterator remset_iterator_limited_current(uintptr_t l_offset, size_t size); @@ -193,8 +181,8 @@ public: void log_msg(const char* msg_format, ...) 
const ATTRIBUTE_PRINTF(2, 3); - void print_on_msg(outputStream* out, const char* msg) const; - void print_on(outputStream* out) const; + void print_on_msg(outputStream* st, const char* msg) const; + void print_on(outputStream* st) const; void print() const; // Verification diff --git a/src/hotspot/share/gc/z/zPage.inline.hpp b/src/hotspot/share/gc/z/zPage.inline.hpp index 5d476f273d3..f6c2029ac06 100644 --- a/src/hotspot/share/gc/z/zPage.inline.hpp +++ b/src/hotspot/share/gc/z/zPage.inline.hpp @@ -30,28 +30,14 @@ #include "gc/z/zGeneration.inline.hpp" #include "gc/z/zGlobals.hpp" #include "gc/z/zLiveMap.inline.hpp" -#include "gc/z/zNUMA.hpp" -#include "gc/z/zPhysicalMemory.inline.hpp" #include "gc/z/zRememberedSet.inline.hpp" -#include "gc/z/zUtils.inline.hpp" #include "gc/z/zVirtualMemory.inline.hpp" #include "logging/logStream.hpp" #include "runtime/atomic.hpp" #include "runtime/os.hpp" #include "utilities/align.hpp" -#include "utilities/checkedCast.hpp" #include "utilities/debug.hpp" -inline ZPageType ZPage::type_from_size(size_t size) const { - if (size == ZPageSizeSmall) { - return ZPageType::small; - } else if (size == ZPageSizeMedium) { - return ZPageType::medium; - } else { - return ZPageType::large; - } -} - inline const char* ZPage::type_to_string() const { switch (type()) { case ZPageType::small: @@ -170,20 +156,16 @@ inline const ZVirtualMemory& ZPage::virtual_memory() const { return _virtual; } -inline const ZPhysicalMemory& ZPage::physical_memory() const { - return _physical; +inline uint32_t ZPage::single_partition_id() const { + return _single_partition_id; } -inline ZPhysicalMemory& ZPage::physical_memory() { - return _physical; +inline bool ZPage::is_multi_partition() const { + return _multi_partition_tracker != nullptr; } -inline uint8_t ZPage::numa_id() { - if (_numa_id == (uint8_t)-1) { - _numa_id = checked_cast(ZNUMA::memory_id(untype(ZOffset::address(start())))); - } - - return _numa_id; +inline ZMultiPartitionTracker* ZPage::multi_partition_tracker() const { + return _multi_partition_tracker; } inline ZPageAge ZPage::age() const { @@ -202,14 +184,6 @@ inline bool ZPage::is_relocatable() const { return _seqnum < generation()->seqnum(); } -inline uint64_t ZPage::last_used() const { - return _last_used; -} - -inline void ZPage::set_last_used() { - _last_used = (uint64_t)ceil(os::elapsedTime()); -} - inline bool ZPage::is_in(zoffset offset) const { return offset >= start() && offset < top(); } diff --git a/src/hotspot/share/gc/z/zPageAllocator.cpp b/src/hotspot/share/gc/z/zPageAllocator.cpp index 7913aa68fbe..444f7a9b845 100644 --- a/src/hotspot/share/gc/z/zPageAllocator.cpp +++ b/src/hotspot/share/gc/z/zPageAllocator.cpp @@ -23,6 +23,8 @@ #include "gc/shared/gcLogPrecious.hpp" #include "gc/shared/suspendibleThreadSet.hpp" +#include "gc/z/zAddress.hpp" +#include "gc/z/zAllocationFlags.hpp" #include "gc/z/zArray.inline.hpp" #include "gc/z/zDriver.hpp" #include "gc/z/zFuture.inline.hpp" @@ -31,71 +33,361 @@ #include "gc/z/zGlobals.hpp" #include "gc/z/zLargePages.inline.hpp" #include "gc/z/zLock.inline.hpp" +#include "gc/z/zMappedCache.hpp" +#include "gc/z/zNUMA.inline.hpp" #include "gc/z/zPage.inline.hpp" #include "gc/z/zPageAge.hpp" #include "gc/z/zPageAllocator.inline.hpp" -#include "gc/z/zPageCache.hpp" +#include "gc/z/zPageType.hpp" +#include "gc/z/zPhysicalMemoryManager.hpp" #include "gc/z/zSafeDelete.inline.hpp" #include "gc/z/zStat.hpp" #include "gc/z/zTask.hpp" #include "gc/z/zUncommitter.hpp" -#include "gc/z/zUnmapper.hpp" +#include "gc/z/zValue.inline.hpp" 
+#include "gc/z/zVirtualMemory.inline.hpp" +#include "gc/z/zVirtualMemoryManager.inline.hpp" #include "gc/z/zWorkers.hpp" #include "jfr/jfrEvents.hpp" #include "logging/log.hpp" +#include "memory/allocation.hpp" +#include "nmt/memTag.hpp" #include "runtime/globals.hpp" #include "runtime/init.hpp" #include "runtime/java.hpp" #include "runtime/os.hpp" +#include "utilities/align.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" +#include "utilities/ticks.hpp" +#include "utilities/vmError.hpp" + +#include + +class ZMemoryAllocation; static const ZStatCounter ZCounterMutatorAllocationRate("Memory", "Allocation Rate", ZStatUnitBytesPerSecond); -static const ZStatCounter ZCounterPageCacheFlush("Memory", "Page Cache Flush", ZStatUnitBytesPerSecond); +static const ZStatCounter ZCounterMappedCacheHarvest("Memory", "Mapped Cache Harvest", ZStatUnitBytesPerSecond); static const ZStatCounter ZCounterDefragment("Memory", "Defragment", ZStatUnitOpsPerSecond); static const ZStatCriticalPhase ZCriticalPhaseAllocationStall("Allocation Stall"); -ZSafePageRecycle::ZSafePageRecycle(ZPageAllocator* page_allocator) - : _page_allocator(page_allocator), - _unsafe_to_recycle() {} - -void ZSafePageRecycle::activate() { - _unsafe_to_recycle.activate(); +static void check_numa_mismatch(const ZVirtualMemory& vmem, uint32_t desired_id) { + if (ZNUMA::is_enabled()) { + // Check if memory ended up on desired NUMA node or not + const uint32_t actual_id = ZNUMA::memory_id(untype(ZOffset::address(vmem.start()))); + if (actual_id != desired_id) { + log_debug(gc, heap)("NUMA Mismatch: desired %d, actual %d", desired_id, actual_id); + } + } } -void ZSafePageRecycle::deactivate() { - auto delete_function = [&](ZPage* page) { - _page_allocator->safe_destroy_page(page); - }; +class ZMemoryAllocation : public CHeapObj { +private: + const size_t _size; + ZPartition* _partition; + ZVirtualMemory _satisfied_from_cache_vmem; + ZArray _partial_vmems; + int _num_harvested; + size_t _harvested; + size_t _increased_capacity; + size_t _committed_capacity; + bool _commit_failed; - _unsafe_to_recycle.deactivate_and_apply(delete_function); -} + explicit ZMemoryAllocation(const ZMemoryAllocation& other) + : ZMemoryAllocation(other._size) { + // Transfer the partition + set_partition(other._partition); -ZPage* ZSafePageRecycle::register_and_clone_if_activated(ZPage* page) { - if (!_unsafe_to_recycle.is_activated()) { - // The page has no concurrent readers. - // Recycle original page. - return page; + // Reserve space for the partial vmems + _partial_vmems.reserve(other._partial_vmems.length() + (other._satisfied_from_cache_vmem.is_null() ? 1 : 0)); + + // Transfer the claimed capacity + transfer_claimed_capacity(other); } - // The page could have concurrent readers. - // It would be unsafe to recycle this page at this point. + ZMemoryAllocation(const ZMemoryAllocation& a1, const ZMemoryAllocation& a2) + : ZMemoryAllocation(a1._size + a2._size) { + // Transfer the partition + assert(a1._partition == a2._partition, "only merge with same partition"); + set_partition(a1._partition); - // As soon as the page is added to _unsafe_to_recycle, it - // must not be used again. Hence, the extra double-checked - // locking to only clone the page if it is believed to be - // unsafe to recycle the page. 
- ZPage* const cloned_page = page->clone_limited(); - if (!_unsafe_to_recycle.add_if_activated(page)) { - // It became safe to recycle the page after the is_activated check - delete cloned_page; - return page; + // Reserve space for the partial vmems + const int num_vmems_a1 = a1._partial_vmems.length() + (a1._satisfied_from_cache_vmem.is_null() ? 1 : 0); + const int num_vmems_a2 = a2._partial_vmems.length() + (a2._satisfied_from_cache_vmem.is_null() ? 1 : 0); + _partial_vmems.reserve(num_vmems_a1 + num_vmems_a2); + + // Transfer the claimed capacity + transfer_claimed_capacity(a1); + transfer_claimed_capacity(a2); } - // The original page has been registered to be deleted by another thread. - // Recycle the cloned page. - return cloned_page; -} + void transfer_claimed_capacity(const ZMemoryAllocation& from) { + assert(from._committed_capacity == 0, "Unexpected value %zu", from._committed_capacity); + assert(!from._commit_failed, "Unexpected value"); + + // Transfer increased capacity + _increased_capacity += from._increased_capacity; + + // Transfer satisfying vmem or partial mappings + const ZVirtualMemory vmem = from._satisfied_from_cache_vmem; + if (!vmem.is_null()) { + assert(_partial_vmems.is_empty(), "Must either have result or partial vmems"); + _partial_vmems.push(vmem); + _num_harvested += 1; + _harvested += vmem.size(); + } else { + _partial_vmems.appendAll(&from._partial_vmems); + _num_harvested += from._num_harvested; + _harvested += from._harvested; + } + } + +public: + explicit ZMemoryAllocation(size_t size) + : _size(size), + _partition(nullptr), + _satisfied_from_cache_vmem(), + _partial_vmems(0), + _num_harvested(0), + _harvested(0), + _increased_capacity(0), + _committed_capacity(0), + _commit_failed(false) {} + + void reset_for_retry() { + assert(_satisfied_from_cache_vmem.is_null(), "Incompatible with reset"); + + _partition = nullptr; + _partial_vmems.clear(); + _num_harvested = 0; + _harvested = 0; + _increased_capacity = 0; + _committed_capacity = 0; + _commit_failed = false; + } + + size_t size() const { + return _size; + } + + ZPartition& partition() const { + assert(_partition != nullptr, "Should have been initialized"); + return *_partition; + } + + void set_partition(ZPartition* partition) { + assert(_partition == nullptr, "Should be initialized only once"); + _partition = partition; + } + + ZVirtualMemory satisfied_from_cache_vmem() const { + return _satisfied_from_cache_vmem; + } + + void set_satisfied_from_cache_vmem(ZVirtualMemory vmem) { + precond(_satisfied_from_cache_vmem.is_null()); + precond(vmem.size() == size()); + precond(_partial_vmems.is_empty()); + + _satisfied_from_cache_vmem = vmem; + } + + ZArray* partial_vmems() { + return &_partial_vmems; + } + + const ZArray* partial_vmems() const { + return &_partial_vmems; + } + + int num_harvested() const { + return _num_harvested; + } + + size_t harvested() const { + return _harvested; + } + + void set_harvested(int num_harvested, size_t harvested) { + _num_harvested = num_harvested; + _harvested = harvested; + } + + size_t increased_capacity() const { + return _increased_capacity; + } + + void set_increased_capacity(size_t increased_capacity) { + _increased_capacity = increased_capacity; + } + + size_t committed_capacity() const { + return _committed_capacity; + } + + void set_committed_capacity(size_t committed_capacity) { + assert(_committed_capacity == 0, "Should only commit once"); + _committed_capacity = committed_capacity; + _commit_failed = committed_capacity != _increased_capacity; + } + + bool 
commit_failed() const { + return _commit_failed; + } + + static void destroy(ZMemoryAllocation* allocation) { + delete allocation; + } + + static void merge(const ZMemoryAllocation& allocation, ZMemoryAllocation** merge_location) { + ZMemoryAllocation* const other_allocation = *merge_location; + if (other_allocation == nullptr) { + // First allocation, allocate new partition + *merge_location = new ZMemoryAllocation(allocation); + } else { + // Merge with other allocation + *merge_location = new ZMemoryAllocation(allocation, *other_allocation); + + // Delete old allocation + delete other_allocation; + } + } +}; + +class ZSinglePartitionAllocation { +private: + ZMemoryAllocation _allocation; + +public: + ZSinglePartitionAllocation(size_t size) + : _allocation(size) {} + + size_t size() const { + return _allocation.size(); + } + + ZMemoryAllocation* allocation() { + return &_allocation; + } + + const ZMemoryAllocation* allocation() const { + return &_allocation; + } + + void reset_for_retry() { + _allocation.reset_for_retry(); + } +}; + +class ZMultiPartitionAllocation : public StackObj { +private: + const size_t _size; + ZArray _allocations; + +public: + ZMultiPartitionAllocation(size_t size) + : _size(size), + _allocations(0) {} + + ~ZMultiPartitionAllocation() { + for (ZMemoryAllocation* allocation : _allocations) { + ZMemoryAllocation::destroy(allocation); + } + } + + void initialize() { + precond(_allocations.is_empty()); + + // The multi-partition allocation creates at most one allocation per partition. + const int length = (int)ZNUMA::count(); + + _allocations.reserve(length); + } + + void reset_for_retry() { + for (ZMemoryAllocation* allocation : _allocations) { + ZMemoryAllocation::destroy(allocation); + } + _allocations.clear(); + } + + size_t size() const { + return _size; + } + + ZArray* allocations() { + return &_allocations; + } + + const ZArray* allocations() const { + return &_allocations; + } + + void register_allocation(const ZMemoryAllocation& allocation) { + ZMemoryAllocation** const slot = allocation_slot(allocation.partition().numa_id()); + + ZMemoryAllocation::merge(allocation, slot); + } + + ZMemoryAllocation** allocation_slot(uint32_t numa_id) { + // Try to find an existing allocation for numa_id + for (int i = 0; i < _allocations.length(); ++i) { + ZMemoryAllocation** const slot_addr = _allocations.adr_at(i); + ZMemoryAllocation* const allocation = *slot_addr; + if (allocation->partition().numa_id() == numa_id) { + // Found an existing slot + return slot_addr; + } + } + + // Push an empty slot for the numa_id + _allocations.push(nullptr); + + // Return the address of the slot + return &_allocations.last(); + } + + int sum_num_harvested_vmems() const { + int total = 0; + + for (const ZMemoryAllocation* allocation : _allocations) { + total += allocation->num_harvested(); + } + + return total; + } + + size_t sum_harvested() const { + size_t total = 0; + + for (const ZMemoryAllocation* allocation : _allocations) { + total += allocation->harvested(); + } + + return total; + } + + size_t sum_committed_increased_capacity() const { + size_t total = 0; + + for (const ZMemoryAllocation* allocation : _allocations) { + total += allocation->committed_capacity(); + } + + return total; + } +}; + +struct ZPageAllocationStats { + int _num_harvested_vmems; + size_t _total_harvested; + size_t _total_committed_capacity; + + ZPageAllocationStats(int num_harvested_vmems, size_t total_harvested, size_t total_committed_capacity) + : _num_harvested_vmems(num_harvested_vmems), + 
_total_harvested(total_harvested), + _total_committed_capacity(total_committed_capacity) {} +}; class ZPageAllocation : public StackObj { friend class ZList; @@ -104,27 +396,39 @@ private: const ZPageType _type; const size_t _size; const ZAllocationFlags _flags; + const ZPageAge _age; + const Ticks _start_timestamp; const uint32_t _young_seqnum; const uint32_t _old_seqnum; - size_t _flushed; - size_t _committed; - ZList _pages; + const uint32_t _initiating_numa_id; + bool _is_multi_partition; + ZSinglePartitionAllocation _single_partition_allocation; + ZMultiPartitionAllocation _multi_partition_allocation; ZListNode _node; ZFuture _stall_result; public: - ZPageAllocation(ZPageType type, size_t size, ZAllocationFlags flags) + ZPageAllocation(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age) : _type(type), _size(size), _flags(flags), + _age(age), + _start_timestamp(Ticks::now()), _young_seqnum(ZGeneration::young()->seqnum()), _old_seqnum(ZGeneration::old()->seqnum()), - _flushed(0), - _committed(0), - _pages(), + _initiating_numa_id(ZNUMA::id()), + _is_multi_partition(false), + _single_partition_allocation(size), + _multi_partition_allocation(size), _node(), _stall_result() {} + void reset_for_retry() { + _is_multi_partition = false; + _single_partition_allocation.reset_for_retry(); + _multi_partition_allocation.reset_for_retry(); + } + ZPageType type() const { return _type; } @@ -137,6 +441,10 @@ public: return _flags; } + ZPageAge age() const { + return _age; + } + uint32_t young_seqnum() const { return _young_seqnum; } @@ -145,30 +453,56 @@ public: return _old_seqnum; } - size_t flushed() const { - return _flushed; + uint32_t initiating_numa_id() const { + return _initiating_numa_id; } - void set_flushed(size_t flushed) { - _flushed = flushed; + bool is_multi_partition() const { + return _is_multi_partition; } - size_t committed() const { - return _committed; + void initiate_multi_partition_allocation() { + assert(!_is_multi_partition, "Reinitialization?"); + _is_multi_partition = true; + _multi_partition_allocation.initialize(); } - void set_committed(size_t committed) { - _committed = committed; + ZMultiPartitionAllocation* multi_partition_allocation() { + assert(_is_multi_partition, "multi-partition allocation must be initiated"); + + return &_multi_partition_allocation; + } + + const ZMultiPartitionAllocation* multi_partition_allocation() const { + assert(_is_multi_partition, "multi-partition allocation must be initiated"); + + return &_multi_partition_allocation; + } + + ZSinglePartitionAllocation* single_partition_allocation() { + assert(!_is_multi_partition, "multi-partition allocation must not have been initiated"); + + return &_single_partition_allocation; + } + + const ZSinglePartitionAllocation* single_partition_allocation() const { + assert(!_is_multi_partition, "multi-partition allocation must not have been initiated"); + + return &_single_partition_allocation; + } + + ZVirtualMemory satisfied_from_cache_vmem() const { + precond(!_is_multi_partition); + + const ZMemoryAllocation* const allocation = _single_partition_allocation.allocation(); + + return allocation->satisfied_from_cache_vmem(); } bool wait() { return _stall_result.get(); } - ZList* pages() { - return &_pages; - } - void satisfy(bool result) { _stall_result.set(result); } @@ -176,6 +510,741 @@ public: bool gc_relocation() const { return _flags.gc_relocation(); } + + ZPageAllocationStats stats() const { + if (_is_multi_partition) { + return ZPageAllocationStats( + 
_multi_partition_allocation.sum_num_harvested_vmems(), + _multi_partition_allocation.sum_harvested(), + _multi_partition_allocation.sum_committed_increased_capacity()); + } else { + return ZPageAllocationStats( + _single_partition_allocation.allocation()->num_harvested(), + _single_partition_allocation.allocation()->harvested(), + _single_partition_allocation.allocation()->committed_capacity()); + } + } + + void send_event(bool successful) { + EventZPageAllocation event; + + Ticks end_timestamp = Ticks::now(); + const ZPageAllocationStats st = stats(); + + event.commit(_start_timestamp, + end_timestamp, + (u8)_type, + _size, + st._total_harvested, + st._total_committed_capacity, + (unsigned)st._num_harvested_vmems, + _is_multi_partition, + successful, + _flags.non_blocking()); + } +}; + +const ZVirtualMemoryManager& ZPartition::virtual_memory_manager() const { + return _page_allocator->_virtual; +} + +ZVirtualMemoryManager& ZPartition::virtual_memory_manager() { + return _page_allocator->_virtual; +} + +const ZPhysicalMemoryManager& ZPartition::physical_memory_manager() const { + return _page_allocator->_physical; +} + +ZPhysicalMemoryManager& ZPartition::physical_memory_manager() { + return _page_allocator->_physical; +} + +#ifdef ASSERT + +void ZPartition::verify_virtual_memory_multi_partition_association(const ZVirtualMemory& vmem) const { + const ZVirtualMemoryManager& manager = virtual_memory_manager(); + + assert(manager.is_in_multi_partition(vmem), + "Virtual memory must be associated with the extra space " + "actual: %u", virtual_memory_manager().lookup_partition_id(vmem)); +} + +void ZPartition::verify_virtual_memory_association(const ZVirtualMemory& vmem, bool check_multi_partition) const { + const ZVirtualMemoryManager& manager = virtual_memory_manager(); + + if (check_multi_partition && manager.is_in_multi_partition(vmem)) { + // We allow claim/free/commit physical operation in multi-partition allocations + // to use virtual memory associated with the extra space. 
+ return; + } + + const uint32_t vmem_numa_id = virtual_memory_manager().lookup_partition_id(vmem); + assert(_numa_id == vmem_numa_id, + "Virtual memory must be associated with the current partition " + "expected: %u, actual: %u", _numa_id, vmem_numa_id); +} + +void ZPartition::verify_virtual_memory_association(const ZArray* vmems) const { + for (const ZVirtualMemory& vmem : *vmems) { + verify_virtual_memory_association(vmem); + } +} + +void ZPartition::verify_memory_allocation_association(const ZMemoryAllocation* allocation) const { + assert(this == &allocation->partition(), + "Memory allocation must be associated with the current partition " + "expected: %u, actual: %u", _numa_id, allocation->partition().numa_id()); +} + +#endif // ASSERT + +ZPartition::ZPartition(uint32_t numa_id, ZPageAllocator* page_allocator) + : _page_allocator(page_allocator), + _cache(), + _uncommitter(numa_id, this), + _min_capacity(ZNUMA::calculate_share(numa_id, page_allocator->min_capacity())), + _max_capacity(ZNUMA::calculate_share(numa_id, page_allocator->max_capacity())), + _current_max_capacity(_max_capacity), + _capacity(0), + _claimed(0), + _used(0), + _last_commit(0.0), + _last_uncommit(0.0), + _to_uncommit(0), + _numa_id(numa_id) {} + +uint32_t ZPartition::numa_id() const { + return _numa_id; +} + +size_t ZPartition::available() const { + return _current_max_capacity - _used - _claimed; +} + +size_t ZPartition::increase_capacity(size_t size) { + const size_t increased = MIN2(size, _current_max_capacity - _capacity); + + if (increased > 0) { + // Update atomically since we have concurrent readers + Atomic::add(&_capacity, increased); + + _last_commit = os::elapsedTime(); + _last_uncommit = 0; + _cache.reset_min(); + } + + return increased; +} + +void ZPartition::decrease_capacity(size_t size, bool set_max_capacity) { + // Update capacity atomically since we have concurrent readers + Atomic::sub(&_capacity, size); + + // Adjust current max capacity to avoid further attempts to increase capacity + if (set_max_capacity) { + const size_t current_max_capacity_before = _current_max_capacity; + Atomic::store(&_current_max_capacity, _capacity); + + log_debug_p(gc)("Forced to lower max partition (%u) capacity from " + "%zuM(%.0f%%) to %zuM(%.0f%%)", + _numa_id, + current_max_capacity_before / M, percent_of(current_max_capacity_before, _max_capacity), + _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity)); + } +} + +void ZPartition::increase_used(size_t size) { + // The partition usage tracking is only read and updated under the page + // allocator lock. Usage statistics for generations and GC cycles are + // collected on the ZPageAllocator level. + _used += size; +} + +void ZPartition::decrease_used(size_t size) { + // The partition usage tracking is only read and updated under the page + // allocator lock. Usage statistics for generations and GC cycles are + // collected on the ZPageAllocator level. + _used -= size; +} + +void ZPartition::free_memory(const ZVirtualMemory& vmem) { + const size_t size = vmem.size(); + + // Cache the vmem + _cache.insert(vmem); + + // Update accounting + decrease_used(size); +} + +void ZPartition::claim_from_cache_or_increase_capacity(ZMemoryAllocation* allocation) { + const size_t size = allocation->size(); + ZArray* const out = allocation->partial_vmems(); + + // We are guaranteed to succeed the claiming of capacity here + assert(available() >= size, "Must be"); + + // Associate the allocation with this partition. 
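The claim path in claim_from_cache_or_increase_capacity falls back through three sources in a fixed order. A compact standalone sketch of that ordering is shown below (not part of the patch; ClaimSketch, headroom and the helper name are illustrative), assuming the caller has already verified available() >= size as the assert above does.

#include <algorithm>
#include <cstddef>

struct ClaimSketch {
  size_t from_cache_contiguous; // step 1: one exact-size mapped range
  size_t increased_capacity;    // step 2: capacity to be committed
  size_t harvested;             // step 3: several smaller mapped ranges
};

// `headroom` stands in for current_max_capacity - capacity. Because the
// caller checked available() >= size, the three parts always sum to size.
static ClaimSketch claim_order_sketch(size_t size, bool cache_has_contiguous, size_t headroom) {
  if (cache_has_contiguous) {
    return {size, 0, 0};
  }
  const size_t increased = std::min(size, headroom);
  return {0, increased, size - increased};
}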
+ allocation->set_partition(this); + + // Try to allocate one contiguous vmem + ZVirtualMemory vmem = _cache.remove_contiguous(size); + if (!vmem.is_null()) { + // Found a satisfying vmem in the cache + allocation->set_satisfied_from_cache_vmem(vmem); + + // Done + return; + } + + // Try increase capacity + const size_t increased_capacity = increase_capacity(size); + + allocation->set_increased_capacity(increased_capacity); + + if (increased_capacity == size) { + // Capacity increase covered the entire request, done. + return; + } + + // Could not increase capacity enough to satisfy the allocation completely. + // Try removing multiple vmems from the mapped cache. + const size_t remaining = size - increased_capacity; + const size_t harvested = _cache.remove_discontiguous(remaining, out); + const int num_harvested = out->length(); + + allocation->set_harvested(num_harvested, harvested); + + assert(harvested + increased_capacity == size, + "Mismatch harvested: %zu increased_capacity: %zu size: %zu", + harvested, increased_capacity, size); + + return; +} + +bool ZPartition::claim_capacity(ZMemoryAllocation* allocation) { + const size_t size = allocation->size(); + + if (available() < size) { + // Out of memory + return false; + } + + claim_from_cache_or_increase_capacity(allocation); + + // Updated used statistics + increase_used(size); + + // Success + return true; +} + +size_t ZPartition::uncommit(uint64_t* timeout) { + ZArray flushed_vmems; + size_t flushed = 0; + + { + // We need to join the suspendible thread set while manipulating capacity + // and used, to make sure GC safepoints will have a consistent view. + SuspendibleThreadSetJoiner sts_joiner; + ZLocker locker(&_page_allocator->_lock); + + const double now = os::elapsedTime(); + const double time_since_last_commit = std::floor(now - _last_commit); + const double time_since_last_uncommit = std::floor(now - _last_uncommit); + + if (time_since_last_commit < double(ZUncommitDelay)) { + // We have committed within the delay, stop uncommitting. + *timeout = uint64_t(double(ZUncommitDelay) - time_since_last_commit); + return 0; + } + + // We flush out and uncommit chunks at a time (~0.8% of the max capacity, + // but at least one granule and at most 256M), in case demand for memory + // increases while we are uncommitting. + const size_t limit_upper_bound = MAX2(ZGranuleSize, align_down(256 * M / ZNUMA::count(), ZGranuleSize)); + const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), limit_upper_bound); + + if (limit == 0) { + // This may occur if the current max capacity for this partition is 0 + + // Set timeout to ZUncommitDelay + *timeout = ZUncommitDelay; + return 0; + } + + if (time_since_last_uncommit < double(ZUncommitDelay)) { + // We are in the uncommit phase + const size_t num_uncommits_left = _to_uncommit / limit; + const double time_left = double(ZUncommitDelay) - time_since_last_uncommit; + if (time_left < *timeout * num_uncommits_left) { + // Running out of time, speed up. + uint64_t new_timeout = uint64_t(std::floor(time_left / double(num_uncommits_left + 1))); + *timeout = new_timeout; + } + } else { + // We are about to start uncommitting + _to_uncommit = _cache.reset_min(); + _last_uncommit = now; + + const size_t split = _to_uncommit / limit + 1; + uint64_t new_timeout = ZUncommitDelay / split; + *timeout = new_timeout; + } + + // Never uncommit below min capacity. 
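The per-iteration uncommit budget computed above (roughly current max capacity >> 7, clamped between one granule and 256M split across the NUMA nodes) is easier to see with numbers plugged in. A minimal standalone sketch, assuming the default 2M granule; the constants and helper names below are illustrative and not part of the patch:

#include <algorithm>
#include <cstddef>

constexpr size_t M       = 1024 * 1024;
constexpr size_t Granule = 2 * M; // ZGranuleSize defaults to 2M

static size_t align_up_sz(size_t v, size_t a)   { return (v + a - 1) / a * a; }
static size_t align_down_sz(size_t v, size_t a) { return v / a * a; }

// ~0.8% (1/128) of the partition's current max capacity per step, but never
// less than one granule and never more than 256M divided over the NUMA nodes.
static size_t uncommit_step_limit(size_t current_max_capacity, size_t numa_count) {
  const size_t upper = std::max(Granule, align_down_sz(256 * M / numa_count, Granule));
  return std::min(align_up_sz(current_max_capacity >> 7, Granule), upper);
}

// Example: 32G partition, 2 nodes -> min(256M, 128M) = 128M per step.
// Example: 1G partition, 1 node   -> min(8M, 256M)   = 8M per step.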
+ const size_t retain = MAX2(_used, _min_capacity); + const size_t release = _capacity - retain; + const size_t flush = MIN3(release, limit, _to_uncommit); + + if (flush == 0) { + // Nothing to flush + return 0; + } + + // Flush memory from the mapped cache to uncommit + flushed = _cache.remove_from_min(flush, &flushed_vmems); + if (flushed == 0) { + // Nothing flushed + return 0; + } + + // Record flushed memory as claimed and how much we've flushed for this partition + Atomic::add(&_claimed, flushed); + _to_uncommit -= flushed; + } + + // Unmap and uncommit flushed memory + for (const ZVirtualMemory vmem : flushed_vmems) { + unmap_virtual(vmem); + uncommit_physical(vmem); + free_physical(vmem); + free_virtual(vmem); + } + + { + SuspendibleThreadSetJoiner sts_joiner; + ZLocker locker(&_page_allocator->_lock); + + // Adjust claimed and capacity to reflect the uncommit + Atomic::sub(&_claimed, flushed); + decrease_capacity(flushed, false /* set_max_capacity */); + } + + return flushed; +} + +void ZPartition::sort_segments_physical(const ZVirtualMemory& vmem) { + verify_virtual_memory_association(vmem, true /* check_multi_partition */); + + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + // Sort physical segments + manager.sort_segments_physical(vmem); +} + +void ZPartition::claim_physical(const ZVirtualMemory& vmem) { + verify_virtual_memory_association(vmem, true /* check_multi_partition */); + + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + // Alloc physical memory + manager.alloc(vmem, _numa_id); +} + +void ZPartition::free_physical(const ZVirtualMemory& vmem) { + verify_virtual_memory_association(vmem, true /* check_multi_partition */); + + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + // Free physical memory + manager.free(vmem, _numa_id); +} + +size_t ZPartition::commit_physical(const ZVirtualMemory& vmem) { + verify_virtual_memory_association(vmem, true /* check_multi_partition */); + + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + // Commit physical memory + return manager.commit(vmem, _numa_id); +} + +size_t ZPartition::uncommit_physical(const ZVirtualMemory& vmem) { + assert(ZUncommit, "should not uncommit when uncommit is disabled"); + verify_virtual_memory_association(vmem); + + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + // Uncommit physical memory + return manager.uncommit(vmem); +} + +void ZPartition::map_virtual(const ZVirtualMemory& vmem) { + verify_virtual_memory_association(vmem); + + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + // Map virtual memory to physical memory + manager.map(vmem, _numa_id); +} + +void ZPartition::unmap_virtual(const ZVirtualMemory& vmem) { + verify_virtual_memory_association(vmem); + + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + // Unmap virtual memory from physical memory + manager.unmap(vmem); +} + +void ZPartition::map_virtual_from_multi_partition(const ZVirtualMemory& vmem) { + verify_virtual_memory_multi_partition_association(vmem); + + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + // Sort physical segments + manager.sort_segments_physical(vmem); + + // Map virtual memory to physical memory + manager.map(vmem, _numa_id); +} + +void ZPartition::unmap_virtual_from_multi_partition(const ZVirtualMemory& vmem) { + verify_virtual_memory_multi_partition_association(vmem); + + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + // Unmap virtual memory from physical memory + 
manager.unmap(vmem); +} + +ZVirtualMemory ZPartition::claim_virtual(size_t size) { + ZVirtualMemoryManager& manager = virtual_memory_manager(); + + return manager.remove_from_low(size, _numa_id); +} + +size_t ZPartition::claim_virtual(size_t size, ZArray* vmems_out) { + ZVirtualMemoryManager& manager = virtual_memory_manager(); + + return manager.remove_from_low_many_at_most(size, _numa_id, vmems_out); +} + +void ZPartition::free_virtual(const ZVirtualMemory& vmem) { + verify_virtual_memory_association(vmem); + + ZVirtualMemoryManager& manager = virtual_memory_manager(); + + // Free virtual memory + manager.insert(vmem, _numa_id); +} + +void ZPartition::free_and_claim_virtual_from_low_many(const ZVirtualMemory& vmem, ZArray* vmems_out) { + verify_virtual_memory_association(vmem); + + ZVirtualMemoryManager& manager = virtual_memory_manager(); + + // Shuffle virtual memory + manager.insert_and_remove_from_low_many(vmem, _numa_id, vmems_out); +} + +ZVirtualMemory ZPartition::free_and_claim_virtual_from_low_exact_or_many(size_t size, ZArray* vmems_in_out) { + verify_virtual_memory_association(vmems_in_out); + + ZVirtualMemoryManager& manager = virtual_memory_manager(); + + // Shuffle virtual memory + return manager.insert_and_remove_from_low_exact_or_many(size, _numa_id, vmems_in_out); +} + +static void pretouch_memory(zoffset start, size_t size) { + // At this point we know that we have a valid zoffset / zaddress. + const zaddress zaddr = ZOffset::address(start); + const uintptr_t addr = untype(zaddr); + const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size(); + os::pretouch_memory((void*)addr, (void*)(addr + size), page_size); +} + +class ZPreTouchTask : public ZTask { +private: + volatile uintptr_t _current; + const uintptr_t _end; + +public: + ZPreTouchTask(zoffset start, zoffset_end end) + : ZTask("ZPreTouchTask"), + _current(untype(start)), + _end(untype(end)) {} + + virtual void work() { + const size_t size = ZGranuleSize; + + for (;;) { + // Claim an offset for this thread + const uintptr_t claimed = Atomic::fetch_then_add(&_current, size); + if (claimed >= _end) { + // Done + break; + } + + // At this point we know that we have a valid zoffset / zaddress. + const zoffset offset = to_zoffset(claimed); + + // Pre-touch the granule + pretouch_memory(offset, size); + } + } +}; + +bool ZPartition::prime(ZWorkers* workers, size_t size) { + if (size == 0) { + return true; + } + + // Claim virtual memory + const ZVirtualMemory vmem = claim_virtual(size); + + // Increase capacity + increase_capacity(size); + + // Claim the backing physical memory + claim_physical(vmem); + + // Commit the claimed physical memory + const size_t committed = commit_physical(vmem); + + if (committed != vmem.size()) { + // This is a failure state. We do not cleanup the maybe partially committed memory. 
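ZPreTouchTask above spreads the pre-touch work by letting each worker atomically claim the next granule of the range. A standalone sketch of the same idiom using std::atomic is shown below (not part of the patch); touch() stands in for the real os::pretouch_memory call and the names are illustrative.

#include <atomic>
#include <cstdint>

struct PreTouchSketch {
  std::atomic<uintptr_t> current;
  const uintptr_t end;
  static constexpr uintptr_t granule = 2u * 1024 * 1024; // default granule size

  PreTouchSketch(uintptr_t start, uintptr_t end_in) : current(start), end(end_in) {}

  // Each worker repeatedly claims the next granule until the range is consumed;
  // a worker that claims an offset at or past the end simply stops.
  void work() {
    for (;;) {
      const uintptr_t claimed = current.fetch_add(granule);
      if (claimed >= end) {
        return;
      }
      touch(claimed, granule);
    }
  }

  static void touch(uintptr_t /*addr*/, uintptr_t /*size*/) {
    // Placeholder for touching each page of the claimed granule.
  }
};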
+ return false; + } + + map_virtual(vmem); + + check_numa_mismatch(vmem, _numa_id); + + if (AlwaysPreTouch) { + // Pre-touch memory + ZPreTouchTask task(vmem.start(), vmem.end()); + workers->run_all(&task); + } + + // We don't have to take a lock here as no other threads will access the cache + // until we're finished + _cache.insert(vmem); + + return true; +} + +ZVirtualMemory ZPartition::prepare_harvested_and_claim_virtual(ZMemoryAllocation* allocation) { + verify_memory_allocation_association(allocation); + + // Unmap virtual memory + for (const ZVirtualMemory vmem : *allocation->partial_vmems()) { + unmap_virtual(vmem); + } + + const size_t harvested = allocation->harvested(); + const int granule_count = (int)(harvested >> ZGranuleSizeShift); + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + // Stash segments + ZArray stash(granule_count); + manager.stash_segments(*allocation->partial_vmems(), &stash); + + // Shuffle virtual memory. We attempt to allocate enough memory to cover the + // entire allocation size, not just for the harvested memory. + const ZVirtualMemory result = free_and_claim_virtual_from_low_exact_or_many(allocation->size(), allocation->partial_vmems()); + + // Restore segments + if (!result.is_null()) { + // Got exact match. Restore stashed physical segments for the harvested part. + manager.restore_segments(result.first_part(harvested), stash); + } else { + // Got many partial vmems + manager.restore_segments(*allocation->partial_vmems(), stash); + } + + if (result.is_null()) { + // Before returning harvested memory to the cache it must be mapped. + for (const ZVirtualMemory vmem : *allocation->partial_vmems()) { + map_virtual(vmem); + } + } + + return result; +} + +void ZPartition::copy_physical_segments_to_partition(const ZVirtualMemory& at, const ZVirtualMemory& from) { + verify_virtual_memory_association(at); + verify_virtual_memory_association(from, true /* check_multi_partition */); + + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + // Copy segments + manager.copy_physical_segments(at, from); +} + +void ZPartition::copy_physical_segments_from_partition(const ZVirtualMemory& at, const ZVirtualMemory& to) { + verify_virtual_memory_association(at); + verify_virtual_memory_association(to, true /* check_multi_partition */); + + ZPhysicalMemoryManager& manager = physical_memory_manager(); + + + // Copy segments + manager.copy_physical_segments(to, at); +} + +void ZPartition::commit_increased_capacity(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem) { + assert(allocation->increased_capacity() > 0, "Nothing to commit"); + + const size_t already_committed = allocation->harvested(); + + const ZVirtualMemory already_committed_vmem = vmem.first_part(already_committed); + const ZVirtualMemory to_be_committed_vmem = vmem.last_part(already_committed); + + // Try to commit the uncommitted physical memory + const size_t committed = commit_physical(to_be_committed_vmem); + + // Keep track of the committed amount + allocation->set_committed_capacity(committed); +} + +void ZPartition::map_memory(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem) { + sort_segments_physical(vmem); + map_virtual(vmem); + + check_numa_mismatch(vmem, allocation->partition().numa_id()); +} + +void ZPartition::free_memory_alloc_failed(ZMemoryAllocation* allocation) { + verify_memory_allocation_association(allocation); + + // Only decrease the overall used and not the generation used, + // since the allocation failed and generation used wasn't bumped. 
+ decrease_used(allocation->size()); + + size_t freed = 0; + + // Free mapped memory + for (const ZVirtualMemory vmem : *allocation->partial_vmems()) { + freed += vmem.size(); + _cache.insert(vmem); + } + assert(allocation->harvested() + allocation->committed_capacity() == freed, "must have freed all"); + + // Adjust capacity to reflect the failed capacity increase + const size_t remaining = allocation->size() - freed; + if (remaining > 0) { + const bool set_max_capacity = allocation->commit_failed(); + decrease_capacity(remaining, set_max_capacity); + } +} + +void ZPartition::threads_do(ThreadClosure* tc) const { + tc->do_thread(const_cast(&_uncommitter)); +} + +void ZPartition::print_on(outputStream* st) const { + st->print("Partition %u", _numa_id); + st->fill_to(17); + st->print_cr("used %zuM, capacity %zuM, max capacity %zuM", + _used / M, _capacity / M, _max_capacity / M); + + streamIndentor indentor(st, 1); + print_cache_on(st); +} + +void ZPartition::print_cache_on(outputStream* st) const { + _cache.print_on(st); +} + +void ZPartition::print_extended_on_error(outputStream* st) const { + st->print_cr("Partition %u", _numa_id); + + streamIndentor indentor(st, 1); + + _cache.print_extended_on(st); +} + +class ZMultiPartitionTracker : CHeapObj { +private: + struct Element { + ZVirtualMemory _vmem; + ZPartition* _partition; + }; + + ZArray _map; + + ZMultiPartitionTracker(int capacity) + : _map(capacity) {} + + const ZArray* map() const { + return &_map; + } + + ZArray* map() { + return &_map; + } + +public: + void prepare_memory_for_free(const ZVirtualMemory& vmem, ZArray* vmems_out) const { + // Remap memory back to original partition + for (const Element partial_allocation : *map()) { + ZVirtualMemory remaining_vmem = partial_allocation._vmem; + ZPartition& partition = *partial_allocation._partition; + + const size_t size = remaining_vmem.size(); + + // Allocate new virtual address ranges + const int start_index = vmems_out->length(); + const size_t claimed_virtual = partition.claim_virtual(remaining_vmem.size(), vmems_out); + + // We are holding memory associated with this partition, and we do not + // overcommit virtual memory claiming. So virtual memory must always + // be available. 
+ assert(claimed_virtual == size, "must succeed"); + + // Remap to the newly allocated virtual address ranges + for (const ZVirtualMemory& to_vmem : vmems_out->slice_back(start_index)) { + const ZVirtualMemory from_vmem = remaining_vmem.shrink_from_front(to_vmem.size()); + + // Copy physical segments + partition.copy_physical_segments_to_partition(to_vmem, from_vmem); + + // Unmap from_vmem + partition.unmap_virtual_from_multi_partition(from_vmem); + + // Map to_vmem + partition.map_virtual(to_vmem); + } + assert(remaining_vmem.size() == 0, "must have mapped all claimed virtual memory"); + } + } + + static void destroy(const ZMultiPartitionTracker* tracker) { + delete tracker; + } + + static ZMultiPartitionTracker* create(const ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem) { + const ZArray* const partial_allocations = multi_partition_allocation->allocations(); + + ZMultiPartitionTracker* const tracker = new ZMultiPartitionTracker(partial_allocations->length()); + + ZVirtualMemory remaining = vmem; + + // Each partial allocation is mapped to the virtual memory in order + for (ZMemoryAllocation* partial_allocation : *partial_allocations) { + // Track each separate vmem's partition + const ZVirtualMemory partial_vmem = remaining.shrink_from_front(partial_allocation->size()); + ZPartition* const partition = &partial_allocation->partition(); + tracker->map()->push({partial_vmem, partition}); + } + + return tracker; + } }; ZPageAllocator::ZPageAllocator(size_t min_capacity, @@ -183,23 +1252,16 @@ ZPageAllocator::ZPageAllocator(size_t min_capacity, size_t soft_max_capacity, size_t max_capacity) : _lock(), - _cache(), _virtual(max_capacity), _physical(max_capacity), _min_capacity(min_capacity), - _initial_capacity(initial_capacity), _max_capacity(max_capacity), - _current_max_capacity(max_capacity), - _capacity(0), - _claimed(0), _used(0), - _used_generations{0, 0}, - _collection_stats{{0, 0}, {0, 0}}, + _used_generations{0,0}, + _collection_stats{{0, 0},{0, 0}}, + _partitions(ZValueIdTagType{}, this), _stalled(), - _unmapper(new ZUnmapper(this)), - _uncommitter(new ZUncommitter(this)), _safe_destroy(), - _safe_recycle(this), _initialized(false) { if (!_virtual.is_initialized() || !_physical.is_initialized()) { @@ -231,69 +1293,20 @@ bool ZPageAllocator::is_initialized() const { return _initialized; } -class ZPreTouchTask : public ZTask { -private: - volatile uintptr_t _current; - const uintptr_t _end; +bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) { + ZPartitionIterator iter = partition_iterator(); + for (ZPartition* partition; iter.next(&partition);) { + const uint32_t numa_id = partition->numa_id(); + const size_t to_prime = ZNUMA::calculate_share(numa_id, size); - static void pretouch(zaddress zaddr, size_t size) { - const uintptr_t addr = untype(zaddr); - const size_t page_size = ZLargePages::is_explicit() ? ZGranuleSize : os::vm_page_size(); - os::pretouch_memory((void*)addr, (void*)(addr + size), page_size); - } - -public: - ZPreTouchTask(zoffset start, zoffset_end end) - : ZTask("ZPreTouchTask"), - _current(untype(start)), - _end(untype(end)) {} - - virtual void work() { - const size_t size = ZGranuleSize; - - for (;;) { - // Claim an offset for this thread - const uintptr_t claimed = Atomic::fetch_then_add(&_current, size); - if (claimed >= _end) { - // Done - break; - } - - // At this point we know that we have a valid zoffset / zaddress. 
- const zoffset offset = to_zoffset(claimed); - const zaddress addr = ZOffset::address(offset); - - // Pre-touch the granule - pretouch(addr, size); + if (!partition->prime(workers, to_prime)) { + return false; } } -}; - -bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) { - ZAllocationFlags flags; - flags.set_non_blocking(); - flags.set_low_address(); - - ZPage* const page = alloc_page(ZPageType::large, size, flags, ZPageAge::eden); - if (page == nullptr) { - return false; - } - - if (AlwaysPreTouch) { - // Pre-touch page - ZPreTouchTask task(page->start(), page->end()); - workers->run_all(&task); - } - - free_page(page, false /* allow_defragment */); return true; } -size_t ZPageAllocator::initial_capacity() const { - return _initial_capacity; -} - size_t ZPageAllocator::min_capacity() const { return _min_capacity; } @@ -303,14 +1316,31 @@ size_t ZPageAllocator::max_capacity() const { } size_t ZPageAllocator::soft_max_capacity() const { - // Note that SoftMaxHeapSize is a manageable flag - const size_t soft_max_capacity = Atomic::load(&SoftMaxHeapSize); - const size_t current_max_capacity = Atomic::load(&_current_max_capacity); - return MIN2(soft_max_capacity, current_max_capacity); + const size_t current_max_capacity = ZPageAllocator::current_max_capacity(); + const size_t soft_max_heapsize = Atomic::load(&SoftMaxHeapSize); + return MIN2(soft_max_heapsize, current_max_capacity); +} + +size_t ZPageAllocator::current_max_capacity() const { + size_t current_max_capacity = 0; + + ZPartitionConstIterator iter = partition_iterator(); + for (const ZPartition* partition; iter.next(&partition);) { + current_max_capacity += Atomic::load(&partition->_current_max_capacity); + } + + return current_max_capacity; } size_t ZPageAllocator::capacity() const { - return Atomic::load(&_capacity); + size_t capacity = 0; + + ZPartitionConstIterator iter = partition_iterator(); + for (const ZPartition* partition; iter.next(&partition);) { + capacity += Atomic::load(&partition->_capacity); + } + + return capacity; } size_t ZPageAllocator::used() const { @@ -322,19 +1352,27 @@ size_t ZPageAllocator::used_generation(ZGenerationId id) const { } size_t ZPageAllocator::unused() const { - const ssize_t capacity = (ssize_t)Atomic::load(&_capacity); - const ssize_t used = (ssize_t)Atomic::load(&_used); - const ssize_t claimed = (ssize_t)Atomic::load(&_claimed); + const ssize_t used = (ssize_t)ZPageAllocator::used(); + ssize_t capacity = 0; + ssize_t claimed = 0; + + ZPartitionConstIterator iter = partition_iterator(); + for (const ZPartition* partition; iter.next(&partition);) { + capacity += (ssize_t)Atomic::load(&partition->_capacity); + claimed += (ssize_t)Atomic::load(&partition->_claimed); + } + const ssize_t unused = capacity - used - claimed; return unused > 0 ? 
(size_t)unused : 0; } ZPageAllocatorStats ZPageAllocator::stats(ZGeneration* generation) const { ZLocker locker(&_lock); + return ZPageAllocatorStats(_min_capacity, _max_capacity, soft_max_capacity(), - _capacity, + capacity(), _used, _collection_stats[(int)generation->id()]._used_high, _collection_stats[(int)generation->id()]._used_low, @@ -347,211 +1385,47 @@ ZPageAllocatorStats ZPageAllocator::stats(ZGeneration* generation) const { void ZPageAllocator::reset_statistics(ZGenerationId id) { assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint"); - _collection_stats[(int)id]._used_high = _used; - _collection_stats[(int)id]._used_low = _used; -} +#ifdef ASSERT + { + // We may free without safepoint synchronization, take the lock to get + // consistent values. + ZLocker locker(&_lock); + size_t total_used = 0; -size_t ZPageAllocator::increase_capacity(size_t size) { - const size_t increased = MIN2(size, _current_max_capacity - _capacity); - - if (increased > 0) { - // Update atomically since we have concurrent readers - Atomic::add(&_capacity, increased); - - // Record time of last commit. When allocation, we prefer increasing - // the capacity over flushing the cache. That means there could be - // expired pages in the cache at this time. However, since we are - // increasing the capacity we are obviously in need of committed - // memory and should therefore not be uncommitting memory. - _cache.set_last_commit(); - } - - return increased; -} - -void ZPageAllocator::decrease_capacity(size_t size, bool set_max_capacity) { - // Update atomically since we have concurrent readers - Atomic::sub(&_capacity, size); - - if (set_max_capacity) { - // Adjust current max capacity to avoid further attempts to increase capacity - log_error_p(gc)("Forced to lower max Java heap size from " - "%zuM(%.0f%%) to %zuM(%.0f%%)", - _current_max_capacity / M, percent_of(_current_max_capacity, _max_capacity), - _capacity / M, percent_of(_capacity, _max_capacity)); - - // Update atomically since we have concurrent readers - Atomic::store(&_current_max_capacity, _capacity); - } -} - -void ZPageAllocator::increase_used(size_t size) { - // We don't track generation usage here because this page - // could be allocated by a thread that satisfies a stalling - // allocation. The stalled thread can wake up and potentially - // realize that the page alloc should be undone. If the alloc - // and the undo gets separated by a safepoint, the generation - // statistics could se a decreasing used value between mark - // start and mark end. - - // Update atomically since we have concurrent readers - const size_t used = Atomic::add(&_used, size); - - // Update used high - for (auto& stats : _collection_stats) { - if (used > stats._used_high) { - stats._used_high = used; + ZPartitionIterator iter(&_partitions); + for (ZPartition* partition; iter.next(&partition);) { + total_used += partition->_used; } - } -} -void ZPageAllocator::decrease_used(size_t size) { - // Update atomically since we have concurrent readers - const size_t used = Atomic::sub(&_used, size); - - // Update used low - for (auto& stats : _collection_stats) { - if (used < stats._used_low) { - stats._used_low = used; - } + assert(total_used == _used, "Must be consistent at safepoint %zu == %zu", total_used, _used); } +#endif + + // Read once, we may have concurrent writers. 
+ const size_t used = Atomic::load(&_used); + + _collection_stats[(int)id]._used_high = used; + _collection_stats[(int)id]._used_low = used; } void ZPageAllocator::increase_used_generation(ZGenerationId id, size_t size) { - // Update atomically since we have concurrent readers + // Update atomically since we have concurrent readers and writers Atomic::add(&_used_generations[(int)id], size, memory_order_relaxed); } void ZPageAllocator::decrease_used_generation(ZGenerationId id, size_t size) { - // Update atomically since we have concurrent readers + // Update atomically since we have concurrent readers and writers Atomic::sub(&_used_generations[(int)id], size, memory_order_relaxed); } -void ZPageAllocator::promote_used(size_t size) { - decrease_used_generation(ZGenerationId::young, size); - increase_used_generation(ZGenerationId::old, size); -} +void ZPageAllocator::promote_used(const ZPage* from, const ZPage* to) { + assert(from->start() == to->start(), "pages start at same offset"); + assert(from->size() == to->size(), "pages are the same size"); + assert(from->age() != ZPageAge::old, "must be promotion"); + assert(to->age() == ZPageAge::old, "must be promotion"); -bool ZPageAllocator::commit_page(ZPage* page) { - // Commit physical memory - return _physical.commit(page->physical_memory()); -} - -void ZPageAllocator::uncommit_page(ZPage* page) { - if (!ZUncommit) { - return; - } - - // Uncommit physical memory - _physical.uncommit(page->physical_memory()); -} - -void ZPageAllocator::map_page(const ZPage* page) const { - // Map physical memory - _physical.map(page->start(), page->physical_memory()); -} - -void ZPageAllocator::unmap_page(const ZPage* page) const { - // Unmap physical memory - _physical.unmap(page->start(), page->size()); -} - -void ZPageAllocator::safe_destroy_page(ZPage* page) { - // Destroy page safely - _safe_destroy.schedule_delete(page); -} - -void ZPageAllocator::destroy_page(ZPage* page) { - // Free virtual memory - _virtual.free(page->virtual_memory()); - - // Free physical memory - _physical.free(page->physical_memory()); - - // Destroy page safely - safe_destroy_page(page); -} - -bool ZPageAllocator::should_defragment(const ZPage* page) const { - // A small page can end up at a high address (second half of the address space) - // if we've split a larger page or we have a constrained address space. To help - // fight address space fragmentation we remap such pages to a lower address, if - // a lower address is available. 
- return page->type() == ZPageType::small && - page->start() >= to_zoffset(_virtual.reserved() / 2) && - page->start() > _virtual.lowest_available_address(); -} - -ZPage* ZPageAllocator::defragment_page(ZPage* page) { - // Harvest the physical memory (which is committed) - ZPhysicalMemory pmem; - ZPhysicalMemory& old_pmem = page->physical_memory(); - pmem.add_segments(old_pmem); - old_pmem.remove_segments(); - - _unmapper->unmap_and_destroy_page(page); - - // Allocate new virtual memory at a low address - const ZVirtualMemory vmem = _virtual.alloc(pmem.size(), true /* force_low_address */); - - // Create the new page and map it - ZPage* new_page = new ZPage(ZPageType::small, vmem, pmem); - map_page(new_page); - - // Update statistics - ZStatInc(ZCounterDefragment); - - return new_page; -} - -bool ZPageAllocator::is_alloc_allowed(size_t size) const { - const size_t available = _current_max_capacity - _used - _claimed; - return available >= size; -} - -bool ZPageAllocator::alloc_page_common_inner(ZPageType type, size_t size, ZList* pages) { - if (!is_alloc_allowed(size)) { - // Out of memory - return false; - } - - // Try allocate from the page cache - ZPage* const page = _cache.alloc_page(type, size); - if (page != nullptr) { - // Success - pages->insert_last(page); - return true; - } - - // Try increase capacity - const size_t increased = increase_capacity(size); - if (increased < size) { - // Could not increase capacity enough to satisfy the allocation - // completely. Flush the page cache to satisfy the remainder. - const size_t remaining = size - increased; - _cache.flush_for_allocation(remaining, pages); - } - - // Success - return true; -} - -bool ZPageAllocator::alloc_page_common(ZPageAllocation* allocation) { - const ZPageType type = allocation->type(); - const size_t size = allocation->size(); - const ZAllocationFlags flags = allocation->flags(); - ZList* const pages = allocation->pages(); - - if (!alloc_page_common_inner(type, size, pages)) { - // Out of memory - return false; - } - - // Updated used statistics - increase_used(size); - - // Success - return true; + decrease_used_generation(ZGenerationId::young, to->size()); + increase_used_generation(ZGenerationId::old, to->size()); } static void check_out_of_memory_during_initialization() { @@ -560,6 +1434,42 @@ static void check_out_of_memory_during_initialization() { } } +ZPage* ZPageAllocator::alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age) { + EventZPageAllocation event; + + ZPageAllocation allocation(type, size, flags, age); + + // Allocate the page + ZPage* const page = alloc_page_inner(&allocation); + if (page == nullptr) { + return nullptr; + } + + // Update allocation statistics. Exclude gc relocations to avoid + // artificial inflation of the allocation rate during relocation. + if (!flags.gc_relocation() && is_init_completed()) { + // Note that there are two allocation rate counters, which have + // different purposes and are sampled at different frequencies. 
+ ZStatInc(ZCounterMutatorAllocationRate, size); + ZStatMutatorAllocRate::sample_allocation(size); + } + + const ZPageAllocationStats stats = allocation.stats(); + const int num_harvested_vmems = stats._num_harvested_vmems; + const size_t harvested = stats._total_harvested; + const size_t committed = stats._total_committed_capacity; + + if (harvested > 0) { + ZStatInc(ZCounterMappedCacheHarvest, harvested); + log_debug(gc, heap)("Mapped Cache Harvested: %zuM (%d)", harvested / M, num_harvested_vmems); + } + + // Send event for successful allocation + allocation.send_event(true /* successful */); + + return page; +} + bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) { ZStatTimer timer(ZCriticalPhaseAllocationStall); EventZAllocationStall event; @@ -591,16 +1501,70 @@ bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) { return result; } -bool ZPageAllocator::alloc_page_or_stall(ZPageAllocation* allocation) { +ZPage* ZPageAllocator::alloc_page_inner(ZPageAllocation* allocation) { +retry: + + // Claim the capacity needed for this allocation. + // + // The claimed capacity comes from memory already mapped in the cache, or + // from increasing the capacity. The increased capacity allows us to allocate + // physical memory from the physical memory manager later on. + // + // Note that this call might block in a safepoint if the non-blocking flag is + // not set. + if (!claim_capacity_or_stall(allocation)) { + // Out of memory + return nullptr; + } + + // If the entire claimed capacity came from claiming a single vmem from the + // mapped cache then the allocation has been satisfied and we are done. + const ZVirtualMemory cached_vmem = satisfied_from_cache_vmem(allocation); + if (!cached_vmem.is_null()) { + return create_page(allocation, cached_vmem); + } + + // We couldn't find a satisfying vmem in the cache, so we need to build one. + + // Claim virtual memory, either from remapping harvested vmems from the + // mapped cache or by claiming it straight from the virtual memory manager. + const ZVirtualMemory vmem = claim_virtual_memory(allocation); + if (vmem.is_null()) { + log_error(gc)("Out of address space"); + free_after_alloc_page_failed(allocation); + + // Crash in debug builds for more information + DEBUG_ONLY(fatal("Out of address space");) + + return nullptr; + } + + // Claim physical memory for the increased capacity. The previous claiming of + // capacity guarantees that this will succeed. + claim_physical_for_increased_capacity(allocation, vmem); + + // Commit memory for the increased capacity and map the entire vmem. 
+ if (!commit_and_map(allocation, vmem)) { + free_after_alloc_page_failed(allocation); + goto retry; + } + + return create_page(allocation, vmem); +} + +bool ZPageAllocator::claim_capacity_or_stall(ZPageAllocation* allocation) { { ZLocker locker(&_lock); - if (alloc_page_common(allocation)) { - // Success + // Try to claim memory + if (claim_capacity(allocation)) { + // Keep track of usage + increase_used(allocation->size()); + return true; } - // Failed + // Failed to claim memory if (allocation->flags().non_blocking()) { // Don't stall return false; @@ -614,168 +1578,603 @@ bool ZPageAllocator::alloc_page_or_stall(ZPageAllocation* allocation) { return alloc_page_stall(allocation); } -ZPage* ZPageAllocator::alloc_page_create(ZPageAllocation* allocation) { - const size_t size = allocation->size(); +bool ZPageAllocator::claim_capacity(ZPageAllocation* allocation) { + const uint32_t start_numa_id = allocation->initiating_numa_id(); + const uint32_t start_partition = start_numa_id; + const uint32_t num_partitions = _partitions.count(); - // Allocate virtual memory. To make error handling a lot more straight - // forward, we allocate virtual memory before destroying flushed pages. - // Flushed pages are also unmapped and destroyed asynchronously, so we - // can't immediately reuse that part of the address space anyway. - const ZVirtualMemory vmem = _virtual.alloc(size, allocation->flags().low_address()); - if (vmem.is_null()) { - log_error(gc)("Out of address space"); - return nullptr; + // Round robin single-partition claiming + + for (uint32_t i = 0; i < num_partitions; ++i) { + const uint32_t partition_id = (start_partition + i) % num_partitions; + + if (claim_capacity_single_partition(allocation->single_partition_allocation(), partition_id)) { + return true; + } } - ZPhysicalMemory pmem; - size_t flushed = 0; - - // Harvest physical memory from flushed pages - ZListRemoveIterator iter(allocation->pages()); - for (ZPage* page; iter.next(&page);) { - flushed += page->size(); - - // Harvest flushed physical memory - ZPhysicalMemory& fmem = page->physical_memory(); - pmem.add_segments(fmem); - fmem.remove_segments(); - - // Unmap and destroy page - _unmapper->unmap_and_destroy_page(page); - } - - if (flushed > 0) { - allocation->set_flushed(flushed); - - // Update statistics - ZStatInc(ZCounterPageCacheFlush, flushed); - log_debug(gc, heap)("Page Cache Flushed: %zuM", flushed / M); - } - - // Allocate any remaining physical memory. Capacity and used has - // already been adjusted, we just need to fetch the memory, which - // is guaranteed to succeed. - if (flushed < size) { - const size_t remaining = size - flushed; - allocation->set_committed(remaining); - _physical.alloc(pmem, remaining); - } - - // Create new page - return new ZPage(allocation->type(), vmem, pmem); -} - -bool ZPageAllocator::is_alloc_satisfied(ZPageAllocation* allocation) const { - // The allocation is immediately satisfied if the list of pages contains - // exactly one page, with the type and size that was requested. However, - // even if the allocation is immediately satisfied we might still want to - // return false here to force the page to be remapped to fight address - // space fragmentation. 
- - if (allocation->pages()->size() != 1) { - // Not a single page + if (!is_multi_partition_enabled() || sum_available() < allocation->size()) { + // Multi-partition claiming is not possible return false; } - const ZPage* const page = allocation->pages()->first(); - if (page->type() != allocation->type() || - page->size() != allocation->size()) { - // Wrong type or size - return false; - } + // Multi-partition claiming + + // Flip allocation to multi-partition allocation + allocation->initiate_multi_partition_allocation(); + + ZMultiPartitionAllocation* const multi_partition_allocation = allocation->multi_partition_allocation(); + + claim_capacity_multi_partition(multi_partition_allocation, start_partition); - // Allocation immediately satisfied return true; } -ZPage* ZPageAllocator::alloc_page_finalize(ZPageAllocation* allocation) { - // Fast path - if (is_alloc_satisfied(allocation)) { - return allocation->pages()->remove_first(); - } +bool ZPageAllocator::claim_capacity_single_partition(ZSinglePartitionAllocation* single_partition_allocation, uint32_t partition_id) { + ZPartition& partition = _partitions.get(partition_id); - // Slow path - ZPage* const page = alloc_page_create(allocation); - if (page == nullptr) { - // Out of address space - return nullptr; - } - - // Commit page - if (commit_page(page)) { - // Success - map_page(page); - return page; - } - - // Failed or partially failed. Split of any successfully committed - // part of the page into a new page and insert it into list of pages, - // so that it will be re-inserted into the page cache. - ZPage* const committed_page = page->split_committed(); - destroy_page(page); - - if (committed_page != nullptr) { - map_page(committed_page); - allocation->pages()->insert_last(committed_page); - } - - return nullptr; + return partition.claim_capacity(single_partition_allocation->allocation()); } -ZPage* ZPageAllocator::alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age) { - EventZPageAllocation event; +void ZPageAllocator::claim_capacity_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, uint32_t start_partition) { + const size_t size = multi_partition_allocation->size(); + const uint32_t num_partitions = _partitions.count(); + const size_t split_size = align_up(size / num_partitions, ZGranuleSize); -retry: - ZPageAllocation allocation(type, size, flags); + size_t remaining = size; - // Allocate one or more pages from the page cache. If the allocation - // succeeds but the returned pages don't cover the complete allocation, - // then finalize phase is allowed to allocate the remaining memory - // directly from the physical memory manager. Note that this call might - // block in a safepoint if the non-blocking flag is not set. - if (!alloc_page_or_stall(&allocation)) { - // Out of memory - return nullptr; + const auto do_claim_one_partition = [&](ZPartition& partition, bool claim_evenly) { + if (remaining == 0) { + // All memory claimed + return false; + } + + const size_t max_alloc_size = claim_evenly ? 
MIN2(split_size, remaining) : remaining; + + // This guarantees that claim_physical below will succeed + const size_t alloc_size = MIN2(max_alloc_size, partition.available()); + + // Skip over empty allocations + if (alloc_size == 0) { + // Continue + return true; + } + + ZMemoryAllocation partial_allocation(alloc_size); + + // Claim capacity for this allocation - this should succeed + const bool result = partition.claim_capacity(&partial_allocation); + assert(result, "Should have succeeded"); + + // Register allocation + multi_partition_allocation->register_allocation(partial_allocation); + + // Update remaining + remaining -= alloc_size; + + // Continue + return true; + }; + + // Loops over every partition and claims memory + const auto do_claim_each_partition = [&](bool claim_evenly) { + for (uint32_t i = 0; i < num_partitions; ++i) { + const uint32_t partition_id = (start_partition + i) % num_partitions; + ZPartition& partition = _partitions.get(partition_id); + + if (!do_claim_one_partition(partition, claim_evenly)) { + // All memory claimed + break; + } + } + }; + + // Try to claim from multiple partitions + + // Try to claim up to split_size on each partition + do_claim_each_partition(true /* claim_evenly */); + + // Try claim the remaining + do_claim_each_partition(false /* claim_evenly */); + + assert(remaining == 0, "Must have claimed capacity for the whole allocation"); +} + +ZVirtualMemory ZPageAllocator::satisfied_from_cache_vmem(const ZPageAllocation* allocation) const { + if (allocation->is_multi_partition()) { + // Multi-partition allocations are always harvested and/or committed, so + // there's never a satisfying vmem from the caches. + return {}; } - ZPage* const page = alloc_page_finalize(&allocation); - if (page == nullptr) { - // Failed to commit or map. Clean up and retry, in the hope that - // we can still allocate by flushing the page cache (more aggressively). - free_pages_alloc_failed(&allocation); - goto retry; + return allocation->satisfied_from_cache_vmem(); +} + +ZVirtualMemory ZPageAllocator::claim_virtual_memory(ZPageAllocation* allocation) { + // Note: that the single-partition performs "shuffling" of already harvested + // vmem(s), while the multi-partition searches for available virtual memory + // area without shuffling. + + if (allocation->is_multi_partition()) { + return claim_virtual_memory_multi_partition(allocation->multi_partition_allocation()); + } else { + return claim_virtual_memory_single_partition(allocation->single_partition_allocation()); + } +} + +ZVirtualMemory ZPageAllocator::claim_virtual_memory_single_partition(ZSinglePartitionAllocation* single_partition_allocation) { + ZMemoryAllocation* const allocation = single_partition_allocation->allocation(); + ZPartition& partition = allocation->partition(); + + if (allocation->harvested() > 0) { + // We claim virtual memory from the harvested vmems and perhaps also + // allocate more to match the allocation request. + return partition.prepare_harvested_and_claim_virtual(allocation); + } else { + // Just try to claim virtual memory + return partition.claim_virtual(allocation->size()); + } +} + +ZVirtualMemory ZPageAllocator::claim_virtual_memory_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation) { + const size_t size = multi_partition_allocation->size(); + + const ZVirtualMemory vmem = _virtual.remove_from_low_multi_partition(size); + if (!vmem.is_null()) { + // Copy claimed multi-partition vmems, we leave the old vmems mapped until + // after we have committed. 
In case committing fails we can simply + // reinsert the initial vmems. + copy_claimed_physical_multi_partition(multi_partition_allocation, vmem); } - // The generation's used is tracked here when the page is handed out - // to the allocating thread. The overall heap "used" is tracked in - // the lower-level allocation code. - const ZGenerationId id = age == ZPageAge::old ? ZGenerationId::old : ZGenerationId::young; - increase_used_generation(id, size); + return vmem; +} - // Reset page. This updates the page's sequence number and must - // be done after we potentially blocked in a safepoint (stalled) - // where the global sequence number was updated. - page->reset(age); - page->reset_top_for_allocation(); - page->reset_livemap(); - if (age == ZPageAge::old) { - page->remset_alloc(); +void ZPageAllocator::copy_claimed_physical_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem) { + // Start at the new dest offset + ZVirtualMemory remaining_dest_vmem = vmem; + + for (const ZMemoryAllocation* partial_allocation : *multi_partition_allocation->allocations()) { + // Split off the partial allocation's destination vmem + ZVirtualMemory partial_dest_vmem = remaining_dest_vmem.shrink_from_front(partial_allocation->size()); + + // Get the partial allocation's partition + ZPartition& partition = partial_allocation->partition(); + + // Copy all physical segments from the partition to the destination vmem + for (const ZVirtualMemory from_vmem : *partial_allocation->partial_vmems()) { + // Split off destination + const ZVirtualMemory to_vmem = partial_dest_vmem.shrink_from_front(from_vmem.size()); + + // Copy physical segments + partition.copy_physical_segments_from_partition(from_vmem, to_vmem); + } + } +} + +void ZPageAllocator::claim_physical_for_increased_capacity(ZPageAllocation* allocation, const ZVirtualMemory& vmem) { + assert(allocation->size() == vmem.size(), "vmem should be the final entry"); + + if (allocation->is_multi_partition()) { + claim_physical_for_increased_capacity_multi_partition(allocation->multi_partition_allocation(), vmem); + } else { + claim_physical_for_increased_capacity_single_partition(allocation->single_partition_allocation(), vmem); + } +} + +void ZPageAllocator::claim_physical_for_increased_capacity_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem) { + claim_physical_for_increased_capacity(single_partition_allocation->allocation(), vmem); +} + +void ZPageAllocator::claim_physical_for_increased_capacity_multi_partition(const ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem) { + ZVirtualMemory remaining = vmem; + + for (ZMemoryAllocation* allocation : *multi_partition_allocation->allocations()) { + const ZVirtualMemory partial = remaining.shrink_from_front(allocation->size()); + claim_physical_for_increased_capacity(allocation, partial); + } +} + +void ZPageAllocator::claim_physical_for_increased_capacity(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem) { + // The previously harvested memory is memory that has already been committed + // and mapped. The rest of the vmem gets physical memory assigned here and + // will be committed in a subsequent function. 
+ + const size_t already_committed = allocation->harvested(); + const size_t non_committed = allocation->size() - already_committed; + const size_t increased_capacity = allocation->increased_capacity(); + + assert(non_committed == increased_capacity, + "Mismatch non_committed: " PTR_FORMAT " increased_capacity: " PTR_FORMAT, + non_committed, increased_capacity); + + if (non_committed > 0) { + ZPartition& partition = allocation->partition(); + ZVirtualMemory non_committed_vmem = vmem.last_part(already_committed); + partition.claim_physical(non_committed_vmem); + } +} + +bool ZPageAllocator::commit_and_map(ZPageAllocation* allocation, const ZVirtualMemory& vmem) { + assert(allocation->size() == vmem.size(), "vmem should be the final entry"); + + if (allocation->is_multi_partition()) { + return commit_and_map_multi_partition(allocation->multi_partition_allocation(), vmem); + } else { + return commit_and_map_single_partition(allocation->single_partition_allocation(), vmem); + } +} + +bool ZPageAllocator::commit_and_map_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem) { + const bool commit_successful = commit_single_partition(single_partition_allocation, vmem); + + // Map the vmem + map_committed_single_partition(single_partition_allocation, vmem); + + if (commit_successful) { + return true; } - // Update allocation statistics. Exclude gc relocations to avoid - // artificial inflation of the allocation rate during relocation. - if (!flags.gc_relocation() && is_init_completed()) { - // Note that there are two allocation rate counters, which have - // different purposes and are sampled at different frequencies. - ZStatInc(ZCounterMutatorAllocationRate, size); - ZStatMutatorAllocRate::sample_allocation(size); + // Commit failed + cleanup_failed_commit_single_partition(single_partition_allocation, vmem); + + return false; +} + +bool ZPageAllocator::commit_and_map_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem) { + if (commit_multi_partition(multi_partition_allocation, vmem)) { + // Commit successful + + // Unmap harvested vmems + unmap_harvested_multi_partition(multi_partition_allocation); + + // Map the vmem + map_committed_multi_partition(multi_partition_allocation, vmem); + + return true; } - // Send event - event.commit((u8)type, size, allocation.flushed(), allocation.committed(), - page->physical_memory().nsegments(), flags.non_blocking()); + // Commit failed + cleanup_failed_commit_multi_partition(multi_partition_allocation, vmem); - return page; + return false; +} + +void ZPageAllocator::commit(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem) { + ZPartition& partition = allocation->partition(); + + if (allocation->increased_capacity() > 0) { + // Commit memory + partition.commit_increased_capacity(allocation, vmem); + } +} + +bool ZPageAllocator::commit_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem) { + ZMemoryAllocation* const allocation = single_partition_allocation->allocation(); + + commit(allocation, vmem); + + return !allocation->commit_failed(); +} + +bool ZPageAllocator::commit_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem) { + bool commit_failed = false; + ZVirtualMemory remaining = vmem; + for (ZMemoryAllocation* const allocation : *multi_partition_allocation->allocations()) { + // Split off the partial allocation's memory range + const ZVirtualMemory partial_vmem 
= remaining.shrink_from_front(allocation->size()); + + commit(allocation, partial_vmem); + + // Keep track if any partial allocation failed to commit + commit_failed |= allocation->commit_failed(); + } + + assert(remaining.size() == 0, "all memory must be accounted for"); + + return !commit_failed; +} + +void ZPageAllocator::unmap_harvested_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation) { + for (ZMemoryAllocation* const allocation : *multi_partition_allocation->allocations()) { + ZPartition& partition = allocation->partition(); + ZArray* const partial_vmems = allocation->partial_vmems(); + + // Unmap harvested vmems + while (!partial_vmems->is_empty()) { + const ZVirtualMemory to_unmap = partial_vmems->pop(); + partition.unmap_virtual(to_unmap); + partition.free_virtual(to_unmap); + } + } +} + +void ZPageAllocator::map_committed_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem) { + ZMemoryAllocation* const allocation = single_partition_allocation->allocation(); + ZPartition& partition = allocation->partition(); + + const size_t total_committed = allocation->harvested() + allocation->committed_capacity(); + const ZVirtualMemory total_committed_vmem = vmem.first_part(total_committed); + + if (total_committed_vmem.size() > 0) { + // Map all the committed memory + partition.map_memory(allocation, total_committed_vmem); + } +} + +void ZPageAllocator::map_committed_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem) { + ZVirtualMemory remaining = vmem; + for (ZMemoryAllocation* const allocation : *multi_partition_allocation->allocations()) { + assert(!allocation->commit_failed(), "Sanity check"); + + ZPartition& partition = allocation->partition(); + + // Split off the partial allocation's memory range + const ZVirtualMemory to_vmem = remaining.shrink_from_front(allocation->size()); + + // Map the partial_allocation to partial_vmem + partition.map_virtual_from_multi_partition(to_vmem); + } + + assert(remaining.size() == 0, "all memory must be accounted for"); +} + +void ZPageAllocator::cleanup_failed_commit_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem) { + ZMemoryAllocation* const allocation = single_partition_allocation->allocation(); + + assert(allocation->commit_failed(), "Must have failed to commit"); + + const size_t committed = allocation->committed_capacity(); + const ZVirtualMemory non_harvested_vmem = vmem.last_part(allocation->harvested()); + const ZVirtualMemory committed_vmem = non_harvested_vmem.first_part(committed); + const ZVirtualMemory non_committed_vmem = non_harvested_vmem.last_part(committed); + + if (committed_vmem.size() > 0) { + // Register the committed and mapped memory. We insert the committed + // memory into partial_vmems so that it will be inserted into the cache + // in a subsequent step. 
+ allocation->partial_vmems()->append(committed_vmem); + } + + // Free the virtual and physical memory we fetched to use but failed to commit + ZPartition& partition = allocation->partition(); + partition.free_physical(non_committed_vmem); + partition.free_virtual(non_committed_vmem); +} + +void ZPageAllocator::cleanup_failed_commit_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem) { + ZVirtualMemory remaining = vmem; + for (ZMemoryAllocation* const allocation : *multi_partition_allocation->allocations()) { + // Split off the partial allocation's memory range + const ZVirtualMemory partial_vmem = remaining.shrink_from_front(allocation->size()); + + if (allocation->harvested() == allocation->size()) { + // Everything is harvested, the mappings are already in the partial_vmems, + // nothing to cleanup. + continue; + } + + const size_t committed = allocation->committed_capacity(); + const ZVirtualMemory non_harvested_vmem = vmem.last_part(allocation->harvested()); + const ZVirtualMemory committed_vmem = non_harvested_vmem.first_part(committed); + const ZVirtualMemory non_committed_vmem = non_harvested_vmem.last_part(committed); + + ZPartition& partition = allocation->partition(); + + if (allocation->commit_failed()) { + // Free the physical memory we failed to commit. Virtual memory is later + // freed for the entire multi-partition allocation after all memory + // allocations have been visited. + partition.free_physical(non_committed_vmem); + } + + if (committed_vmem.size() == 0) { + // Nothing committed, nothing more to cleanup + continue; + } + + // Remove the harvested part + const ZVirtualMemory non_harvest_vmem = partial_vmem.last_part(allocation->harvested()); + + ZArray* const partial_vmems = allocation->partial_vmems(); + + // Keep track of the start index + const int start_index = partial_vmems->length(); + + // Claim virtual memory for the committed part + const size_t claimed_virtual = partition.claim_virtual(committed, partial_vmems); + + // We are holding memory associated with this partition, and we do not + // overcommit virtual memory claiming. So virtual memory must always be + // available. 
+ assert(claimed_virtual == committed, "must succeed"); + + // Associate and map the physical memory with the partial vmems + + ZVirtualMemory remaining_committed_vmem = committed_vmem; + for (const ZVirtualMemory& to_vmem : partial_vmems->slice_back(start_index)) { + const ZVirtualMemory from_vmem = remaining_committed_vmem.shrink_from_front(to_vmem.size()); + + // Copy physical mappings + partition.copy_physical_segments_to_partition(to_vmem, from_vmem); + + // Map memory + partition.map_virtual(to_vmem); + } + + assert(remaining_committed_vmem.size() == 0, "all memory must be accounted for"); + } + + assert(remaining.size() == 0, "all memory must be accounted for"); + + // Free the unused virtual memory + _virtual.insert_multi_partition(vmem); +} + +void ZPageAllocator::free_after_alloc_page_failed(ZPageAllocation* allocation) { + // Send event for failed allocation + allocation->send_event(false /* successful */); + + ZLocker locker(&_lock); + + // Free memory + free_memory_alloc_failed(allocation); + + // Keep track of usage + decrease_used(allocation->size()); + + // Reset allocation for a potential retry + allocation->reset_for_retry(); + + // Try satisfy stalled allocations + satisfy_stalled(); +} + +void ZPageAllocator::free_memory_alloc_failed(ZPageAllocation* allocation) { + // The current max capacity may be decreased, store the value before freeing memory + const size_t current_max_capacity_before = current_max_capacity(); + + if (allocation->is_multi_partition()) { + free_memory_alloc_failed_multi_partition(allocation->multi_partition_allocation()); + } else { + free_memory_alloc_failed_single_partition(allocation->single_partition_allocation()); + } + + const size_t current_max_capacity_after = current_max_capacity(); + + if (current_max_capacity_before != current_max_capacity_after) { + log_error_p(gc)("Forced to lower max Java heap size from " + "%zuM(%.0f%%) to %zuM(%.0f%%)", + current_max_capacity_before / M, percent_of(current_max_capacity_before, _max_capacity), + current_max_capacity_after / M, percent_of(current_max_capacity_after, _max_capacity)); + } +} + +void ZPageAllocator::free_memory_alloc_failed_single_partition(ZSinglePartitionAllocation* single_partition_allocation) { + free_memory_alloc_failed(single_partition_allocation->allocation()); +} + +void ZPageAllocator::free_memory_alloc_failed_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation) { + for (ZMemoryAllocation* allocation : *multi_partition_allocation->allocations()) { + free_memory_alloc_failed(allocation); + } +} + +void ZPageAllocator::free_memory_alloc_failed(ZMemoryAllocation* allocation) { + ZPartition& partition = allocation->partition(); + + partition.free_memory_alloc_failed(allocation); +} + +ZPage* ZPageAllocator::create_page(ZPageAllocation* allocation, const ZVirtualMemory& vmem) { + // We don't track generation usage when claiming capacity, because this page + // could have been allocated by a thread that satisfies a stalling allocation. + // The stalled thread can wake up and potentially realize that the page alloc + // should be undone. If the alloc and the undo get separated by a safepoint, + // the generation statistics could see a decreasing used value between mark + // start and mark end. At this point an allocation will be successful, so we + // update the generation usage. + const ZGenerationId id = allocation->age() == ZPageAge::old ?
ZGenerationId::old : ZGenerationId::young; + increase_used_generation(id, allocation->size()); + + const ZPageType type = allocation->type(); + const ZPageAge age = allocation->age(); + + if (allocation->is_multi_partition()) { + const ZMultiPartitionAllocation* const multi_partition_allocation = allocation->multi_partition_allocation(); + ZMultiPartitionTracker* const tracker = ZMultiPartitionTracker::create(multi_partition_allocation, vmem); + + return new ZPage(type, age, vmem, tracker); + } + + const ZSinglePartitionAllocation* const single_partition_allocation = allocation->single_partition_allocation(); + const uint32_t partition_id = single_partition_allocation->allocation()->partition().numa_id(); + + return new ZPage(type, age, vmem, partition_id); +} + +void ZPageAllocator::prepare_memory_for_free(ZPage* page, ZArray* vmems) { + // Extract memory and destroy the page + const ZVirtualMemory vmem = page->virtual_memory(); + const ZPageType page_type = page->type(); + const ZMultiPartitionTracker* const tracker = page->multi_partition_tracker(); + + safe_destroy_page(page); + + // Multi-partition memory is always remapped + if (tracker != nullptr) { + tracker->prepare_memory_for_free(vmem, vmems); + + // Free the virtual memory + _virtual.insert_multi_partition(vmem); + + // Destroy the tracker + ZMultiPartitionTracker::destroy(tracker); + return; + } + + // Try to remap and defragment if page is large + if (page_type == ZPageType::large) { + remap_and_defragment(vmem, vmems); + return; + } + + // Leave the memory untouched + vmems->append(vmem); +} + +void ZPageAllocator::remap_and_defragment(const ZVirtualMemory& vmem, ZArray* vmems_out) { + ZPartition& partition = partition_from_vmem(vmem); + + // If no lower address can be found, don't remap/defrag + if (_virtual.lowest_available_address(partition.numa_id()) > vmem.start()) { + vmems_out->append(vmem); + return; + } + + ZStatInc(ZCounterDefragment); + + // Synchronously unmap the virtual memory + partition.unmap_virtual(vmem); + + // Stash segments + ZArray stash(vmem.granule_count()); + _physical.stash_segments(vmem, &stash); + + // Shuffle vmem - put new vmems in vmems_out + const int start_index = vmems_out->length(); + partition.free_and_claim_virtual_from_low_many(vmem, vmems_out); + + // The output array may contain results from other defragmentations as well, + // so we only operate on the result(s) we just got. + ZArraySlice defragmented_vmems = vmems_out->slice_back(start_index); + + // Restore segments + _physical.restore_segments(defragmented_vmems, stash); + + // Map and pre-touch + for (const ZVirtualMemory& claimed_vmem : defragmented_vmems) { + partition.map_virtual(claimed_vmem); + pretouch_memory(claimed_vmem.start(), claimed_vmem.size()); + } +} + +void ZPageAllocator::free_memory(ZArray* vmems) { + ZLocker locker(&_lock); + + // Free the vmems + for (const ZVirtualMemory vmem : *vmems) { + ZPartition& partition = partition_from_vmem(vmem); + + // Free the vmem + partition.free_memory(vmem); + + // Keep track of usage + decrease_used(vmem.size()); + } + + // Try satisfy stalled allocations + satisfy_stalled(); } void ZPageAllocator::satisfy_stalled() { @@ -786,11 +2185,14 @@ void ZPageAllocator::satisfy_stalled() { return; } - if (!alloc_page_common(allocation)) { + if (!claim_capacity(allocation)) { // Allocation could not be satisfied, give up return; } + // Keep track of usage + increase_used(allocation->size()); + // Allocation succeeded, dequeue and satisfy allocation request. 
// Note that we must dequeue the allocation request first, since // it will immediately be deallocated once it has been satisfied. @@ -799,164 +2201,94 @@ void ZPageAllocator::satisfy_stalled() { } } -ZPage* ZPageAllocator::prepare_to_recycle(ZPage* page, bool allow_defragment) { - // Make sure we have a page that is safe to recycle - ZPage* const to_recycle = _safe_recycle.register_and_clone_if_activated(page); +bool ZPageAllocator::is_multi_partition_enabled() const { + return _virtual.is_multi_partition_enabled(); +} - // Defragment the page before recycle if allowed and needed - if (allow_defragment && should_defragment(to_recycle)) { - return defragment_page(to_recycle); +const ZPartition& ZPageAllocator::partition_from_partition_id(uint32_t numa_id) const { + return _partitions.get(numa_id); +} + +ZPartition& ZPageAllocator::partition_from_partition_id(uint32_t numa_id) { + return _partitions.get(numa_id); +} + +ZPartition& ZPageAllocator::partition_from_vmem(const ZVirtualMemory& vmem) { + return partition_from_partition_id(_virtual.lookup_partition_id(vmem)); +} + +size_t ZPageAllocator::sum_available() const { + size_t total = 0; + + ZPartitionConstIterator iter = partition_iterator(); + for (const ZPartition* partition; iter.next(&partition);) { + total += partition->available(); } - // Remove the remset before recycling - if (to_recycle->is_old() && to_recycle == page) { - to_recycle->remset_delete(); - } - - return to_recycle; + return total; } -void ZPageAllocator::recycle_page(ZPage* page) { - // Set time when last used - page->set_last_used(); +void ZPageAllocator::increase_used(size_t size) { + // Update atomically since we have concurrent readers + const size_t used = Atomic::add(&_used, size); - // Cache page - _cache.free_page(page); -} - -void ZPageAllocator::free_page(ZPage* page, bool allow_defragment) { - const ZGenerationId generation_id = page->generation_id(); - - // Prepare page for recycling before taking the lock - ZPage* const to_recycle = prepare_to_recycle(page, allow_defragment); - - ZLocker locker(&_lock); - - // Update used statistics - const size_t size = to_recycle->size(); - decrease_used(size); - decrease_used_generation(generation_id, size); - - // Free page - recycle_page(to_recycle); - - // Try satisfy stalled allocations - satisfy_stalled(); -} - -void ZPageAllocator::free_pages(const ZArray* pages) { - ZArray to_recycle_pages; - - size_t young_size = 0; - size_t old_size = 0; - - // Prepare pages for recycling before taking the lock - ZArrayIterator pages_iter(pages); - for (ZPage* page; pages_iter.next(&page);) { - if (page->is_young()) { - young_size += page->size(); - } else { - old_size += page->size(); + // Update used high + for (auto& stats : _collection_stats) { + if (used > stats._used_high) { + stats._used_high = used; } - - // Prepare to recycle - ZPage* const to_recycle = prepare_to_recycle(page, true /* allow_defragment */); - - // Register for recycling - to_recycle_pages.push(to_recycle); } - - ZLocker locker(&_lock); - - // Update used statistics - decrease_used(young_size + old_size); - decrease_used_generation(ZGenerationId::young, young_size); - decrease_used_generation(ZGenerationId::old, old_size); - - // Free pages - ZArrayIterator iter(&to_recycle_pages); - for (ZPage* page; iter.next(&page);) { - recycle_page(page); - } - - // Try satisfy stalled allocations - satisfy_stalled(); } -void ZPageAllocator::free_pages_alloc_failed(ZPageAllocation* allocation) { - // The page(s) in the allocation are either taken from the cache 
or a newly - // created, mapped and commited ZPage. These page(s) have not been inserted in - // the page table, nor allocated a remset, so prepare_to_recycle is not required. - ZLocker locker(&_lock); +void ZPageAllocator::decrease_used(size_t size) { + // Update atomically since we have concurrent readers + const size_t used = Atomic::sub(&_used, size); - // Only decrease the overall used and not the generation used, - // since the allocation failed and generation used wasn't bumped. - decrease_used(allocation->size()); - - size_t freed = 0; - - // Free any allocated/flushed pages - ZListRemoveIterator iter(allocation->pages()); - for (ZPage* page; iter.next(&page);) { - freed += page->size(); - recycle_page(page); - } - - // Adjust capacity and used to reflect the failed capacity increase - const size_t remaining = allocation->size() - freed; - decrease_capacity(remaining, true /* set_max_capacity */); - - // Try satisfy stalled allocations - satisfy_stalled(); -} - -size_t ZPageAllocator::uncommit(uint64_t* timeout) { - // We need to join the suspendible thread set while manipulating capacity and - // used, to make sure GC safepoints will have a consistent view. - ZList pages; - size_t flushed; - - { - SuspendibleThreadSetJoiner sts_joiner; - ZLocker locker(&_lock); - - // Never uncommit below min capacity. We flush out and uncommit chunks at - // a time (~0.8% of the max capacity, but at least one granule and at most - // 256M), in case demand for memory increases while we are uncommitting. - const size_t retain = MAX2(_used, _min_capacity); - const size_t release = _capacity - retain; - const size_t limit = MIN2(align_up(_current_max_capacity >> 7, ZGranuleSize), 256 * M); - const size_t flush = MIN2(release, limit); - - // Flush pages to uncommit - flushed = _cache.flush_for_uncommit(flush, &pages, timeout); - if (flushed == 0) { - // Nothing flushed - return 0; + // Update used low + for (auto& stats : _collection_stats) { + if (used < stats._used_low) { + stats._used_low = used; } + } +} - // Record flushed pages as claimed - Atomic::add(&_claimed, flushed); +void ZPageAllocator::safe_destroy_page(ZPage* page) { + // Destroy page safely + _safe_destroy.schedule_delete(page); +} + +void ZPageAllocator::free_page(ZPage* page) { + // Extract the id from the page + const ZGenerationId id = page->generation_id(); + const size_t size = page->size(); + + // Extract vmems and destroy the page + ZArray vmems; + prepare_memory_for_free(page, &vmems); + + // Updated used statistics + decrease_used_generation(id, size); + + // Free the extracted vmems + free_memory(&vmems); +} + +void ZPageAllocator::free_pages(ZGenerationId id, const ZArray* pages) { + // Prepare memory from pages to be cached + ZArray vmems; + for (ZPage* page : *pages) { + assert(page->generation_id() == id, "All pages must be from the same generation"); + const size_t size = page->size(); + + // Extract vmems and destroy the page + prepare_memory_for_free(page, &vmems); + + // Updated used statistics + decrease_used_generation(id, size); } - // Unmap, uncommit, and destroy flushed pages - ZListRemoveIterator iter(&pages); - for (ZPage* page; iter.next(&page);) { - unmap_page(page); - uncommit_page(page); - destroy_page(page); - } - - { - SuspendibleThreadSetJoiner sts_joiner; - ZLocker locker(&_lock); - - // Adjust claimed and capacity to reflect the uncommit - Atomic::sub(&_claimed, flushed); - decrease_capacity(flushed, false /* set_max_capacity */); - } - - return flushed; + // Free the extracted vmems + 
free_memory(&vmems); } void ZPageAllocator::enable_safe_destroy() const { @@ -967,14 +2299,6 @@ void ZPageAllocator::disable_safe_destroy() const { _safe_destroy.disable_deferred_delete(); } -void ZPageAllocator::enable_safe_recycle() const { - _safe_recycle.activate(); -} - -void ZPageAllocator::disable_safe_recycle() const { - _safe_recycle.deactivate(); -} - static bool has_alloc_seen_young(const ZPageAllocation* allocation) { return allocation->young_seqnum() != ZGeneration::young()->seqnum(); } @@ -1045,7 +2369,94 @@ void ZPageAllocator::handle_alloc_stalling_for_old(bool cleared_all_soft_refs) { restart_gc(); } -void ZPageAllocator::threads_do(ThreadClosure* tc) const { - tc->do_thread(_unmapper); - tc->do_thread(_uncommitter); +ZPartitionConstIterator ZPageAllocator::partition_iterator() const { + return ZPartitionConstIterator(&_partitions); +} + +ZPartitionIterator ZPageAllocator::partition_iterator() { + return ZPartitionIterator(&_partitions); +} + +void ZPageAllocator::threads_do(ThreadClosure* tc) const { + ZPartitionConstIterator iter = partition_iterator(); + for (const ZPartition* partition; iter.next(&partition);) { + partition->threads_do(tc); + } +} + +void ZPageAllocator::print_on(outputStream* st) const { + ZLocker lock(&_lock); + print_on_inner(st); +} + +static bool try_lock_on_error(ZLock* lock) { + if (VMError::is_error_reported() && VMError::is_error_reported_in_current_thread()) { + return lock->try_lock(); + } + + lock->lock(); + + return true; +} + +void ZPageAllocator::print_extended_on_error(outputStream* st) const { + st->print_cr("ZMappedCache:"); + + streamIndentor indentor(st, 1); + + if (!try_lock_on_error(&_lock)) { + // We can't print without taking the lock since printing the contents of + // the cache requires iterating over the nodes in the cache's tree, which + // is not thread-safe. + st->print_cr(""); + + return; + } + + // Print each partition's cache content + ZPartitionConstIterator iter = partition_iterator(); + for (const ZPartition* partition; iter.next(&partition);) { + partition->print_extended_on_error(st); + } + + _lock.unlock(); +} + +void ZPageAllocator::print_on_error(outputStream* st) const { + const bool locked = try_lock_on_error(&_lock); + + if (!locked) { + st->print_cr(""); + } + + // Print information even though we have not successfully taken the lock. + // This is thread-safe, but may produce inconsistent results. + print_on_inner(st); + + if (locked) { + _lock.unlock(); + } +} + +void ZPageAllocator::print_on_inner(outputStream* st) const { + // Print total usage + st->print("ZHeap"); + st->fill_to(17); + st->print_cr("used %zuM, capacity %zuM, max capacity %zuM", + used() / M, capacity() / M, max_capacity() / M); + + // Print per-partition + + streamIndentor indentor(st, 1); + + if (_partitions.count() == 1) { + // The summary printing is redundant if we only have one partition + _partitions.get(0).print_cache_on(st); + return; + } + + ZPartitionConstIterator iter = partition_iterator(); + for (const ZPartition* partition; iter.next(&partition);) { + partition->print_on(st); + } } diff --git a/src/hotspot/share/gc/z/zPageAllocator.hpp b/src/hotspot/share/gc/z/zPageAllocator.hpp index e80169fe260..05b0d6774d9 100644 --- a/src/hotspot/share/gc/z/zPageAllocator.hpp +++ b/src/hotspot/share/gc/z/zPageAllocator.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. 
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -24,68 +24,72 @@ #ifndef SHARE_GC_Z_ZPAGEALLOCATOR_HPP #define SHARE_GC_Z_ZPAGEALLOCATOR_HPP +#include "gc/z/zAddress.hpp" #include "gc/z/zAllocationFlags.hpp" #include "gc/z/zArray.hpp" +#include "gc/z/zGenerationId.hpp" +#include "gc/z/zGranuleMap.hpp" #include "gc/z/zList.hpp" #include "gc/z/zLock.hpp" +#include "gc/z/zMappedCache.hpp" +#include "gc/z/zPage.hpp" #include "gc/z/zPageAge.hpp" -#include "gc/z/zPageCache.hpp" #include "gc/z/zPageType.hpp" -#include "gc/z/zPhysicalMemory.hpp" +#include "gc/z/zPhysicalMemoryManager.hpp" #include "gc/z/zSafeDelete.hpp" -#include "gc/z/zVirtualMemory.hpp" +#include "gc/z/zUncommitter.hpp" +#include "gc/z/zValue.hpp" +#include "gc/z/zVirtualMemoryManager.hpp" +#include "utilities/ostream.hpp" class ThreadClosure; class ZGeneration; +class ZMemoryAllocation; +class ZMultiPartitionAllocation; class ZPageAllocation; class ZPageAllocator; class ZPageAllocatorStats; +class ZSegmentStash; +class ZSinglePartitionAllocation; +class ZVirtualMemory; class ZWorkers; -class ZUncommitter; -class ZUnmapper; -class ZSafePageRecycle { +class ZPartition { + friend class VMStructs; + friend class ZPageAllocator; + private: - ZPageAllocator* _page_allocator; - ZActivatedArray _unsafe_to_recycle; + ZPageAllocator* const _page_allocator; + ZMappedCache _cache; + ZUncommitter _uncommitter; + const size_t _min_capacity; + const size_t _max_capacity; + volatile size_t _current_max_capacity; + volatile size_t _capacity; + volatile size_t _claimed; + size_t _used; + double _last_commit; + double _last_uncommit; + size_t _to_uncommit; + const uint32_t _numa_id; + + const ZVirtualMemoryManager& virtual_memory_manager() const; + ZVirtualMemoryManager& virtual_memory_manager(); + + const ZPhysicalMemoryManager& physical_memory_manager() const; + ZPhysicalMemoryManager& physical_memory_manager(); + + void verify_virtual_memory_multi_partition_association(const ZVirtualMemory& vmem) const NOT_DEBUG_RETURN; + void verify_virtual_memory_association(const ZVirtualMemory& vmem, bool check_multi_partition = false) const NOT_DEBUG_RETURN; + void verify_virtual_memory_association(const ZArray* vmems) const NOT_DEBUG_RETURN; + void verify_memory_allocation_association(const ZMemoryAllocation* allocation) const NOT_DEBUG_RETURN; public: - ZSafePageRecycle(ZPageAllocator* page_allocator); + ZPartition(uint32_t numa_id, ZPageAllocator* page_allocator); - void activate(); - void deactivate(); + uint32_t numa_id() const; - ZPage* register_and_clone_if_activated(ZPage* page); -}; - -class ZPageAllocator { - friend class VMStructs; - friend class ZUnmapper; - friend class ZUncommitter; - -private: - mutable ZLock _lock; - ZPageCache _cache; - ZVirtualMemoryManager _virtual; - ZPhysicalMemoryManager _physical; - const size_t _min_capacity; - const size_t _initial_capacity; - const size_t _max_capacity; - volatile size_t _current_max_capacity; - volatile size_t _capacity; - volatile size_t _claimed; - volatile size_t _used; - size_t _used_generations[2]; - struct { - size_t _used_high; - size_t _used_low; - } _collection_stats[2]; - ZList _stalled; - ZUnmapper* _unmapper; - ZUncommitter* _uncommitter; - mutable ZSafeDelete _safe_destroy; - mutable ZSafePageRecycle _safe_recycle; - bool _initialized; + size_t available() const; size_t increase_capacity(size_t size); void decrease_capacity(size_t size, bool set_max_capacity); @@ -93,38 +97,146 @@ private: 
void increase_used(size_t size); void decrease_used(size_t size); - void increase_used_generation(ZGenerationId id, size_t size); - void decrease_used_generation(ZGenerationId id, size_t size); + void free_memory(const ZVirtualMemory& vmem); - bool commit_page(ZPage* page); - void uncommit_page(ZPage* page); - - void map_page(const ZPage* page) const; - void unmap_page(const ZPage* page) const; - - void destroy_page(ZPage* page); - - bool should_defragment(const ZPage* page) const; - ZPage* defragment_page(ZPage* page); - - bool is_alloc_allowed(size_t size) const; - - bool alloc_page_common_inner(ZPageType type, size_t size, ZList* pages); - bool alloc_page_common(ZPageAllocation* allocation); - bool alloc_page_stall(ZPageAllocation* allocation); - bool alloc_page_or_stall(ZPageAllocation* allocation); - bool is_alloc_satisfied(ZPageAllocation* allocation) const; - ZPage* alloc_page_create(ZPageAllocation* allocation); - ZPage* alloc_page_finalize(ZPageAllocation* allocation); - void free_pages_alloc_failed(ZPageAllocation* allocation); - - void satisfy_stalled(); + void claim_from_cache_or_increase_capacity(ZMemoryAllocation* allocation); + bool claim_capacity(ZMemoryAllocation* allocation); size_t uncommit(uint64_t* timeout); + void sort_segments_physical(const ZVirtualMemory& vmem); + + void claim_physical(const ZVirtualMemory& vmem); + void free_physical(const ZVirtualMemory& vmem); + size_t commit_physical(const ZVirtualMemory& vmem); + size_t uncommit_physical(const ZVirtualMemory& vmem); + + void map_virtual(const ZVirtualMemory& vmem); + void unmap_virtual(const ZVirtualMemory& vmem); + + void map_virtual_from_multi_partition(const ZVirtualMemory& vmem); + void unmap_virtual_from_multi_partition(const ZVirtualMemory& vmem); + + ZVirtualMemory claim_virtual(size_t size); + size_t claim_virtual(size_t size, ZArray* vmems_out); + void free_virtual(const ZVirtualMemory& vmem); + + void free_and_claim_virtual_from_low_many(const ZVirtualMemory& vmem, ZArray* vmems_out); + ZVirtualMemory free_and_claim_virtual_from_low_exact_or_many(size_t size, ZArray* vmems_in_out); + + bool prime(ZWorkers* workers, size_t size); + + ZVirtualMemory prepare_harvested_and_claim_virtual(ZMemoryAllocation* allocation); + + void copy_physical_segments_to_partition(const ZVirtualMemory& at, const ZVirtualMemory& from); + void copy_physical_segments_from_partition(const ZVirtualMemory& at, const ZVirtualMemory& to); + + void commit_increased_capacity(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem); + void map_memory(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem); + + void free_memory_alloc_failed(ZMemoryAllocation* allocation); + + void threads_do(ThreadClosure* tc) const; + + void print_on(outputStream* st) const; + void print_cache_on(outputStream* st) const; + void print_extended_on_error(outputStream* st) const; +}; + +using ZPartitionIterator = ZPerNUMAIterator; +using ZPartitionConstIterator = ZPerNUMAConstIterator; + +class ZPageAllocator { + friend class VMStructs; + friend class ZMultiPartitionTracker; + friend class ZPartition; + friend class ZUncommitter; + +private: + mutable ZLock _lock; + ZVirtualMemoryManager _virtual; + ZPhysicalMemoryManager _physical; + const size_t _min_capacity; + const size_t _max_capacity; + volatile size_t _used; + volatile size_t _used_generations[2]; + struct { + size_t _used_high; + size_t _used_low; + } _collection_stats[2]; + ZPerNUMA _partitions; + ZList _stalled; + mutable ZSafeDelete _safe_destroy; + bool _initialized; + + bool 
alloc_page_stall(ZPageAllocation* allocation); + ZPage* alloc_page_inner(ZPageAllocation* allocation); + + bool claim_capacity_or_stall(ZPageAllocation* allocation); + bool claim_capacity(ZPageAllocation* allocation); + bool claim_capacity_single_partition(ZSinglePartitionAllocation* single_partition_allocation, uint32_t partition_id); + void claim_capacity_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, uint32_t start_partition); + + ZVirtualMemory satisfied_from_cache_vmem(const ZPageAllocation* allocation) const; + + ZVirtualMemory claim_virtual_memory(ZPageAllocation* allocation); + ZVirtualMemory claim_virtual_memory_single_partition(ZSinglePartitionAllocation* single_partition_allocation); + ZVirtualMemory claim_virtual_memory_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation); + + void copy_claimed_physical_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem); + + void claim_physical_for_increased_capacity(ZPageAllocation* allocation, const ZVirtualMemory& vmem); + void claim_physical_for_increased_capacity_single_partition(ZSinglePartitionAllocation* allocation, const ZVirtualMemory& vmem); + void claim_physical_for_increased_capacity_multi_partition(const ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem); + void claim_physical_for_increased_capacity(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem); + + bool commit_and_map(ZPageAllocation* allocation, const ZVirtualMemory& vmem); + bool commit_and_map_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem); + bool commit_and_map_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem); + + void commit(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem); + bool commit_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem); + bool commit_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem); + + void unmap_harvested_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation); + + void map_committed_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem); + void map_committed_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem); + + void cleanup_failed_commit_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem); + void cleanup_failed_commit_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem); + + void free_after_alloc_page_failed(ZPageAllocation* allocation); + + void free_memory_alloc_failed(ZPageAllocation* allocation); + void free_memory_alloc_failed_single_partition(ZSinglePartitionAllocation* single_partition_allocation); + void free_memory_alloc_failed_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation); + void free_memory_alloc_failed(ZMemoryAllocation* allocation); + + ZPage* create_page(ZPageAllocation* allocation, const ZVirtualMemory& vmem); + + void prepare_memory_for_free(ZPage* page, ZArray* vmems); + void remap_and_defragment(const ZVirtualMemory& vmem, ZArray* vmems_out); + void free_memory(ZArray* vmems); + + void satisfy_stalled(); + + bool is_multi_partition_enabled() const; + + const ZPartition& partition_from_partition_id(uint32_t partition_id) const; + ZPartition& 
partition_from_partition_id(uint32_t partition_id); + ZPartition& partition_from_vmem(const ZVirtualMemory& vmem); + + size_t sum_available() const; + + void increase_used(size_t size); + void decrease_used(size_t size); + void notify_out_of_memory(); void restart_gc() const; + void print_on_inner(outputStream* st) const; + public: ZPageAllocator(size_t min_capacity, size_t initial_capacity, @@ -135,56 +247,61 @@ public: bool prime_cache(ZWorkers* workers, size_t size); - size_t initial_capacity() const; size_t min_capacity() const; size_t max_capacity() const; size_t soft_max_capacity() const; + size_t current_max_capacity() const; size_t capacity() const; size_t used() const; size_t used_generation(ZGenerationId id) const; size_t unused() const; - void promote_used(size_t size); + void increase_used_generation(ZGenerationId id, size_t size); + void decrease_used_generation(ZGenerationId id, size_t size); + + void promote_used(const ZPage* from, const ZPage* to); ZPageAllocatorStats stats(ZGeneration* generation) const; void reset_statistics(ZGenerationId id); ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age); - ZPage* prepare_to_recycle(ZPage* page, bool allow_defragment); - void recycle_page(ZPage* page); void safe_destroy_page(ZPage* page); - void free_page(ZPage* page, bool allow_defragment); - void free_pages(const ZArray* pages); + void free_page(ZPage* page); + void free_pages(ZGenerationId id, const ZArray* pages); void enable_safe_destroy() const; void disable_safe_destroy() const; - void enable_safe_recycle() const; - void disable_safe_recycle() const; - bool is_alloc_stalling() const; bool is_alloc_stalling_for_old() const; void handle_alloc_stalling_for_young(); void handle_alloc_stalling_for_old(bool cleared_soft_refs); + ZPartitionConstIterator partition_iterator() const; + ZPartitionIterator partition_iterator(); + void threads_do(ThreadClosure* tc) const; + + void print_on(outputStream* st) const; + void print_extended_on_error(outputStream* st) const; + void print_on_error(outputStream* st) const; }; class ZPageAllocatorStats { private: - size_t _min_capacity; - size_t _max_capacity; - size_t _soft_max_capacity; - size_t _capacity; - size_t _used; - size_t _used_high; - size_t _used_low; - size_t _used_generation; - size_t _freed; - size_t _promoted; - size_t _compacted; - size_t _allocation_stalls; + const size_t _min_capacity; + const size_t _max_capacity; + const size_t _soft_max_capacity; + const size_t _capacity; + const size_t _used; + const size_t _used_high; + const size_t _used_low; + const size_t _used_generation; + const size_t _freed; + const size_t _promoted; + const size_t _compacted; + const size_t _allocation_stalls; public: ZPageAllocatorStats(size_t min_capacity, diff --git a/src/hotspot/share/gc/z/zPageCache.cpp b/src/hotspot/share/gc/z/zPageCache.cpp deleted file mode 100644 index c8e8ec9bdbd..00000000000 --- a/src/hotspot/share/gc/z/zPageCache.cpp +++ /dev/null @@ -1,332 +0,0 @@ -/* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "gc/z/zGlobals.hpp" -#include "gc/z/zList.inline.hpp" -#include "gc/z/zNUMA.inline.hpp" -#include "gc/z/zPage.inline.hpp" -#include "gc/z/zPageCache.hpp" -#include "gc/z/zStat.hpp" -#include "gc/z/zValue.inline.hpp" -#include "memory/allocation.hpp" -#include "runtime/globals.hpp" -#include "runtime/os.hpp" - -static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond); -static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond); -static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond); -static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond); - -class ZPageCacheFlushClosure : public StackObj { - friend class ZPageCache; - -protected: - const size_t _requested; - size_t _flushed; - -public: - ZPageCacheFlushClosure(size_t requested); - virtual bool do_page(const ZPage* page) = 0; -}; - -ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested) - : _requested(requested), - _flushed(0) {} - -ZPageCache::ZPageCache() - : _small(), - _medium(), - _large(), - _last_commit(0) {} - -ZPage* ZPageCache::alloc_small_page() { - const uint32_t numa_id = ZNUMA::id(); - const uint32_t numa_count = ZNUMA::count(); - - // Try NUMA local page cache - ZPage* const l1_page = _small.get(numa_id).remove_first(); - if (l1_page != nullptr) { - ZStatInc(ZCounterPageCacheHitL1); - return l1_page; - } - - // Try NUMA remote page cache(s) - uint32_t remote_numa_id = numa_id + 1; - const uint32_t remote_numa_count = numa_count - 1; - for (uint32_t i = 0; i < remote_numa_count; i++) { - if (remote_numa_id == numa_count) { - remote_numa_id = 0; - } - - ZPage* const l2_page = _small.get(remote_numa_id).remove_first(); - if (l2_page != nullptr) { - ZStatInc(ZCounterPageCacheHitL2); - return l2_page; - } - - remote_numa_id++; - } - - return nullptr; -} - -ZPage* ZPageCache::alloc_medium_page() { - ZPage* const page = _medium.remove_first(); - if (page != nullptr) { - ZStatInc(ZCounterPageCacheHitL1); - return page; - } - - return nullptr; -} - -ZPage* ZPageCache::alloc_large_page(size_t size) { - // Find a page with the right size - ZListIterator iter(&_large); - for (ZPage* page; iter.next(&page);) { - if (size == page->size()) { - // Page found - _large.remove(page); - ZStatInc(ZCounterPageCacheHitL1); - return page; - } - } - - return nullptr; -} - -ZPage* ZPageCache::alloc_oversized_medium_page(size_t size) { - if (size <= ZPageSizeMedium) { - return _medium.remove_first(); - } - - return nullptr; -} - -ZPage* ZPageCache::alloc_oversized_large_page(size_t size) { - // Find a page that is large enough - ZListIterator iter(&_large); - for (ZPage* page; iter.next(&page);) { - if (size <= page->size()) { - // Page found - _large.remove(page); - return page; - } - } - - return nullptr; -} - -ZPage* ZPageCache::alloc_oversized_page(size_t size) { - ZPage* page = alloc_oversized_large_page(size); - if 
(page == nullptr) { - page = alloc_oversized_medium_page(size); - } - - if (page != nullptr) { - ZStatInc(ZCounterPageCacheHitL3); - } - - return page; -} - -ZPage* ZPageCache::alloc_page(ZPageType type, size_t size) { - ZPage* page; - - // Try allocate exact page - if (type == ZPageType::small) { - page = alloc_small_page(); - } else if (type == ZPageType::medium) { - page = alloc_medium_page(); - } else { - page = alloc_large_page(size); - } - - if (page == nullptr) { - // Try allocate potentially oversized page - ZPage* const oversized = alloc_oversized_page(size); - if (oversized != nullptr) { - if (size < oversized->size()) { - // Split oversized page - page = oversized->split(type, size); - - // Cache remainder - free_page(oversized); - } else { - // Re-type correctly sized page - page = oversized->retype(type); - } - } - } - - if (page == nullptr) { - ZStatInc(ZCounterPageCacheMiss); - } - - return page; -} - -void ZPageCache::free_page(ZPage* page) { - const ZPageType type = page->type(); - if (type == ZPageType::small) { - _small.get(page->numa_id()).insert_first(page); - } else if (type == ZPageType::medium) { - _medium.insert_first(page); - } else { - _large.insert_first(page); - } -} - -bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList* from, ZList* to) { - ZPage* const page = from->last(); - if (page == nullptr || !cl->do_page(page)) { - // Don't flush page - return false; - } - - // Flush page - from->remove(page); - to->insert_last(page); - return true; -} - -void ZPageCache::flush_list(ZPageCacheFlushClosure* cl, ZList* from, ZList* to) { - while (flush_list_inner(cl, from, to)); -} - -void ZPageCache::flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA >* from, ZList* to) { - const uint32_t numa_count = ZNUMA::count(); - uint32_t numa_done = 0; - uint32_t numa_next = 0; - - // Flush lists round-robin - while (numa_done < numa_count) { - ZList* const numa_list = from->addr(numa_next); - if (++numa_next == numa_count) { - numa_next = 0; - } - - if (flush_list_inner(cl, numa_list, to)) { - // Not done - numa_done = 0; - } else { - // Done - numa_done++; - } - } -} - -void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList* to) { - // Prefer flushing large, then medium and last small pages - flush_list(cl, &_large, to); - flush_list(cl, &_medium, to); - flush_per_numa_lists(cl, &_small, to); - - if (cl->_flushed > cl->_requested) { - // Overflushed, re-insert part of last page into the cache - const size_t overflushed = cl->_flushed - cl->_requested; - ZPage* const reinsert = to->last()->split(overflushed); - free_page(reinsert); - cl->_flushed -= overflushed; - } -} - -class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure { -public: - ZPageCacheFlushForAllocationClosure(size_t requested) - : ZPageCacheFlushClosure(requested) {} - - virtual bool do_page(const ZPage* page) { - if (_flushed < _requested) { - // Flush page - _flushed += page->size(); - return true; - } - - // Don't flush page - return false; - } -}; - -void ZPageCache::flush_for_allocation(size_t requested, ZList* to) { - ZPageCacheFlushForAllocationClosure cl(requested); - flush(&cl, to); -} - -class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure { -private: - const uint64_t _now; - uint64_t* _timeout; - -public: - ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t now, uint64_t* timeout) - : ZPageCacheFlushClosure(requested), - _now(now), - _timeout(timeout) { - // Set initial timeout - *_timeout = ZUncommitDelay; - } - - virtual bool 
do_page(const ZPage* page) { - const uint64_t expires = page->last_used() + ZUncommitDelay; - if (expires > _now) { - // Don't flush page, record shortest non-expired timeout - *_timeout = MIN2(*_timeout, expires - _now); - return false; - } - - if (_flushed >= _requested) { - // Don't flush page, requested amount flushed - return false; - } - - // Flush page - _flushed += page->size(); - return true; - } -}; - -size_t ZPageCache::flush_for_uncommit(size_t requested, ZList* to, uint64_t* timeout) { - const uint64_t now = (uint64_t)os::elapsedTime(); - const uint64_t expires = _last_commit + ZUncommitDelay; - if (expires > now) { - // Delay uncommit, set next timeout - *timeout = expires - now; - return 0; - } - - if (requested == 0) { - // Nothing to flush, set next timeout - *timeout = ZUncommitDelay; - return 0; - } - - ZPageCacheFlushForUncommitClosure cl(requested, now, timeout); - flush(&cl, to); - - return cl._flushed; -} - -void ZPageCache::set_last_commit() { - _last_commit = (uint64_t)ceil(os::elapsedTime()); -} diff --git a/src/hotspot/share/gc/z/zPageCache.hpp b/src/hotspot/share/gc/z/zPageCache.hpp deleted file mode 100644 index b28aaa6c10d..00000000000 --- a/src/hotspot/share/gc/z/zPageCache.hpp +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
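For reference, the uncommit-delay policy implemented by the removed ZPageCacheFlushForUncommitClosure above can be sketched in plain C++ as follows. This is a minimal sketch under simplified assumptions: Page, the vector-based cache, and uncommit_delay are stand-ins, and the loop scans all pages rather than walking ZLists through the flush closures as the real code does.

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Page {
  uint64_t last_used; // seconds since VM start
  size_t   size;      // bytes
};

// Flush up to 'requested' bytes of pages whose uncommit delay has expired,
// and report the shortest remaining delay among the pages that were kept.
static size_t flush_for_uncommit(std::vector<Page>& cache,
                                 size_t requested,
                                 uint64_t now,
                                 uint64_t uncommit_delay,
                                 uint64_t* timeout) {
  *timeout = uncommit_delay; // initial timeout
  size_t flushed = 0;

  for (auto it = cache.begin(); it != cache.end() && flushed < requested; ) {
    const uint64_t expires = it->last_used + uncommit_delay;
    if (expires > now) {
      // Not expired yet; record the shortest non-expired timeout and keep the page
      *timeout = std::min<uint64_t>(*timeout, expires - now);
      ++it;
      continue;
    }

    // Expired; flush the page
    flushed += it->size;
    it = cache.erase(it);
  }

  return flushed;
}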
- */ - -#ifndef SHARE_GC_Z_ZPAGECACHE_HPP -#define SHARE_GC_Z_ZPAGECACHE_HPP - -#include "gc/z/zList.hpp" -#include "gc/z/zPage.hpp" -#include "gc/z/zPageType.hpp" -#include "gc/z/zValue.hpp" - -class ZPageCacheFlushClosure; - -class ZPageCache { -private: - ZPerNUMA > _small; - ZList _medium; - ZList _large; - uint64_t _last_commit; - - ZPage* alloc_small_page(); - ZPage* alloc_medium_page(); - ZPage* alloc_large_page(size_t size); - - ZPage* alloc_oversized_medium_page(size_t size); - ZPage* alloc_oversized_large_page(size_t size); - ZPage* alloc_oversized_page(size_t size); - - bool flush_list_inner(ZPageCacheFlushClosure* cl, ZList* from, ZList* to); - void flush_list(ZPageCacheFlushClosure* cl, ZList* from, ZList* to); - void flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA >* from, ZList* to); - void flush(ZPageCacheFlushClosure* cl, ZList* to); - -public: - ZPageCache(); - - ZPage* alloc_page(ZPageType type, size_t size); - void free_page(ZPage* page); - - void flush_for_allocation(size_t requested, ZList* to); - size_t flush_for_uncommit(size_t requested, ZList* to, uint64_t* timeout); - - void set_last_commit(); -}; - -#endif // SHARE_GC_Z_ZPAGECACHE_HPP diff --git a/src/hotspot/share/gc/z/zPageTable.cpp b/src/hotspot/share/gc/z/zPageTable.cpp index d960270c451..5bf94c6bae3 100644 --- a/src/hotspot/share/gc/z/zPageTable.cpp +++ b/src/hotspot/share/gc/z/zPageTable.cpp @@ -81,11 +81,9 @@ ZGenerationPagesParallelIterator::ZGenerationPagesParallelIterator(const ZPageTa _generation_id(id), _page_allocator(page_allocator) { _page_allocator->enable_safe_destroy(); - _page_allocator->enable_safe_recycle(); } ZGenerationPagesParallelIterator::~ZGenerationPagesParallelIterator() { - _page_allocator->disable_safe_recycle(); _page_allocator->disable_safe_destroy(); } @@ -94,10 +92,8 @@ ZGenerationPagesIterator::ZGenerationPagesIterator(const ZPageTable* page_table, _generation_id(id), _page_allocator(page_allocator) { _page_allocator->enable_safe_destroy(); - _page_allocator->enable_safe_recycle(); } ZGenerationPagesIterator::~ZGenerationPagesIterator() { - _page_allocator->disable_safe_recycle(); _page_allocator->disable_safe_destroy(); } diff --git a/src/hotspot/share/gc/z/zPageTable.inline.hpp b/src/hotspot/share/gc/z/zPageTable.inline.hpp index 583017d5c9c..6310a2104df 100644 --- a/src/hotspot/share/gc/z/zPageTable.inline.hpp +++ b/src/hotspot/share/gc/z/zPageTable.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -103,11 +103,9 @@ inline bool ZGenerationPagesIterator::next(ZPage** page) { template inline void ZGenerationPagesIterator::yield(Function function) { _page_allocator->disable_safe_destroy(); - _page_allocator->disable_safe_recycle(); function(); - _page_allocator->enable_safe_recycle(); _page_allocator->enable_safe_destroy(); } diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.cpp b/src/hotspot/share/gc/z/zPhysicalMemory.cpp deleted file mode 100644 index 5b209a4c01c..00000000000 --- a/src/hotspot/share/gc/z/zPhysicalMemory.cpp +++ /dev/null @@ -1,386 +0,0 @@ -/* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/z/zAddress.inline.hpp" -#include "gc/z/zArray.inline.hpp" -#include "gc/z/zGlobals.hpp" -#include "gc/z/zLargePages.inline.hpp" -#include "gc/z/zList.inline.hpp" -#include "gc/z/zNMT.hpp" -#include "gc/z/zNUMA.inline.hpp" -#include "gc/z/zPhysicalMemory.inline.hpp" -#include "logging/log.hpp" -#include "runtime/globals.hpp" -#include "runtime/globals_extension.hpp" -#include "runtime/init.hpp" -#include "runtime/os.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" -#include "utilities/globalDefinitions.hpp" -#include "utilities/powerOfTwo.hpp" - -ZPhysicalMemory::ZPhysicalMemory() - : _segments() {} - -ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment) - : _segments() { - _segments.append(segment); -} - -ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem) - : _segments(pmem.nsegments()) { - _segments.appendAll(&pmem._segments); -} - -const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) { - // Check for self-assignment - if (this == &pmem) { - return *this; - } - - // Free and copy segments - _segments.clear_and_deallocate(); - _segments.reserve(pmem.nsegments()); - _segments.appendAll(&pmem._segments); - - return *this; -} - -size_t ZPhysicalMemory::size() const { - size_t size = 0; - - for (int i = 0; i < _segments.length(); i++) { - size += _segments.at(i).size(); - } - - return size; -} - -void ZPhysicalMemory::insert_segment(int index, zoffset start, size_t size, bool committed) { - _segments.insert_before(index, ZPhysicalMemorySegment(start, size, committed)); -} - -void ZPhysicalMemory::replace_segment(int index, zoffset start, size_t size, bool committed) { - _segments.at_put(index, ZPhysicalMemorySegment(start, size, committed)); -} - -void ZPhysicalMemory::remove_segment(int index) { - _segments.remove_at(index); -} - -void ZPhysicalMemory::add_segments(const ZPhysicalMemory& pmem) { - for (int i = 0; i < pmem.nsegments(); i++) { - add_segment(pmem.segment(i)); - } -} - -void ZPhysicalMemory::remove_segments() { - _segments.clear_and_deallocate(); -} - -static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMemorySegment& after) { - return before.end() == after.start() && before.is_committed() == after.is_committed(); -} - -void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) { - // Insert segments in address order, merge segments when possible - for (int i = _segments.length(); i > 0; i--) { - const int current = i - 1; - - if (_segments.at(current).end() <= segment.start()) { - if 
(is_mergable(_segments.at(current), segment)) { - if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) { - // Merge with end of current segment and start of next segment - const zoffset start = _segments.at(current).start(); - const size_t size = _segments.at(current).size() + segment.size() + _segments.at(current + 1).size(); - replace_segment(current, start, size, segment.is_committed()); - remove_segment(current + 1); - return; - } - - // Merge with end of current segment - const zoffset start = _segments.at(current).start(); - const size_t size = _segments.at(current).size() + segment.size(); - replace_segment(current, start, size, segment.is_committed()); - return; - } else if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) { - // Merge with start of next segment - const zoffset start = segment.start(); - const size_t size = segment.size() + _segments.at(current + 1).size(); - replace_segment(current + 1, start, size, segment.is_committed()); - return; - } - - // Insert after current segment - insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed()); - return; - } - } - - if (_segments.length() > 0 && is_mergable(segment, _segments.at(0))) { - // Merge with start of first segment - const zoffset start = segment.start(); - const size_t size = segment.size() + _segments.at(0).size(); - replace_segment(0, start, size, segment.is_committed()); - return; - } - - // Insert before first segment - insert_segment(0, segment.start(), segment.size(), segment.is_committed()); -} - -bool ZPhysicalMemory::commit_segment(int index, size_t size) { - assert(size <= _segments.at(index).size(), "Invalid size"); - assert(!_segments.at(index).is_committed(), "Invalid state"); - - if (size == _segments.at(index).size()) { - // Completely committed - _segments.at(index).set_committed(true); - return true; - } - - if (size > 0) { - // Partially committed, split segment - insert_segment(index + 1, _segments.at(index).start() + size, _segments.at(index).size() - size, false /* committed */); - replace_segment(index, _segments.at(index).start(), size, true /* committed */); - } - - return false; -} - -bool ZPhysicalMemory::uncommit_segment(int index, size_t size) { - assert(size <= _segments.at(index).size(), "Invalid size"); - assert(_segments.at(index).is_committed(), "Invalid state"); - - if (size == _segments.at(index).size()) { - // Completely uncommitted - _segments.at(index).set_committed(false); - return true; - } - - if (size > 0) { - // Partially uncommitted, split segment - insert_segment(index + 1, _segments.at(index).start() + size, _segments.at(index).size() - size, true /* committed */); - replace_segment(index, _segments.at(index).start(), size, false /* committed */); - } - - return false; -} - -ZPhysicalMemory ZPhysicalMemory::split(size_t size) { - ZPhysicalMemory pmem; - int nsegments = 0; - - for (int i = 0; i < _segments.length(); i++) { - const ZPhysicalMemorySegment& segment = _segments.at(i); - if (pmem.size() < size) { - if (pmem.size() + segment.size() <= size) { - // Transfer segment - pmem.add_segment(segment); - } else { - // Split segment - const size_t split_size = size - pmem.size(); - pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size, segment.is_committed())); - _segments.at_put(nsegments++, ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed())); - } - } else { - // Keep segment - 
_segments.at_put(nsegments++, segment); - } - } - - _segments.trunc_to(nsegments); - - return pmem; -} - -ZPhysicalMemory ZPhysicalMemory::split_committed() { - ZPhysicalMemory pmem; - int nsegments = 0; - - for (int i = 0; i < _segments.length(); i++) { - const ZPhysicalMemorySegment& segment = _segments.at(i); - if (segment.is_committed()) { - // Transfer segment - pmem.add_segment(segment); - } else { - // Keep segment - _segments.at_put(nsegments++, segment); - } - } - - _segments.trunc_to(nsegments); - - return pmem; -} - -ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) - : _backing(max_capacity) { - // Make the whole range free - _manager.register_range(zoffset(0), max_capacity); -} - -bool ZPhysicalMemoryManager::is_initialized() const { - return _backing.is_initialized(); -} - -void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const { - _backing.warn_commit_limits(max_capacity); -} - -void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) { - assert(!is_init_completed(), "Invalid state"); - - // If uncommit is not explicitly disabled, max capacity is greater than - // min capacity, and uncommit is supported by the platform, then uncommit - // will be enabled. - if (!ZUncommit) { - log_info_p(gc, init)("Uncommit: Disabled"); - return; - } - - if (max_capacity == min_capacity) { - log_info_p(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)"); - FLAG_SET_ERGO(ZUncommit, false); - return; - } - - // Test if uncommit is supported by the operating system by committing - // and then uncommitting a granule. - ZPhysicalMemory pmem(ZPhysicalMemorySegment(zoffset(0), ZGranuleSize, false /* committed */)); - if (!commit(pmem) || !uncommit(pmem)) { - log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)"); - FLAG_SET_ERGO(ZUncommit, false); - return; - } - - log_info_p(gc, init)("Uncommit: Enabled"); - log_info_p(gc, init)("Uncommit Delay: %zus", ZUncommitDelay); -} - -void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) { - assert(is_aligned(size, ZGranuleSize), "Invalid size"); - - // Allocate segments - while (size > 0) { - size_t allocated = 0; - const zoffset start = _manager.alloc_low_address_at_most(size, &allocated); - assert(start != zoffset(UINTPTR_MAX), "Allocation should never fail"); - pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */)); - size -= allocated; - } -} - -void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) { - // Free segments - for (int i = 0; i < pmem.nsegments(); i++) { - const ZPhysicalMemorySegment& segment = pmem.segment(i); - _manager.free(segment.start(), segment.size()); - } -} - -bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) { - // Commit segments - for (int i = 0; i < pmem.nsegments(); i++) { - const ZPhysicalMemorySegment& segment = pmem.segment(i); - if (segment.is_committed()) { - // Segment already committed - continue; - } - - // Commit segment - const size_t committed = _backing.commit(segment.start(), segment.size()); - - // Register with NMT - if (committed > 0) { - ZNMT::commit(segment.start(), committed); - } - - // Register committed segment - if (!pmem.commit_segment(i, committed)) { - // Failed or partially failed - return false; - } - } - - // Success - return true; -} - -bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) { - // Commit segments - for (int i = 0; i < pmem.nsegments(); i++) { - const ZPhysicalMemorySegment& segment = 
pmem.segment(i); - if (!segment.is_committed()) { - // Segment already uncommitted - continue; - } - - // Uncommit segment - const size_t uncommitted = _backing.uncommit(segment.start(), segment.size()); - - // Unregister with NMT - if (uncommitted > 0) { - ZNMT::uncommit(segment.start(), uncommitted); - } - - // Deregister uncommitted segment - if (!pmem.uncommit_segment(i, uncommitted)) { - // Failed or partially failed - return false; - } - } - - // Success - return true; -} - -// Map virtual memory to physcial memory -void ZPhysicalMemoryManager::map(zoffset offset, const ZPhysicalMemory& pmem) const { - const zaddress_unsafe addr = ZOffset::address_unsafe(offset); - - size_t size = 0; - - // Map segments - for (int i = 0; i < pmem.nsegments(); i++) { - const ZPhysicalMemorySegment& segment = pmem.segment(i); - _backing.map(addr + size, segment.size(), segment.start()); - size += segment.size(); - } - - // Setup NUMA interleaving for large pages - if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) { - // To get granule-level NUMA interleaving when using large pages, - // we simply let the kernel interleave the memory for us at page - // fault time. - os::numa_make_global((char*)addr, size); - } -} - -// Unmap virtual memory from physical memory -void ZPhysicalMemoryManager::unmap(zoffset offset, size_t size) const { - const zaddress_unsafe addr = ZOffset::address_unsafe(offset); - - _backing.unmap(addr, size); -} diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.hpp b/src/hotspot/share/gc/z/zPhysicalMemory.hpp deleted file mode 100644 index 09f71013258..00000000000 --- a/src/hotspot/share/gc/z/zPhysicalMemory.hpp +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
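The insert-and-merge policy of the removed ZPhysicalMemory::add_segment can be sketched on a plain std::vector. Segment and add_segment here are simplified stand-ins, and, as in the original, only address-adjacent segments with the same committed state are merged.

#include <cstddef>
#include <cstdint>
#include <vector>

struct Segment {
  uintptr_t start;
  size_t    size;
  bool      committed;
  uintptr_t end() const { return start + size; }
};

static bool mergable(const Segment& before, const Segment& after) {
  return before.end() == after.start && before.committed == after.committed;
}

// Insert a non-overlapping segment into an address-ordered vector,
// merging with adjacent segments when possible.
static void add_segment(std::vector<Segment>& segs, Segment s) {
  // Find the insertion point in address order
  size_t i = 0;
  while (i < segs.size() && segs[i].end() <= s.start) {
    i++;
  }

  // Try to merge with the previous segment
  if (i > 0 && mergable(segs[i - 1], s)) {
    segs[i - 1].size += s.size;
    // Possibly also merge the grown segment with the next one
    if (i < segs.size() && mergable(segs[i - 1], segs[i])) {
      segs[i - 1].size += segs[i].size;
      segs.erase(segs.begin() + i);
    }
    return;
  }

  // Try to merge with the next segment
  if (i < segs.size() && mergable(s, segs[i])) {
    segs[i].start = s.start;
    segs[i].size += s.size;
    return;
  }

  // No merge possible, insert as a new segment
  segs.insert(segs.begin() + i, s);
}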
- */ - -#ifndef SHARE_GC_Z_ZPHYSICALMEMORY_HPP -#define SHARE_GC_Z_ZPHYSICALMEMORY_HPP - -#include "gc/z/zAddress.hpp" -#include "gc/z/zArray.hpp" -#include "gc/z/zMemory.hpp" -#include "memory/allocation.hpp" -#include OS_HEADER(gc/z/zPhysicalMemoryBacking) - -class ZPhysicalMemorySegment : public CHeapObj { -private: - zoffset _start; - zoffset_end _end; - bool _committed; - -public: - ZPhysicalMemorySegment(); - ZPhysicalMemorySegment(zoffset start, size_t size, bool committed); - - zoffset start() const; - zoffset_end end() const; - size_t size() const; - - bool is_committed() const; - void set_committed(bool committed); -}; - -class ZPhysicalMemory { -private: - ZArray _segments; - - void insert_segment(int index, zoffset start, size_t size, bool committed); - void replace_segment(int index, zoffset start, size_t size, bool committed); - void remove_segment(int index); - -public: - ZPhysicalMemory(); - ZPhysicalMemory(const ZPhysicalMemorySegment& segment); - ZPhysicalMemory(const ZPhysicalMemory& pmem); - const ZPhysicalMemory& operator=(const ZPhysicalMemory& pmem); - - bool is_null() const; - size_t size() const; - - int nsegments() const; - const ZPhysicalMemorySegment& segment(int index) const; - - void add_segments(const ZPhysicalMemory& pmem); - void remove_segments(); - - void add_segment(const ZPhysicalMemorySegment& segment); - bool commit_segment(int index, size_t size); - bool uncommit_segment(int index, size_t size); - - ZPhysicalMemory split(size_t size); - ZPhysicalMemory split_committed(); -}; - -class ZPhysicalMemoryManager { -private: - ZPhysicalMemoryBacking _backing; - ZMemoryManager _manager; - -public: - ZPhysicalMemoryManager(size_t max_capacity); - - bool is_initialized() const; - - void warn_commit_limits(size_t max_capacity) const; - void try_enable_uncommit(size_t min_capacity, size_t max_capacity); - - void alloc(ZPhysicalMemory& pmem, size_t size); - void free(const ZPhysicalMemory& pmem); - - bool commit(ZPhysicalMemory& pmem); - bool uncommit(ZPhysicalMemory& pmem); - - void map(zoffset offset, const ZPhysicalMemory& pmem) const; - void unmap(zoffset offset, size_t size) const; -}; - -#endif // SHARE_GC_Z_ZPHYSICALMEMORY_HPP diff --git a/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp b/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp deleted file mode 100644 index 6d2380c9013..00000000000 --- a/src/hotspot/share/gc/z/zPhysicalMemory.inline.hpp +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. 
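The removed ZPhysicalMemory::split(size) declared above can likewise be sketched with plain segments: whole segments are transferred until the requested size is reached, and the boundary segment is divided so the returned memory holds exactly 'size' bytes (assuming at least that much is available). PmemSegment and split here are stand-in names for the example.

#include <cstddef>
#include <cstdint>
#include <vector>

struct PmemSegment {
  uintptr_t start;
  size_t    size;
  bool      committed;
};

static std::vector<PmemSegment> split(std::vector<PmemSegment>& segs, size_t size) {
  std::vector<PmemSegment> out;
  std::vector<PmemSegment> keep;
  size_t taken = 0;

  for (const PmemSegment& s : segs) {
    if (taken < size && taken + s.size <= size) {
      // Transfer the whole segment
      out.push_back(s);
      taken += s.size;
    } else if (taken < size) {
      // Split the boundary segment
      const size_t head = size - taken;
      out.push_back({s.start, head, s.committed});
      keep.push_back({s.start + head, s.size - head, s.committed});
      taken += head;
    } else {
      // Keep the remaining segments
      keep.push_back(s);
    }
  }

  segs.swap(keep);
  return out;
}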
- */ - -#ifndef SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP -#define SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP - -#include "gc/z/zPhysicalMemory.hpp" - -#include "gc/z/zAddress.inline.hpp" -#include "utilities/debug.hpp" - -inline ZPhysicalMemorySegment::ZPhysicalMemorySegment() - : _start(zoffset(UINTPTR_MAX)), - _end(zoffset_end(UINTPTR_MAX)), - _committed(false) {} - -inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(zoffset start, size_t size, bool committed) - : _start(start), - _end(to_zoffset_end(start, size)), - _committed(committed) {} - -inline zoffset ZPhysicalMemorySegment::start() const { - return _start; -} - -inline zoffset_end ZPhysicalMemorySegment::end() const { - return _end; -} - -inline size_t ZPhysicalMemorySegment::size() const { - return _end - _start; -} - -inline bool ZPhysicalMemorySegment::is_committed() const { - return _committed; -} - -inline void ZPhysicalMemorySegment::set_committed(bool committed) { - _committed = committed; -} - -inline bool ZPhysicalMemory::is_null() const { - return _segments.length() == 0; -} - -inline int ZPhysicalMemory::nsegments() const { - return _segments.length(); -} - -inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(int index) const { - return _segments.at(index); -} - -#endif // SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zPhysicalMemoryManager.cpp b/src/hotspot/share/gc/z/zPhysicalMemoryManager.cpp new file mode 100644 index 00000000000..8f9b8f2a285 --- /dev/null +++ b/src/hotspot/share/gc/z/zPhysicalMemoryManager.cpp @@ -0,0 +1,376 @@ +/* + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zArray.inline.hpp" +#include "gc/z/zGlobals.hpp" +#include "gc/z/zLargePages.inline.hpp" +#include "gc/z/zList.inline.hpp" +#include "gc/z/zNMT.hpp" +#include "gc/z/zNUMA.inline.hpp" +#include "gc/z/zPhysicalMemoryManager.hpp" +#include "gc/z/zRangeRegistry.inline.hpp" +#include "gc/z/zUtils.inline.hpp" +#include "gc/z/zValue.inline.hpp" +#include "gc/z/zVirtualMemory.inline.hpp" +#include "logging/log.hpp" +#include "runtime/globals.hpp" +#include "runtime/globals_extension.hpp" +#include "runtime/init.hpp" +#include "runtime/os.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" +#include "utilities/powerOfTwo.hpp" + +ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity) + : _backing(max_capacity), + _physical_mappings(ZAddressOffsetMax) { + assert(is_aligned(max_capacity, ZGranuleSize), "must be granule aligned"); + + // Setup backing storage limits + ZBackingOffsetMax = max_capacity; + ZBackingIndexMax = checked_cast(max_capacity >> ZGranuleSizeShift); + + // Install capacity into the registry + const size_t num_segments_total = max_capacity >> ZGranuleSizeShift; + zbacking_index_end next_index = zbacking_index_end::zero; + uint32_t numa_id; + ZPerNUMAIterator iter(&_partition_registries); + for (ZBackingIndexRegistry* registry; iter.next(®istry, &numa_id);) { + const size_t num_segments = ZNUMA::calculate_share(numa_id, num_segments_total, 1 /* granule */); + + if (num_segments == 0) { + // If the capacity consist of less granules than the number of partitions, + // some partitions will be empty. + break; + } + + const zbacking_index index = to_zbacking_index(next_index); + + // Insert the next number of segment indices into id's partition's registry + registry->insert({index, num_segments}); + + // Advance to next index by the inserted number of segment indices + next_index += num_segments; + } + + assert(untype(next_index) == ZBackingIndexMax, "must insert all capacity"); +} + +bool ZPhysicalMemoryManager::is_initialized() const { + return _backing.is_initialized(); +} + +void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const { + _backing.warn_commit_limits(max_capacity); +} + +void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) { + assert(!is_init_completed(), "Invalid state"); + + // If uncommit is not explicitly disabled, max capacity is greater than + // min capacity, and uncommit is supported by the platform, then uncommit + // will be enabled. + if (!ZUncommit) { + log_info_p(gc, init)("Uncommit: Disabled"); + return; + } + + if (max_capacity == min_capacity) { + log_info_p(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)"); + FLAG_SET_ERGO(ZUncommit, false); + return; + } + + // Test if uncommit is supported by the operating system by committing + // and then uncommitting a granule. 
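The constructor above hands the whole granule-index space out to per-NUMA registries, one contiguous range per partition. A plausible sketch of that distribution is shown below; the even-split share policy is an assumption made for the example, since the real code delegates the share calculation to ZNUMA::calculate_share().

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// Each partition gets a contiguous index range [first_index, first_index + count).
static std::vector<std::pair<size_t, size_t>> split_indices(size_t num_segments_total,
                                                            uint32_t numa_count) {
  std::vector<std::pair<size_t, size_t>> ranges;
  size_t next_index = 0;

  for (uint32_t numa_id = 0; numa_id < numa_count; numa_id++) {
    // Plausible even split, with the remainder spread over the first nodes
    const size_t share = num_segments_total / numa_count
                       + (numa_id < num_segments_total % numa_count ? 1 : 0);
    if (share == 0) {
      // Fewer granules than partitions: the remaining partitions stay empty
      break;
    }
    ranges.emplace_back(next_index, share);
    next_index += share;
  }

  // All capacity should have been handed out (next_index == num_segments_total)
  return ranges;
}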
+ const ZVirtualMemory vmem(zoffset(0), ZGranuleSize); + if (!commit(vmem, (uint32_t)-1) || !uncommit(vmem)) { + log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)"); + FLAG_SET_ERGO(ZUncommit, false); + return; + } + + log_info_p(gc, init)("Uncommit: Enabled"); + log_info_p(gc, init)("Uncommit Delay: %zus", ZUncommitDelay); +} + +void ZPhysicalMemoryManager::alloc(const ZVirtualMemory& vmem, uint32_t numa_id) { + zbacking_index* const pmem = _physical_mappings.addr(vmem.start()); + const size_t size = vmem.size(); + + assert(is_aligned(size, ZGranuleSize), "Invalid size"); + + size_t current_segment = 0; + size_t remaining_segments = size >> ZGranuleSizeShift; + + while (remaining_segments != 0) { + // Allocate a range of backing segment indices + ZBackingIndexRegistry& registry = _partition_registries.get(numa_id); + const ZBackingIndexRange range = registry.remove_from_low_at_most(remaining_segments); + assert(!range.is_null(), "Allocation should never fail"); + + const size_t num_allocated_segments = range.size(); + + // Insert backing segment indices in pmem + const zbacking_index start_i = range.start(); + for (size_t i = 0; i < num_allocated_segments; i++) { + pmem[current_segment + i] = start_i + i; + } + + // Advance by number of allocated segments + remaining_segments -= num_allocated_segments; + current_segment += num_allocated_segments; + } +} + +template +struct IterateInvoker { + template + bool operator()(Function function, zbacking_offset segment_start, size_t segment_size) const { + return function(segment_start, segment_size); + } +}; + +template<> +struct IterateInvoker { + template + bool operator()(Function function, zbacking_offset segment_start, size_t segment_size) const { + function(segment_start, segment_size); + return true; + } +}; + +template +bool for_each_segment_apply(const zbacking_index* pmem, size_t size, Function function) { + IterateInvoker invoker; + + // Total number of segment indices + const size_t num_segments = size >> ZGranuleSizeShift; + + // Apply the function over all zbacking_offset ranges consisting of consecutive indices + for (size_t i = 0; i < num_segments; i++) { + const size_t start_i = i; + + // Find index corresponding to the last index in the consecutive range starting at start_i + while (i + 1 < num_segments && to_zbacking_index_end(pmem[i], 1) == pmem[i + 1]) { + i++; + } + + const size_t last_i = i; + + // [start_i, last_i] now forms a consecutive range of indicies in pmem + const size_t num_indicies = last_i - start_i + 1; + const zbacking_offset start = to_zbacking_offset(pmem[start_i]); + const size_t size = num_indicies * ZGranuleSize; + + // Invoke function on zbacking_offset Range [start, start + size[ + if (!invoker(function, start, size)) { + return false; + } + } + + return true; +} + +void ZPhysicalMemoryManager::free(const ZVirtualMemory& vmem, uint32_t numa_id) { + zbacking_index* const pmem = _physical_mappings.addr(vmem.start()); + const size_t size = vmem.size(); + + // Free segments + for_each_segment_apply(pmem, size, [&](zbacking_offset segment_start, size_t segment_size) { + const size_t num_segments = segment_size >> ZGranuleSizeShift; + const zbacking_index index = to_zbacking_index(segment_start); + + // Insert the free segment indices + _partition_registries.get(numa_id).insert({index, num_segments}); + }); +} + +size_t ZPhysicalMemoryManager::commit(const ZVirtualMemory& vmem, uint32_t numa_id) { + zbacking_index* const pmem = _physical_mappings.addr(vmem.start()); + 
const size_t size = vmem.size(); + + size_t total_committed = 0; + + // Commit segments + for_each_segment_apply(pmem, size, [&](zbacking_offset segment_start, size_t segment_size) { + // Commit segment + const size_t committed = _backing.commit(segment_start, segment_size, numa_id); + + total_committed += committed; + + // Register with NMT + if (committed > 0) { + ZNMT::commit(segment_start, committed); + } + + return segment_size == committed; + }); + + // Success + return total_committed; +} + +size_t ZPhysicalMemoryManager::uncommit(const ZVirtualMemory& vmem) { + zbacking_index* const pmem = _physical_mappings.addr(vmem.start()); + const size_t size = vmem.size(); + + size_t total_uncommitted = 0; + + // Uncommit segments + for_each_segment_apply(pmem, size, [&](zbacking_offset segment_start, size_t segment_size) { + // Uncommit segment + const size_t uncommitted = _backing.uncommit(segment_start, segment_size); + + total_uncommitted += uncommitted; + + // Unregister with NMT + if (uncommitted > 0) { + ZNMT::uncommit(segment_start, uncommitted); + } + + return segment_size == uncommitted; + }); + + // Success + return total_uncommitted; +} + +// Map virtual memory to physical memory +void ZPhysicalMemoryManager::map(const ZVirtualMemory& vmem, uint32_t numa_id) const { + const zbacking_index* const pmem = _physical_mappings.addr(vmem.start()); + const zaddress_unsafe addr = ZOffset::address_unsafe(vmem.start()); + const size_t size = vmem.size(); + + size_t mapped = 0; + + for_each_segment_apply(pmem, size, [&](zbacking_offset segment_start, size_t segment_size) { + _backing.map(addr + mapped, segment_size, segment_start); + mapped += segment_size; + }); + + postcond(mapped == size); + + // Setup NUMA preferred for large pages + if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) { + os::numa_make_local((char*)addr, size, (int)numa_id); + } +} + +// Unmap virtual memory from physical memory +void ZPhysicalMemoryManager::unmap(const ZVirtualMemory& vmem) const { + const zaddress_unsafe addr = ZOffset::address_unsafe(vmem.start()); + const size_t size = vmem.size(); + _backing.unmap(addr, size); +} + +void ZPhysicalMemoryManager::copy_physical_segments(const ZVirtualMemory& to, const ZVirtualMemory& from) { + assert(to.size() == from.size(), "must be of the same size"); + + zbacking_index* const dest = _physical_mappings.addr(to.start()); + const zbacking_index* const src = _physical_mappings.addr(from.start()); + const int granule_count = from.granule_count(); + + ZUtils::copy_disjoint(dest, src, granule_count); +} + +static void sort_zbacking_index_array(zbacking_index* array, int count) { + ZUtils::sort(array, count, [](const zbacking_index* e1, const zbacking_index* e2) { + return *e1 < *e2 ? 
-1 : 1; + }); +} + +void ZPhysicalMemoryManager::sort_segments_physical(const ZVirtualMemory& vmem) { + zbacking_index* const pmem = _physical_mappings.addr(vmem.start()); + const int granule_count = vmem.granule_count(); + + // Sort physical segments + sort_zbacking_index_array(pmem, granule_count); +} + +void ZPhysicalMemoryManager::copy_to_stash(ZArraySlice stash, const ZVirtualMemory& vmem) const { + zbacking_index* const dest = stash.adr_at(0); + const zbacking_index* const src = _physical_mappings.addr(vmem.start()); + const int granule_count = vmem.granule_count(); + + // Check bounds + assert(granule_count <= stash.length(), "Copy overflow %d <= %d", granule_count, stash.length()); + + // Copy to stash + ZUtils::copy_disjoint(dest, src, granule_count); +} + +void ZPhysicalMemoryManager::copy_from_stash(const ZArraySlice stash, const ZVirtualMemory& vmem) { + zbacking_index* const dest = _physical_mappings.addr(vmem.start()); + const zbacking_index* const src = stash.adr_at(0); + const int granule_count = vmem.granule_count(); + + // Check bounds + assert(granule_count <= stash.length(), "Copy overflow %d <= %d", granule_count, stash.length()); + + // Copy from stash + ZUtils::copy_disjoint(dest, src, granule_count); +} + +void ZPhysicalMemoryManager::stash_segments(const ZVirtualMemory& vmem, ZArray* stash_out) const { + precond(stash_out->is_empty()); + + stash_out->at_grow(vmem.granule_count() - 1); + copy_to_stash(*stash_out, vmem); + sort_zbacking_index_array(stash_out->adr_at(0), stash_out->length()); +} + +void ZPhysicalMemoryManager::restore_segments(const ZVirtualMemory& vmem, const ZArray& stash) { + assert(vmem.granule_count() == stash.length(), "Must match stash size"); + + copy_from_stash(stash, vmem); +} + +void ZPhysicalMemoryManager::stash_segments(const ZArraySlice& vmems, ZArray* stash_out) const { + precond(stash_out->is_empty()); + + int stash_index = 0; + for (const ZVirtualMemory& vmem : vmems) { + const int granule_count = vmem.granule_count(); + stash_out->at_grow(stash_index + vmem.granule_count() - 1); + copy_to_stash(stash_out->slice_back(stash_index), vmem); + stash_index += granule_count; + } + + sort_zbacking_index_array(stash_out->adr_at(0), stash_out->length()); + +} + +void ZPhysicalMemoryManager::restore_segments(const ZArraySlice& vmems, const ZArray& stash) { + int stash_index = 0; + + for (const ZVirtualMemory& vmem : vmems) { + copy_from_stash(stash.slice_back(stash_index), vmem); + stash_index += vmem.granule_count(); + } + + assert(stash_index == stash.length(), "Must have emptied the stash"); +} diff --git a/src/hotspot/share/gc/z/zPhysicalMemoryManager.hpp b/src/hotspot/share/gc/z/zPhysicalMemoryManager.hpp new file mode 100644 index 00000000000..99de34beda7 --- /dev/null +++ b/src/hotspot/share/gc/z/zPhysicalMemoryManager.hpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZPHYSICALMEMORYMANAGER_HPP +#define SHARE_GC_Z_ZPHYSICALMEMORYMANAGER_HPP + +#include "gc/z/zAddress.hpp" +#include "gc/z/zArray.hpp" +#include "gc/z/zGranuleMap.hpp" +#include "gc/z/zRange.hpp" +#include "gc/z/zRangeRegistry.hpp" +#include "gc/z/zValue.hpp" +#include "memory/allocation.hpp" +#include OS_HEADER(gc/z/zPhysicalMemoryBacking) + +class ZVirtualMemory; + +using ZBackingIndexRange = ZRange; + +class ZPhysicalMemoryManager { +private: + using ZBackingIndexRegistry = ZRangeRegistry; + + ZPhysicalMemoryBacking _backing; + ZPerNUMA _partition_registries; + ZGranuleMap _physical_mappings; + + void copy_to_stash(ZArraySlice stash, const ZVirtualMemory& vmem) const; + void copy_from_stash(const ZArraySlice stash, const ZVirtualMemory& vmem); + +public: + ZPhysicalMemoryManager(size_t max_capacity); + + bool is_initialized() const; + + void warn_commit_limits(size_t max_capacity) const; + void try_enable_uncommit(size_t min_capacity, size_t max_capacity); + + void alloc(const ZVirtualMemory& vmem, uint32_t numa_id); + void free(const ZVirtualMemory& vmem, uint32_t numa_id); + + size_t commit(const ZVirtualMemory& vmem, uint32_t numa_id); + size_t uncommit(const ZVirtualMemory& vmem); + + void map(const ZVirtualMemory& vmem, uint32_t numa_id) const; + void unmap(const ZVirtualMemory& vmem) const; + + void copy_physical_segments(const ZVirtualMemory& to, const ZVirtualMemory& from); + + void sort_segments_physical(const ZVirtualMemory& vmem); + + void stash_segments(const ZVirtualMemory& vmem, ZArray* stash_out) const; + void restore_segments(const ZVirtualMemory& vmem, const ZArray& stash); + + void stash_segments(const ZArraySlice& vmems, ZArray* stash_out) const; + void restore_segments(const ZArraySlice& vmems, const ZArray& stash); +}; + +#endif // SHARE_GC_Z_ZPHYSICALMEMORYMANAGER_HPP diff --git a/src/hotspot/share/gc/z/zRange.hpp b/src/hotspot/share/gc/z/zRange.hpp new file mode 100644 index 00000000000..53c9980608e --- /dev/null +++ b/src/hotspot/share/gc/z/zRange.hpp @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
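The core iteration pattern behind commit, uncommit, map and free above is the coalescing done by for_each_segment_apply in zPhysicalMemoryManager.cpp: maximal runs of consecutive granule indices are turned into contiguous backing ranges before the per-range function is invoked. A self-contained sketch follows; the granule-size constant and the std::function callback are stand-ins for ZGranuleSize and the zbacking types.

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

static const size_t kGranuleSize = 4 * 1024; // assumed stand-in granule size

static void for_each_range(const std::vector<uint32_t>& indices,
                           const std::function<void(uint64_t /* offset */,
                                                    size_t   /* size */)>& fn) {
  for (size_t i = 0; i < indices.size(); i++) {
    const size_t start_i = i;

    // Extend the run while the next index is consecutive
    while (i + 1 < indices.size() && indices[i] + 1 == indices[i + 1]) {
      i++;
    }

    const size_t run = i - start_i + 1;
    fn(uint64_t(indices[start_i]) * kGranuleSize, run * kGranuleSize);
  }
}

// Example: indices {3, 4, 5, 9} yield two ranges, (3 * G, 3 * G) and (9 * G, 1 * G).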
+ */ + +#ifndef SHARE_GC_Z_ZRANGE_HPP +#define SHARE_GC_Z_ZRANGE_HPP + +#include "utilities/globalDefinitions.hpp" + +template +class ZRange { + friend class VMStructs; + +public: + using offset = Start; + using offset_end = End; + +private: + End _start; + size_t _size; + + // Used internally to create a ZRange. + // + // The end parameter is only used for verification and to distinguish + // the constructors if End == Start. + ZRange(End start, size_t size, End end); + +public: + ZRange(); + ZRange(Start start, size_t size); + + bool is_null() const; + + Start start() const; + End end() const; + + size_t size() const; + + bool operator==(const ZRange& other) const; + bool operator!=(const ZRange& other) const; + + bool contains(const ZRange& other) const; + + void grow_from_front(size_t size); + void grow_from_back(size_t size); + + ZRange shrink_from_front(size_t size); + ZRange shrink_from_back(size_t size); + + ZRange partition(size_t offset, size_t partition_size) const; + ZRange first_part(size_t split_offset) const; + ZRange last_part(size_t split_offset) const; + + bool adjacent_to(const ZRange& other) const; +}; + +#endif // SHARE_GC_Z_ZRANGE_HPP diff --git a/src/hotspot/share/gc/z/zRange.inline.hpp b/src/hotspot/share/gc/z/zRange.inline.hpp new file mode 100644 index 00000000000..d99cbefd32a --- /dev/null +++ b/src/hotspot/share/gc/z/zRange.inline.hpp @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_GC_Z_ZRANGE_INLINE_HPP +#define SHARE_GC_Z_ZRANGE_INLINE_HPP + +#include "gc/z/zRange.hpp" + +#include "utilities/debug.hpp" +#include "utilities/globalDefinitions.hpp" + +template +inline ZRange::ZRange(End start, size_t size, End end) + : _start(start), + _size(size) { + postcond(this->end() == end); +} + +template +inline ZRange::ZRange() + : _start(End::invalid), + _size(0) {} + +template +inline ZRange::ZRange(Start start, size_t size) + : _start(to_end_type(start, 0)), + _size(size) {} + +template +inline bool ZRange::is_null() const { + return _start == End::invalid; +} + +template +inline Start ZRange::start() const { + return to_start_type(_start); +} + +template +inline End ZRange::end() const { + return _start + _size; +} + +template +inline size_t ZRange::size() const { + return _size; +} + +template +inline bool ZRange::operator==(const ZRange& other) const { + precond(!is_null()); + precond(!other.is_null()); + + return _start == other._start && _size == other._size; +} + +template +inline bool ZRange::operator!=(const ZRange& other) const { + return !operator==(other); +} + +template +inline bool ZRange::contains(const ZRange& other) const { + precond(!is_null()); + precond(!other.is_null()); + + return _start <= other._start && other.end() <= end(); +} + +template +inline void ZRange::grow_from_front(size_t size) { + precond(size_t(start()) >= size); + + _start -= size; + _size += size; +} + +template +inline void ZRange::grow_from_back(size_t size) { + _size += size; +} + +template +inline ZRange ZRange::shrink_from_front(size_t size) { + precond(this->size() >= size); + + _start += size; + _size -= size; + + return ZRange(_start - size, size, _start); +} + +template +inline ZRange ZRange::shrink_from_back(size_t size) { + precond(this->size() >= size); + + _size -= size; + + return ZRange(end(), size, end() + size); +} + +template +inline ZRange ZRange::partition(size_t offset, size_t partition_size) const { + precond(size() - offset >= partition_size); + + return ZRange(_start + offset, partition_size, _start + offset + partition_size); +} + +template +inline ZRange ZRange::first_part(size_t split_offset) const { + return partition(0, split_offset); +} + +template +inline ZRange ZRange::last_part(size_t split_offset) const { + return partition(split_offset, size() - split_offset); +} + +template +inline bool ZRange::adjacent_to(const ZRange& other) const { + return end() == other.start() || other.end() == start(); +} + +#endif // SHARE_GC_Z_ZRANGE_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zRangeRegistry.hpp b/src/hotspot/share/gc/z/zRangeRegistry.hpp new file mode 100644 index 00000000000..8a5ba74da6e --- /dev/null +++ b/src/hotspot/share/gc/z/zRangeRegistry.hpp @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
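A small standalone sketch of the range arithmetic defined above, using plain size_t offsets instead of the ZGC offset types; it only illustrates the intended grow/shrink/split contract and is not the HotSpot implementation.

// Standalone sketch, not HotSpot code: the shrink/split contract of ZRange.
#include <cassert>
#include <cstddef>

struct Range {
  size_t start;
  size_t size;

  size_t end() const { return start + size; }

  // Remove n bytes from the front and return the removed part.
  Range shrink_from_front(size_t n) {
    assert(size >= n);
    const Range removed = {start, n};
    start += n;
    size -= n;
    return removed;
  }

  // The two parts of a split at split_offset bytes from the start.
  Range first_part(size_t split_offset) const {
    assert(split_offset <= size);
    return {start, split_offset};
  }

  Range last_part(size_t split_offset) const {
    assert(split_offset <= size);
    return {start + split_offset, size - split_offset};
  }

  bool adjacent_to(const Range& other) const {
    return end() == other.start || other.end() == start;
  }
};

int main() {
  Range r = {0x1000, 0x400};

  const Range head = r.first_part(0x100);
  const Range tail = r.last_part(0x100);
  assert(head.end() == tail.start);  // the two parts tile the original range
  assert(head.adjacent_to(tail));

  const Range removed = r.shrink_from_front(0x100);
  assert(removed.size == 0x100);
  assert(r.start == 0x1100 && r.size == 0x300);
  return 0;
}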
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZRANGEREGISTRY_HPP +#define SHARE_GC_Z_ZRANGEREGISTRY_HPP + +#include "gc/z/zAddress.hpp" +#include "gc/z/zList.hpp" +#include "gc/z/zLock.hpp" +#include "memory/allocation.hpp" +#include "utilities/globalDefinitions.hpp" + +template +class ZArray; + +template +class ZRangeRegistry { + friend class ZVirtualMemoryManagerTest; + +private: + // The node type for the list of Ranges + class Node; + +public: + using offset = typename Range::offset; + using offset_end = typename Range::offset_end; + + typedef void (*CallbackPrepare)(const Range& range); + typedef void (*CallbackResize)(const Range& from, const Range& to); + + struct Callbacks { + CallbackPrepare _prepare_for_hand_out; + CallbackPrepare _prepare_for_hand_back; + CallbackResize _grow; + CallbackResize _shrink; + + Callbacks(); + }; + +private: + mutable ZLock _lock; + ZList _list; + Callbacks _callbacks; + Range _limits; + + void move_into(const Range& range); + + void insert_inner(const Range& range); + void register_inner(const Range& range); + + void grow_from_front(Range* range, size_t size); + void grow_from_back(Range* range, size_t size); + + Range shrink_from_front(Range* range, size_t size); + Range shrink_from_back(Range* range, size_t size); + + Range remove_from_low_inner(size_t size); + Range remove_from_low_at_most_inner(size_t size); + + size_t remove_from_low_many_at_most_inner(size_t size, ZArray* out); + + bool check_limits(const Range& range) const; + +public: + ZRangeRegistry(); + + void register_callbacks(const Callbacks& callbacks); + + void register_range(const Range& range); + bool unregister_first(Range* out); + + bool is_empty() const; + bool is_contiguous() const; + + void anchor_limits(); + bool limits_contain(const Range& range) const; + + offset peek_low_address() const; + offset_end peak_high_address_end() const; + + void insert(const Range& range); + + void insert_and_remove_from_low_many(const Range& range, ZArray* out); + Range insert_and_remove_from_low_exact_or_many(size_t size, ZArray* in_out); + + Range remove_from_low(size_t size); + Range remove_from_low_at_most(size_t size); + size_t remove_from_low_many_at_most(size_t size, ZArray* out); + Range remove_from_high(size_t size); + + void transfer_from_low(ZRangeRegistry* other, size_t size); +}; + +template +class ZRangeRegistry::Node : public CHeapObj { + friend class ZList; + +private: + using offset = typename Range::offset; + using offset_end = typename Range::offset_end; + + Range _range; + ZListNode _node; + +public: + Node(offset start, size_t size) + : _range(start, size), + _node() {} + + Node(const Range& other) + : Node(other.start(), other.size()) {} + + Range* range() { + return &_range; + } + + offset start() const { + return _range.start(); + } + + offset_end end() const { + return _range.end(); + } + + size_t size() const { + return _range.size(); + } +}; + +#endif // SHARE_GC_Z_ZRANGEREGISTRY_HPP diff --git a/src/hotspot/share/gc/z/zRangeRegistry.inline.hpp b/src/hotspot/share/gc/z/zRangeRegistry.inline.hpp new file mode 100644 index 00000000000..de34aca07c4 --- /dev/null +++ 
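The Callbacks struct above is a set of optional hooks that the registry invokes around hand-out, hand-back, grow and shrink. A reduced standalone sketch of that pattern follows; Registry and log_hand_out are invented names that only mirror the shape of the real interface.

// Standalone sketch, not HotSpot code: optional, default-null hooks in the
// style of ZRangeRegistry::Callbacks, invoked only when installed.
#include <cstddef>
#include <cstdio>

struct Range {
  size_t start;
  size_t size;
};

struct Callbacks {
  void (*prepare_for_hand_out)(const Range&) = nullptr;
  void (*prepare_for_hand_back)(const Range&) = nullptr;
  void (*grow)(const Range& from, const Range& to) = nullptr;
  void (*shrink)(const Range& from, const Range& to) = nullptr;
};

struct Registry {
  Callbacks callbacks;

  void register_callbacks(const Callbacks& cb) {
    callbacks = cb;
  }

  Range hand_out(const Range& r) {
    if (callbacks.prepare_for_hand_out != nullptr) {
      callbacks.prepare_for_hand_out(r);  // only fired when a hook is installed
    }
    return r;
  }
};

// Example hook, invented for this sketch.
static void log_hand_out(const Range& r) {
  std::printf("handing out [0x%zx, 0x%zx)\n", r.start, r.start + r.size);
}

int main() {
  Registry registry;

  Callbacks cb;
  cb.prepare_for_hand_out = log_hand_out;
  registry.register_callbacks(cb);

  registry.hand_out({0x1000, 0x200});
  return 0;
}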
b/src/hotspot/share/gc/z/zRangeRegistry.inline.hpp @@ -0,0 +1,469 @@ +/* + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_GC_Z_ZRANGEREGISTRY_INLINE_HPP +#define SHARE_GC_Z_ZRANGEREGISTRY_INLINE_HPP + +#include "gc/z/zRangeRegistry.hpp" + +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zList.inline.hpp" +#include "gc/z/zLock.inline.hpp" + +template +void ZRangeRegistry::move_into(const Range& range) { + assert(!range.is_null(), "Invalid range"); + assert(check_limits(range), "Range outside limits"); + + const offset start = range.start(); + const offset_end end = range.end(); + const size_t size = range.size(); + + ZListIterator iter(&_list); + for (Node* node; iter.next(&node);) { + if (node->start() < start) { + continue; + } + + Node* const prev = _list.prev(node); + if (prev != nullptr && start == prev->end()) { + if (end == node->start()) { + // Merge with prev and current ranges + grow_from_back(prev->range(), size); + grow_from_back(prev->range(), node->size()); + _list.remove(node); + delete node; + } else { + // Merge with prev range + grow_from_back(prev->range(), size); + } + } else if (end == node->start()) { + // Merge with current range + grow_from_front(node->range(), size); + } else { + // Insert range before current range + assert(end < node->start(), "Areas must not overlap"); + Node* const new_node = new Node(start, size); + _list.insert_before(node, new_node); + } + + // Done + return; + } + + // Insert last + Node* const last = _list.last(); + if (last != nullptr && start == last->end()) { + // Merge with last range + grow_from_back(last->range(), size); + } else { + // Insert new node last + Node* const new_node = new Node(start, size); + _list.insert_last(new_node); + } +} + +template +void ZRangeRegistry::insert_inner(const Range& range) { + if (_callbacks._prepare_for_hand_back != nullptr) { + _callbacks._prepare_for_hand_back(range); + } + move_into(range); +} + +template +void ZRangeRegistry::register_inner(const Range& range) { + move_into(range); +} + +template +void ZRangeRegistry::grow_from_front(Range* range, size_t size) { + if (_callbacks._grow != nullptr) { + const Range from = *range; + const Range to = Range(from.start() - size, from.size() + size); + _callbacks._grow(from, to); + } + range->grow_from_front(size); +} + +template +void ZRangeRegistry::grow_from_back(Range* range, size_t size) { + if (_callbacks._grow != nullptr) { + const Range from = *range; + const Range to = Range(from.start(), from.size() + size); + 
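move_into above keeps the list sorted and coalesces the inserted range with its neighbours: merge into the previous node, into the next node, into both, or insert a fresh node. The standalone sketch below reproduces that decision logic over a std::list; it leaves out the locking, callbacks and intrusive ZList used by the real registry.

// Standalone sketch, not HotSpot code: coalescing insert into a sorted,
// non-overlapping list of ranges.
#include <cassert>
#include <cstddef>
#include <iterator>
#include <list>

struct Range {
  size_t start;
  size_t size;

  size_t end() const { return start + size; }
};

// Insert r into the sorted list, merging with adjacent ranges.
static void coalescing_insert(std::list<Range>& list, const Range& r) {
  for (auto it = list.begin(); it != list.end(); ++it) {
    if (it->start < r.start) {
      continue;
    }

    auto prev = (it == list.begin()) ? list.end() : std::prev(it);
    const bool merges_prev = prev != list.end() && prev->end() == r.start;
    const bool merges_next = r.end() == it->start;

    if (merges_prev && merges_next) {
      // Bridge the gap: extend the previous node and drop the next one
      prev->size += r.size + it->size;
      list.erase(it);
    } else if (merges_prev) {
      // Extend the previous node
      prev->size += r.size;
    } else if (merges_next) {
      // Extend the next node from the front
      it->start = r.start;
      it->size += r.size;
    } else {
      // Stand-alone node before the next range
      assert(r.end() < it->start);  // ranges must not overlap
      list.insert(it, r);
    }
    return;
  }

  // Insert last, merging with the tail if adjacent
  if (!list.empty() && list.back().end() == r.start) {
    list.back().size += r.size;
  } else {
    list.push_back(r);
  }
}

int main() {
  std::list<Range> list;
  coalescing_insert(list, {0x0000, 0x100});
  coalescing_insert(list, {0x0200, 0x100});
  coalescing_insert(list, {0x0100, 0x100});  // bridges the two existing ranges

  assert(list.size() == 1);
  assert(list.front().start == 0 && list.front().size == 0x300);
  return 0;
}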
_callbacks._grow(from, to); + } + range->grow_from_back(size); +} + +template +Range ZRangeRegistry::shrink_from_front(Range* range, size_t size) { + if (_callbacks._shrink != nullptr) { + const Range from = *range; + const Range to = from.last_part(size); + _callbacks._shrink(from, to); + } + return range->shrink_from_front(size); +} + +template +Range ZRangeRegistry::shrink_from_back(Range* range, size_t size) { + if (_callbacks._shrink != nullptr) { + const Range from = *range; + const Range to = from.first_part(from.size() - size); + _callbacks._shrink(from, to); + } + return range->shrink_from_back(size); +} + +template +Range ZRangeRegistry::remove_from_low_inner(size_t size) { + ZListIterator iter(&_list); + for (Node* node; iter.next(&node);) { + if (node->size() >= size) { + Range range; + + if (node->size() == size) { + // Exact match, remove range + _list.remove(node); + range = *node->range(); + delete node; + } else { + // Larger than requested, shrink range + range = shrink_from_front(node->range(), size); + } + + if (_callbacks._prepare_for_hand_out != nullptr) { + _callbacks._prepare_for_hand_out(range); + } + + return range; + } + } + + // Out of memory + return Range(); +} + +template +Range ZRangeRegistry::remove_from_low_at_most_inner(size_t size) { + Node* const node = _list.first(); + if (node == nullptr) { + // List is empty + return Range(); + } + + Range range; + + if (node->size() <= size) { + // Smaller than or equal to requested, remove range + _list.remove(node); + range = *node->range(); + delete node; + } else { + // Larger than requested, shrink range + range = shrink_from_front(node->range(), size); + } + + if (_callbacks._prepare_for_hand_out) { + _callbacks._prepare_for_hand_out(range); + } + + return range; +} + +template +size_t ZRangeRegistry::remove_from_low_many_at_most_inner(size_t size, ZArray* out) { + size_t to_remove = size; + + while (to_remove > 0) { + const Range range = remove_from_low_at_most_inner(to_remove); + + if (range.is_null()) { + // The requested amount is not available + return size - to_remove; + } + + to_remove -= range.size(); + out->append(range); + } + + return size; +} + +template +ZRangeRegistry::Callbacks::Callbacks() + : _prepare_for_hand_out(nullptr), + _prepare_for_hand_back(nullptr), + _grow(nullptr), + _shrink(nullptr) {} + +template +ZRangeRegistry::ZRangeRegistry() + : _list(), + _callbacks(), + _limits() {} + +template +void ZRangeRegistry::register_callbacks(const Callbacks& callbacks) { + _callbacks = callbacks; +} + +template +void ZRangeRegistry::register_range(const Range& range) { + ZLocker locker(&_lock); + register_inner(range); +} + +template +bool ZRangeRegistry::unregister_first(Range* out) { + // Unregistering a range doesn't call a "prepare_to_hand_out" callback + // because the range is unregistered and not handed out to be used. 
+ + ZLocker locker(&_lock); + + if (_list.is_empty()) { + return false; + } + + // Don't invoke the "prepare_to_hand_out" callback + + Node* const node = _list.remove_first(); + + // Return the range + *out = *node->range(); + + delete node; + + return true; +} + +template +inline bool ZRangeRegistry::is_empty() const { + return _list.is_empty(); +} + +template +bool ZRangeRegistry::is_contiguous() const { + return _list.size() == 1; +} + +template +void ZRangeRegistry::anchor_limits() { + assert(_limits.is_null(), "Should only anchor limits once"); + + if (_list.is_empty()) { + return; + } + + const offset start = _list.first()->start(); + const size_t size = _list.last()->end() - start; + + _limits = Range(start, size); +} + +template +bool ZRangeRegistry::limits_contain(const Range& range) const { + if (_limits.is_null() || range.is_null()) { + return false; + } + + return range.start() >= _limits.start() && range.end() <= _limits.end(); +} + +template +bool ZRangeRegistry::check_limits(const Range& range) const { + if (_limits.is_null()) { + // Limits not anchored + return true; + } + + // Otherwise, check that other is within the limits + return limits_contain(range); +} + +template +typename ZRangeRegistry::offset ZRangeRegistry::peek_low_address() const { + ZLocker locker(&_lock); + + const Node* const node = _list.first(); + if (node != nullptr) { + return node->start(); + } + + // Out of memory + return offset::invalid; +} + +template +typename ZRangeRegistry::offset_end ZRangeRegistry::peak_high_address_end() const { + ZLocker locker(&_lock); + + const Node* const node = _list.last(); + if (node != nullptr) { + return node->end(); + } + + // Out of memory + return offset_end::invalid; +} + +template +void ZRangeRegistry::insert(const Range& range) { + ZLocker locker(&_lock); + insert_inner(range); +} + +template +void ZRangeRegistry::insert_and_remove_from_low_many(const Range& range, ZArray* out) { + ZLocker locker(&_lock); + + const size_t size = range.size(); + + // Insert the range + insert_inner(range); + + // Remove (hopefully) at a lower address + const size_t removed = remove_from_low_many_at_most_inner(size, out); + + // This should always succeed since we freed the same amount. + assert(removed == size, "must succeed"); +} + +template +Range ZRangeRegistry::insert_and_remove_from_low_exact_or_many(size_t size, ZArray* in_out) { + ZLocker locker(&_lock); + + size_t inserted = 0; + + // Insert everything + ZArrayIterator iter(in_out); + for (Range mem; iter.next(&mem);) { + insert_inner(mem); + inserted += mem.size(); + } + + // Clear stored memory so that we can populate it below + in_out->clear(); + + // Try to find and remove a contiguous chunk + Range range = remove_from_low_inner(size); + if (!range.is_null()) { + return range; + } + + // Failed to find a contiguous chunk, split it up into smaller chunks and + // only remove up to as much that has been inserted. 
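anchor_limits above records, once, the span from the first node's start to the last node's end, and limits_contain is then a plain containment check against that span. A trimmed-down standalone version of that bookkeeping follows, assuming the caller supplies the lowest start and highest end directly; the null-range test is simplified to a zero size.

// Standalone sketch, not HotSpot code: anchor-once limits bookkeeping.
#include <cassert>
#include <cstddef>

struct Range {
  size_t start;
  size_t size;

  size_t end() const { return start + size; }
  bool is_null() const { return size == 0; }
};

struct Limits {
  Range limits = {0, 0};

  // Anchor the limits to the span currently covered: [lowest_start, highest_end).
  void anchor(size_t lowest_start, size_t highest_end) {
    assert(limits.is_null());  // should only anchor limits once
    limits = {lowest_start, highest_end - lowest_start};
  }

  bool contain(const Range& r) const {
    if (limits.is_null() || r.is_null()) {
      return false;
    }
    return r.start >= limits.start && r.end() <= limits.end();
  }
};

int main() {
  Limits l;
  l.anchor(0x1000, 0x9000);              // the registry spanned [0x1000, 0x9000) when anchored
  assert(l.contain({0x2000, 0x1000}));   // inside the anchored span
  assert(!l.contain({0x9000, 0x1000}));  // starts at the span's end, so outside
  return 0;
}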
+ size_t removed = remove_from_low_many_at_most_inner(inserted, in_out); + assert(removed == inserted, "Should be able to get back as much as we previously inserted"); + return Range(); +} + +template +Range ZRangeRegistry::remove_from_low(size_t size) { + ZLocker locker(&_lock); + Range range = remove_from_low_inner(size); + return range; +} + +template +Range ZRangeRegistry::remove_from_low_at_most(size_t size) { + ZLocker lock(&_lock); + Range range = remove_from_low_at_most_inner(size); + return range; +} + +template +size_t ZRangeRegistry::remove_from_low_many_at_most(size_t size, ZArray* out) { + ZLocker lock(&_lock); + return remove_from_low_many_at_most_inner(size, out); +} + +template +Range ZRangeRegistry::remove_from_high(size_t size) { + ZLocker locker(&_lock); + + ZListReverseIterator iter(&_list); + for (Node* node; iter.next(&node);) { + if (node->size() >= size) { + Range range; + + if (node->size() == size) { + // Exact match, remove range + _list.remove(node); + range = *node->range(); + delete node; + } else { + // Larger than requested, shrink range + range = shrink_from_back(node->range(), size); + } + + if (_callbacks._prepare_for_hand_out != nullptr) { + _callbacks._prepare_for_hand_out(range); + } + + return range; + } + } + + // Out of memory + return Range(); +} + +template +void ZRangeRegistry::transfer_from_low(ZRangeRegistry* other, size_t size) { + assert(other->_list.is_empty(), "Should only be used for initialization"); + + ZLocker locker(&_lock); + size_t to_move = size; + + ZListIterator iter(&_list); + for (Node* node; iter.next(&node);) { + Node* to_transfer; + + if (node->size() <= to_move) { + // Smaller than or equal to requested, remove range + _list.remove(node); + to_transfer = node; + } else { + // Larger than requested, shrink range + const Range range = shrink_from_front(node->range(), to_move); + to_transfer = new Node(range); + } + + // Insert into the other list + // + // The from list is sorted, the other list starts empty, and the inserts + // come in sort order, so we can insert_last here. + other->_list.insert_last(to_transfer); + + to_move -= to_transfer->size(); + if (to_move == 0) { + break; + } + } + + assert(to_move == 0, "Should have transferred requested size"); +} + +#endif // SHARE_GC_Z_ZRANGEREGISTRY_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zRelocate.cpp b/src/hotspot/share/gc/z/zRelocate.cpp index 213452e8d05..b4ae78bc8d9 100644 --- a/src/hotspot/share/gc/z/zRelocate.cpp +++ b/src/hotspot/share/gc/z/zRelocate.cpp @@ -410,7 +410,7 @@ static void retire_target_page(ZGeneration* generation, ZPage* page) { // relocate the remaining objects, leaving the target page empty when // relocation completed. if (page->used() == 0) { - ZHeap::heap()->free_page(page, true /* allow_defragment */); + ZHeap::heap()->free_page(page); } } @@ -841,14 +841,12 @@ private: const bool promotion = _forwarding->is_promotion(); // Promotions happen through a new cloned page - ZPage* const to_page = promotion ? from_page->clone_limited() : from_page; + ZPage* const to_page = promotion + ? from_page->clone_for_promotion() + : from_page->reset(to_age); // Reset page for in-place relocation - to_page->reset(to_age); to_page->reset_top_for_allocation(); - if (promotion) { - to_page->remset_alloc(); - } // Verify that the inactive remset is clear when resetting the page for // in-place relocation. 
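remove_from_low_many_at_most and transfer_from_low above both walk the list from the low end, taking whole nodes and splitting the last one when only part of it is needed. The sketch below shows that accumulation loop over a sorted vector; it is illustrative only and omits the locking and callback handling of the real registry.

// Standalone sketch, not HotSpot code: take up to 'requested' bytes from the
// low end of a sorted set of ranges, splitting the last node if necessary.
#include <cassert>
#include <cstddef>
#include <vector>

struct Range {
  size_t start;
  size_t size;
};

// Appends the taken ranges to 'out' and returns the number of bytes taken.
static size_t take_from_low(std::vector<Range>& from, size_t requested, std::vector<Range>* out) {
  size_t taken = 0;

  while (taken < requested && !from.empty()) {
    Range& lowest = from.front();
    const size_t want = requested - taken;

    if (lowest.size <= want) {
      // Take the whole node
      out->push_back(lowest);
      taken += lowest.size;
      from.erase(from.begin());
    } else {
      // Take only the front part of the node
      out->push_back({lowest.start, want});
      lowest.start += want;
      lowest.size -= want;
      taken += want;
    }
  }

  return taken;
}

int main() {
  std::vector<Range> registry = {{0x0000, 0x100}, {0x0400, 0x300}};
  std::vector<Range> out;

  const size_t taken = take_from_low(registry, 0x200, &out);
  assert(taken == 0x200);
  assert(out.size() == 2);  // one whole node plus the front of the next
  assert(registry.size() == 1 && registry[0].start == 0x0500 && registry[0].size == 0x200);
  return 0;
}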
@@ -1011,7 +1009,7 @@ public: page->log_msg(" (relocate page done normal)"); // Free page - ZHeap::heap()->free_page(page, true /* allow_defragment */); + ZHeap::heap()->free_page(page); } } }; @@ -1260,14 +1258,12 @@ public: prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)"); // Setup to-space page - ZPage* const new_page = promotion ? prev_page->clone_limited() : prev_page; + ZPage* const new_page = promotion + ? prev_page->clone_for_promotion() + : prev_page->reset(to_age); // Reset page for flip aging - new_page->reset(to_age); new_page->reset_livemap(); - if (promotion) { - new_page->remset_alloc(); - } if (promotion) { ZGeneration::young()->flip_promote(prev_page, new_page); diff --git a/src/hotspot/share/gc/z/zRemembered.cpp b/src/hotspot/share/gc/z/zRemembered.cpp index 3bb17152d41..b94d676242c 100644 --- a/src/hotspot/share/gc/z/zRemembered.cpp +++ b/src/hotspot/share/gc/z/zRemembered.cpp @@ -473,11 +473,9 @@ public: _remset_table_iterator(remembered) { _mark->prepare_work(); _remembered->_page_allocator->enable_safe_destroy(); - _remembered->_page_allocator->enable_safe_recycle(); } ~ZRememberedScanMarkFollowTask() { - _remembered->_page_allocator->disable_safe_recycle(); _remembered->_page_allocator->disable_safe_destroy(); _mark->finish_work(); // We are done scanning the set of old pages. diff --git a/src/hotspot/share/gc/z/zRememberedSet.cpp b/src/hotspot/share/gc/z/zRememberedSet.cpp index f833c5b2336..2de62752d91 100644 --- a/src/hotspot/share/gc/z/zRememberedSet.cpp +++ b/src/hotspot/share/gc/z/zRememberedSet.cpp @@ -54,12 +54,6 @@ void ZRememberedSet::initialize(size_t page_size) { _bitmap[1].initialize(size_in_bits, true /* clear */); } -void ZRememberedSet::delete_all() { - assert(is_initialized(), "precondition"); - _bitmap[0].resize(0); - _bitmap[1].resize(0); -} - bool ZRememberedSet::is_cleared_current() const { return current()->is_empty(); } diff --git a/src/hotspot/share/gc/z/zRememberedSet.hpp b/src/hotspot/share/gc/z/zRememberedSet.hpp index c3c6e8bc313..6aca8b2d74e 100644 --- a/src/hotspot/share/gc/z/zRememberedSet.hpp +++ b/src/hotspot/share/gc/z/zRememberedSet.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -114,7 +114,6 @@ public: bool is_initialized() const; void initialize(size_t page_size); - void delete_all(); bool at_current(uintptr_t offset) const; bool at_previous(uintptr_t offset) const; diff --git a/src/hotspot/share/gc/z/zUncommitter.cpp b/src/hotspot/share/gc/z/zUncommitter.cpp index 50731592108..7993bbd56a9 100644 --- a/src/hotspot/share/gc/z/zUncommitter.cpp +++ b/src/hotspot/share/gc/z/zUncommitter.cpp @@ -31,11 +31,12 @@ static const ZStatCounter ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond); -ZUncommitter::ZUncommitter(ZPageAllocator* page_allocator) - : _page_allocator(page_allocator), +ZUncommitter::ZUncommitter(uint32_t id, ZPartition* partition) + : _id(id), + _partition(partition), _lock(), _stop(false) { - set_name("ZUncommitter"); + set_name("ZUncommitter#%u", id); create_and_start(); } @@ -46,7 +47,7 @@ bool ZUncommitter::wait(uint64_t timeout) const { } if (!_stop && timeout > 0) { - log_debug(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout); + log_debug(gc, heap)("Uncommitter (%u) Timeout: " UINT64_FORMAT "s", _id, timeout); _lock.wait(timeout * MILLIUNITS); } @@ -63,27 +64,27 @@ void ZUncommitter::run_thread() { while (wait(timeout)) { EventZUncommit event; - size_t uncommitted = 0; + size_t total_uncommitted = 0; while (should_continue()) { // Uncommit chunk - const size_t flushed = _page_allocator->uncommit(&timeout); - if (flushed == 0) { + const size_t uncommitted = _partition->uncommit(&timeout); + if (uncommitted == 0) { // Done break; } - uncommitted += flushed; + total_uncommitted += uncommitted; } - if (uncommitted > 0) { + if (total_uncommitted > 0) { // Update statistics - ZStatInc(ZCounterUncommit, uncommitted); - log_info(gc, heap)("Uncommitted: %zuM(%.0f%%)", - uncommitted / M, percent_of(uncommitted, ZHeap::heap()->max_capacity())); + ZStatInc(ZCounterUncommit, total_uncommitted); + log_info(gc, heap)("Uncommitter (%u) Uncommitted: %zuM(%.0f%%)", + _id, total_uncommitted / M, percent_of(total_uncommitted, ZHeap::heap()->max_capacity())); // Send event - event.commit(uncommitted); + event.commit(total_uncommitted); } } } diff --git a/src/hotspot/share/gc/z/zUncommitter.hpp b/src/hotspot/share/gc/z/zUncommitter.hpp index b626df8dddf..f8630f7b7fb 100644 --- a/src/hotspot/share/gc/z/zUncommitter.hpp +++ b/src/hotspot/share/gc/z/zUncommitter.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -27,11 +27,12 @@ #include "gc/z/zLock.hpp" #include "gc/z/zThread.hpp" -class ZPageAllocator; +class ZPartition; class ZUncommitter : public ZThread { private: - ZPageAllocator* const _page_allocator; + const uint32_t _id; + ZPartition* const _partition; mutable ZConditionLock _lock; bool _stop; @@ -43,7 +44,7 @@ protected: virtual void terminate(); public: - ZUncommitter(ZPageAllocator* page_allocator); + ZUncommitter(uint32_t id, ZPartition* partition); }; #endif // SHARE_GC_Z_ZUNCOMMITTER_HPP diff --git a/src/hotspot/share/gc/z/zUnmapper.cpp b/src/hotspot/share/gc/z/zUnmapper.cpp deleted file mode 100644 index edd31805c49..00000000000 --- a/src/hotspot/share/gc/z/zUnmapper.cpp +++ /dev/null @@ -1,129 +0,0 @@ -/* - * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. 
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. - * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "gc/shared/gc_globals.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/z/zList.inline.hpp" -#include "gc/z/zLock.inline.hpp" -#include "gc/z/zPage.inline.hpp" -#include "gc/z/zPageAllocator.hpp" -#include "gc/z/zUnmapper.hpp" -#include "jfr/jfrEvents.hpp" -#include "runtime/globals.hpp" - -ZUnmapper::ZUnmapper(ZPageAllocator* page_allocator) - : _page_allocator(page_allocator), - _lock(), - _queue(), - _enqueued_bytes(0), - _warned_sync_unmapping(false), - _stop(false) { - set_name("ZUnmapper"); - create_and_start(); -} - -ZPage* ZUnmapper::dequeue() { - ZLocker locker(&_lock); - - for (;;) { - if (_stop) { - return nullptr; - } - - ZPage* const page = _queue.remove_first(); - if (page != nullptr) { - _enqueued_bytes -= page->size(); - return page; - } - - _lock.wait(); - } -} - -bool ZUnmapper::try_enqueue(ZPage* page) { - // Enqueue for asynchronous unmap and destroy - ZLocker locker(&_lock); - if (is_saturated()) { - // The unmapper thread is lagging behind and is unable to unmap memory fast enough - if (!_warned_sync_unmapping) { - _warned_sync_unmapping = true; - log_warning_p(gc)("WARNING: Encountered synchronous unmapping because asynchronous unmapping could not keep up"); - } - log_debug(gc, unmap)("Synchronous unmapping %zuM page", page->size() / M); - return false; - } - - log_trace(gc, unmap)("Asynchronous unmapping %zuM page (%zuM / %zuM enqueued)", - page->size() / M, _enqueued_bytes / M, queue_capacity() / M); - - _queue.insert_last(page); - _enqueued_bytes += page->size(); - _lock.notify_all(); - - return true; -} - -size_t ZUnmapper::queue_capacity() const { - return align_up((size_t)(_page_allocator->max_capacity() * ZAsyncUnmappingLimit / 100.0), ZGranuleSize); -} - -bool ZUnmapper::is_saturated() const { - return _enqueued_bytes >= queue_capacity(); -} - -void ZUnmapper::do_unmap_and_destroy_page(ZPage* page) const { - EventZUnmap event; - const size_t unmapped = page->size(); - - // Unmap and destroy - _page_allocator->unmap_page(page); - _page_allocator->destroy_page(page); - - // Send event - event.commit(unmapped); -} - -void ZUnmapper::unmap_and_destroy_page(ZPage* page) { - if (!try_enqueue(page)) { - // Synchronously unmap and destroy - do_unmap_and_destroy_page(page); - } -} - -void ZUnmapper::run_thread() { - for (;;) { - ZPage* const page = dequeue(); - if (page == nullptr) { - // Stop - return; - } - - do_unmap_and_destroy_page(page); - } -} - -void ZUnmapper::terminate() { - ZLocker locker(&_lock); - _stop = true; - _lock.notify_all(); -} diff --git 
a/src/hotspot/share/gc/z/zUtils.hpp b/src/hotspot/share/gc/z/zUtils.hpp index 59e789d5b38..731136c4c99 100644 --- a/src/hotspot/share/gc/z/zUtils.hpp +++ b/src/hotspot/share/gc/z/zUtils.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -47,6 +47,16 @@ public: // Memory static void fill(uintptr_t* addr, size_t count, uintptr_t value); + template + static void copy_disjoint(T* dest, const T* src, size_t count); + template + static void copy_disjoint(T* dest, const T* src, int count); + + // Sort + template + static void sort(T* array, size_t count, Comparator comparator); + template + static void sort(T* array, int count, Comparator comparator); }; #endif // SHARE_GC_Z_ZUTILS_HPP diff --git a/src/hotspot/share/gc/z/zUtils.inline.hpp b/src/hotspot/share/gc/z/zUtils.inline.hpp index b6acf12df30..07a49f144e9 100644 --- a/src/hotspot/share/gc/z/zUtils.inline.hpp +++ b/src/hotspot/share/gc/z/zUtils.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -69,4 +69,35 @@ inline void ZUtils::object_copy_conjoint(zaddress from, zaddress to, size_t size } } +template +inline void ZUtils::copy_disjoint(T* dest, const T* src, size_t count) { + memcpy(dest, src, sizeof(T) * count); +} + +template +inline void ZUtils::copy_disjoint(T* dest, const T* src, int count) { + assert(count >= 0, "must be positive %d", count); + + copy_disjoint(dest, src, static_cast(count)); +} + +template +inline void ZUtils::sort(T* array, size_t count, Comparator comparator) { + using SortType = int(const void*, const void*); + using ComparatorType = int(const T*, const T*); + + static constexpr bool IsComparatorCompatible = std::is_assignable::value; + static_assert(IsComparatorCompatible, "Incompatible Comparator, must decay to plain function pointer"); + + // We rely on ABI compatibility between ComparatorType and SortType + qsort(array, count, sizeof(T), reinterpret_cast(static_cast(comparator))); +} + +template +inline void ZUtils::sort(T* array, int count, Comparator comparator) { + assert(count >= 0, "must be positive %d", count); + + sort(array, static_cast(count), comparator); +} + #endif // SHARE_GC_Z_ZUTILS_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zValue.hpp b/src/hotspot/share/gc/z/zValue.hpp index 2661a906dff..ab27c6a9227 100644 --- a/src/hotspot/share/gc/z/zValue.hpp +++ b/src/hotspot/share/gc/z/zValue.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -76,8 +76,12 @@ public: // Value // +struct ZValueIdTagType {}; + template class ZValue : public CHeapObj { + friend class VMStructs; + private: const uintptr_t _addr; @@ -86,6 +90,8 @@ private: public: ZValue(); ZValue(const T& value); + template + ZValue(ZValueIdTagType, Args&&... 
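ZUtils::sort above forwards to the C library qsort and, as the comment in the patch says, relies on ABI compatibility between a comparator taking const T pointers and one taking const void pointers. A minimal standalone illustration of the same cast follows; a strictly portable alternative would be a comparator that itself takes const void pointers and casts internally.

// Standalone sketch, not HotSpot code: the comparator cast behind ZUtils::sort.
#include <cassert>
#include <cstddef>
#include <cstdlib>

// Typed comparator, the shape handed to ZUtils::sort.
static int compare_ints(const int* a, const int* b) {
  return (*a > *b) - (*a < *b);
}

template <typename T>
static void sort_like_zutils(T* array, size_t count, int (*comparator)(const T*, const T*)) {
  using SortType = int(const void*, const void*);

  // Relies on ABI compatibility between int(const T*, const T*) and SortType,
  // mirroring the ZUtils::sort implementation above.
  std::qsort(array, count, sizeof(T), reinterpret_cast<SortType*>(comparator));
}

int main() {
  int values[] = {3, 1, 2};
  sort_like_zutils(values, 3, compare_ints);

  assert(values[0] == 1 && values[1] == 2 && values[2] == 3);
  return 0;
}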
args); const T* addr(uint32_t value_id = S::id()) const; T* addr(uint32_t value_id = S::id()); @@ -95,6 +101,8 @@ public: void set(const T& value, uint32_t value_id = S::id()); void set_all(const T& value); + + uint32_t count() const; }; template using ZContended = ZValue; @@ -106,16 +114,23 @@ template using ZPerWorker = ZValue; // Iterator // +template +class ZValueConstIterator; + template class ZValueIterator { + friend class ZValueConstIterator; + private: ZValue* const _value; uint32_t _value_id; public: ZValueIterator(ZValue* value); + ZValueIterator(const ZValueIterator&) = default; bool next(T** value); + bool next(T** value, uint32_t* value_id); }; template using ZPerCPUIterator = ZValueIterator; @@ -130,6 +145,8 @@ private: public: ZValueConstIterator(const ZValue* value); + ZValueConstIterator(const ZValueIterator& other); + ZValueConstIterator(const ZValueConstIterator&) = default; bool next(const T** value); }; diff --git a/src/hotspot/share/gc/z/zValue.inline.hpp b/src/hotspot/share/gc/z/zValue.inline.hpp index c2aa8bbbb40..f0ff891c3bc 100644 --- a/src/hotspot/share/gc/z/zValue.inline.hpp +++ b/src/hotspot/share/gc/z/zValue.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -30,7 +30,7 @@ #include "gc/shared/workerThread.hpp" #include "gc/z/zCPU.inline.hpp" #include "gc/z/zGlobals.hpp" -#include "gc/z/zNUMA.hpp" +#include "gc/z/zNUMA.inline.hpp" #include "gc/z/zUtils.inline.hpp" #include "runtime/globals.hpp" #include "utilities/align.hpp" @@ -142,6 +142,18 @@ inline ZValue::ZValue(const T& value) } } +template +template +inline ZValue::ZValue(ZValueIdTagType, Args&&... args) + : _addr(S::alloc(sizeof(T))) { + // Initialize all instances + uint32_t value_id; + ZValueIterator iter(this); + for (T* addr; iter.next(&addr, &value_id);) { + ::new (addr) T(value_id, args...); + } +} + template inline const T* ZValue::addr(uint32_t value_id) const { return reinterpret_cast(value_addr(value_id)); @@ -175,6 +187,11 @@ inline void ZValue::set_all(const T& value) { } } +template +uint32_t ZValue::count() const { + return S::count(); +} + // // Iterator // @@ -192,12 +209,26 @@ inline bool ZValueIterator::next(T** value) { } return false; } +template +inline bool ZValueIterator::next(T** value, uint32_t* value_id) { + if (_value_id < S::count()) { + *value_id = _value_id; + *value = _value->addr(_value_id++); + return true; + } + return false; +} template inline ZValueConstIterator::ZValueConstIterator(const ZValue* value) : _value(value), _value_id(0) {} +template +inline ZValueConstIterator::ZValueConstIterator(const ZValueIterator& other) + : _value(other._value), + _value_id(other._value_id) {} + template inline bool ZValueConstIterator::next(const T** value) { if (_value_id < S::count()) { diff --git a/src/hotspot/share/gc/z/zVirtualMemory.cpp b/src/hotspot/share/gc/z/zVirtualMemory.cpp deleted file mode 100644 index 471fc6f505e..00000000000 --- a/src/hotspot/share/gc/z/zVirtualMemory.cpp +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. - * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
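The tagged ZValue constructor above builds every per-id instance in place, passing the instance's own id as the first constructor argument, which is how each per-NUMA copy later knows its id. The standalone analogue below uses a plain heap array instead of the ZGC storage classes; PerId, IdTag and Partition are invented for this illustration.

// Standalone sketch, not HotSpot code: per-id in-place construction.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <new>

struct IdTag {};  // stand-in for ZValueIdTagType

template <typename T>
class PerId {
private:
  uint32_t _count;
  T*       _instances;

public:
  template <typename... Args>
  PerId(IdTag, uint32_t count, Args&&... args)
    : _count(count),
      _instances(static_cast<T*>(::operator new(sizeof(T) * count))) {
    // Construct one instance per id, passing the id as the first argument
    for (uint32_t id = 0; id < _count; id++) {
      ::new (&_instances[id]) T(id, args...);
    }
  }

  ~PerId() {
    for (uint32_t id = 0; id < _count; id++) {
      _instances[id].~T();
    }
    ::operator delete(_instances);
  }

  T& get(uint32_t id) {
    return _instances[id];
  }

  uint32_t count() const {
    return _count;
  }
};

struct Partition {
  uint32_t numa_id;
  size_t   reserved;

  Partition(uint32_t id, size_t r) : numa_id(id), reserved(r) {}
};

int main() {
  PerId<Partition> partitions(IdTag{}, 4, /* reserved */ size_t(0));

  assert(partitions.count() == 4);
  assert(partitions.get(0).numa_id == 0);
  assert(partitions.get(3).numa_id == 3);
  return 0;
}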
- * - * This code is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 only, as - * published by the Free Software Foundation. - * - * This code is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * version 2 for more details (a copy is included in the LICENSE file that - * accompanied this code). - * - * You should have received a copy of the GNU General Public License version - * 2 along with this work; if not, write to the Free Software Foundation, - * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. - * - * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA - * or visit www.oracle.com if you need additional information or have any - * questions. - */ - -#include "gc/shared/gc_globals.hpp" -#include "gc/shared/gcLogPrecious.hpp" -#include "gc/z/zAddress.inline.hpp" -#include "gc/z/zAddressSpaceLimit.hpp" -#include "gc/z/zGlobals.hpp" -#include "gc/z/zInitialize.hpp" -#include "gc/z/zNMT.hpp" -#include "gc/z/zVirtualMemory.inline.hpp" -#include "utilities/align.hpp" -#include "utilities/debug.hpp" - -ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity) - : _manager(), - _reserved(0), - _initialized(false) { - - assert(max_capacity <= ZAddressOffsetMax, "Too large max_capacity"); - - // Initialize platform specific parts before reserving address space - pd_initialize_before_reserve(); - - // Register the Windows callbacks - pd_register_callbacks(&_manager); - - // Reserve address space - if (!reserve(max_capacity)) { - ZInitialize::error_d("Failed to reserve enough address space for Java heap"); - return; - } - - // Set ZAddressOffsetMax to the highest address end available after reservation - ZAddressOffsetMax = untype(highest_available_address_end()); - - // Successfully initialized - _initialized = true; -} - -#ifdef ASSERT -size_t ZVirtualMemoryManager::force_reserve_discontiguous(size_t size) { - const size_t min_range = calculate_min_range(size); - const size_t max_range = MAX2(align_down(size / ZForceDiscontiguousHeapReservations, ZGranuleSize), min_range); - size_t reserved = 0; - - // Try to reserve ZForceDiscontiguousHeapReservations number of virtual memory - // ranges. Starting with higher addresses. 
- uintptr_t end = ZAddressOffsetMax; - while (reserved < size && end >= max_range) { - const size_t remaining = size - reserved; - const size_t reserve_size = MIN2(max_range, remaining); - const uintptr_t reserve_start = end - reserve_size; - - if (reserve_contiguous(to_zoffset(reserve_start), reserve_size)) { - reserved += reserve_size; - } - - end -= reserve_size * 2; - } - - // If (reserved < size) attempt to reserve the rest via normal divide and conquer - uintptr_t start = 0; - while (reserved < size && start < ZAddressOffsetMax) { - const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start); - reserved += reserve_discontiguous(to_zoffset(start), remaining, min_range); - start += remaining; - } - - return reserved; -} -#endif - -size_t ZVirtualMemoryManager::reserve_discontiguous(zoffset start, size_t size, size_t min_range) { - if (size < min_range) { - // Too small - return 0; - } - - assert(is_aligned(size, ZGranuleSize), "Misaligned"); - - if (reserve_contiguous(start, size)) { - return size; - } - - const size_t half = size / 2; - if (half < min_range) { - // Too small - return 0; - } - - // Divide and conquer - const size_t first_part = align_down(half, ZGranuleSize); - const size_t second_part = size - first_part; - const size_t first_size = reserve_discontiguous(start, first_part, min_range); - const size_t second_size = reserve_discontiguous(start + first_part, second_part, min_range); - return first_size + second_size; -} - -size_t ZVirtualMemoryManager::calculate_min_range(size_t size) { - // Don't try to reserve address ranges smaller than 1% of the requested size. - // This avoids an explosion of reservation attempts in case large parts of the - // address space is already occupied. - return align_up(size / ZMaxVirtualReservations, ZGranuleSize); -} - -size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) { - const size_t min_range = calculate_min_range(size); - uintptr_t start = 0; - size_t reserved = 0; - - // Reserve size somewhere between [0, ZAddressOffsetMax) - while (reserved < size && start < ZAddressOffsetMax) { - const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start); - reserved += reserve_discontiguous(to_zoffset(start), remaining, min_range); - start += remaining; - } - - return reserved; -} - -bool ZVirtualMemoryManager::reserve_contiguous(zoffset start, size_t size) { - assert(is_aligned(size, ZGranuleSize), "Must be granule aligned 0x%zx", size); - - // Reserve address views - const zaddress_unsafe addr = ZOffset::address_unsafe(start); - - // Reserve address space - if (!pd_reserve(addr, size)) { - return false; - } - - // Register address views with native memory tracker - ZNMT::reserve(addr, size); - - // Make the address range free - _manager.register_range(start, size); - - return true; -} - -bool ZVirtualMemoryManager::reserve_contiguous(size_t size) { - // Allow at most 8192 attempts spread evenly across [0, ZAddressOffsetMax) - const size_t unused = ZAddressOffsetMax - size; - const size_t increment = MAX2(align_up(unused / 8192, ZGranuleSize), ZGranuleSize); - - for (uintptr_t start = 0; start + size <= ZAddressOffsetMax; start += increment) { - if (reserve_contiguous(to_zoffset(start), size)) { - // Success - return true; - } - } - - // Failed - return false; -} - -bool ZVirtualMemoryManager::reserve(size_t max_capacity) { - const size_t limit = MIN2(ZAddressOffsetMax, ZAddressSpaceLimit::heap()); - const size_t size = MIN2(max_capacity * ZVirtualToPhysicalRatio, limit); - - auto do_reserve = [&]() { 
-#ifdef ASSERT - if (ZForceDiscontiguousHeapReservations > 0) { - return force_reserve_discontiguous(size); - } -#endif - - // Prefer a contiguous address space - if (reserve_contiguous(size)) { - return size; - } - - // Fall back to a discontiguous address space - return reserve_discontiguous(size); - }; - - const size_t reserved = do_reserve(); - - const bool contiguous = _manager.free_is_contiguous(); - - log_info_p(gc, init)("Address Space Type: %s/%s/%s", - (contiguous ? "Contiguous" : "Discontiguous"), - (limit == ZAddressOffsetMax ? "Unrestricted" : "Restricted"), - (reserved == size ? "Complete" : "Degraded")); - log_info_p(gc, init)("Address Space Size: %zuM", reserved / M); - - // Record reserved - _reserved = reserved; - - return reserved >= max_capacity; -} - -void ZVirtualMemoryManager::unreserve(zoffset start, size_t size) { - const zaddress_unsafe addr = ZOffset::address_unsafe(start); - - // Unregister the reserved memory from NMT - ZNMT::unreserve(addr, size); - - // Unreserve address space - pd_unreserve(addr, size); -} - -void ZVirtualMemoryManager::unreserve_all() { - zoffset start; - size_t size; - - while (_manager.unregister_first(&start, &size)) { - unreserve(start, size); - } -} - -bool ZVirtualMemoryManager::is_initialized() const { - return _initialized; -} - -ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address) { - zoffset start; - - // Small pages are allocated at low addresses, while medium/large pages - // are allocated at high addresses (unless forced to be at a low address). - if (force_low_address || size <= ZPageSizeSmall) { - start = _manager.alloc_low_address(size); - } else { - start = _manager.alloc_high_address(size); - } - - if (start == zoffset(UINTPTR_MAX)) { - return ZVirtualMemory(); - } - - return ZVirtualMemory(start, size); -} - -void ZVirtualMemoryManager::free(const ZVirtualMemory& vmem) { - _manager.free(vmem.start(), vmem.size()); -} diff --git a/src/hotspot/share/gc/z/zVirtualMemory.hpp b/src/hotspot/share/gc/z/zVirtualMemory.hpp index f5185549e8a..46fec6ac79e 100644 --- a/src/hotspot/share/gc/z/zVirtualMemory.hpp +++ b/src/hotspot/share/gc/z/zVirtualMemory.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -25,67 +25,16 @@ #define SHARE_GC_Z_ZVIRTUALMEMORY_HPP #include "gc/z/zAddress.hpp" -#include "gc/z/zMemory.hpp" - -class ZVirtualMemory { - friend class VMStructs; - -private: - zoffset _start; - zoffset_end _end; +#include "gc/z/zRange.hpp" +#include "utilities/globalDefinitions.hpp" +class ZVirtualMemory : public ZRange { public: ZVirtualMemory(); ZVirtualMemory(zoffset start, size_t size); + ZVirtualMemory(const ZRange& range); - bool is_null() const; - zoffset start() const; - zoffset_end end() const; - size_t size() const; - - ZVirtualMemory split(size_t size); -}; - -class ZVirtualMemoryManager { - friend class ZMapperTest; - friend class ZVirtualMemoryManagerTest; - -private: - static size_t calculate_min_range(size_t size); - - ZMemoryManager _manager; - size_t _reserved; - bool _initialized; - - // Platform specific implementation - void pd_initialize_before_reserve(); - void pd_register_callbacks(ZMemoryManager* manager); - bool pd_reserve(zaddress_unsafe addr, size_t size); - void pd_unreserve(zaddress_unsafe addr, size_t size); - - bool reserve_contiguous(zoffset start, size_t size); - bool reserve_contiguous(size_t size); - size_t reserve_discontiguous(zoffset start, size_t size, size_t min_range); - size_t reserve_discontiguous(size_t size); - bool reserve(size_t max_capacity); - - void unreserve(zoffset start, size_t size); - - DEBUG_ONLY(size_t force_reserve_discontiguous(size_t size);) - -public: - ZVirtualMemoryManager(size_t max_capacity); - - bool is_initialized() const; - - size_t reserved() const; - zoffset lowest_available_address() const; - zoffset_end highest_available_address_end() const; - - ZVirtualMemory alloc(size_t size, bool force_low_address); - void free(const ZVirtualMemory& vmem); - - void unreserve_all(); + int granule_count() const; }; #endif // SHARE_GC_Z_ZVIRTUALMEMORY_HPP diff --git a/src/hotspot/share/gc/z/zVirtualMemory.inline.hpp b/src/hotspot/share/gc/z/zVirtualMemory.inline.hpp index 9d5fe7dd57a..3cbe1409b52 100644 --- a/src/hotspot/share/gc/z/zVirtualMemory.inline.hpp +++ b/src/hotspot/share/gc/z/zVirtualMemory.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -26,47 +26,32 @@ #include "gc/z/zVirtualMemory.hpp" -#include "gc/z/zMemory.inline.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zGlobals.hpp" +#include "gc/z/zRange.inline.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" inline ZVirtualMemory::ZVirtualMemory() - : _start(zoffset(UINTPTR_MAX)), - _end(zoffset_end(UINTPTR_MAX)) {} + : ZRange() {} inline ZVirtualMemory::ZVirtualMemory(zoffset start, size_t size) - : _start(start), - _end(to_zoffset_end(start, size)) {} - -inline bool ZVirtualMemory::is_null() const { - return _start == zoffset(UINTPTR_MAX); + : ZRange(start, size) { + // ZVirtualMemory is only used for ZGranuleSize multiple ranges + assert(is_aligned(untype(start), ZGranuleSize), "must be multiple of ZGranuleSize"); + assert(is_aligned(size, ZGranuleSize), "must be multiple of ZGranuleSize"); } -inline zoffset ZVirtualMemory::start() const { - return _start; -} +inline ZVirtualMemory::ZVirtualMemory(const ZRange& range) + : ZVirtualMemory(range.start(), range.size()) {} -inline zoffset_end ZVirtualMemory::end() const { - return _end; -} +inline int ZVirtualMemory::granule_count() const { + const size_t granule_count = size() >> ZGranuleSizeShift; -inline size_t ZVirtualMemory::size() const { - return _end - _start; -} + assert(granule_count <= static_cast(std::numeric_limits::max()), + "must not overflow an int %zu", granule_count); -inline ZVirtualMemory ZVirtualMemory::split(size_t size) { - _start += size; - return ZVirtualMemory(_start - size, size); -} - -inline size_t ZVirtualMemoryManager::reserved() const { - return _reserved; -} - -inline zoffset ZVirtualMemoryManager::lowest_available_address() const { - return _manager.peek_low_address(); -} - -inline zoffset_end ZVirtualMemoryManager::highest_available_address_end() const { - return _manager.peak_high_address_end(); + return static_cast(granule_count); } #endif // SHARE_GC_Z_ZVIRTUALMEMORY_INLINE_HPP diff --git a/src/hotspot/share/gc/z/zVirtualMemoryManager.cpp b/src/hotspot/share/gc/z/zVirtualMemoryManager.cpp new file mode 100644 index 00000000000..2f81a5cfe09 --- /dev/null +++ b/src/hotspot/share/gc/z/zVirtualMemoryManager.cpp @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
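ZVirtualMemory ranges are whole multiples of ZGranuleSize, so granule_count above reduces to a shift plus a bounds check into int. A tiny standalone check of that arithmetic follows, assuming the usual 2M granule (shift 21); the real constants live in zGlobals.

// Standalone sketch, not HotSpot code: granule_count arithmetic with an assumed 2M granule.
#include <cassert>
#include <climits>
#include <cstddef>

static const size_t GranuleSizeShift = 21;  // assumed: 2M granules
static const size_t GranuleSize = size_t(1) << GranuleSizeShift;

static int granule_count(size_t vmem_size) {
  assert(vmem_size % GranuleSize == 0);           // ranges are granule multiples
  const size_t count = vmem_size >> GranuleSizeShift;
  assert(count <= static_cast<size_t>(INT_MAX));  // must not overflow an int
  return static_cast<int>(count);
}

int main() {
  assert(granule_count(32 * GranuleSize) == 32);  // a 64M range covers 32 granules
  return 0;
}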
+ */ + +#include "gc/shared/gc_globals.hpp" +#include "gc/shared/gcLogPrecious.hpp" +#include "gc/z/zAddress.inline.hpp" +#include "gc/z/zAddressSpaceLimit.hpp" +#include "gc/z/zArray.hpp" +#include "gc/z/zGlobals.hpp" +#include "gc/z/zInitialize.hpp" +#include "gc/z/zNMT.hpp" +#include "gc/z/zNUMA.inline.hpp" +#include "gc/z/zValue.inline.hpp" +#include "gc/z/zVirtualMemory.inline.hpp" +#include "gc/z/zVirtualMemoryManager.inline.hpp" +#include "utilities/align.hpp" +#include "utilities/debug.hpp" + +ZVirtualMemoryReserver::ZVirtualMemoryReserver(size_t size) + : _registry(), + _reserved(reserve(size)) {} + +void ZVirtualMemoryReserver::initialize_partition_registry(ZVirtualMemoryRegistry* partition_registry, size_t size) { + assert(partition_registry->is_empty(), "Should be empty when initializing"); + + // Registers the Windows callbacks + pd_register_callbacks(partition_registry); + + _registry.transfer_from_low(partition_registry, size); + + // Set the limits according to the virtual memory given to this partition + partition_registry->anchor_limits(); +} + +void ZVirtualMemoryReserver::unreserve(const ZVirtualMemory& vmem) { + const zaddress_unsafe addr = ZOffset::address_unsafe(vmem.start()); + + // Unregister the reserved memory from NMT + ZNMT::unreserve(addr, vmem.size()); + + // Unreserve address space + pd_unreserve(addr, vmem.size()); +} + +void ZVirtualMemoryReserver::unreserve_all() { + for (ZVirtualMemory vmem; _registry.unregister_first(&vmem);) { + unreserve(vmem); + } +} + +bool ZVirtualMemoryReserver::is_empty() const { + return _registry.is_empty(); +} + +bool ZVirtualMemoryReserver::is_contiguous() const { + return _registry.is_contiguous(); +} + +size_t ZVirtualMemoryReserver::reserved() const { + return _reserved; +} + +zoffset_end ZVirtualMemoryReserver::highest_available_address_end() const { + return _registry.peak_high_address_end(); +} + +#ifdef ASSERT +size_t ZVirtualMemoryReserver::force_reserve_discontiguous(size_t size) { + const size_t min_range = calculate_min_range(size); + const size_t max_range = MAX2(align_down(size / ZForceDiscontiguousHeapReservations, ZGranuleSize), min_range); + size_t reserved = 0; + + // Try to reserve ZForceDiscontiguousHeapReservations number of virtual memory + // ranges. Starting with higher addresses. 
+ uintptr_t end = ZAddressOffsetMax; + while (reserved < size && end >= max_range) { + const size_t remaining = size - reserved; + const size_t reserve_size = MIN2(max_range, remaining); + const uintptr_t reserve_start = end - reserve_size; + + if (reserve_contiguous(to_zoffset(reserve_start), reserve_size)) { + reserved += reserve_size; + } + + end -= reserve_size * 2; + } + + // If (reserved < size) attempt to reserve the rest via normal divide and conquer + uintptr_t start = 0; + while (reserved < size && start < ZAddressOffsetMax) { + const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start); + reserved += reserve_discontiguous(to_zoffset(start), remaining, min_range); + start += remaining; + } + + return reserved; +} +#endif + +size_t ZVirtualMemoryReserver::reserve_discontiguous(zoffset start, size_t size, size_t min_range) { + if (size < min_range) { + // Too small + return 0; + } + + assert(is_aligned(size, ZGranuleSize), "Misaligned"); + + if (reserve_contiguous(start, size)) { + return size; + } + + const size_t half = size / 2; + if (half < min_range) { + // Too small + return 0; + } + + // Divide and conquer + const size_t first_part = align_down(half, ZGranuleSize); + const size_t second_part = size - first_part; + const size_t first_size = reserve_discontiguous(start, first_part, min_range); + const size_t second_size = reserve_discontiguous(start + first_part, second_part, min_range); + return first_size + second_size; +} + +size_t ZVirtualMemoryReserver::calculate_min_range(size_t size) { + // Don't try to reserve address ranges smaller than 1% of the requested size. + // This avoids an explosion of reservation attempts in case large parts of the + // address space is already occupied. + return align_up(size / ZMaxVirtualReservations, ZGranuleSize); +} + +size_t ZVirtualMemoryReserver::reserve_discontiguous(size_t size) { + const size_t min_range = calculate_min_range(size); + uintptr_t start = 0; + size_t reserved = 0; + + // Reserve size somewhere between [0, ZAddressOffsetMax) + while (reserved < size && start < ZAddressOffsetMax) { + const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start); + reserved += reserve_discontiguous(to_zoffset(start), remaining, min_range); + start += remaining; + } + + return reserved; +} + +bool ZVirtualMemoryReserver::reserve_contiguous(zoffset start, size_t size) { + assert(is_aligned(size, ZGranuleSize), "Must be granule aligned 0x%zx", size); + + // Reserve address views + const zaddress_unsafe addr = ZOffset::address_unsafe(start); + + // Reserve address space + if (!pd_reserve(addr, size)) { + return false; + } + + // Register address views with native memory tracker + ZNMT::reserve(addr, size); + + // Register the memory reservation + _registry.register_range({start, size}); + + return true; +} + +bool ZVirtualMemoryReserver::reserve_contiguous(size_t size) { + // Allow at most 8192 attempts spread evenly across [0, ZAddressOffsetMax) + const size_t unused = ZAddressOffsetMax - size; + const size_t increment = MAX2(align_up(unused / 8192, ZGranuleSize), ZGranuleSize); + + for (uintptr_t start = 0; start + size <= ZAddressOffsetMax; start += increment) { + if (reserve_contiguous(to_zoffset(start), size)) { + // Success + return true; + } + } + + // Failed + return false; +} + +size_t ZVirtualMemoryReserver::reserve(size_t size) { + // Register Windows callbacks + pd_register_callbacks(&_registry); + + // Reserve address space + +#ifdef ASSERT + if (ZForceDiscontiguousHeapReservations > 0) { + return 
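reserve_discontiguous above halves the request whenever a contiguous reservation fails and gives up once a piece would drop below min_range. The standalone model below keeps that recursion but replaces the platform reservation call with a fake predicate so the splitting behaviour can be followed in isolation; the 16M hole is made up for the example.

// Standalone sketch, not HotSpot code: divide-and-conquer address reservation
// with the real reservation call replaced by a predicate.
#include <cassert>
#include <cstddef>
#include <functional>

static const size_t Granule = 2 * 1024 * 1024;

static size_t align_down_to_granule(size_t size) {
  return size - (size % Granule);
}

// Try to reserve [start, start + size) as one piece; on failure, halve and recurse,
// giving up on pieces smaller than min_range.
static size_t reserve_discontiguous(size_t start, size_t size, size_t min_range,
                                    const std::function<bool(size_t, size_t)>& try_reserve) {
  if (size < min_range) {
    return 0;  // too small to bother with
  }

  if (try_reserve(start, size)) {
    return size;
  }

  const size_t half = size / 2;
  if (half < min_range) {
    return 0;
  }

  // Divide and conquer over two granule-aligned halves
  const size_t first_part = align_down_to_granule(half);
  const size_t second_part = size - first_part;
  return reserve_discontiguous(start, first_part, min_range, try_reserve) +
         reserve_discontiguous(start + first_part, second_part, min_range, try_reserve);
}

int main() {
  // Pretend [16M, 32M) is already occupied, so only pieces outside it can be reserved.
  auto try_reserve = [](size_t start, size_t size) {
    const size_t taken_start = 16 * 1024 * 1024;
    const size_t taken_end = 32 * 1024 * 1024;
    return start + size <= taken_start || start >= taken_end;
  };

  const size_t reserved = reserve_discontiguous(0, 64 * 1024 * 1024, Granule, try_reserve);
  assert(reserved == 48 * 1024 * 1024);  // everything except the 16M hole
  return 0;
}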
force_reserve_discontiguous(size); + } +#endif + + // Prefer a contiguous address space + if (reserve_contiguous(size)) { + return size; + } + + // Fall back to a discontiguous address space + return reserve_discontiguous(size); +} + +ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity) + : _partition_registries(), + _multi_partition_registry(), + _is_multi_partition_enabled(false), + _initialized(false) { + + assert(max_capacity <= ZAddressOffsetMax, "Too large max_capacity"); + + ZAddressSpaceLimit::print_limits(); + + const size_t limit = MIN2(ZAddressOffsetMax, ZAddressSpaceLimit::heap()); + + const size_t desired_for_partitions = max_capacity * ZVirtualToPhysicalRatio; + const size_t desired_for_multi_partition = ZNUMA::count() > 1 ? desired_for_partitions : 0; + + const size_t desired = desired_for_partitions + desired_for_multi_partition; + const size_t requested = desired <= limit + ? desired + : MIN2(desired_for_partitions, limit); + + // Reserve virtual memory for the heap + ZVirtualMemoryReserver reserver(requested); + + const size_t reserved = reserver.reserved(); + const bool is_contiguous = reserver.is_contiguous(); + + log_debug_p(gc, init)("Reserved Space: limit " EXACTFMT ", desired " EXACTFMT ", requested " EXACTFMT, + EXACTFMTARGS(limit), EXACTFMTARGS(desired), EXACTFMTARGS(requested)); + + if (reserved < max_capacity) { + ZInitialize::error_d("Failed to reserve " EXACTFMT " address space for Java heap", EXACTFMTARGS(max_capacity)); + return; + } + + // Set ZAddressOffsetMax to the highest address end available after reservation + ZAddressOffsetMax = untype(reserver.highest_available_address_end()); + + const size_t size_for_partitions = MIN2(reserved, desired_for_partitions); + + // Divide size_for_partitions virtual memory over the NUMA nodes + initialize_partitions(&reserver, size_for_partitions); + + // Set up multi-partition or unreserve the surplus memory + if (desired_for_multi_partition > 0 && reserved == desired) { + // Enough left to setup the multi-partition memory reservation + reserver.initialize_partition_registry(&_multi_partition_registry, desired_for_multi_partition); + _is_multi_partition_enabled = true; + } else { + // Failed to reserve enough memory for multi-partition, unreserve unused memory + reserver.unreserve_all(); + } + + assert(reserver.is_empty(), "Must have handled all reserved memory"); + + log_info_p(gc, init)("Reserved Space Type: %s/%s/%s", + (is_contiguous ? "Contiguous" : "Discontiguous"), + (requested == desired ? "Unrestricted" : "Restricted"), + (reserved == desired ? "Complete" : ((reserved < desired_for_partitions) ? "Degraded" : "NUMA-Degraded"))); + log_info_p(gc, init)("Reserved Space Size: " EXACTFMT, EXACTFMTARGS(reserved)); + + // Successfully initialized + _initialized = true; +} + +void ZVirtualMemoryManager::initialize_partitions(ZVirtualMemoryReserver* reserver, size_t size_for_partitions) { + precond(is_aligned(size_for_partitions, ZGranuleSize)); + + // If the capacity consist of less granules than the number of partitions + // some partitions will be empty. Distribute these shares on the none empty + // partitions. 
+  const uint32_t first_empty_numa_id = MIN2(static_cast<uint32_t>(size_for_partitions >> ZGranuleSizeShift), ZNUMA::count());
+  const uint32_t ignore_count = ZNUMA::count() - first_empty_numa_id;
+
+  // Install reserved memory into registry(s)
+  uint32_t numa_id;
+  ZPerNUMAIterator<ZVirtualMemoryRegistry> iter(&_partition_registries);
+  for (ZVirtualMemoryRegistry* registry; iter.next(&registry, &numa_id);) {
+    if (numa_id == first_empty_numa_id) {
+      break;
+    }
+
+    // Calculate how much reserved memory this partition gets
+    const size_t reserved_for_partition = ZNUMA::calculate_share(numa_id, size_for_partitions, ZGranuleSize, ignore_count);
+
+    // Transfer reserved memory
+    reserver->initialize_partition_registry(registry, reserved_for_partition);
+  }
+}
+
+bool ZVirtualMemoryManager::is_initialized() const {
+  return _initialized;
+}
+
+ZVirtualMemoryRegistry& ZVirtualMemoryManager::registry(uint32_t partition_id) {
+  return _partition_registries.get(partition_id);
+}
+
+const ZVirtualMemoryRegistry& ZVirtualMemoryManager::registry(uint32_t partition_id) const {
+  return _partition_registries.get(partition_id);
+}
+
+zoffset ZVirtualMemoryManager::lowest_available_address(uint32_t partition_id) const {
+  return registry(partition_id).peek_low_address();
+}
+
+void ZVirtualMemoryManager::insert(const ZVirtualMemory& vmem, uint32_t partition_id) {
+  assert(partition_id == lookup_partition_id(vmem), "wrong partition_id for vmem");
+  registry(partition_id).insert(vmem);
+}
+
+void ZVirtualMemoryManager::insert_multi_partition(const ZVirtualMemory& vmem) {
+  _multi_partition_registry.insert(vmem);
+}
+
+size_t ZVirtualMemoryManager::remove_from_low_many_at_most(size_t size, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_out) {
+  return registry(partition_id).remove_from_low_many_at_most(size, vmems_out);
+}
+
+ZVirtualMemory ZVirtualMemoryManager::remove_from_low(size_t size, uint32_t partition_id) {
+  return registry(partition_id).remove_from_low(size);
+}
+
+ZVirtualMemory ZVirtualMemoryManager::remove_from_low_multi_partition(size_t size) {
+  return _multi_partition_registry.remove_from_low(size);
+}
+
+void ZVirtualMemoryManager::insert_and_remove_from_low_many(const ZVirtualMemory& vmem, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_out) {
+  registry(partition_id).insert_and_remove_from_low_many(vmem, vmems_out);
+}
+
+ZVirtualMemory ZVirtualMemoryManager::insert_and_remove_from_low_exact_or_many(size_t size, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_in_out) {
+  return registry(partition_id).insert_and_remove_from_low_exact_or_many(size, vmems_in_out);
+}
diff --git a/src/hotspot/share/gc/z/zVirtualMemoryManager.hpp b/src/hotspot/share/gc/z/zVirtualMemoryManager.hpp
new file mode 100644
index 00000000000..a9ab86761ac
--- /dev/null
+++ b/src/hotspot/share/gc/z/zVirtualMemoryManager.hpp
@@ -0,0 +1,109 @@
+/*
+ * Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_HPP
+#define SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_HPP
+
+#include "gc/z/zAddress.hpp"
+#include "gc/z/zArray.hpp"
+#include "gc/z/zRangeRegistry.hpp"
+#include "gc/z/zValue.hpp"
+#include "gc/z/zVirtualMemory.hpp"
+
+using ZVirtualMemoryRegistry = ZRangeRegistry<ZVirtualMemory>;
+
+class ZVirtualMemoryReserver {
+  friend class ZMapperTest;
+  friend class ZVirtualMemoryManagerTest;
+
+private:
+
+  ZVirtualMemoryRegistry _registry;
+  const size_t _reserved;
+
+  static size_t calculate_min_range(size_t size);
+
+  // Platform specific implementation
+  void pd_register_callbacks(ZVirtualMemoryRegistry* registry);
+  bool pd_reserve(zaddress_unsafe addr, size_t size);
+  void pd_unreserve(zaddress_unsafe addr, size_t size);
+
+  bool reserve_contiguous(zoffset start, size_t size);
+  bool reserve_contiguous(size_t size);
+  size_t reserve_discontiguous(zoffset start, size_t size, size_t min_range);
+  size_t reserve_discontiguous(size_t size);
+
+  size_t reserve(size_t size);
+  void unreserve(const ZVirtualMemory& vmem);
+
+  DEBUG_ONLY(size_t force_reserve_discontiguous(size_t size);)
+
+public:
+  ZVirtualMemoryReserver(size_t size);
+
+  void initialize_partition_registry(ZVirtualMemoryRegistry* partition_registry, size_t size);
+
+  void unreserve_all();
+
+  bool is_empty() const;
+  bool is_contiguous() const;
+
+  size_t reserved() const;
+
+  zoffset_end highest_available_address_end() const;
+};
+
+class ZVirtualMemoryManager {
+private:
+  ZPerNUMA<ZVirtualMemoryRegistry> _partition_registries;
+  ZVirtualMemoryRegistry _multi_partition_registry;
+  bool _is_multi_partition_enabled;
+  bool _initialized;
+
+  ZVirtualMemoryRegistry& registry(uint32_t partition_id);
+  const ZVirtualMemoryRegistry& registry(uint32_t partition_id) const;
+
+public:
+  ZVirtualMemoryManager(size_t max_capacity);
+
+  void initialize_partitions(ZVirtualMemoryReserver* reserver, size_t size_for_partitions);
+
+  bool is_initialized() const;
+  bool is_multi_partition_enabled() const;
+  bool is_in_multi_partition(const ZVirtualMemory& vmem) const;
+
+  uint32_t lookup_partition_id(const ZVirtualMemory& vmem) const;
+  zoffset lowest_available_address(uint32_t partition_id) const;
+
+  void insert(const ZVirtualMemory& vmem, uint32_t partition_id);
+  void insert_multi_partition(const ZVirtualMemory& vmem);
+
+  size_t remove_from_low_many_at_most(size_t size, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_out);
+  ZVirtualMemory remove_from_low(size_t size, uint32_t partition_id);
+  ZVirtualMemory remove_from_low_multi_partition(size_t size);
+
+  void insert_and_remove_from_low_many(const ZVirtualMemory& vmem, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_out);
+  ZVirtualMemory insert_and_remove_from_low_exact_or_many(size_t size, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_in_out);
+};
+
+#endif // SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_HPP
diff --git a/src/hotspot/share/gc/z/zVirtualMemoryManager.inline.hpp b/src/hotspot/share/gc/z/zVirtualMemoryManager.inline.hpp
new file mode 100644
index 00000000000..78f966d0f84
--- /dev/null
+++ b/src/hotspot/share/gc/z/zVirtualMemoryManager.inline.hpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#ifndef SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_INLINE_HPP
+#define SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_INLINE_HPP
+
+#include "gc/z/zVirtualMemoryManager.hpp"
+
+#include "gc/z/zRangeRegistry.inline.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+
+inline bool ZVirtualMemoryManager::is_multi_partition_enabled() const {
+  return _is_multi_partition_enabled;
+}
+
+inline bool ZVirtualMemoryManager::is_in_multi_partition(const ZVirtualMemory& vmem) const {
+  return _multi_partition_registry.limits_contain(vmem);
+}
+
+inline uint32_t ZVirtualMemoryManager::lookup_partition_id(const ZVirtualMemory& vmem) const {
+  const uint32_t num_partitions = _partition_registries.count();
+  for (uint32_t partition_id = 0; partition_id < num_partitions; partition_id++) {
+    if (registry(partition_id).limits_contain(vmem)) {
+      return partition_id;
+    }
+  }
+
+  ShouldNotReachHere();
+}
+
+#endif // SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_INLINE_HPP
diff --git a/src/hotspot/share/gc/z/z_globals.hpp b/src/hotspot/share/gc/z/z_globals.hpp
index 4555b470cac..17a77a12ca4 100644
--- a/src/hotspot/share/gc/z/z_globals.hpp
+++ b/src/hotspot/share/gc/z/z_globals.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -68,13 +68,6 @@
   product(bool, ZCollectionIntervalOnly, false, \
           "Only use timers for GC heuristics") \
  \
-  product(double, ZAsyncUnmappingLimit, 100.0, DIAGNOSTIC, \
-          "Specify the max amount (percentage of max heap size) of async " \
-          "unmapping that can be in-flight before unmapping requests are " \
-          "temporarily forced to be synchronous instead. " \
-          "The default means after an amount of pages proportional to the " \
-          "max capacity is enqueued, we resort to synchronous unmapping.") \
- \
   product(uint, ZStatisticsInterval, 10, DIAGNOSTIC, \
           "Time between statistics print outs (in seconds)") \
           range(1, (uint)-1) \
@@ -118,6 +111,11 @@
   develop(bool, ZVerifyOops, false, \
           "Verify accessed oops") \
  \
+  develop(uint, ZFakeNUMA, 1, \
+          "ZFakeNUMA is used to test the internal NUMA memory support " \
+          "without the need for UseNUMA") \
+          range(1, 16) \
+ \
   develop(size_t, ZForceDiscontiguousHeapReservations, 0, \
           "The gc will attempt to split the heap reservation into this " \
           "many reservations, subject to available virtual address space " \
diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml
index 562d31b828d..6aa3e05cda1 100644
--- a/src/hotspot/share/jfr/metadata/metadata.xml
+++ b/src/hotspot/share/jfr/metadata/metadata.xml
@@ -1,7 +1,7 @@