8350441: ZGC: Overhaul Page Allocation
Co-authored-by: Axel Boldt-Christmas <aboldtch@openjdk.org>
Co-authored-by: Erik Österlund <eosterlund@openjdk.org>
Co-authored-by: Stefan Karlsson <stefank@openjdk.org>
Co-authored-by: Stefan Johansson <sjohanss@openjdk.org>
Reviewed-by: stefank, aboldtch, eosterlund
This commit is contained in:
parent 6c2667018a
commit 7e69b98e05
@@ -95,7 +95,7 @@ size_t ZPlatformAddressOffsetBits() {
   static const size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
   const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
   const size_t min_address_offset_bits = max_address_offset_bits - 2;
-  const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
+  const size_t address_offset = ZGlobalsPointers::min_address_offset_request();
   const size_t address_offset_bits = log2i_exact(address_offset);
   return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
 }

@@ -92,7 +92,7 @@ size_t ZPlatformAddressOffsetBits() {
   static const size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
   const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
   const size_t min_address_offset_bits = max_address_offset_bits - 2;
-  const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
+  const size_t address_offset = ZGlobalsPointers::min_address_offset_request();
   const size_t address_offset_bits = log2i_exact(address_offset);
   return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
 }

@@ -94,7 +94,7 @@ size_t ZPlatformAddressOffsetBits() {
   static const size_t valid_max_address_offset_bits = probe_valid_max_address_bit() + 1;
   const size_t max_address_offset_bits = valid_max_address_offset_bits - 3;
   const size_t min_address_offset_bits = max_address_offset_bits - 2;
-  const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
+  const size_t address_offset = ZGlobalsPointers::min_address_offset_request();
   const size_t address_offset_bits = log2i_exact(address_offset);
   return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
 }

@@ -32,7 +32,7 @@ size_t ZPointerLoadShift;
 size_t ZPlatformAddressOffsetBits() {
   const size_t min_address_offset_bits = 42; // 4TB
   const size_t max_address_offset_bits = 44; // 16TB
-  const size_t address_offset = round_up_power_of_2(MaxHeapSize * ZVirtualToPhysicalRatio);
+  const size_t address_offset = ZGlobalsPointers::min_address_offset_request();
   const size_t address_offset_bits = log2i_exact(address_offset);
   return clamp(address_offset_bits, min_address_offset_bits, max_address_offset_bits);
 }

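To make the change above concrete, here is a minimal, self-contained sketch (not ZGC code) of how a requested address-space size feeds the clamp. The 42/44-bit bounds mirror the fixed-bounds platform above, while the 32TB request and the precomputed log2 value are assumptions chosen for illustration.

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t min_address_offset_bits = 42;   // 4TB lower bound
      const size_t max_address_offset_bits = 44;   // 16TB upper bound
      // Stand-in for ZGlobalsPointers::min_address_offset_request():
      // assume the reservation request rounds up to 32TB.
      const size_t address_offset = size_t(1) << 45;
      const size_t address_offset_bits = 45;       // log2i_exact(address_offset)
      const size_t result = std::clamp(address_offset_bits,
                                       min_address_offset_bits,
                                       max_address_offset_bits);
      std::printf("address offset bits = %zu\n", result); // prints 44: capped at the 16TB maximum
      return 0;
    }
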
@@ -21,15 +21,24 @@
  * questions.
  */

#include "gc/z/zNUMA.hpp"
#include "utilities/globalDefinitions.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/z/zCPU.inline.hpp"
#include "gc/z/zNUMA.inline.hpp"
#include "runtime/globals_extension.hpp"

void ZNUMA::pd_initialize() {
  _enabled = false;
  _count = 1;
  _count = !FLAG_IS_DEFAULT(ZFakeNUMA)
         ? ZFakeNUMA
         : 1;
}

uint32_t ZNUMA::id() {
  if (is_faked()) {
    // ZFakeNUMA testing, ignores _enabled
    return ZCPU::id() % ZFakeNUMA;
  }

  return 0;
}

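A small sketch of how the ZCPU::id() % ZFakeNUMA mapping above spreads CPUs round-robin over the faked nodes; ZFakeNUMA=4 and the eight CPU ids are assumptions for illustration.

    #include <cstdint>
    #include <cstdio>

    static uint32_t fake_numa_id(uint32_t cpu_id, uint32_t fake_nodes) {
      // Mirrors the "ZCPU::id() % ZFakeNUMA" mapping in the hunk above.
      return cpu_id % fake_nodes;
    }

    int main() {
      const uint32_t fake_nodes = 4; // pretend -XX:ZFakeNUMA=4
      for (uint32_t cpu = 0; cpu < 8; cpu++) {
        std::printf("cpu %u -> fake node %u\n", cpu, fake_numa_id(cpu, fake_nodes));
      }
      return 0;
    }
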
@@ -26,7 +26,6 @@
#include "gc/z/zGlobals.hpp"
#include "gc/z/zInitialize.hpp"
#include "gc/z/zLargePages.inline.hpp"
#include "gc/z/zPhysicalMemory.inline.hpp"
#include "gc/z/zPhysicalMemoryBacking_bsd.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
@ -97,12 +96,12 @@ void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
|
||||
// Does nothing
|
||||
}
|
||||
|
||||
bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const {
|
||||
bool ZPhysicalMemoryBacking::commit_inner(zbacking_offset offset, size_t length) const {
|
||||
assert(is_aligned(untype(offset), os::vm_page_size()), "Invalid offset");
|
||||
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
|
||||
|
||||
log_trace(gc, heap)("Committing memory: %zuM-%zuM (%zuM)",
|
||||
untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
|
||||
untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M);
|
||||
|
||||
const uintptr_t addr = _base + untype(offset);
|
||||
const void* const res = mmap((void*)addr, length, PROT_READ | PROT_WRITE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
|
||||
@ -116,7 +115,7 @@ bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const {
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const {
|
||||
size_t ZPhysicalMemoryBacking::commit(zbacking_offset offset, size_t length, uint32_t /* numa_id - ignored */) const {
|
||||
// Try to commit the whole region
|
||||
if (commit_inner(offset, length)) {
|
||||
// Success
|
||||
@ -124,8 +123,8 @@ size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const {
|
||||
}
|
||||
|
||||
// Failed, try to commit as much as possible
|
||||
zoffset start = offset;
|
||||
zoffset end = offset + length;
|
||||
zbacking_offset start = offset;
|
||||
zbacking_offset end = offset + length;
|
||||
|
||||
for (;;) {
|
||||
length = align_down((end - start) / 2, ZGranuleSize);
|
||||
@ -144,12 +143,12 @@ size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const {
|
||||
}
|
||||
}
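The hunk above only shows the first line of the fallback loop, so the following sketch reconstructs the "commit as much as possible" behavior as a halving search; the loop body past the shown line, the granule size, and the try_commit predicate are assumptions, not the actual backing implementation.

    #include <cstddef>
    #include <cstdio>

    const size_t GranuleSize = 2 * 1024 * 1024;   // stand-in for ZGranuleSize

    static size_t align_down_sz(size_t value, size_t alignment) {
      return value - (value % alignment);
    }

    // Pretend the backing can only satisfy the first 6 granules of any request.
    static bool try_commit(size_t offset, size_t length) {
      return offset + length <= 6 * GranuleSize;
    }

    // Returns how many bytes starting at 'offset' could be committed.
    static size_t commit_as_much_as_possible(size_t offset, size_t total) {
      size_t start = offset;
      size_t end = offset + total;
      for (;;) {
        const size_t length = align_down_sz((end - start) / 2, GranuleSize);
        if (length == 0) {
          break;                      // Range too small to halve further
        }
        if (try_commit(start, length)) {
          start += length;            // Keep the committed prefix, try to extend it
        } else {
          end = start + length;       // Shrink the window and retry
        }
      }
      return start - offset;          // Bytes successfully committed
    }

    int main() {
      const size_t committed = commit_as_much_as_possible(0, 16 * GranuleSize);
      std::printf("committed %zu of 16 granules\n", committed / GranuleSize); // prints 6
      return 0;
    }
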
|
||||
|
||||
size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const {
|
||||
size_t ZPhysicalMemoryBacking::uncommit(zbacking_offset offset, size_t length) const {
|
||||
assert(is_aligned(untype(offset), os::vm_page_size()), "Invalid offset");
|
||||
assert(is_aligned(length, os::vm_page_size()), "Invalid length");
|
||||
|
||||
log_trace(gc, heap)("Uncommitting memory: %zuM-%zuM (%zuM)",
|
||||
untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
|
||||
untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M);
|
||||
|
||||
const uintptr_t start = _base + untype(offset);
|
||||
const void* const res = mmap((void*)start, length, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE, -1, 0);
|
||||
@ -162,7 +161,7 @@ size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const {
|
||||
return length;
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zoffset offset) const {
|
||||
void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const {
|
||||
const ZErrno err = mremap(_base + untype(offset), untype(addr), size);
|
||||
if (err) {
|
||||
fatal("Failed to remap memory (%s)", err.to_string());
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -31,7 +31,7 @@ private:
|
||||
uintptr_t _base;
|
||||
bool _initialized;
|
||||
|
||||
bool commit_inner(zoffset offset, size_t length) const;
|
||||
bool commit_inner(zbacking_offset offset, size_t length) const;
|
||||
|
||||
public:
|
||||
ZPhysicalMemoryBacking(size_t max_capacity);
|
||||
@ -40,10 +40,10 @@ public:
|
||||
|
||||
void warn_commit_limits(size_t max_capacity) const;
|
||||
|
||||
size_t commit(zoffset offset, size_t length) const;
|
||||
size_t uncommit(zoffset offset, size_t length) const;
|
||||
size_t commit(zbacking_offset offset, size_t length, uint32_t numa_id) const;
|
||||
size_t uncommit(zbacking_offset offset, size_t length) const;
|
||||
|
||||
void map(zaddress_unsafe addr, size_t size, zoffset offset) const;
|
||||
void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const;
|
||||
void unmap(zaddress_unsafe addr, size_t size) const;
|
||||
};
|
||||
|
||||
|
||||
@ -21,23 +21,34 @@
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zCPU.inline.hpp"
|
||||
#include "gc/z/zErrno.hpp"
|
||||
#include "gc/z/zNUMA.hpp"
|
||||
#include "gc/z/zNUMA.inline.hpp"
|
||||
#include "gc/z/zSyscall_linux.hpp"
|
||||
#include "os_linux.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
void ZNUMA::pd_initialize() {
|
||||
_enabled = UseNUMA;
|
||||
|
||||
// UseNUMA and is_faked() are mutually exclusive; this is enforced in zArguments.cpp.
|
||||
_count = UseNUMA
|
||||
? os::Linux::numa_max_node() + 1
|
||||
: 1;
|
||||
: !FLAG_IS_DEFAULT(ZFakeNUMA)
|
||||
? ZFakeNUMA
|
||||
: 1; // No NUMA nodes
|
||||
}
|
||||
|
||||
uint32_t ZNUMA::id() {
|
||||
if (is_faked()) {
|
||||
// ZFakeNUMA testing, ignores _enabled
|
||||
return ZCPU::id() % ZFakeNUMA;
|
||||
}
|
||||
|
||||
if (!_enabled) {
|
||||
// NUMA support not enabled
|
||||
return 0;
|
||||
|
||||
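A compact sketch of the node-count selection above (real NUMA first, then ZFakeNUMA, then a single node); the helper name and flag arguments are stand-ins for the HotSpot flags.

    #include <cstdint>
    #include <cstdio>

    // Returns the partition count: real NUMA wins, then ZFakeNUMA, then one node.
    static uint32_t select_node_count(bool use_numa, uint32_t numa_max_node,
                                      bool fake_numa_set, uint32_t fake_numa) {
      if (use_numa) {
        return numa_max_node + 1;   // highest node id + 1, as numa_max_node() reports
      }
      if (fake_numa_set) {
        return fake_numa;           // ZFakeNUMA partitions for testing
      }
      return 1;                     // No NUMA nodes
    }

    int main() {
      std::printf("%u\n", select_node_count(true, 3, false, 0));   // 4 real nodes
      std::printf("%u\n", select_node_count(false, 0, true, 8));   // 8 fake nodes
      std::printf("%u\n", select_node_count(false, 0, false, 0));  // 1
      return 0;
    }
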
@ -388,7 +388,7 @@ bool ZPhysicalMemoryBacking::tmpfs_supports_transparent_huge_pages() const {
|
||||
return access(ZFILENAME_SHMEM_ENABLED, R_OK) == 0;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(zoffset offset, size_t length, bool touch) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_hugetlbfs(zbacking_offset offset, size_t length, bool touch) const {
|
||||
// On hugetlbfs, mapping a file segment will fail immediately, without
|
||||
// the need to touch the mapped pages first, if there aren't enough huge
|
||||
// pages available to back the mapping.
|
||||
@ -439,7 +439,7 @@ static bool safe_touch_mapping(void* addr, size_t length, size_t page_size) {
|
||||
return true;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(zoffset offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(zbacking_offset offset, size_t length) const {
|
||||
// On tmpfs, we need to touch the mapped pages to figure out
|
||||
// if there are enough pages available to back the mapping.
|
||||
void* const addr = mmap(nullptr, length, PROT_READ|PROT_WRITE, MAP_SHARED, _fd, untype(offset));
|
||||
@ -468,11 +468,11 @@ ZErrno ZPhysicalMemoryBacking::fallocate_compat_mmap_tmpfs(zoffset offset, size_
|
||||
return backed ? 0 : ENOMEM;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(zoffset offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(zbacking_offset offset, size_t length) const {
|
||||
uint8_t data = 0;
|
||||
|
||||
// Allocate backing memory by writing to each block
|
||||
for (zoffset pos = offset; pos < offset + length; pos += _block_size) {
|
||||
for (zbacking_offset pos = offset; pos < offset + length; pos += _block_size) {
|
||||
if (pwrite(_fd, &data, sizeof(data), untype(pos)) == -1) {
|
||||
// Failed
|
||||
return errno;
|
||||
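For reference, a stand-alone sketch of the pwrite-based emulation shown above: touching one byte at the start of each block forces the filesystem to allocate backing storage when fallocate(2) is unavailable. The fd handling and block size are illustrative, not the ZGC wiring.

    #include <cerrno>
    #include <cstdint>
    #include <unistd.h>

    // Returns 0 on success, otherwise the errno of the failing write.
    static int allocate_by_touching_blocks(int fd, uint64_t offset, size_t length, size_t block_size) {
      const uint8_t data = 0;
      for (uint64_t pos = offset; pos < offset + length; pos += block_size) {
        if (pwrite(fd, &data, sizeof(data), static_cast<off_t>(pos)) == -1) {
          return errno; // Out of space (ENOSPC) or another I/O error
        }
      }
      return 0;
    }
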
@ -483,7 +483,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_compat_pwrite(zoffset offset, size_t le
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(zoffset offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(zbacking_offset offset, size_t length) const {
|
||||
// fallocate(2) is only supported by tmpfs since Linux 3.5, and by hugetlbfs
|
||||
// since Linux 4.3. When fallocate(2) is not supported we emulate it using
|
||||
// mmap/munmap (for hugetlbfs and tmpfs with transparent huge pages) or pwrite
|
||||
@ -497,7 +497,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_compat(zoffset offset, size_t
|
||||
}
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(zoffset offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(zbacking_offset offset, size_t length) const {
|
||||
const int mode = 0; // Allocate
|
||||
const int res = ZSyscall::fallocate(_fd, mode, untype(offset), length);
|
||||
if (res == -1) {
|
||||
@ -509,7 +509,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole_syscall(zoffset offset, size_
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(zoffset offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(zbacking_offset offset, size_t length) const {
|
||||
// Using compat mode is more efficient when allocating space on hugetlbfs.
|
||||
// Note that allocating huge pages this way will only reserve them, and not
|
||||
// associate them with segments of the file. We must guarantee that we at
|
||||
@ -536,7 +536,7 @@ ZErrno ZPhysicalMemoryBacking::fallocate_fill_hole(zoffset offset, size_t length
|
||||
return fallocate_fill_hole_compat(offset, length);
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(zoffset offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(zbacking_offset offset, size_t length) const {
|
||||
if (ZLargePages::is_explicit()) {
|
||||
// We can only punch hole in pages that have been touched. Non-touched
|
||||
// pages are only reserved, and not associated with any specific file
|
||||
@ -559,9 +559,9 @@ ZErrno ZPhysicalMemoryBacking::fallocate_punch_hole(zoffset offset, size_t lengt
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, zoffset offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, zbacking_offset offset, size_t length) const {
|
||||
// Try first half
|
||||
const zoffset offset0 = offset;
|
||||
const zbacking_offset offset0 = offset;
|
||||
const size_t length0 = align_up(length / 2, _block_size);
|
||||
const ZErrno err0 = fallocate(punch_hole, offset0, length0);
|
||||
if (err0) {
|
||||
@ -569,7 +569,7 @@ ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, zoffset offs
|
||||
}
|
||||
|
||||
// Try second half
|
||||
const zoffset offset1 = offset0 + length0;
|
||||
const zbacking_offset offset1 = offset0 + length0;
|
||||
const size_t length1 = length - length0;
|
||||
const ZErrno err1 = fallocate(punch_hole, offset1, length1);
|
||||
if (err1) {
|
||||
@ -580,7 +580,7 @@ ZErrno ZPhysicalMemoryBacking::split_and_fallocate(bool punch_hole, zoffset offs
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, zoffset offset, size_t length) const {
|
||||
ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, zbacking_offset offset, size_t length) const {
|
||||
assert(is_aligned(untype(offset), _block_size), "Invalid offset");
|
||||
assert(is_aligned(length, _block_size), "Invalid length");
|
||||
|
||||
@ -596,9 +596,9 @@ ZErrno ZPhysicalMemoryBacking::fallocate(bool punch_hole, zoffset offset, size_t
|
||||
return err;
|
||||
}
|
||||
|
||||
bool ZPhysicalMemoryBacking::commit_inner(zoffset offset, size_t length) const {
|
||||
bool ZPhysicalMemoryBacking::commit_inner(zbacking_offset offset, size_t length) const {
|
||||
log_trace(gc, heap)("Committing memory: %zuM-%zuM (%zuM)",
|
||||
untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
|
||||
untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M);
|
||||
|
||||
retry:
|
||||
const ZErrno err = fallocate(false /* punch_hole */, offset, length);
|
||||
@ -627,30 +627,11 @@ retry:
|
||||
return true;
|
||||
}
|
||||
|
||||
static int offset_to_node(zoffset offset) {
|
||||
const GrowableArray<int>* mapping = os::Linux::numa_nindex_to_node();
|
||||
const size_t nindex = (untype(offset) >> ZGranuleSizeShift) % mapping->length();
|
||||
return mapping->at((int)nindex);
|
||||
}
|
||||
size_t ZPhysicalMemoryBacking::commit_numa_preferred(zbacking_offset offset, size_t length, uint32_t numa_id) const {
|
||||
// Setup NUMA policy to allocate memory from a preferred node
|
||||
os::Linux::numa_set_preferred((int)numa_id);
|
||||
|
||||
size_t ZPhysicalMemoryBacking::commit_numa_interleaved(zoffset offset, size_t length) const {
|
||||
size_t committed = 0;
|
||||
|
||||
// Commit one granule at a time, so that each granule
|
||||
// can be allocated from a different preferred node.
|
||||
while (committed < length) {
|
||||
const zoffset granule_offset = offset + committed;
|
||||
|
||||
// Setup NUMA policy to allocate memory from a preferred node
|
||||
os::Linux::numa_set_preferred(offset_to_node(granule_offset));
|
||||
|
||||
if (!commit_inner(granule_offset, ZGranuleSize)) {
|
||||
// Failed
|
||||
break;
|
||||
}
|
||||
|
||||
committed += ZGranuleSize;
|
||||
}
|
||||
const size_t committed = commit_default(offset, length);
|
||||
|
||||
// Restore NUMA policy
|
||||
os::Linux::numa_set_preferred(-1);
|
||||
@ -658,7 +639,7 @@ size_t ZPhysicalMemoryBacking::commit_numa_interleaved(zoffset offset, size_t le
|
||||
return committed;
|
||||
}
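A sketch of the preferred-node commit flow introduced by commit_numa_preferred above; set_preferred_node and commit_region are hypothetical stand-ins for os::Linux::numa_set_preferred and commit_default.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Stand-in for os::Linux::numa_set_preferred(); a real implementation would call libnuma.
    static void set_preferred_node(int node) {
      std::printf("preferred node set to %d\n", node);
    }

    // Stand-in for the regular commit path (commit_default in the hunk above).
    static size_t commit_region(size_t offset, size_t length) {
      (void)offset;
      return length; // pretend the whole request committed
    }

    static size_t commit_numa_preferred_sketch(size_t offset, size_t length, uint32_t numa_id) {
      // Page-ins must land on the partition's node, so prefer it before committing
      set_preferred_node(static_cast<int>(numa_id));

      const size_t committed = commit_region(offset, length);

      // Restore the default allocation policy
      set_preferred_node(-1);

      return committed;
    }

    int main() {
      commit_numa_preferred_sketch(0, 4 * 1024 * 1024, 1);
      return 0;
    }
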
|
||||
|
||||
size_t ZPhysicalMemoryBacking::commit_default(zoffset offset, size_t length) const {
|
||||
size_t ZPhysicalMemoryBacking::commit_default(zbacking_offset offset, size_t length) const {
|
||||
// Try to commit the whole region
|
||||
if (commit_inner(offset, length)) {
|
||||
// Success
|
||||
@ -666,8 +647,8 @@ size_t ZPhysicalMemoryBacking::commit_default(zoffset offset, size_t length) con
|
||||
}
|
||||
|
||||
// Failed, try to commit as much as possible
|
||||
zoffset start = offset;
|
||||
zoffset end = offset + length;
|
||||
zbacking_offset start = offset;
|
||||
zbacking_offset_end end = to_zbacking_offset_end(offset, length);
|
||||
|
||||
for (;;) {
|
||||
length = align_down((end - start) / 2, ZGranuleSize);
|
||||
@ -686,19 +667,19 @@ size_t ZPhysicalMemoryBacking::commit_default(zoffset offset, size_t length) con
|
||||
}
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) const {
|
||||
size_t ZPhysicalMemoryBacking::commit(zbacking_offset offset, size_t length, uint32_t numa_id) const {
|
||||
if (ZNUMA::is_enabled() && !ZLargePages::is_explicit()) {
|
||||
// To get granule-level NUMA interleaving when using non-large pages,
|
||||
// we must explicitly interleave the memory at commit/fallocate time.
|
||||
return commit_numa_interleaved(offset, length);
|
||||
// The memory is required to be preferred at the time it is paged in. As a
|
||||
// consequence we must prefer the memory when committing non-large pages.
|
||||
return commit_numa_preferred(offset, length, numa_id);
|
||||
}
|
||||
|
||||
return commit_default(offset, length);
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const {
|
||||
size_t ZPhysicalMemoryBacking::uncommit(zbacking_offset offset, size_t length) const {
|
||||
log_trace(gc, heap)("Uncommitting memory: %zuM-%zuM (%zuM)",
|
||||
untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
|
||||
untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M);
|
||||
|
||||
const ZErrno err = fallocate(true /* punch_hole */, offset, length);
|
||||
if (err) {
|
||||
@ -709,7 +690,7 @@ size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) const {
|
||||
return length;
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zoffset offset) const {
|
||||
void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const {
|
||||
const void* const res = mmap((void*)untype(addr), size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, _fd, untype(offset));
|
||||
if (res == MAP_FAILED) {
|
||||
ZErrno err;
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -48,19 +48,19 @@ private:
|
||||
bool is_hugetlbfs() const;
|
||||
bool tmpfs_supports_transparent_huge_pages() const;
|
||||
|
||||
ZErrno fallocate_compat_mmap_hugetlbfs(zoffset offset, size_t length, bool touch) const;
|
||||
ZErrno fallocate_compat_mmap_tmpfs(zoffset offset, size_t length) const;
|
||||
ZErrno fallocate_compat_pwrite(zoffset offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole_compat(zoffset offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole_syscall(zoffset offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole(zoffset offset, size_t length) const;
|
||||
ZErrno fallocate_punch_hole(zoffset offset, size_t length) const;
|
||||
ZErrno split_and_fallocate(bool punch_hole, zoffset offset, size_t length) const;
|
||||
ZErrno fallocate(bool punch_hole, zoffset offset, size_t length) const;
|
||||
ZErrno fallocate_compat_mmap_hugetlbfs(zbacking_offset offset, size_t length, bool touch) const;
|
||||
ZErrno fallocate_compat_mmap_tmpfs(zbacking_offset offset, size_t length) const;
|
||||
ZErrno fallocate_compat_pwrite(zbacking_offset offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole_compat(zbacking_offset offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole_syscall(zbacking_offset offset, size_t length) const;
|
||||
ZErrno fallocate_fill_hole(zbacking_offset offset, size_t length) const;
|
||||
ZErrno fallocate_punch_hole(zbacking_offset offset, size_t length) const;
|
||||
ZErrno split_and_fallocate(bool punch_hole, zbacking_offset offset, size_t length) const;
|
||||
ZErrno fallocate(bool punch_hole, zbacking_offset offset, size_t length) const;
|
||||
|
||||
bool commit_inner(zoffset offset, size_t length) const;
|
||||
size_t commit_numa_interleaved(zoffset offset, size_t length) const;
|
||||
size_t commit_default(zoffset offset, size_t length) const;
|
||||
bool commit_inner(zbacking_offset offset, size_t length) const;
|
||||
size_t commit_numa_preferred(zbacking_offset offset, size_t length, uint32_t numa_id) const;
|
||||
size_t commit_default(zbacking_offset offset, size_t length) const;
|
||||
|
||||
public:
|
||||
ZPhysicalMemoryBacking(size_t max_capacity);
|
||||
@ -69,10 +69,10 @@ public:
|
||||
|
||||
void warn_commit_limits(size_t max_capacity) const;
|
||||
|
||||
size_t commit(zoffset offset, size_t length) const;
|
||||
size_t uncommit(zoffset offset, size_t length) const;
|
||||
size_t commit(zbacking_offset offset, size_t length, uint32_t numa_id) const;
|
||||
size_t uncommit(zbacking_offset offset, size_t length) const;
|
||||
|
||||
void map(zaddress_unsafe addr, size_t size, zoffset offset) const;
|
||||
void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const;
|
||||
void unmap(zaddress_unsafe addr, size_t size) const;
|
||||
};
|
||||
|
||||
|
||||
@ -22,21 +22,16 @@
|
||||
*/
|
||||
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zVirtualMemory.hpp"
|
||||
#include "gc/z/zVirtualMemoryManager.hpp"
|
||||
#include "logging/log.hpp"
|
||||
|
||||
#include <sys/mman.h>
|
||||
#include <sys/types.h>
|
||||
|
||||
void ZVirtualMemoryManager::pd_initialize_before_reserve() {
|
||||
void ZVirtualMemoryReserver::pd_register_callbacks(ZVirtualMemoryRegistry* registry) {
|
||||
// Does nothing
|
||||
}
|
||||
|
||||
void ZVirtualMemoryManager::pd_register_callbacks(ZMemoryManager* manager) {
|
||||
// Does nothing
|
||||
}
|
||||
|
||||
bool ZVirtualMemoryManager::pd_reserve(zaddress_unsafe addr, size_t size) {
|
||||
bool ZVirtualMemoryReserver::pd_reserve(zaddress_unsafe addr, size_t size) {
|
||||
void* const res = mmap((void*)untype(addr), size, PROT_NONE, MAP_ANONYMOUS|MAP_PRIVATE|MAP_NORESERVE, -1, 0);
|
||||
if (res == MAP_FAILED) {
|
||||
// Failed to reserve memory
|
||||
@ -53,7 +48,7 @@ bool ZVirtualMemoryManager::pd_reserve(zaddress_unsafe addr, size_t size) {
|
||||
return true;
|
||||
}
|
||||
|
||||
void ZVirtualMemoryManager::pd_unreserve(zaddress_unsafe addr, size_t size) {
|
||||
void ZVirtualMemoryReserver::pd_unreserve(zaddress_unsafe addr, size_t size) {
|
||||
const int res = munmap((void*)untype(addr), size);
|
||||
assert(res == 0, "Failed to unmap memory");
|
||||
}
|
||||
@ -24,6 +24,9 @@
|
||||
#include "gc/z/zInitialize.hpp"
|
||||
#include "gc/z/zSyscall_windows.hpp"
|
||||
|
||||
void ZVirtualMemoryReserverImpl_initialize();
|
||||
|
||||
void ZInitialize::pd_initialize() {
|
||||
ZSyscall::initialize();
|
||||
ZVirtualMemoryReserverImpl_initialize();
|
||||
}
|
||||
|
||||
@ -21,14 +21,24 @@
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/z/zNUMA.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zCPU.inline.hpp"
|
||||
#include "gc/z/zNUMA.inline.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
|
||||
void ZNUMA::pd_initialize() {
|
||||
_enabled = false;
|
||||
_count = 1;
|
||||
_count = !FLAG_IS_DEFAULT(ZFakeNUMA)
|
||||
? ZFakeNUMA
|
||||
: 1;
|
||||
}
|
||||
|
||||
uint32_t ZNUMA::id() {
|
||||
if (is_faked()) {
|
||||
// ZFakeNUMA testing
|
||||
return ZCPU::id() % ZFakeNUMA;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@ -33,9 +33,9 @@
|
||||
|
||||
class ZPhysicalMemoryBackingImpl : public CHeapObj<mtGC> {
|
||||
public:
|
||||
virtual size_t commit(zoffset offset, size_t size) = 0;
|
||||
virtual size_t uncommit(zoffset offset, size_t size) = 0;
|
||||
virtual void map(zaddress_unsafe addr, size_t size, zoffset offset) const = 0;
|
||||
virtual size_t commit(zbacking_offset offset, size_t size) = 0;
|
||||
virtual size_t uncommit(zbacking_offset offset, size_t size) = 0;
|
||||
virtual void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const = 0;
|
||||
virtual void unmap(zaddress_unsafe addr, size_t size) const = 0;
|
||||
};
|
||||
|
||||
@ -50,21 +50,29 @@ class ZPhysicalMemoryBackingSmallPages : public ZPhysicalMemoryBackingImpl {
|
||||
private:
|
||||
ZGranuleMap<HANDLE> _handles;
|
||||
|
||||
HANDLE get_handle(zoffset offset) const {
|
||||
HANDLE const handle = _handles.get(offset);
|
||||
static zoffset to_zoffset(zbacking_offset offset) {
|
||||
// A zbacking_offset is always a valid zoffset
|
||||
return zoffset(untype(offset));
|
||||
}
|
||||
|
||||
HANDLE get_handle(zbacking_offset offset) const {
|
||||
const zoffset z_offset = to_zoffset(offset);
|
||||
HANDLE const handle = _handles.get(z_offset);
|
||||
assert(handle != 0, "Should be set");
|
||||
return handle;
|
||||
}
|
||||
|
||||
void put_handle(zoffset offset, HANDLE handle) {
|
||||
void put_handle(zbacking_offset offset, HANDLE handle) {
|
||||
const zoffset z_offset = to_zoffset(offset);
|
||||
assert(handle != INVALID_HANDLE_VALUE, "Invalid handle");
|
||||
assert(_handles.get(offset) == 0, "Should be cleared");
|
||||
_handles.put(offset, handle);
|
||||
assert(_handles.get(z_offset) == 0, "Should be cleared");
|
||||
_handles.put(z_offset, handle);
|
||||
}
|
||||
|
||||
void clear_handle(zoffset offset) {
|
||||
assert(_handles.get(offset) != 0, "Should be set");
|
||||
_handles.put(offset, 0);
|
||||
void clear_handle(zbacking_offset offset) {
|
||||
const zoffset z_offset = to_zoffset(offset);
|
||||
assert(_handles.get(z_offset) != 0, "Should be set");
|
||||
_handles.put(z_offset, 0);
|
||||
}
|
||||
|
||||
public:
|
||||
@ -72,7 +80,7 @@ public:
|
||||
: ZPhysicalMemoryBackingImpl(),
|
||||
_handles(max_capacity) {}
|
||||
|
||||
size_t commit(zoffset offset, size_t size) {
|
||||
size_t commit(zbacking_offset offset, size_t size) {
|
||||
for (size_t i = 0; i < size; i += ZGranuleSize) {
|
||||
HANDLE const handle = ZMapper::create_and_commit_paging_file_mapping(ZGranuleSize);
|
||||
if (handle == 0) {
|
||||
@ -85,7 +93,7 @@ public:
|
||||
return size;
|
||||
}
|
||||
|
||||
size_t uncommit(zoffset offset, size_t size) {
|
||||
size_t uncommit(zbacking_offset offset, size_t size) {
|
||||
for (size_t i = 0; i < size; i += ZGranuleSize) {
|
||||
HANDLE const handle = get_handle(offset + i);
|
||||
clear_handle(offset + i);
|
||||
@ -95,7 +103,7 @@ public:
|
||||
return size;
|
||||
}
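A sketch of the per-granule handle bookkeeping in the hunks above: commit records one backing "section" handle per granule in a granule-indexed map, and uncommit later looks it up, clears it, and releases it. The std::unordered_map and integer handles are stand-ins for ZGranuleMap<HANDLE> and Windows section handles.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <unordered_map>

    const size_t GranuleSize = 2 * 1024 * 1024; // assumed 2M granules

    static std::unordered_map<uint64_t, int> handles; // granule offset -> handle stand-in
    static int next_handle = 1;

    static size_t commit_sketch(uint64_t offset, size_t size) {
      for (size_t i = 0; i < size; i += GranuleSize) {
        handles[offset + i] = next_handle++;  // one backing "section" per granule
      }
      return size;
    }

    static size_t uncommit_sketch(uint64_t offset, size_t size) {
      for (size_t i = 0; i < size; i += GranuleSize) {
        handles.erase(offset + i);            // look up, clear, then release the handle
      }
      return size;
    }

    int main() {
      commit_sketch(0, 4 * GranuleSize);
      uncommit_sketch(0, 4 * GranuleSize);
      std::printf("outstanding handles: %zu\n", handles.size()); // prints 0
      return 0;
    }
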
|
||||
|
||||
void map(zaddress_unsafe addr, size_t size, zoffset offset) const {
|
||||
void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const {
|
||||
assert(is_aligned(untype(offset), ZGranuleSize), "Misaligned");
|
||||
assert(is_aligned(untype(addr), ZGranuleSize), "Misaligned");
|
||||
assert(is_aligned(size, ZGranuleSize), "Misaligned");
|
||||
@ -149,7 +157,7 @@ public:
|
||||
: ZPhysicalMemoryBackingImpl(),
|
||||
_page_array(alloc_page_array(max_capacity)) {}
|
||||
|
||||
size_t commit(zoffset offset, size_t size) {
|
||||
size_t commit(zbacking_offset offset, size_t size) {
|
||||
const size_t index = untype(offset) >> ZGranuleSizeShift;
|
||||
const size_t npages = size >> ZGranuleSizeShift;
|
||||
|
||||
@ -167,7 +175,7 @@ public:
|
||||
return npages_res << ZGranuleSizeShift;
|
||||
}
|
||||
|
||||
size_t uncommit(zoffset offset, size_t size) {
|
||||
size_t uncommit(zbacking_offset offset, size_t size) {
|
||||
const size_t index = untype(offset) >> ZGranuleSizeShift;
|
||||
const size_t npages = size >> ZGranuleSizeShift;
|
||||
|
||||
@ -181,7 +189,7 @@ public:
|
||||
return npages_res << ZGranuleSizeShift;
|
||||
}
|
||||
|
||||
void map(zaddress_unsafe addr, size_t size, zoffset offset) const {
|
||||
void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const {
|
||||
const size_t npages = size >> ZGranuleSizeShift;
|
||||
const size_t index = untype(offset) >> ZGranuleSizeShift;
|
||||
|
||||
@ -222,21 +230,21 @@ void ZPhysicalMemoryBacking::warn_commit_limits(size_t max_capacity) const {
|
||||
// Does nothing
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryBacking::commit(zoffset offset, size_t length) {
|
||||
size_t ZPhysicalMemoryBacking::commit(zbacking_offset offset, size_t length, uint32_t /* numa_id - ignored */) {
|
||||
log_trace(gc, heap)("Committing memory: %zuM-%zuM (%zuM)",
|
||||
untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
|
||||
untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M);
|
||||
|
||||
return _impl->commit(offset, length);
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryBacking::uncommit(zoffset offset, size_t length) {
|
||||
size_t ZPhysicalMemoryBacking::uncommit(zbacking_offset offset, size_t length) {
|
||||
log_trace(gc, heap)("Uncommitting memory: %zuM-%zuM (%zuM)",
|
||||
untype(offset) / M, untype(to_zoffset_end(offset, length)) / M, length / M);
|
||||
untype(offset) / M, untype(to_zbacking_offset_end(offset, length)) / M, length / M);
|
||||
|
||||
return _impl->uncommit(offset, length);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zoffset offset) const {
|
||||
void ZPhysicalMemoryBacking::map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const {
|
||||
assert(is_aligned(untype(offset), ZGranuleSize), "Misaligned: " PTR_FORMAT, untype(offset));
|
||||
assert(is_aligned(untype(addr), ZGranuleSize), "Misaligned: " PTR_FORMAT, addr);
|
||||
assert(is_aligned(size, ZGranuleSize), "Misaligned: " PTR_FORMAT, size);
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -42,10 +42,10 @@ public:
|
||||
|
||||
void warn_commit_limits(size_t max_capacity) const;
|
||||
|
||||
size_t commit(zoffset offset, size_t length);
|
||||
size_t uncommit(zoffset offset, size_t length);
|
||||
size_t commit(zbacking_offset offset, size_t length, uint32_t numa_id);
|
||||
size_t uncommit(zbacking_offset offset, size_t length);
|
||||
|
||||
void map(zaddress_unsafe addr, size_t size, zoffset offset) const;
|
||||
void map(zaddress_unsafe addr, size_t size, zbacking_offset offset) const;
|
||||
void unmap(zaddress_unsafe addr, size_t size) const;
|
||||
};
|
||||
|
||||
|
||||
@ -26,25 +26,26 @@
|
||||
#include "gc/z/zLargePages.inline.hpp"
|
||||
#include "gc/z/zMapper_windows.hpp"
|
||||
#include "gc/z/zSyscall_windows.hpp"
|
||||
#include "gc/z/zValue.inline.hpp"
|
||||
#include "gc/z/zVirtualMemory.inline.hpp"
|
||||
#include "gc/z/zVirtualMemoryManager.inline.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
class ZVirtualMemoryManagerImpl : public CHeapObj<mtGC> {
|
||||
class ZVirtualMemoryReserverImpl : public CHeapObj<mtGC> {
|
||||
public:
|
||||
virtual void initialize_before_reserve() {}
|
||||
virtual void register_callbacks(ZMemoryManager* manager) {}
|
||||
virtual void register_callbacks(ZVirtualMemoryRegistry* registry) {}
|
||||
virtual bool reserve(zaddress_unsafe addr, size_t size) = 0;
|
||||
virtual void unreserve(zaddress_unsafe addr, size_t size) = 0;
|
||||
};
|
||||
|
||||
// Implements small pages (paged) support using placeholder reservation.
|
||||
//
|
||||
// When a memory area is free (kept by the virtual memory manager) a
|
||||
// When a memory area is available (kept by the virtual memory manager) a
|
||||
// single placeholder is covering that memory area. When memory is
|
||||
// allocated from the manager the placeholder is split into granule
|
||||
// removed from the registry the placeholder is split into granule
|
||||
// sized placeholders to allow mapping operations on that granularity.
|
||||
class ZVirtualMemoryManagerSmallPages : public ZVirtualMemoryManagerImpl {
|
||||
class ZVirtualMemoryReserverSmallPages : public ZVirtualMemoryReserverImpl {
|
||||
private:
|
||||
class PlaceholderCallbacks : public AllStatic {
|
||||
private:
|
||||
@ -84,7 +85,7 @@ private:
|
||||
// Called when a memory area is going to be handed out to be used.
|
||||
//
|
||||
// Splits the memory area into granule-sized placeholders.
|
||||
static void prepare_for_hand_out_callback(const ZMemory& area) {
|
||||
static void prepare_for_hand_out_callback(const ZVirtualMemory& area) {
|
||||
assert(is_aligned(area.size(), ZGranuleSize), "Must be granule aligned");
|
||||
|
||||
split_into_granule_sized_placeholders(area.start(), area.size());
|
||||
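A sketch of the hand-out path above: a single placeholder covering the area is carved into granule-sized placeholders so each granule can be mapped independently. The split helper and the left-to-right carve order are assumptions; the actual Windows placeholder calls live in zMapper_windows.cpp.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    const size_t GranuleSize = 2 * 1024 * 1024; // assumed 2M granules

    // Stand-in for the OS-specific split call (on Windows, a placeholder split).
    static void split_placeholder(uintptr_t addr, size_t size) {
      std::printf("placeholder carved at %#llx (%zuM)\n",
                  (unsigned long long)addr, size / (1024 * 1024));
    }

    // One placeholder covering [start, start + size) becomes one placeholder per granule,
    // so each granule can later be mapped and unmapped independently. The exact split
    // sequence ZGC uses may differ from this straightforward left-to-right carve.
    static void split_into_granule_sized_placeholders_sketch(uintptr_t start, size_t size) {
      for (size_t offset = 0; offset + GranuleSize < size; offset += GranuleSize) {
        split_placeholder(start + offset, GranuleSize);
      }
    }

    int main() {
      split_into_granule_sized_placeholders_sketch(0x100000000ULL, 4 * GranuleSize);
      return 0;
    }
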
@ -93,7 +94,7 @@ private:
|
||||
// Called when a memory area is handed back to the memory manager.
|
||||
//
|
||||
// Combines the granule-sized placeholders into one placeholder.
|
||||
static void prepare_for_hand_back_callback(const ZMemory& area) {
|
||||
static void prepare_for_hand_back_callback(const ZVirtualMemory& area) {
|
||||
assert(is_aligned(area.size(), ZGranuleSize), "Must be granule aligned");
|
||||
|
||||
coalesce_into_one_placeholder(area.start(), area.size());
|
||||
@ -103,7 +104,7 @@ private:
|
||||
// existing, adjacent memory area.
|
||||
//
|
||||
// Coalesces the underlying placeholders into one.
|
||||
static void grow_callback(const ZMemory& from, const ZMemory& to) {
|
||||
static void grow_callback(const ZVirtualMemory& from, const ZVirtualMemory& to) {
|
||||
assert(is_aligned(from.size(), ZGranuleSize), "Must be granule aligned");
|
||||
assert(is_aligned(to.size(), ZGranuleSize), "Must be granule aligned");
|
||||
assert(from != to, "Must have grown");
|
||||
@ -116,7 +117,7 @@ private:
|
||||
// memory area.
|
||||
//
|
||||
// Splits the memory into two placeholders.
|
||||
static void shrink_callback(const ZMemory& from, const ZMemory& to) {
|
||||
static void shrink_callback(const ZVirtualMemory& from, const ZVirtualMemory& to) {
|
||||
assert(is_aligned(from.size(), ZGranuleSize), "Must be granule aligned");
|
||||
assert(is_aligned(to.size(), ZGranuleSize), "Must be granule aligned");
|
||||
assert(from != to, "Must have shrunk");
|
||||
@ -129,7 +130,7 @@ private:
|
||||
}
|
||||
|
||||
public:
|
||||
static ZMemoryManager::Callbacks callbacks() {
|
||||
static ZVirtualMemoryRegistry::Callbacks callbacks() {
|
||||
// Each reserved virtual memory address area registered in _manager is
|
||||
// exactly covered by a single placeholder. Callbacks are installed so
|
||||
// that whenever a memory area changes, the corresponding placeholder
|
||||
@ -153,7 +154,7 @@ private:
|
||||
// See comment in zMapper_windows.cpp explaining why placeholders are
|
||||
// split into ZGranuleSize sized placeholders.
|
||||
|
||||
ZMemoryManager::Callbacks callbacks;
|
||||
ZVirtualMemoryRegistry::Callbacks callbacks;
|
||||
|
||||
callbacks._prepare_for_hand_out = &prepare_for_hand_out_callback;
|
||||
callbacks._prepare_for_hand_back = &prepare_for_hand_back_callback;
|
||||
@ -164,8 +165,8 @@ private:
|
||||
}
|
||||
};
|
||||
|
||||
virtual void register_callbacks(ZMemoryManager* manager) {
|
||||
manager->register_callbacks(PlaceholderCallbacks::callbacks());
|
||||
virtual void register_callbacks(ZVirtualMemoryRegistry* registry) {
|
||||
registry->register_callbacks(PlaceholderCallbacks::callbacks());
|
||||
}
|
||||
|
||||
virtual bool reserve(zaddress_unsafe addr, size_t size) {
|
||||
@ -185,12 +186,8 @@ private:
|
||||
// ZPhysicalMemory layer needs access to the section
|
||||
HANDLE ZAWESection;
|
||||
|
||||
class ZVirtualMemoryManagerLargePages : public ZVirtualMemoryManagerImpl {
|
||||
class ZVirtualMemoryReserverLargePages : public ZVirtualMemoryReserverImpl {
|
||||
private:
|
||||
virtual void initialize_before_reserve() {
|
||||
ZAWESection = ZMapper::create_shared_awe_section();
|
||||
}
|
||||
|
||||
virtual bool reserve(zaddress_unsafe addr, size_t size) {
|
||||
const zaddress_unsafe res = ZMapper::reserve_for_shared_awe(ZAWESection, addr, size);
|
||||
|
||||
@ -201,27 +198,33 @@ private:
|
||||
virtual void unreserve(zaddress_unsafe addr, size_t size) {
|
||||
ZMapper::unreserve_for_shared_awe(addr, size);
|
||||
}
|
||||
|
||||
public:
|
||||
ZVirtualMemoryReserverLargePages() {
|
||||
ZAWESection = ZMapper::create_shared_awe_section();
|
||||
}
|
||||
};
|
||||
|
||||
static ZVirtualMemoryManagerImpl* _impl = nullptr;
|
||||
static ZVirtualMemoryReserverImpl* _impl = nullptr;
|
||||
|
||||
void ZVirtualMemoryReserverImpl_initialize() {
|
||||
assert(_impl == nullptr, "Should only initialize once");
|
||||
|
||||
void ZVirtualMemoryManager::pd_initialize_before_reserve() {
|
||||
if (ZLargePages::is_enabled()) {
|
||||
_impl = new ZVirtualMemoryManagerLargePages();
|
||||
_impl = new ZVirtualMemoryReserverLargePages();
|
||||
} else {
|
||||
_impl = new ZVirtualMemoryManagerSmallPages();
|
||||
_impl = new ZVirtualMemoryReserverSmallPages();
|
||||
}
|
||||
_impl->initialize_before_reserve();
|
||||
}
|
||||
|
||||
void ZVirtualMemoryManager::pd_register_callbacks(ZMemoryManager* manager) {
|
||||
_impl->register_callbacks(manager);
|
||||
void ZVirtualMemoryReserver::pd_register_callbacks(ZVirtualMemoryRegistry* registry) {
|
||||
_impl->register_callbacks(registry);
|
||||
}
|
||||
|
||||
bool ZVirtualMemoryManager::pd_reserve(zaddress_unsafe addr, size_t size) {
|
||||
bool ZVirtualMemoryReserver::pd_reserve(zaddress_unsafe addr, size_t size) {
|
||||
return _impl->reserve(addr, size);
|
||||
}
|
||||
|
||||
void ZVirtualMemoryManager::pd_unreserve(zaddress_unsafe addr, size_t size) {
|
||||
void ZVirtualMemoryReserver::pd_unreserve(zaddress_unsafe addr, size_t size) {
|
||||
_impl->unreserve(addr, size);
|
||||
}
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -29,8 +29,11 @@
|
||||
#include "gc/z/zForwarding.hpp"
|
||||
#include "gc/z/zGranuleMap.hpp"
|
||||
#include "gc/z/zHeap.hpp"
|
||||
#include "gc/z/zNUMA.hpp"
|
||||
#include "gc/z/zPageAllocator.hpp"
|
||||
#include "gc/z/zPageType.hpp"
|
||||
#include "gc/z/zValue.hpp"
|
||||
#include "gc/z/zVirtualMemory.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
// Expose some ZGC globals to the SA agent.
|
||||
@ -61,6 +64,7 @@ public:
|
||||
typedef ZGranuleMap<ZPage*> ZGranuleMapForPageTable;
|
||||
typedef ZGranuleMap<ZForwarding*> ZGranuleMapForForwarding;
|
||||
typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwarding;
|
||||
typedef ZValue<ZPerNUMAStorage, ZPartition> ZPerNUMAZPartition;
|
||||
|
||||
#define VM_STRUCTS_Z(nonstatic_field, volatile_nonstatic_field, static_field) \
|
||||
static_field(ZGlobalsForVMStructs, _instance_p, ZGlobalsForVMStructs*) \
|
||||
@ -87,8 +91,13 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
|
||||
volatile_nonstatic_field(ZPage, _top, zoffset_end) \
|
||||
\
|
||||
nonstatic_field(ZPageAllocator, _max_capacity, const size_t) \
|
||||
volatile_nonstatic_field(ZPageAllocator, _capacity, size_t) \
|
||||
volatile_nonstatic_field(ZPageAllocator, _used, size_t) \
|
||||
nonstatic_field(ZPageAllocator, _partitions, ZPerNUMAZPartition) \
|
||||
\
|
||||
static_field(ZNUMA, _count, uint32_t) \
|
||||
nonstatic_field(ZPerNUMAZPartition, _addr, const uintptr_t) \
|
||||
\
|
||||
volatile_nonstatic_field(ZPartition, _capacity, size_t) \
|
||||
nonstatic_field(ZPartition, _used, size_t) \
|
||||
\
|
||||
nonstatic_field(ZPageTable, _map, ZGranuleMapForPageTable) \
|
||||
\
|
||||
@ -97,8 +106,8 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
|
||||
\
|
||||
nonstatic_field(ZForwardingTable, _map, ZGranuleMapForForwarding) \
|
||||
\
|
||||
nonstatic_field(ZVirtualMemory, _start, const zoffset) \
|
||||
nonstatic_field(ZVirtualMemory, _end, const zoffset_end) \
|
||||
nonstatic_field(ZVirtualMemory, _start, const zoffset_end) \
|
||||
nonstatic_field(ZVirtualMemory, _size, const size_t) \
|
||||
\
|
||||
nonstatic_field(ZForwarding, _virtual, const ZVirtualMemory) \
|
||||
nonstatic_field(ZForwarding, _object_alignment_shift, const size_t) \
|
||||
@ -134,6 +143,9 @@ typedef ZAttachedArray<ZForwarding, ZForwardingEntry> ZAttachedArrayForForwardin
|
||||
declare_toplevel_type(ZPageType) \
|
||||
declare_toplevel_type(ZPageAllocator) \
|
||||
declare_toplevel_type(ZPageTable) \
|
||||
declare_toplevel_type(ZPartition) \
|
||||
declare_toplevel_type(ZNUMA) \
|
||||
declare_toplevel_type(ZPerNUMAZPartition) \
|
||||
declare_toplevel_type(ZAttachedArrayForForwarding) \
|
||||
declare_toplevel_type(ZGranuleMapForPageTable) \
|
||||
declare_toplevel_type(ZGranuleMapForForwarding) \
|
||||
|
||||
@ -24,6 +24,7 @@
|
||||
#include "gc/shared/barrierSet.hpp"
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zNUMA.inline.hpp"
|
||||
#include "gc/z/zVerify.hpp"
|
||||
#include "oops/oopsHierarchy.hpp"
|
||||
#include "runtime/java.hpp"
|
||||
@ -36,6 +37,10 @@ size_t ZAddressOffsetBits;
|
||||
uintptr_t ZAddressOffsetMask;
|
||||
size_t ZAddressOffsetMax;
|
||||
|
||||
size_t ZBackingOffsetMax;
|
||||
|
||||
uint32_t ZBackingIndexMax;
|
||||
|
||||
uintptr_t ZPointerRemapped;
|
||||
uintptr_t ZPointerRemappedYoungMask;
|
||||
uintptr_t ZPointerRemappedOldMask;
|
||||
@@ -145,3 +150,10 @@ void ZGlobalsPointers::flip_old_relocate_start() {
   ZPointerRemappedOldMask ^= ZPointerRemappedMask;
   set_good_masks();
 }
+
+size_t ZGlobalsPointers::min_address_offset_request() {
+  // See ZVirtualMemoryReserver for logic around setting up the heap for NUMA
+  const size_t desired_for_heap = MaxHeapSize * ZVirtualToPhysicalRatio;
+  const size_t desired_for_numa_multiplier = ZNUMA::count() > 1 ? 2 : 1;
+  return round_up_power_of_2(desired_for_heap * desired_for_numa_multiplier);
+}

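A worked example of the request computation above, under assumed values (a 10G MaxHeapSize, a virtual-to-physical ratio of 16, and two NUMA nodes); round_up_power_of_2 is re-implemented locally for the sketch.

    #include <cstdint>
    #include <cstdio>

    static uint64_t round_up_power_of_2_u64(uint64_t value) {
      uint64_t result = 1;
      while (result < value) {
        result <<= 1;
      }
      return result;
    }

    int main() {
      const uint64_t G = 1024ull * 1024 * 1024;
      const uint64_t max_heap_size = 10 * G;          // assumed -Xmx10g
      const uint64_t virtual_to_physical_ratio = 16;  // assumed ZVirtualToPhysicalRatio
      const uint64_t numa_nodes = 2;                  // assumed two NUMA nodes

      const uint64_t desired_for_heap = max_heap_size * virtual_to_physical_ratio; // 160G
      const uint64_t multiplier = numa_nodes > 1 ? 2 : 1;                          // doubled for NUMA
      const uint64_t request = round_up_power_of_2_u64(desired_for_heap * multiplier);

      std::printf("requested address space = %lluG\n",
                  (unsigned long long)(request / G)); // 320G rounds up to 512G
      return 0;
    }
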
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -39,6 +39,12 @@ const size_t ZAddressOffsetShift = 0;
|
||||
extern uintptr_t ZAddressOffsetMask;
|
||||
extern size_t ZAddressOffsetMax;
|
||||
|
||||
// Describes the maximal offset inside the backing storage.
|
||||
extern size_t ZBackingOffsetMax;
|
||||
|
||||
// Describes the maximal granule index inside the backing storage.
|
||||
extern uint32_t ZBackingIndexMax;
|
||||
|
||||
// Layout of metadata bits in colored pointer / zpointer.
|
||||
//
|
||||
// A zpointer is a combination of the address bits (heap base bit + offset)
|
||||
@ -223,16 +229,25 @@ const int ZPointerStoreGoodMaskLowOrderBitsOffset = LITTLE_ENDIAN_ONLY(0
|
||||
|
||||
// Offsets
|
||||
// - Virtual address range offsets
|
||||
// - Physical memory offsets
|
||||
enum class zoffset : uintptr_t {};
|
||||
enum class zoffset : uintptr_t { invalid = UINTPTR_MAX };
|
||||
// Offsets including end of offset range
|
||||
enum class zoffset_end : uintptr_t {};
|
||||
enum class zoffset_end : uintptr_t { invalid = UINTPTR_MAX };
|
||||
|
||||
// - Physical memory segment offsets
|
||||
enum class zbacking_offset : uintptr_t {};
|
||||
// Offsets including end of offset range
|
||||
enum class zbacking_offset_end : uintptr_t {};
|
||||
|
||||
// - Physical memory segment indices
|
||||
enum class zbacking_index : uint32_t { zero = 0, invalid = UINT32_MAX };
|
||||
// Offsets including end of indices range
|
||||
enum class zbacking_index_end : uint32_t { zero = 0, invalid = UINT32_MAX };
|
||||
|
||||
// Colored oop
|
||||
enum class zpointer : uintptr_t { null = 0 };
|
||||
enum class zpointer : uintptr_t { null = 0 };
|
||||
|
||||
// Uncolored oop - safe to dereference
|
||||
enum class zaddress : uintptr_t { null = 0 };
|
||||
enum class zaddress : uintptr_t { null = 0 };
|
||||
|
||||
// Uncolored oop - not safe to dereference, could point to uncommitted memory
|
||||
enum class zaddress_unsafe : uintptr_t { null = 0 };
|
||||
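A sketch of why these offset types are distinct enum classes: the compiler rejects code that mixes a backing-store offset with a heap-range offset. The type and function names below are simplified stand-ins, not the ZGC declarations.

    #include <cstdint>

    enum class heap_offset : uintptr_t {};
    enum class backing_offset : uintptr_t {};

    static void map_at(heap_offset offset) { (void)offset; }        // expects a heap offset
    static void commit_at(backing_offset offset) { (void)offset; }  // expects a backing offset

    int main() {
      const heap_offset h = static_cast<heap_offset>(0x200000);
      const backing_offset b = static_cast<backing_offset>(0x200000);
      map_at(h);       // OK
      commit_at(b);    // OK
      // map_at(b);    // compile error: no conversion from backing_offset to heap_offset
      return 0;
    }
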
@ -307,6 +322,8 @@ public:
|
||||
static void flip_young_relocate_start();
|
||||
static void flip_old_mark_start();
|
||||
static void flip_old_relocate_start();
|
||||
|
||||
static size_t min_address_offset_request();
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZADDRESS_HPP
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,14 +27,124 @@
|
||||
#include "gc/z/zAddress.hpp"
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "oops/oop.hpp"
|
||||
#include "oops/oopsHierarchy.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/checkedCast.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
#include CPU_HEADER_INLINE(gc/z/zAddress)
|
||||
|
||||
#include <type_traits>
|
||||
|
||||
// Offset Operator Macro
|
||||
// Creates operators for the offset, offset_end style types
|
||||
|
||||
#define CREATE_ZOFFSET_OPERATORS(offset_type) \
|
||||
\
|
||||
/* Arithmetic operators for offset_type */ \
|
||||
\
|
||||
inline offset_type operator+(offset_type offset, size_t size) { \
|
||||
const auto size_value = checked_cast<std::underlying_type_t<offset_type>>(size); \
|
||||
return to_##offset_type(untype(offset) + size_value); \
|
||||
} \
|
||||
\
|
||||
inline offset_type& operator+=(offset_type& offset, size_t size) { \
|
||||
const auto size_value = checked_cast<std::underlying_type_t<offset_type>>(size); \
|
||||
offset = to_##offset_type(untype(offset) + size_value); \
|
||||
return offset; \
|
||||
} \
|
||||
\
|
||||
inline offset_type operator-(offset_type offset, size_t size) { \
|
||||
const auto size_value = checked_cast<std::underlying_type_t<offset_type>>(size); \
|
||||
return to_##offset_type(untype(offset) - size_value); \
|
||||
} \
|
||||
\
|
||||
inline size_t operator-(offset_type first, offset_type second) { \
|
||||
return untype(first - untype(second)); \
|
||||
} \
|
||||
\
|
||||
inline offset_type& operator-=(offset_type& offset, size_t size) { \
|
||||
const auto size_value = checked_cast<std::underlying_type_t<offset_type>>(size); \
|
||||
offset = to_##offset_type(untype(offset) - size_value); \
|
||||
return offset; \
|
||||
} \
|
||||
\
|
||||
/* Arithmetic operators for offset_type##_end */ \
|
||||
\
|
||||
inline offset_type##_end operator+(offset_type##_end offset, size_t size) { \
|
||||
const auto size_value = checked_cast<std::underlying_type_t<offset_type##_end>>(size); \
|
||||
return to_##offset_type##_end(untype(offset) + size_value); \
|
||||
} \
|
||||
\
|
||||
inline offset_type##_end& operator+=(offset_type##_end& offset, size_t size) { \
|
||||
const auto size_value = checked_cast<std::underlying_type_t<offset_type##_end>>(size); \
|
||||
offset = to_##offset_type##_end(untype(offset) + size_value); \
|
||||
return offset; \
|
||||
} \
|
||||
\
|
||||
inline offset_type##_end operator-(offset_type##_end first, size_t size) { \
|
||||
const auto size_value = checked_cast<std::underlying_type_t<offset_type##_end>>(size); \
|
||||
return to_##offset_type##_end(untype(first) - size_value); \
|
||||
} \
|
||||
\
|
||||
inline size_t operator-(offset_type##_end first, offset_type##_end second) { \
|
||||
return untype(first - untype(second)); \
|
||||
} \
|
||||
\
|
||||
inline offset_type##_end& operator-=(offset_type##_end& offset, size_t size) { \
|
||||
const auto size_value = checked_cast<std::underlying_type_t<offset_type##_end>>(size); \
|
||||
offset = to_##offset_type##_end(untype(offset) - size_value); \
|
||||
return offset; \
|
||||
} \
|
||||
\
|
||||
/* Arithmetic operators for offset_type cross offset_type##_end */ \
|
||||
\
|
||||
inline size_t operator-(offset_type##_end first, offset_type second) { \
|
||||
return untype(first - untype(second)); \
|
||||
} \
|
||||
\
|
||||
/* Logical operators for offset_type cross offset_type##_end */ \
|
||||
\
|
||||
inline bool operator!=(offset_type first, offset_type##_end second) { \
|
||||
return untype(first) != untype(second); \
|
||||
} \
|
||||
\
|
||||
inline bool operator!=(offset_type##_end first, offset_type second) { \
|
||||
return untype(first) != untype(second); \
|
||||
} \
|
||||
\
|
||||
inline bool operator==(offset_type first, offset_type##_end second) { \
|
||||
return untype(first) == untype(second); \
|
||||
} \
|
||||
\
|
||||
inline bool operator==(offset_type##_end first, offset_type second) { \
|
||||
return untype(first) == untype(second); \
|
||||
} \
|
||||
\
|
||||
inline bool operator<(offset_type##_end first, offset_type second) { \
|
||||
return untype(first) < untype(second); \
|
||||
} \
|
||||
\
|
||||
inline bool operator<(offset_type first, offset_type##_end second) { \
|
||||
return untype(first) < untype(second); \
|
||||
} \
|
||||
\
|
||||
inline bool operator<=(offset_type##_end first, offset_type second) { \
|
||||
return untype(first) <= untype(second); \
|
||||
} \
|
||||
\
|
||||
inline bool operator>(offset_type first, offset_type##_end second) { \
|
||||
return untype(first) > untype(second); \
|
||||
} \
|
||||
\
|
||||
inline bool operator>=(offset_type first, offset_type##_end second) { \
|
||||
return untype(first) >= untype(second); \
|
||||
} \
|
||||
|
||||
// zoffset functions
|
||||
|
||||
inline uintptr_t untype(zoffset offset) {
|
||||
@ -59,31 +169,6 @@ inline zoffset to_zoffset(zoffset_end offset) {
|
||||
return to_zoffset(value);
|
||||
}
|
||||
|
||||
inline zoffset operator+(zoffset offset, size_t size) {
|
||||
return to_zoffset(untype(offset) + size);
|
||||
}
|
||||
|
||||
inline zoffset& operator+=(zoffset& offset, size_t size) {
|
||||
offset = to_zoffset(untype(offset) + size);
|
||||
return offset;
|
||||
}
|
||||
|
||||
inline zoffset operator-(zoffset offset, size_t size) {
|
||||
const uintptr_t value = untype(offset) - size;
|
||||
return to_zoffset(value);
|
||||
}
|
||||
|
||||
inline size_t operator-(zoffset left, zoffset right) {
|
||||
const size_t diff = untype(left) - untype(right);
|
||||
assert(diff < ZAddressOffsetMax, "Underflow");
|
||||
return diff;
|
||||
}
|
||||
|
||||
inline zoffset& operator-=(zoffset& offset, size_t size) {
|
||||
offset = to_zoffset(untype(offset) - size);
|
||||
return offset;
|
||||
}
|
||||
|
||||
inline bool to_zoffset_end(zoffset_end* result, zoffset_end start, size_t size) {
|
||||
const uintptr_t value = untype(start) + size;
|
||||
if (value <= ZAddressOffsetMax) {
|
||||
@ -109,62 +194,124 @@ inline zoffset_end to_zoffset_end(zoffset offset) {
|
||||
return zoffset_end(untype(offset));
|
||||
}
|
||||
|
||||
inline bool operator!=(zoffset first, zoffset_end second) {
|
||||
return untype(first) != untype(second);
|
||||
CREATE_ZOFFSET_OPERATORS(zoffset)
|
||||
|
||||
// zbacking_offset functions
|
||||
|
||||
inline uintptr_t untype(zbacking_offset offset) {
|
||||
const uintptr_t value = static_cast<uintptr_t>(offset);
|
||||
assert(value < ZBackingOffsetMax, "must have no other bits");
|
||||
return value;
|
||||
}
|
||||
|
||||
inline bool operator!=(zoffset_end first, zoffset second) {
|
||||
return untype(first) != untype(second);
|
||||
inline uintptr_t untype(zbacking_offset_end offset) {
|
||||
const uintptr_t value = static_cast<uintptr_t>(offset);
|
||||
assert(value <= ZBackingOffsetMax, "must have no other bits");
|
||||
return value;
|
||||
}
|
||||
|
||||
inline bool operator==(zoffset first, zoffset_end second) {
|
||||
return untype(first) == untype(second);
|
||||
inline zbacking_offset to_zbacking_offset(uintptr_t value) {
|
||||
assert(value < ZBackingOffsetMax, "must have no other bits");
|
||||
return zbacking_offset(value);
|
||||
}
|
||||
|
||||
inline bool operator==(zoffset_end first, zoffset second) {
|
||||
return untype(first) == untype(second);
|
||||
inline zbacking_offset to_zbacking_offset(zbacking_offset_end offset) {
|
||||
const uintptr_t value = untype(offset);
|
||||
return to_zbacking_offset(value);
|
||||
}
|
||||
|
||||
inline bool operator<(zoffset_end first, zoffset second) {
|
||||
return untype(first) < untype(second);
|
||||
inline zbacking_offset_end to_zbacking_offset_end(zbacking_offset start, size_t size) {
|
||||
const uintptr_t value = untype(start) + size;
|
||||
assert(value <= ZBackingOffsetMax, "Overflow start: " PTR_FORMAT " size: " PTR_FORMAT " value: " PTR_FORMAT,
|
||||
untype(start), size, value);
|
||||
return zbacking_offset_end(value);
|
||||
}
|
||||
|
||||
inline bool operator<(zoffset first, zoffset_end second) {
|
||||
return untype(first) < untype(second);
|
||||
inline zbacking_offset_end to_zbacking_offset_end(uintptr_t value) {
|
||||
assert(value <= ZBackingOffsetMax, "must have no other bits");
|
||||
return zbacking_offset_end(value);
|
||||
}
|
||||
|
||||
inline bool operator<=(zoffset_end first, zoffset second) {
|
||||
return untype(first) <= untype(second);
|
||||
inline zbacking_offset_end to_zbacking_offset_end(zbacking_offset offset) {
|
||||
return zbacking_offset_end(untype(offset));
|
||||
}
|
||||
|
||||
inline bool operator>(zoffset first, zoffset_end second) {
|
||||
return untype(first) > untype(second);
|
||||
CREATE_ZOFFSET_OPERATORS(zbacking_offset)
|
||||
|
||||
// zbacking_index functions
|
||||
|
||||
inline uint32_t untype(zbacking_index index) {
|
||||
const uint32_t value = static_cast<uint32_t>(index);
|
||||
assert(value < ZBackingIndexMax, "must have no other bits");
|
||||
return value;
|
||||
}
|
||||
|
||||
inline bool operator>=(zoffset first, zoffset_end second) {
|
||||
return untype(first) >= untype(second);
|
||||
inline uint32_t untype(zbacking_index_end index) {
|
||||
const uint32_t value = static_cast<uint32_t>(index);
|
||||
assert(value <= ZBackingIndexMax, "must have no other bits");
|
||||
return value;
|
||||
}
|
||||
|
||||
inline size_t operator-(zoffset_end first, zoffset second) {
|
||||
return untype(first) - untype(second);
|
||||
inline zbacking_index to_zbacking_index(uint32_t value) {
|
||||
assert(value < ZBackingIndexMax, "must have no other bits");
|
||||
return zbacking_index(value);
|
||||
}
|
||||
|
||||
inline zoffset_end operator-(zoffset_end first, size_t second) {
|
||||
return to_zoffset_end(untype(first) - second);
|
||||
inline zbacking_index to_zbacking_index(zbacking_index_end index) {
|
||||
const uint32_t value = untype(index);
|
||||
return to_zbacking_index(value);
|
||||
}
|
||||
|
||||
inline size_t operator-(zoffset_end first, zoffset_end second) {
|
||||
return untype(first) - untype(second);
|
||||
inline zbacking_index_end to_zbacking_index_end(zbacking_index start, size_t size) {
|
||||
const uint32_t start_value = untype(start);
|
||||
const uint32_t value = start_value + checked_cast<uint32_t>(size);
|
||||
assert(value <= ZBackingIndexMax && start_value <= value,
|
||||
"Overflow start: %x size: %zu value: %x", start_value, size, value);
|
||||
return zbacking_index_end(value);
|
||||
}
|
||||
|
||||
inline zoffset_end& operator-=(zoffset_end& offset, size_t size) {
|
||||
offset = to_zoffset_end(untype(offset) - size);
|
||||
return offset;
|
||||
inline zbacking_index_end to_zbacking_index_end(uint32_t value) {
|
||||
assert(value <= ZBackingIndexMax, "must have no other bits");
|
||||
return zbacking_index_end(value);
|
||||
}
|
||||
|
||||
inline zoffset_end& operator+=(zoffset_end& offset, size_t size) {
|
||||
offset = to_zoffset_end(untype(offset) + size);
|
||||
return offset;
|
||||
inline zbacking_index_end to_zbacking_index_end(zbacking_index index) {
|
||||
return zbacking_index_end(untype(index));
|
||||
}
|
||||
|
||||
CREATE_ZOFFSET_OPERATORS(zbacking_index)
|
||||
|
||||
#undef CREATE_ZOFFSET_OPERATORS
|
||||
|
||||
// zbacking_offset <-> zbacking_index conversion functions
|
||||
|
||||
inline zbacking_index to_zbacking_index(zbacking_offset offset) {
|
||||
const uintptr_t value = untype(offset);
|
||||
assert(is_aligned(value, ZGranuleSize), "must be granule aligned");
|
||||
return to_zbacking_index((uint32_t)(value >> ZGranuleSizeShift));
|
||||
}
|
||||
|
||||
inline zbacking_offset to_zbacking_offset(zbacking_index index) {
|
||||
const uintptr_t value = untype(index);
|
||||
return to_zbacking_offset(value << ZGranuleSizeShift);
|
||||
}
|
||||
|
||||
// ZRange helper functions
|
||||
|
||||
inline zoffset to_start_type(zoffset_end offset) {
|
||||
return to_zoffset(offset);
|
||||
}
|
||||
|
||||
inline zbacking_index to_start_type(zbacking_index_end offset) {
|
||||
return to_zbacking_index(offset);
|
||||
}
|
||||
|
||||
inline zoffset_end to_end_type(zoffset start, size_t size) {
|
||||
return to_zoffset_end(start, size);
|
||||
}
|
||||
|
||||
inline zbacking_index_end to_end_type(zbacking_index start, size_t size) {
|
||||
return to_zbacking_index_end(start, size);
|
||||
}
|
||||
|
||||
// zpointer functions
|
||||
|
||||
@ -22,11 +22,13 @@
 */

#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zAddressSpaceLimit.hpp"
#include "gc/z/zGlobals.hpp"
#include "runtime/globals.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/ostream.hpp"

static size_t address_space_limit() {
  size_t limit = 0;
@ -44,3 +46,13 @@ size_t ZAddressSpaceLimit::heap() {
  const size_t limit = address_space_limit() / MaxVirtMemFraction;
  return align_up(limit, ZGranuleSize);
}

void ZAddressSpaceLimit::print_limits() {
  const size_t limit = address_space_limit();

  if (limit == SIZE_MAX) {
    log_info_p(gc, init)("Address Space Size: unlimited");
  } else {
    log_info_p(gc, init)("Address Space Size: limited (" EXACTFMT ")", EXACTFMTARGS(limit));
  }
}

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -30,6 +30,8 @@
|
||||
class ZAddressSpaceLimit : public AllStatic {
|
||||
public:
|
||||
static size_t heap();
|
||||
|
||||
static void print_limits();
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZADDRESSSPACELIMIT_HPP
|
||||
|
||||
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -31,25 +31,22 @@
// Allocation flags layout
// -----------------------
//
//  7    2 1 0
// +-----+-+-+-+
// |00000|1|1|1|
// +-----+-+-+-+
// |     | | |
// |     | | * 0-0 Non-Blocking Flag (1-bit)
// |     | |
// |     | * 1-1 GC Relocation Flag (1-bit)
// |     |
// |     * 2-2 Low Address Flag (1-bit)
//  7     1 0
// +------+-+-+
// |000000|1|1|
// +------+-+-+
// |      | |
// |      | * 0-0 Non-Blocking Flag (1-bit)
// |      |
// |      * 1-1 GC Relocation Flag (1-bit)
// |
// * 7-3 Unused (5-bits)
// * 7-2 Unused (6-bits)
//

class ZAllocationFlags {
private:
  typedef ZBitField<uint8_t, bool, 0, 1> field_non_blocking;
  typedef ZBitField<uint8_t, bool, 1, 1> field_gc_relocation;
  typedef ZBitField<uint8_t, bool, 2, 1> field_low_address;

  uint8_t _flags;

@ -65,10 +62,6 @@ public:
    _flags |= field_gc_relocation::encode(true);
  }

  void set_low_address() {
    _flags |= field_low_address::encode(true);
  }

  bool non_blocking() const {
    return field_non_blocking::decode(_flags);
  }
@ -76,10 +69,6 @@ public:
  bool gc_relocation() const {
    return field_gc_relocation::decode(_flags);
  }

  bool low_address() const {
    return field_low_address::decode(_flags);
  }
};

#endif // SHARE_GC_Z_ZALLOCATIONFLAGS_HPP

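// Illustrative sketch (not part of this change): the typical encode/decode
// flow through the two remaining ZBitField fields above. The call sites are
// hypothetical and only show the intended usage pattern.
//
//   ZAllocationFlags flags;
//   flags.set_non_blocking();    // encodes bit 0
//   flags.set_gc_relocation();   // encodes bit 1
//   if (flags.non_blocking()) {
//     // allocation may return a null page instead of stalling the caller
//   }
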
@ -122,7 +122,7 @@ void ZArguments::initialize() {
  GCArguments::initialize();

  // Enable NUMA by default
  if (FLAG_IS_DEFAULT(UseNUMA)) {
  if (FLAG_IS_DEFAULT(UseNUMA) && FLAG_IS_DEFAULT(ZFakeNUMA)) {
    FLAG_SET_DEFAULT(UseNUMA, true);
  }


@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -32,9 +32,49 @@
|
||||
|
||||
#include <type_traits>
|
||||
|
||||
template<typename T> class ZArray;
|
||||
class ZLock;
|
||||
|
||||
template <typename T> using ZArray = GrowableArrayCHeap<T, mtGC>;
|
||||
template <typename T>
|
||||
class ZArraySlice : public GrowableArrayView<T> {
|
||||
friend class ZArray<T>;
|
||||
friend class ZArray<std::remove_const_t<T>>;
|
||||
friend class ZArraySlice<std::remove_const_t<T>>;
|
||||
friend class ZArraySlice<const T>;
|
||||
|
||||
private:
|
||||
ZArraySlice(T* data, int len);
|
||||
|
||||
public:
|
||||
ZArraySlice<T> slice_front(int end);
|
||||
ZArraySlice<const T> slice_front(int end) const;
|
||||
|
||||
ZArraySlice<T> slice_back(int start);
|
||||
ZArraySlice<const T> slice_back(int start) const;
|
||||
|
||||
ZArraySlice<T> slice(int start, int end);
|
||||
ZArraySlice<const T> slice(int start, int end) const;
|
||||
|
||||
operator ZArraySlice<const T>() const;
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
class ZArray : public GrowableArrayCHeap<T, mtGC> {
|
||||
public:
|
||||
using GrowableArrayCHeap<T, mtGC>::GrowableArrayCHeap;
|
||||
|
||||
ZArraySlice<T> slice_front(int end);
|
||||
ZArraySlice<const T> slice_front(int end) const;
|
||||
|
||||
ZArraySlice<T> slice_back(int start);
|
||||
ZArraySlice<const T> slice_back(int start) const;
|
||||
|
||||
ZArraySlice<T> slice(int start, int end);
|
||||
ZArraySlice<const T> slice(int start, int end) const;
|
||||
|
||||
operator ZArraySlice<T>();
|
||||
operator ZArraySlice<const T>() const;
|
||||
};
|
||||
|
||||
template <typename T, bool Parallel>
|
||||
class ZArrayIteratorImpl : public StackObj {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -29,6 +29,93 @@
|
||||
#include "gc/z/zLock.inline.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<T>::ZArraySlice(T* data, int len)
|
||||
: GrowableArrayView<T>(data, len, len) {}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<T> ZArraySlice<T>::slice_front(int end) {
|
||||
return slice(0, end);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<const T> ZArraySlice<T>::slice_front(int end) const {
|
||||
return slice(0, end);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<T> ZArraySlice<T>::slice_back(int start) {
|
||||
return slice(start, this->_len);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<const T> ZArraySlice<T>::slice_back(int start) const {
|
||||
return slice(start, this->_len);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<T> ZArraySlice<T>::slice(int start, int end) {
|
||||
assert(0 <= start && start <= end && end <= this->_len,
|
||||
"slice called with invalid range (%d, %d) for length %d", start, end, this->_len);
|
||||
return ZArraySlice<T>(this->_data + start, end - start);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<const T> ZArraySlice<T>::slice(int start, int end) const {
|
||||
assert(0 <= start && start <= end && end <= this->_len,
|
||||
"slice called with invalid range (%d, %d) for length %d", start, end, this->_len);
|
||||
return ZArraySlice<const T>(this->_data + start, end - start);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<T>::operator ZArraySlice<const T>() const {
|
||||
return slice(0, this->_len);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<T> ZArray<T>::slice_front(int end) {
|
||||
return slice(0, end);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<const T> ZArray<T>::slice_front(int end) const {
|
||||
return slice(0, end);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<T> ZArray<T>::slice_back(int start) {
|
||||
return slice(start, this->_len);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<const T> ZArray<T>::slice_back(int start) const {
|
||||
return slice(start, this->_len);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<T> ZArray<T>::slice(int start, int end) {
|
||||
assert(0 <= start && start <= end && end <= this->_len,
|
||||
"slice called with invalid range (%d, %d) for length %d", start, end, this->_len);
|
||||
return ZArraySlice<T>(this->_data + start, end - start);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArraySlice<const T> ZArray<T>::slice(int start, int end) const {
|
||||
assert(0 <= start && start <= end && end <= this->_len,
|
||||
"slice called with invalid range (%d, %d) for length %d", start, end, this->_len);
|
||||
return ZArraySlice<const T>(this->_data + start, end - start);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArray<T>::operator ZArraySlice<T>() {
|
||||
return slice(0, this->_len);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
ZArray<T>::operator ZArraySlice<const T>() const {
|
||||
return slice(0, this->_len);
|
||||
}
|
||||
|
||||
template <typename T, bool Parallel>
|
||||
inline bool ZArrayIteratorImpl<T, Parallel>::next_serial(size_t* index) {
|
||||
if (_next == _end) {
|
||||
|
||||
@ -53,6 +53,7 @@
|
||||
#include "runtime/stackWatermarkSet.hpp"
|
||||
#include "services/memoryUsage.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
ZCollectedHeap* ZCollectedHeap::heap() {
|
||||
return named_heap<ZCollectedHeap>(CollectedHeap::Z);
|
||||
@ -245,7 +246,7 @@ size_t ZCollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
|
||||
}
|
||||
|
||||
MemoryUsage ZCollectedHeap::memory_usage() {
|
||||
const size_t initial_size = ZHeap::heap()->initial_capacity();
|
||||
const size_t initial_size = InitialHeapSize;
|
||||
const size_t committed = ZHeap::heap()->capacity();
|
||||
const size_t used = MIN2(ZHeap::heap()->used(), committed);
|
||||
const size_t max_size = ZHeap::heap()->max_capacity();
|
||||
@ -355,10 +356,14 @@ void ZCollectedHeap::prepare_for_verify() {
|
||||
}
|
||||
|
||||
void ZCollectedHeap::print_on(outputStream* st) const {
|
||||
StreamAutoIndentor auto_indentor(st);
|
||||
|
||||
_heap.print_on(st);
|
||||
}
|
||||
|
||||
void ZCollectedHeap::print_on_error(outputStream* st) const {
|
||||
StreamAutoIndentor auto_indentor(st);
|
||||
|
||||
_heap.print_on_error(st);
|
||||
}
|
||||
|
||||
|
||||
@ -160,7 +160,7 @@ void ZGeneration::free_empty_pages(ZRelocationSetSelector* selector, int bulk) {
|
||||
// the page allocator lock, and trying to satisfy stalled allocations
|
||||
// too frequently.
|
||||
if (selector->should_free_empty_pages(bulk)) {
|
||||
const size_t freed = ZHeap::heap()->free_empty_pages(selector->empty_pages());
|
||||
const size_t freed = ZHeap::heap()->free_empty_pages(_id, selector->empty_pages());
|
||||
increase_freed(freed);
|
||||
selector->clear_empty_pages();
|
||||
}
|
||||
@ -190,17 +190,6 @@ void ZGeneration::select_relocation_set(bool promote_all) {
|
||||
for (ZPage* page; pt_iter.next(&page);) {
|
||||
if (!page->is_relocatable()) {
|
||||
// Not relocatable, don't register
|
||||
// Note that the seqnum can change under our feet here as the page
|
||||
// can be concurrently freed and recycled by a concurrent generation
|
||||
// collection. However this property is stable across such transitions.
|
||||
// If it was not relocatable before recycling, then it won't be
|
||||
// relocatable after it gets recycled either, as the seqnum atomically
|
||||
// becomes allocating for the given generation. The opposite property
|
||||
// also holds: if the page is relocatable, then it can't have been
|
||||
// concurrently freed; if it was re-allocated it would not be
|
||||
// relocatable, and if it was not re-allocated we know that it was
|
||||
// allocated earlier than mark start of the current generation
|
||||
// collection.
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -213,15 +202,14 @@ void ZGeneration::select_relocation_set(bool promote_all) {

  // Reclaim empty pages in bulk

  // An active iterator blocks immediate recycle and delete of pages.
  // The intent it to allow the code that iterates over the pages to
  // safely read the properties of the pages without them being changed
  // by another thread. However, this function both iterates over the
  // pages AND frees/recycles them. We "yield" the iterator, so that we
  // can perform immediate recycling (as long as no other thread is
  // iterating over the pages). The contract is that the pages that are
  // about to be freed are "owned" by this thread, and no other thread
  // will change their states.
  // An active iterator blocks immediate deletion of pages. The intent is
  // to allow the code that iterates over pages to safely read properties
  // of the pages without them being freed/deleted. However, this
  // function both iterates over the pages AND frees them. We "yield" the
  // iterator, so that we can perform immediate deletion (as long as no
  // other thread is iterating over the pages). The contract is that the
  // pages that are about to be freed are "owned" by this thread, and no
  // other thread will change their states.
  pt_iter.yield([&]() {
    free_empty_pages(&selector, 64 /* bulk */);
  });
@ -934,7 +922,7 @@ void ZGenerationYoung::flip_promote(ZPage* from_page, ZPage* to_page) {
|
||||
_page_table->replace(from_page, to_page);
|
||||
|
||||
// Update statistics
|
||||
_page_allocator->promote_used(from_page->size());
|
||||
_page_allocator->promote_used(from_page, to_page);
|
||||
increase_freed(from_page->size());
|
||||
increase_promoted(from_page->live_bytes());
|
||||
}
|
||||
@ -943,7 +931,7 @@ void ZGenerationYoung::in_place_relocate_promote(ZPage* from_page, ZPage* to_pag
|
||||
_page_table->replace(from_page, to_page);
|
||||
|
||||
// Update statistics
|
||||
_page_allocator->promote_used(from_page->size());
|
||||
_page_allocator->promote_used(from_page, to_page);
|
||||
}
|
||||
|
||||
void ZGenerationYoung::register_flip_promoted(const ZArray<ZPage*>& pages) {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -55,6 +55,9 @@ public:
|
||||
T get_acquire(zoffset offset) const;
|
||||
void release_put(zoffset offset, T value);
|
||||
void release_put(zoffset offset, size_t size, T value);
|
||||
|
||||
const T* addr(zoffset offset) const;
|
||||
T* addr(zoffset offset);
|
||||
};
|
||||
|
||||
template <typename T, bool Parallel>
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -101,6 +101,17 @@ inline void ZGranuleMap<T>::release_put(zoffset offset, size_t size, T value) {
  put(offset, size, value);
}

template <typename T>
inline const T* ZGranuleMap<T>::addr(zoffset offset) const {
  const size_t index = index_for_offset(offset);
  return _map + index;
}

template <typename T>
inline T* ZGranuleMap<T>::addr(zoffset offset) {
  return const_cast<T*>(const_cast<const ZGranuleMap<T>*>(this)->addr(offset));
}

template <typename T, bool Parallel>
inline ZGranuleMapIterator<T, Parallel>::ZGranuleMapIterator(const ZGranuleMap<T>* granule_map)
  : ZArrayIteratorImpl<T, Parallel>(granule_map->_map, granule_map->_size) {}

@ -59,7 +59,7 @@ ZHeap::ZHeap()
|
||||
_page_table(),
|
||||
_allocator_eden(),
|
||||
_allocator_relocation(),
|
||||
_serviceability(initial_capacity(), min_capacity(), max_capacity()),
|
||||
_serviceability(InitialHeapSize, min_capacity(), max_capacity()),
|
||||
_old(&_page_table, &_page_allocator),
|
||||
_young(&_page_table, _old.forwarding_table(), &_page_allocator),
|
||||
_initialized(false) {
|
||||
@ -94,10 +94,6 @@ bool ZHeap::is_initialized() const {
|
||||
return _initialized;
|
||||
}
|
||||
|
||||
size_t ZHeap::initial_capacity() const {
|
||||
return _page_allocator.initial_capacity();
|
||||
}
|
||||
|
||||
size_t ZHeap::min_capacity() const {
|
||||
return _page_allocator.min_capacity();
|
||||
}
|
||||
@ -240,18 +236,18 @@ void ZHeap::undo_alloc_page(ZPage* page) {
|
||||
log_trace(gc)("Undo page allocation, thread: " PTR_FORMAT " (%s), page: " PTR_FORMAT ", size: %zu",
|
||||
p2i(Thread::current()), ZUtils::thread_name(), p2i(page), page->size());
|
||||
|
||||
free_page(page, false /* allow_defragment */);
|
||||
free_page(page);
|
||||
}
|
||||
|
||||
void ZHeap::free_page(ZPage* page, bool allow_defragment) {
|
||||
void ZHeap::free_page(ZPage* page) {
|
||||
// Remove page table entry
|
||||
_page_table.remove(page);
|
||||
|
||||
// Free page
|
||||
_page_allocator.free_page(page, allow_defragment);
|
||||
_page_allocator.free_page(page);
|
||||
}
|
||||
|
||||
size_t ZHeap::free_empty_pages(const ZArray<ZPage*>* pages) {
|
||||
size_t ZHeap::free_empty_pages(ZGenerationId id, const ZArray<ZPage*>* pages) {
|
||||
size_t freed = 0;
|
||||
// Remove page table entries
|
||||
ZArrayIterator<ZPage*> iter(pages);
|
||||
@ -261,7 +257,7 @@ size_t ZHeap::free_empty_pages(const ZArray<ZPage*>* pages) {
|
||||
}
|
||||
|
||||
// Free pages
|
||||
_page_allocator.free_pages(pages);
|
||||
_page_allocator.free_pages(id, pages);
|
||||
|
||||
return freed;
|
||||
}
|
||||
@ -319,21 +315,32 @@ ZServiceabilityCounters* ZHeap::serviceability_counters() {
|
||||
}
|
||||
|
||||
void ZHeap::print_on(outputStream* st) const {
|
||||
st->print_cr(" ZHeap used %zuM, capacity %zuM, max capacity %zuM",
|
||||
used() / M,
|
||||
capacity() / M,
|
||||
max_capacity() / M);
|
||||
streamIndentor indentor(st, 1);
|
||||
_page_allocator.print_on(st);
|
||||
|
||||
// Metaspace printing prepends spaces instead of using outputStream indentation
|
||||
streamIndentor indentor_back(st, -1);
|
||||
MetaspaceUtils::print_on(st);
|
||||
}
|
||||
|
||||
void ZHeap::print_on_error(outputStream* st) const {
|
||||
print_on(st);
|
||||
{
|
||||
streamIndentor indentor(st, 1);
|
||||
_page_allocator.print_on_error(st);
|
||||
|
||||
// Metaspace printing prepends spaces instead of using outputStream indentation
|
||||
streamIndentor indentor_back(st, -1);
|
||||
MetaspaceUtils::print_on(st);
|
||||
}
|
||||
st->cr();
|
||||
|
||||
print_globals_on(st);
|
||||
st->cr();
|
||||
|
||||
print_page_table_on(st);
|
||||
st->cr();
|
||||
|
||||
_page_allocator.print_extended_on_error(st);
|
||||
}
|
||||
|
||||
void ZHeap::print_globals_on(outputStream* st) const {
|
||||
@ -366,9 +373,12 @@ void ZHeap::print_page_table_on(outputStream* st) const {
|
||||
|
||||
// Print all pages
|
||||
st->print_cr("ZGC Page Table:");
|
||||
ZPageTableIterator iter(&_page_table);
|
||||
for (ZPage* page; iter.next(&page);) {
|
||||
page->print_on(st);
|
||||
{
|
||||
streamIndentor indentor(st, 1);
|
||||
ZPageTableIterator iter(&_page_table);
|
||||
for (ZPage* page; iter.next(&page);) {
|
||||
page->print_on(st);
|
||||
}
|
||||
}
|
||||
|
||||
// Allow pages to be deleted
|
||||
|
||||
@ -67,7 +67,6 @@ public:
|
||||
void out_of_memory();
|
||||
|
||||
// Heap metrics
|
||||
size_t initial_capacity() const;
|
||||
size_t min_capacity() const;
|
||||
size_t max_capacity() const;
|
||||
size_t soft_max_capacity() const;
|
||||
@ -104,8 +103,8 @@ public:
|
||||
// Page allocation
|
||||
ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age);
|
||||
void undo_alloc_page(ZPage* page);
|
||||
void free_page(ZPage* page, bool allow_defragment);
|
||||
size_t free_empty_pages(const ZArray<ZPage*>* pages);
|
||||
void free_page(ZPage* page);
|
||||
size_t free_empty_pages(ZGenerationId id, const ZArray<ZPage*>* pages);
|
||||
|
||||
// Object allocation
|
||||
bool is_alloc_stalling() const;
|
||||
|
||||
@ -57,8 +57,8 @@ void ZInitialize::initialize(ZBarrierSet* barrier_set) {

  // Early initialization
  ZNMT::initialize();
  ZGlobalsPointers::initialize();
  ZNUMA::initialize();
  ZGlobalsPointers::initialize();
  ZCPU::initialize();
  ZStatValue::initialize();
  ZThreadLocalAllocBuffer::initialize();

src/hotspot/share/gc/z/zIntrusiveRBTree.hpp (new file, 293 lines)
@ -0,0 +1,293 @@
|
||||
/*
|
||||
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZINTRUSIVERBTREE_HPP
|
||||
#define SHARE_GC_Z_ZINTRUSIVERBTREE_HPP
|
||||
|
||||
#include "metaprogramming/enableIf.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
enum class ZIntrusiveRBTreeDirection { LEFT, RIGHT };
|
||||
|
||||
class ZIntrusiveRBTreeNode {
|
||||
template <typename Key, typename Compare>
|
||||
friend class ZIntrusiveRBTree;
|
||||
|
||||
public:
|
||||
enum Color { RED = 0b0, BLACK = 0b1 };
|
||||
|
||||
private:
|
||||
class ColoredNodePtr {
|
||||
private:
|
||||
static constexpr uintptr_t COLOR_MASK = 0b1;
|
||||
static constexpr uintptr_t NODE_MASK = ~COLOR_MASK;
|
||||
|
||||
uintptr_t _value;
|
||||
|
||||
public:
|
||||
ColoredNodePtr(ZIntrusiveRBTreeNode* node = nullptr, Color color = RED);
|
||||
|
||||
constexpr Color color() const;
|
||||
constexpr bool is_black() const;
|
||||
constexpr bool is_red() const;
|
||||
|
||||
ZIntrusiveRBTreeNode* node() const;
|
||||
ZIntrusiveRBTreeNode* red_node() const;
|
||||
ZIntrusiveRBTreeNode* black_node() const;
|
||||
};
|
||||
|
||||
private:
|
||||
ColoredNodePtr _colored_parent;
|
||||
ZIntrusiveRBTreeNode* _left;
|
||||
ZIntrusiveRBTreeNode* _right;
|
||||
|
||||
template <ZIntrusiveRBTreeDirection DIRECTION>
|
||||
const ZIntrusiveRBTreeNode* find_next_node() const;
|
||||
|
||||
template <ZIntrusiveRBTreeDirection DIRECTION>
|
||||
const ZIntrusiveRBTreeNode* child() const;
|
||||
template <ZIntrusiveRBTreeDirection DIRECTION>
|
||||
ZIntrusiveRBTreeNode* child();
|
||||
|
||||
template <ZIntrusiveRBTreeDirection DIRECTION>
|
||||
ZIntrusiveRBTreeNode* const* child_addr() const;
|
||||
|
||||
template <ZIntrusiveRBTreeDirection DIRECTION>
|
||||
bool has_child() const;
|
||||
|
||||
template <ZIntrusiveRBTreeDirection DIRECTION>
|
||||
void update_child(ZIntrusiveRBTreeNode* new_child);
|
||||
|
||||
void link_node(ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode** insert_location);
|
||||
|
||||
void copy_parent_and_color(ZIntrusiveRBTreeNode* other);
|
||||
void update_parent_and_color(ZIntrusiveRBTreeNode* parent, Color color);
|
||||
|
||||
void update_parent(ZIntrusiveRBTreeNode* parent);
|
||||
void update_color(Color color);
|
||||
|
||||
void update_left_child(ZIntrusiveRBTreeNode* new_child);
|
||||
void update_right_child(ZIntrusiveRBTreeNode* new_child);
|
||||
|
||||
const ZIntrusiveRBTreeNode* parent() const;
|
||||
ZIntrusiveRBTreeNode* parent();
|
||||
const ZIntrusiveRBTreeNode* red_parent() const;
|
||||
ZIntrusiveRBTreeNode* red_parent();
|
||||
const ZIntrusiveRBTreeNode* black_parent() const;
|
||||
ZIntrusiveRBTreeNode* black_parent();
|
||||
|
||||
bool has_parent() const;
|
||||
|
||||
Color color() const;
|
||||
bool is_black() const;
|
||||
bool is_red() const;
|
||||
static bool is_black(ZIntrusiveRBTreeNode* node);
|
||||
|
||||
ZIntrusiveRBTreeNode* const* left_child_addr() const;
|
||||
ZIntrusiveRBTreeNode* const* right_child_addr() const;
|
||||
|
||||
const ZIntrusiveRBTreeNode* left_child() const;
|
||||
ZIntrusiveRBTreeNode* left_child();
|
||||
const ZIntrusiveRBTreeNode* right_child() const;
|
||||
ZIntrusiveRBTreeNode* right_child();
|
||||
|
||||
bool has_left_child() const;
|
||||
bool has_right_child() const;
|
||||
|
||||
public:
|
||||
ZIntrusiveRBTreeNode();
|
||||
|
||||
const ZIntrusiveRBTreeNode* prev() const;
|
||||
ZIntrusiveRBTreeNode* prev();
|
||||
const ZIntrusiveRBTreeNode* next() const;
|
||||
ZIntrusiveRBTreeNode* next();
|
||||
};
|
||||
|
||||
template <typename Key, typename Compare>
|
||||
class ZIntrusiveRBTree {
|
||||
public:
|
||||
class FindCursor {
|
||||
friend class ZIntrusiveRBTree<Key, Compare>;
|
||||
|
||||
private:
|
||||
ZIntrusiveRBTreeNode** _insert_location;
|
||||
ZIntrusiveRBTreeNode* _parent;
|
||||
bool _left_most;
|
||||
bool _right_most;
|
||||
DEBUG_ONLY(uintptr_t _sequence_number;)
|
||||
|
||||
FindCursor(ZIntrusiveRBTreeNode** insert_location, ZIntrusiveRBTreeNode* parent, bool left_most, bool right_most DEBUG_ONLY(COMMA uintptr_t sequence_number));
|
||||
FindCursor();
|
||||
|
||||
#ifdef ASSERT
|
||||
bool is_valid(uintptr_t sequence_number) const;
|
||||
#endif
|
||||
|
||||
public:
|
||||
FindCursor(const FindCursor&) = default;
|
||||
FindCursor& operator=(const FindCursor&) = default;
|
||||
|
||||
bool is_valid() const;
|
||||
bool found() const;
|
||||
ZIntrusiveRBTreeNode* node() const;
|
||||
bool is_left_most() const;
|
||||
bool is_right_most() const;
|
||||
ZIntrusiveRBTreeNode* parent() const;
|
||||
ZIntrusiveRBTreeNode** insert_location() const;
|
||||
};
|
||||
|
||||
private:
|
||||
ZIntrusiveRBTreeNode* _root_node;
|
||||
ZIntrusiveRBTreeNode* _left_most;
|
||||
ZIntrusiveRBTreeNode* _right_most;
|
||||
DEBUG_ONLY(uintptr_t _sequence_number;)
|
||||
|
||||
NONCOPYABLE(ZIntrusiveRBTree);
|
||||
|
||||
#ifdef ASSERT
|
||||
template <bool swap_left_right>
|
||||
bool verify_node(ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode* left_child, ZIntrusiveRBTreeNode* right_child);
|
||||
template <bool swap_left_right>
|
||||
bool verify_node(ZIntrusiveRBTreeNode* parent);
|
||||
template <bool swap_left_right>
|
||||
bool verify_node(ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode* left_child);
|
||||
struct any_t {};
|
||||
template <bool swap_left_right>
|
||||
bool verify_node(ZIntrusiveRBTreeNode* parent, any_t, ZIntrusiveRBTreeNode* right_child);
|
||||
#endif // ASSERT
|
||||
|
||||
ZIntrusiveRBTreeNode* const* root_node_addr() const;
|
||||
|
||||
void update_child_or_root(ZIntrusiveRBTreeNode* old_node, ZIntrusiveRBTreeNode* new_node, ZIntrusiveRBTreeNode* parent);
|
||||
void rotate_and_update_child_or_root(ZIntrusiveRBTreeNode* old_node, ZIntrusiveRBTreeNode* new_node, ZIntrusiveRBTreeNode::Color color);
|
||||
|
||||
template <ZIntrusiveRBTreeDirection PARENT_SIBLING_DIRECTION>
|
||||
void rebalance_insert_with_sibling(ZIntrusiveRBTreeNode* node, ZIntrusiveRBTreeNode* parent, ZIntrusiveRBTreeNode* grand_parent);
|
||||
template <ZIntrusiveRBTreeDirection PARENT_SIBLING_DIRECTION>
|
||||
bool rebalance_insert_with_parent_sibling(ZIntrusiveRBTreeNode** node_addr, ZIntrusiveRBTreeNode** parent_addr, ZIntrusiveRBTreeNode* grand_parent);
|
||||
void rebalance_insert(ZIntrusiveRBTreeNode* new_node);
|
||||
|
||||
template <ZIntrusiveRBTreeDirection SIBLING_DIRECTION>
|
||||
bool rebalance_remove_with_sibling(ZIntrusiveRBTreeNode** node_addr, ZIntrusiveRBTreeNode** parent_addr);
|
||||
void rebalance_remove(ZIntrusiveRBTreeNode* rebalance_from);
|
||||
|
||||
FindCursor make_cursor(ZIntrusiveRBTreeNode* const* insert_location, ZIntrusiveRBTreeNode* parent, bool left_most, bool right_most) const;
|
||||
template <ZIntrusiveRBTreeDirection DIRECTION>
|
||||
FindCursor find_next(const FindCursor& cursor) const;
|
||||
|
||||
public:
|
||||
ZIntrusiveRBTree();
|
||||
|
||||
ZIntrusiveRBTreeNode* first() const;
|
||||
ZIntrusiveRBTreeNode* last() const;
|
||||
|
||||
FindCursor root_cursor() const;
|
||||
FindCursor get_cursor(const ZIntrusiveRBTreeNode* node) const;
|
||||
FindCursor prev_cursor(const ZIntrusiveRBTreeNode* node) const;
|
||||
FindCursor next_cursor(const ZIntrusiveRBTreeNode* node) const;
|
||||
FindCursor prev(const FindCursor& cursor) const;
|
||||
FindCursor next(const FindCursor& cursor) const;
|
||||
FindCursor find(const Key& key) const;
|
||||
|
||||
void insert(ZIntrusiveRBTreeNode* new_node, const FindCursor& find_cursor);
|
||||
void replace(ZIntrusiveRBTreeNode* new_node, const FindCursor& find_cursor);
|
||||
void remove(const FindCursor& find_cursor);
|
||||
|
||||
void verify_tree();
|
||||
|
||||
public:
  template <bool IsConst, bool Reverse>
  class IteratorImplementation;

  using Iterator = IteratorImplementation<false, false>;
  using ConstIterator = IteratorImplementation<true, false>;
  using ReverseIterator = IteratorImplementation<false, true>;
  using ConstReverseIterator = IteratorImplementation<true, true>;

  // Note: remove and replace invalidate the iterators. However, the iterators
  // provide remove and replace functions that invalidate neither the iterator
  // they are called on nor any end iterator.
  Iterator begin();
  Iterator end();
  ConstIterator begin() const;
  ConstIterator end() const;
  ConstIterator cbegin() const;
  ConstIterator cend() const;
  ReverseIterator rbegin();
  ReverseIterator rend();
  ConstReverseIterator rbegin() const;
  ConstReverseIterator rend() const;
  ConstReverseIterator crbegin() const;
  ConstReverseIterator crend() const;
};

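// Illustrative sketch (not part of this change): the intended iteration and
// removal pattern given the invalidation rules noted above. The key type,
// compare functor and removal predicate are hypothetical.
//
//   ZIntrusiveRBTree<zoffset, MyCompare> tree;
//   for (auto it = tree.begin(); it != tree.end(); ++it) {
//     if (should_remove(*it)) {
//       it.remove();   // keeps 'it' and end() usable, unlike tree.remove()
//     }
//   }
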
template <typename Key, typename Compare>
|
||||
template <bool IsConst, bool Reverse>
|
||||
class ZIntrusiveRBTree<Key, Compare>::IteratorImplementation {
|
||||
friend IteratorImplementation<true, Reverse>;
|
||||
|
||||
public:
|
||||
using difference_type = std::ptrdiff_t;
|
||||
using value_type = const ZIntrusiveRBTreeNode;
|
||||
using pointer = value_type*;
|
||||
using reference = value_type&;
|
||||
|
||||
private:
|
||||
ZIntrusiveRBTree<Key, Compare>* _tree;
|
||||
const ZIntrusiveRBTreeNode* _node;
|
||||
bool _removed;
|
||||
|
||||
bool at_end() const;
|
||||
|
||||
public:
|
||||
IteratorImplementation(ZIntrusiveRBTree<Key, Compare>& tree, pointer node);
|
||||
IteratorImplementation(const IteratorImplementation<IsConst, Reverse>&) = default;
|
||||
template <bool Enable = IsConst, ENABLE_IF(Enable)>
|
||||
IteratorImplementation(const IteratorImplementation<false, Reverse>& other);
|
||||
|
||||
reference operator*() const;
|
||||
pointer operator->();
|
||||
IteratorImplementation& operator--();
|
||||
IteratorImplementation operator--(int);
|
||||
IteratorImplementation& operator++();
|
||||
IteratorImplementation operator++(int);
|
||||
|
||||
template <bool Enable = !IsConst, ENABLE_IF(Enable)>
|
||||
void replace(ZIntrusiveRBTreeNode * new_node);
|
||||
template <bool Enable = !IsConst, ENABLE_IF(Enable)>
|
||||
void remove();
|
||||
|
||||
// Note: friend operator overloads defined inside class declaration because of problems with ADL
|
||||
friend bool operator==(const IteratorImplementation& a, const IteratorImplementation& b) {
|
||||
precond(a._tree == b._tree);
|
||||
return a._node == b._node;
|
||||
}
|
||||
friend bool operator!=(const IteratorImplementation& a, const IteratorImplementation& b) {
|
||||
precond(a._tree == b._tree);
|
||||
return a._node != b._node;
|
||||
}
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZINTRUSIVERBTREE_HPP
|
||||
src/hotspot/share/gc/z/zIntrusiveRBTree.inline.hpp (new file, 1351 lines)
File diff suppressed because it is too large
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,6 +25,7 @@
|
||||
#define SHARE_GC_Z_ZLIST_HPP
|
||||
|
||||
#include "memory/allocation.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
template <typename T> class ZList;
|
||||
@ -46,7 +47,12 @@ private:

public:
  ZListNode();
  ~ZListNode();
  ~ZListNode() {
    // Implementation placed here to make it easier to embed ZListNode
    // instances without having to include zListNode.inline.hpp.
    assert(_next == this, "Should not be in a list");
    assert(_prev == this, "Should not be in a list");
  }
};

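// Illustrative sketch (not part of this change): with the destructor defined
// in the header, a type can embed a ZListNode and be destroyed while only
// including zList.hpp. The containing type below is hypothetical.
//
//   struct MyEntry {
//     ZListNode<MyEntry> _node;   // asserts that it is unlinked on destruction
//   };
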
// Doubly linked list
|
||||
@ -59,6 +65,7 @@ private:
|
||||
NONCOPYABLE(ZList);
|
||||
|
||||
void verify_head() const;
|
||||
void verify_head_error_reporter_safe() const;
|
||||
|
||||
void insert(ZListNode<T>* before, ZListNode<T>* node);
|
||||
|
||||
@ -68,6 +75,9 @@ private:
|
||||
public:
|
||||
ZList();
|
||||
|
||||
size_t size_error_reporter_safe() const;
|
||||
bool is_empty_error_reporter_safe() const;
|
||||
|
||||
size_t size() const;
|
||||
bool is_empty() const;
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,17 +27,13 @@
|
||||
#include "gc/z/zList.hpp"
|
||||
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/vmError.hpp"
|
||||
|
||||
template <typename T>
|
||||
inline ZListNode<T>::ZListNode()
|
||||
: _next(this),
|
||||
_prev(this) {}
|
||||
|
||||
template <typename T>
|
||||
inline ZListNode<T>::~ZListNode() {
|
||||
verify_links_unlinked();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void ZListNode<T>::verify_links() const {
|
||||
assert(_next->_prev == this, "Corrupt list node");
|
||||
@ -62,6 +58,16 @@ inline void ZList<T>::verify_head() const {
|
||||
_head.verify_links();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void ZList<T>::verify_head_error_reporter_safe() const {
|
||||
if (VMError::is_error_reported() && VMError::is_error_reported_in_current_thread()) {
|
||||
// Do not verify if this thread is in the process of reporting an error.
|
||||
return;
|
||||
}
|
||||
|
||||
verify_head();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void ZList<T>::insert(ZListNode<T>* before, ZListNode<T>* node) {
|
||||
verify_head();
|
||||
@ -97,6 +103,17 @@ inline ZList<T>::ZList()
|
||||
verify_head();
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline size_t ZList<T>::size_error_reporter_safe() const {
|
||||
verify_head_error_reporter_safe();
|
||||
return _size;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline bool ZList<T>::is_empty_error_reporter_safe() const {
|
||||
return size_error_reporter_safe() == 0;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline size_t ZList<T>::size() const {
|
||||
verify_head();
|
||||
|
||||
@ -34,24 +34,19 @@
|
||||
static const ZStatCounter ZCounterMarkSeqNumResetContention("Contention", "Mark SeqNum Reset Contention", ZStatUnitOpsPerSecond);
|
||||
static const ZStatCounter ZCounterMarkSegmentResetContention("Contention", "Mark Segment Reset Contention", ZStatUnitOpsPerSecond);
|
||||
|
||||
static size_t bitmap_size(uint32_t size, size_t NumSegments) {
|
||||
// We need at least one bit per segment
|
||||
return MAX2<size_t>(size, NumSegments) * 2;
|
||||
}
|
||||
|
||||
ZLiveMap::ZLiveMap(uint32_t size)
|
||||
: _seqnum(0),
|
||||
ZLiveMap::ZLiveMap(uint32_t object_max_count)
|
||||
: _segment_size((object_max_count == 1 ? 1u : (object_max_count / NumSegments)) * BitsPerObject),
|
||||
_segment_shift(log2i_exact(_segment_size)),
|
||||
_seqnum(0),
|
||||
_live_objects(0),
|
||||
_live_bytes(0),
|
||||
_segment_live_bits(0),
|
||||
_segment_claim_bits(0),
|
||||
_bitmap_size(bitmap_size(size, NumSegments)),
|
||||
_bitmap(0),
|
||||
_segment_shift(log2i_exact(segment_size())) {}
|
||||
_bitmap(0) {}
|
||||
|
||||
void ZLiveMap::allocate_bitmap() {
|
||||
if (_bitmap.size() != _bitmap_size) {
|
||||
_bitmap.initialize(_bitmap_size, false /* clear */);
|
||||
void ZLiveMap::initialize_bitmap() {
|
||||
if (_bitmap.size() == 0) {
|
||||
_bitmap.initialize(size_t(_segment_size) * size_t(NumSegments), false /* clear */);
|
||||
}
|
||||
}
|
||||
|
||||
@ -71,14 +66,14 @@ void ZLiveMap::reset(ZGenerationId id) {
|
||||
_live_bytes = 0;
|
||||
_live_objects = 0;
|
||||
|
||||
// We lazily initialize the bitmap the first time the page is
|
||||
// marked, i.e. a bit is about to be set for the first time.
|
||||
allocate_bitmap();
|
||||
|
||||
// Clear segment claimed/live bits
|
||||
segment_live_bits().clear();
|
||||
segment_claim_bits().clear();
|
||||
|
||||
// We lazily initialize the bitmap the first time the page is marked, i.e.
|
||||
// a bit is about to be set for the first time.
|
||||
initialize_bitmap();
|
||||
|
||||
assert(_seqnum == seqnum_initializing, "Invalid");
|
||||
|
||||
// Make sure the newly reset marking information is ordered
|
||||
@ -125,7 +120,7 @@ void ZLiveMap::reset_segment(BitMap::idx_t segment) {
|
||||
// Segment claimed, clear it
|
||||
const BitMap::idx_t start_index = segment_start(segment);
|
||||
const BitMap::idx_t end_index = segment_end(segment);
|
||||
if (segment_size() / BitsPerWord >= 32) {
|
||||
if (_segment_size / BitsPerWord >= 32) {
|
||||
_bitmap.clear_large_range(start_index, end_index);
|
||||
} else {
|
||||
_bitmap.clear_range(start_index, end_index);
|
||||
@ -135,13 +130,3 @@ void ZLiveMap::reset_segment(BitMap::idx_t segment) {
|
||||
const bool success = set_segment_live(segment);
|
||||
assert(success, "Should never fail");
|
||||
}
|
||||
|
||||
void ZLiveMap::resize(uint32_t size) {
|
||||
const size_t new_bitmap_size = bitmap_size(size, NumSegments);
|
||||
_bitmap_size = new_bitmap_size;
|
||||
_segment_shift = log2i_exact(segment_size());
|
||||
|
||||
if (_bitmap.size() != 0 && _bitmap.size() != new_bitmap_size) {
|
||||
_bitmap.reinitialize(new_bitmap_size, false /* clear */);
|
||||
}
|
||||
}
|
||||
|
||||
@ -35,16 +35,18 @@ class ZLiveMap {
|
||||
friend class ZLiveMapTest;
|
||||
|
||||
private:
|
||||
static const size_t NumSegments = 64;
|
||||
static const uint32_t NumSegments = 64;
|
||||
static const uint32_t BitsPerObject = 2;
|
||||
|
||||
const uint32_t _segment_size;
|
||||
const int _segment_shift;
|
||||
|
||||
volatile uint32_t _seqnum;
|
||||
volatile uint32_t _live_objects;
|
||||
volatile size_t _live_bytes;
|
||||
BitMap::bm_word_t _segment_live_bits;
|
||||
BitMap::bm_word_t _segment_claim_bits;
|
||||
size_t _bitmap_size;
|
||||
ZBitMap _bitmap;
|
||||
int _segment_shift;
|
||||
|
||||
const BitMapView segment_live_bits() const;
|
||||
const BitMapView segment_claim_bits() const;
|
||||
@ -52,8 +54,6 @@ private:
|
||||
BitMapView segment_live_bits();
|
||||
BitMapView segment_claim_bits();
|
||||
|
||||
BitMap::idx_t segment_size() const;
|
||||
|
||||
BitMap::idx_t segment_start(BitMap::idx_t segment) const;
|
||||
BitMap::idx_t segment_end(BitMap::idx_t segment) const;
|
||||
|
||||
@ -66,7 +66,7 @@ private:
|
||||
|
||||
bool claim_segment(BitMap::idx_t segment);
|
||||
|
||||
void allocate_bitmap();
|
||||
void initialize_bitmap();
|
||||
|
||||
void reset(ZGenerationId id);
|
||||
void reset_segment(BitMap::idx_t segment);
|
||||
@ -77,11 +77,10 @@ private:
|
||||
void iterate_segment(BitMap::idx_t segment, Function function);
|
||||
|
||||
public:
|
||||
ZLiveMap(uint32_t size);
|
||||
ZLiveMap(uint32_t object_max_count);
|
||||
ZLiveMap(const ZLiveMap& other) = delete;
|
||||
|
||||
void reset();
|
||||
void resize(uint32_t size);
|
||||
|
||||
bool is_marked(ZGenerationId id) const;
|
||||
|
||||
|
||||
@ -87,10 +87,6 @@ inline BitMap::idx_t ZLiveMap::next_live_segment(BitMap::idx_t segment) const {
|
||||
return segment_live_bits().find_first_set_bit(segment + 1, NumSegments);
|
||||
}
|
||||
|
||||
inline BitMap::idx_t ZLiveMap::segment_size() const {
|
||||
return _bitmap_size / NumSegments;
|
||||
}
|
||||
|
||||
inline BitMap::idx_t ZLiveMap::index_to_segment(BitMap::idx_t index) const {
|
||||
return index >> _segment_shift;
|
||||
}
|
||||
@ -125,11 +121,11 @@ inline void ZLiveMap::inc_live(uint32_t objects, size_t bytes) {
|
||||
}
|
||||
|
||||
inline BitMap::idx_t ZLiveMap::segment_start(BitMap::idx_t segment) const {
|
||||
return segment_size() * segment;
|
||||
return segment * _segment_size;
|
||||
}
|
||||
|
||||
inline BitMap::idx_t ZLiveMap::segment_end(BitMap::idx_t segment) const {
|
||||
return segment_start(segment) + segment_size();
|
||||
return segment_start(segment) + _segment_size;
|
||||
}
|
||||
|
||||
inline size_t ZLiveMap::do_object(ObjectClosure* cl, zaddress addr) const {
|
||||
|
||||
src/hotspot/share/gc/z/zMappedCache.cpp (new file, 629 lines)
@ -0,0 +1,629 @@
|
||||
/*
|
||||
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zIntrusiveRBTree.inline.hpp"
|
||||
#include "gc/z/zList.inline.hpp"
|
||||
#include "gc/z/zMappedCache.hpp"
|
||||
#include "gc/z/zVirtualMemory.inline.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
class ZMappedCacheEntry {
|
||||
private:
|
||||
ZVirtualMemory _vmem;
|
||||
ZMappedCache::TreeNode _tree_node;
|
||||
ZMappedCache::SizeClassListNode _size_class_list_node;
|
||||
|
||||
public:
|
||||
ZMappedCacheEntry(ZVirtualMemory vmem)
|
||||
: _vmem(vmem),
|
||||
_tree_node(),
|
||||
_size_class_list_node() {}
|
||||
|
||||
static ZMappedCacheEntry* cast_to_entry(ZMappedCache::TreeNode* tree_node);
|
||||
static const ZMappedCacheEntry* cast_to_entry(const ZMappedCache::TreeNode* tree_node);
|
||||
static ZMappedCacheEntry* cast_to_entry(ZMappedCache::SizeClassListNode* list_node);
|
||||
|
||||
zoffset start() const {
|
||||
return _vmem.start();
|
||||
}
|
||||
|
||||
zoffset_end end() const {
|
||||
return _vmem.end();
|
||||
}
|
||||
|
||||
ZVirtualMemory vmem() const {
|
||||
return _vmem;
|
||||
}
|
||||
|
||||
ZMappedCache::TreeNode* node_addr() {
|
||||
return &_tree_node;
|
||||
}
|
||||
|
||||
void update_start(ZVirtualMemory vmem) {
|
||||
precond(vmem.end() == end());
|
||||
|
||||
_vmem = vmem;
|
||||
}
|
||||
|
||||
ZMappedCache::ZSizeClassListNode* size_class_node() {
|
||||
return &_size_class_list_node;
|
||||
}
|
||||
};
|
||||
|
||||
ZMappedCacheEntry* ZMappedCacheEntry::cast_to_entry(ZMappedCache::TreeNode* tree_node) {
|
||||
return const_cast<ZMappedCacheEntry*>(ZMappedCacheEntry::cast_to_entry(const_cast<const ZMappedCache::TreeNode*>(tree_node)));
|
||||
}
|
||||
|
||||
const ZMappedCacheEntry* ZMappedCacheEntry::cast_to_entry(const ZMappedCache::TreeNode* tree_node) {
|
||||
return (const ZMappedCacheEntry*)((uintptr_t)tree_node - offset_of(ZMappedCacheEntry, _tree_node));
|
||||
}
|
||||
|
||||
ZMappedCacheEntry* ZMappedCacheEntry::cast_to_entry(ZMappedCache::SizeClassListNode* list_node) {
|
||||
const size_t offset = offset_of(ZMappedCacheEntry, _size_class_list_node);
|
||||
return (ZMappedCacheEntry*)((uintptr_t)list_node - offset);
|
||||
}
|
||||
|
||||
static void* entry_address_for_zoffset_end(zoffset_end offset) {
  STATIC_ASSERT(is_aligned(ZCacheLineSize, alignof(ZMappedCacheEntry)));

  // This spreads out the location of the entries in an effort to combat hyper alignment.
  // Verify if this is an efficient and worthwhile optimization.

  constexpr size_t aligned_entry_size = align_up(sizeof(ZMappedCacheEntry), ZCacheLineSize);

  // Do not use the last location
  constexpr size_t number_of_locations = ZGranuleSize / aligned_entry_size - 1;
  const size_t granule_index = untype(offset) >> ZGranuleSizeShift;
  const size_t index = granule_index % number_of_locations;
  const uintptr_t end_addr = untype(offset) + ZAddressHeapBase;

  return reinterpret_cast<void*>(end_addr - aligned_entry_size * (index + 1));
}

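// Illustrative sketch (not part of this change): a worked example of the
// placement arithmetic above, assuming ZGranuleSize == 2M (shift 21),
// ZCacheLineSize == 64 and sizeof(ZMappedCacheEntry) <= 64, giving
// aligned_entry_size == 64:
//
//   number_of_locations = 2M / 64 - 1 = 32767
//   index               = (untype(offset) >> 21) % 32767
//   entry address       = ZAddressHeapBase + untype(offset) - 64 * (index + 1)
//
// Consecutive granules thus place their entries at different cache-line
// offsets below the end of the range rather than all at the same offset.
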
static ZMappedCacheEntry* create_entry(const ZVirtualMemory& vmem) {
|
||||
precond(vmem.size() >= ZGranuleSize);
|
||||
|
||||
void* placement_addr = entry_address_for_zoffset_end(vmem.end());
|
||||
ZMappedCacheEntry* entry = new (placement_addr) ZMappedCacheEntry(vmem);
|
||||
|
||||
postcond(entry->start() == vmem.start());
|
||||
postcond(entry->end() == vmem.end());
|
||||
|
||||
return entry;
|
||||
}
|
||||
|
||||
int ZMappedCache::EntryCompare::operator()(ZMappedCache::TreeNode* a, ZMappedCache::TreeNode* b) {
|
||||
const ZVirtualMemory vmem_a = ZMappedCacheEntry::cast_to_entry(a)->vmem();
|
||||
const ZVirtualMemory vmem_b = ZMappedCacheEntry::cast_to_entry(b)->vmem();
|
||||
|
||||
if (vmem_a.end() < vmem_b.start()) { return -1; }
|
||||
if (vmem_b.end() < vmem_a.start()) { return 1; }
|
||||
|
||||
return 0; // Overlapping
|
||||
}
|
||||
|
||||
int ZMappedCache::EntryCompare::operator()(zoffset key, ZMappedCache::TreeNode* node) {
|
||||
const ZVirtualMemory vmem = ZMappedCacheEntry::cast_to_entry(node)->vmem();
|
||||
|
||||
if (key < vmem.start()) { return -1; }
|
||||
if (key > vmem.end()) { return 1; }
|
||||
|
||||
return 0; // Containing
|
||||
}
|
||||
|
||||
int ZMappedCache::size_class_index(size_t size) {
  // Returns the size class index for size, or -1 if smaller than the smallest size class.
  const int size_class_power = log2i_graceful(size) - (int)ZGranuleSizeShift;

  if (size_class_power < MinSizeClassShift) {
    // Allocation is smaller than the smallest size class minimum size.
    return -1;
  }

  return MIN2(size_class_power, MaxSizeClassShift) - MinSizeClassShift;
}

int ZMappedCache::guaranteed_size_class_index(size_t size) {
  // Returns the size class index of the smallest size class which can always
  // accommodate a size allocation, or -1 otherwise.
  const int size_class_power = log2i_ceil(size) - (int)ZGranuleSizeShift;

  if (size_class_power > MaxSizeClassShift) {
    // Allocation is larger than the largest size class minimum size.
    return -1;
  }

  return MAX2(size_class_power, MinSizeClassShift) - MinSizeClassShift;
}

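// Illustrative sketch (not part of this change): worked examples of the two
// mappings above. The constants are assumptions for the example only; the
// real MinSizeClassShift/MaxSizeClassShift are defined elsewhere in this
// class. Assuming ZGranuleSizeShift == 21 (2M), MinSizeClassShift == 2 and
// MaxSizeClassShift == 10:
//
//   size_class_index(4M)            : log2(4M) - 21 = 1, 1 < 2              -> -1
//   size_class_index(32M)           : log2(32M) - 21 = 4, MIN2(4, 10) - 2   ->  2
//   guaranteed_size_class_index(6M) : log2_ceil(6M) - 21 = 2, MAX2(2, 2) - 2 -> 0
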
void ZMappedCache::tree_insert(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem) {
|
||||
ZMappedCacheEntry* const entry = create_entry(vmem);
|
||||
|
||||
// Insert creates a new entry
|
||||
_entry_count += 1;
|
||||
|
||||
// Insert in tree
|
||||
_tree.insert(entry->node_addr(), cursor);
|
||||
|
||||
// Insert in size-class lists
|
||||
const size_t size = vmem.size();
|
||||
const int index = size_class_index(size);
|
||||
if (index != -1) {
|
||||
_size_class_lists[index].insert_first(entry->size_class_node());
|
||||
}
|
||||
}
|
||||
|
||||
void ZMappedCache::tree_remove(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem) {
|
||||
ZMappedCacheEntry* entry = ZMappedCacheEntry::cast_to_entry(cursor.node());
|
||||
|
||||
// Remove destroys an old entry
|
||||
_entry_count -= 1;
|
||||
|
||||
// Remove from tree
|
||||
_tree.remove(cursor);
|
||||
|
||||
// Insert in size-class lists
|
||||
const size_t size = vmem.size();
|
||||
const int index = size_class_index(size);
|
||||
if (index != -1) {
|
||||
_size_class_lists[index].remove(entry->size_class_node());
|
||||
}
|
||||
|
||||
// Destroy entry
|
||||
entry->~ZMappedCacheEntry();
|
||||
}
|
||||
|
||||
void ZMappedCache::tree_replace(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem) {
|
||||
ZMappedCacheEntry* const entry = create_entry(vmem);
|
||||
|
||||
ZMappedCache::TreeNode* const node = cursor.node();
|
||||
ZMappedCacheEntry* const old_entry = ZMappedCacheEntry::cast_to_entry(node);
|
||||
assert(old_entry->end() != vmem.end(), "should not replace, use update");
|
||||
|
||||
// Replace in tree
|
||||
_tree.replace(entry->node_addr(), cursor);
|
||||
|
||||
// Replace in size-class lists
|
||||
|
||||
// Remove old
|
||||
const size_t old_size = old_entry->vmem().size();
|
||||
const int old_index = size_class_index(old_size);
|
||||
if (old_index != -1) {
|
||||
_size_class_lists[old_index].remove(old_entry->size_class_node());
|
||||
}
|
||||
|
||||
// Insert new
|
||||
const size_t new_size = vmem.size();
|
||||
const int new_index = size_class_index(new_size);
|
||||
if (new_index != -1) {
|
||||
_size_class_lists[new_index].insert_first(entry->size_class_node());
|
||||
}
|
||||
|
||||
// Destroy old entry
|
||||
old_entry->~ZMappedCacheEntry();
|
||||
}
|
||||
|
||||
void ZMappedCache::tree_update(ZMappedCacheEntry* entry, const ZVirtualMemory& vmem) {
|
||||
assert(entry->end() == vmem.end(), "must be");
|
||||
|
||||
// Remove or add to size-class lists if required
|
||||
|
||||
const size_t old_size = entry->vmem().size();
|
||||
const size_t new_size = vmem.size();
|
||||
const int old_index = size_class_index(old_size);
|
||||
const int new_index = size_class_index(new_size);
|
||||
|
||||
if (old_index != new_index) {
|
||||
// Size class changed
|
||||
|
||||
// Remove old
|
||||
if (old_index != -1) {
|
||||
_size_class_lists[old_index].remove(entry->size_class_node());
|
||||
}
|
||||
|
||||
// Insert new
|
||||
if (new_index != -1) {
|
||||
_size_class_lists[new_index].insert_first(entry->size_class_node());
|
||||
}
|
||||
}
|
||||
|
||||
// And update entry
|
||||
entry->update_start(vmem);
|
||||
}
|
||||
|
||||
template <ZMappedCache::RemovalStrategy strategy, typename SelectFunction>
|
||||
ZVirtualMemory ZMappedCache::remove_vmem(ZMappedCacheEntry* const entry, size_t min_size, SelectFunction select) {
|
||||
ZVirtualMemory vmem = entry->vmem();
|
||||
const size_t size = vmem.size();
|
||||
|
||||
if (size < min_size) {
|
||||
// Do not select this, smaller than min_size
|
||||
return ZVirtualMemory();
|
||||
}
|
||||
|
||||
// Query how much to remove
|
||||
const size_t to_remove = select(size);
|
||||
assert(to_remove <= size, "must not remove more than size");
|
||||
|
||||
if (to_remove == 0) {
|
||||
// Nothing to remove
|
||||
return ZVirtualMemory();
|
||||
}
|
||||
|
||||
if (to_remove != size) {
|
||||
// Partial removal
|
||||
if (strategy == RemovalStrategy::LowestAddress) {
|
||||
const size_t unused_size = size - to_remove;
|
||||
const ZVirtualMemory unused_vmem = vmem.shrink_from_back(unused_size);
|
||||
tree_update(entry, unused_vmem);
|
||||
|
||||
} else {
|
||||
assert(strategy == RemovalStrategy::HighestAddress, "must be LowestAddress or HighestAddress");
|
||||
|
||||
const size_t unused_size = size - to_remove;
|
||||
const ZVirtualMemory unused_vmem = vmem.shrink_from_front(unused_size);
|
||||
|
||||
auto cursor = _tree.get_cursor(entry->node_addr());
|
||||
assert(cursor.is_valid(), "must be");
|
||||
tree_replace(cursor, unused_vmem);
|
||||
}
|
||||
|
||||
} else {
|
||||
// Whole removal
|
||||
auto cursor = _tree.get_cursor(entry->node_addr());
|
||||
assert(cursor.is_valid(), "must be");
|
||||
tree_remove(cursor, vmem);
|
||||
}
|
||||
|
||||
// Update statistics
|
||||
_size -= to_remove;
|
||||
_min = MIN2(_size, _min);
|
||||
|
||||
postcond(to_remove == vmem.size());
|
||||
return vmem;
|
||||
}
|
||||
|
||||
template <typename SelectFunction, typename ConsumeFunction>
|
||||
bool ZMappedCache::try_remove_vmem_size_class(size_t min_size, SelectFunction select, ConsumeFunction consume) {
|
||||
new_max_size:
|
||||
// Query the max select size possible given the size of the cache
|
||||
const size_t max_size = select(_size);
|
||||
|
||||
if (max_size < min_size) {
|
||||
// Never select less than min_size
|
||||
return false;
|
||||
}
|
||||
|
||||
// Start scanning from max_size's guaranteed size class up to the largest size class
|
||||
const int guaranteed_index = guaranteed_size_class_index(max_size);
|
||||
for (int index = guaranteed_index; index != -1 && index < NumSizeClasses; ++index) {
|
||||
ZList<ZSizeClassListNode>& list = _size_class_lists[index];
|
||||
if (!list.is_empty()) {
|
||||
ZMappedCacheEntry* const entry = ZMappedCacheEntry::cast_to_entry(list.first());
|
||||
|
||||
// Because this is guaranteed, select should always succeed
|
||||
const ZVirtualMemory vmem = remove_vmem<RemovalStrategy::LowestAddress>(entry, min_size, select);
|
||||
assert(!vmem.is_null(), "select must succeed");
|
||||
|
||||
if (consume(vmem)) {
|
||||
// consume is satisfied
|
||||
return true;
|
||||
}
|
||||
|
||||
// Continue with a new max_size
|
||||
goto new_max_size;
|
||||
}
|
||||
}
|
||||
|
||||
// Consume the rest, scanning from max_size's size class down to min_size's size class
|
||||
const int max_size_index = size_class_index(max_size);
|
||||
const int min_size_index = size_class_index(min_size);
|
||||
const int lowest_index = MAX2(min_size_index, 0);
|
||||
|
||||
for (int index = max_size_index; index >= lowest_index; --index) {
|
||||
ZListIterator<ZSizeClassListNode> iter(&_size_class_lists[index]);
|
||||
for (ZSizeClassListNode* list_node; iter.next(&list_node);) {
|
||||
ZMappedCacheEntry* const entry = ZMappedCacheEntry::cast_to_entry(list_node);
|
||||
|
||||
// Try remove
|
||||
const ZVirtualMemory vmem = remove_vmem<RemovalStrategy::LowestAddress>(entry, min_size, select);
|
||||
|
||||
if (!vmem.is_null() && consume(vmem)) {
|
||||
// Found a vmem and consume is satisfied
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// consume was not satisfied
|
||||
return false;
|
||||
}
|
||||
|
||||
template <ZMappedCache::RemovalStrategy strategy, typename SelectFunction, typename ConsumeFunction>
|
||||
void ZMappedCache::scan_remove_vmem(size_t min_size, SelectFunction select, ConsumeFunction consume) {
|
||||
if (strategy == RemovalStrategy::SizeClasses) {
|
||||
if (try_remove_vmem_size_class(min_size, select, consume)) {
|
||||
// Satisfied using size classes
|
||||
return;
|
||||
}
|
||||
|
||||
if (size_class_index(min_size) != -1) {
|
||||
// There exists a size class for our min size. All possibilities must have
|
||||
// been exhausted, do not scan the tree.
|
||||
return;
|
||||
}
|
||||
|
||||
// Fallthrough to tree scan
|
||||
}
|
||||
|
||||
if (strategy == RemovalStrategy::HighestAddress) {
|
||||
// Scan whole tree starting at the highest address
|
||||
for (ZMappedCache::TreeNode* node = _tree.last(); node != nullptr; node = node->prev()) {
|
||||
ZMappedCacheEntry* const entry = ZMappedCacheEntry::cast_to_entry(node);
|
||||
|
||||
const ZVirtualMemory vmem = remove_vmem<RemovalStrategy::HighestAddress>(entry, min_size, select);
|
||||
|
||||
if (!vmem.is_null() && consume(vmem)) {
|
||||
// Found a vmem and consume is satisfied.
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
} else {
|
||||
assert(strategy == RemovalStrategy::SizeClasses || strategy == RemovalStrategy::LowestAddress, "unknown strategy");
|
||||
|
||||
// Scan whole tree starting at the lowest address
|
||||
for (ZMappedCache::TreeNode* node = _tree.first(); node != nullptr; node = node->next()) {
|
||||
ZMappedCacheEntry* const entry = ZMappedCacheEntry::cast_to_entry(node);
|
||||
|
||||
const ZVirtualMemory vmem = remove_vmem<RemovalStrategy::LowestAddress>(entry, min_size, select);
|
||||
|
||||
if (!vmem.is_null() && consume(vmem)) {
|
||||
// Found a vmem and consume is satisfied.
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <ZMappedCache::RemovalStrategy strategy, typename SelectFunction, typename ConsumeFunction>
|
||||
void ZMappedCache::scan_remove_vmem(SelectFunction select, ConsumeFunction consume) {
|
||||
// Scan without a min_size
|
||||
scan_remove_vmem<strategy>(0, select, consume);
|
||||
}
|
||||
|
||||
template <ZMappedCache::RemovalStrategy strategy>
|
||||
size_t ZMappedCache::remove_discontiguous_with_strategy(size_t size, ZArray<ZVirtualMemory>* out) {
|
||||
precond(size > 0);
|
||||
precond(is_aligned(size, ZGranuleSize));
|
||||
|
||||
size_t remaining = size;
|
||||
|
||||
const auto select_size_fn = [&](size_t vmem_size) {
|
||||
// Select at most remaining
|
||||
return MIN2(remaining, vmem_size);
|
||||
};
|
||||
|
||||
const auto consume_vmem_fn = [&](ZVirtualMemory vmem) {
|
||||
const size_t vmem_size = vmem.size();
|
||||
out->append(vmem);
|
||||
|
||||
assert(vmem_size <= remaining, "consumed too much");
|
||||
|
||||
// Track remaining, and stop when it reaches zero
|
||||
remaining -= vmem_size;
|
||||
|
||||
return remaining == 0;
|
||||
};
|
||||
|
||||
scan_remove_vmem<strategy>(select_size_fn, consume_vmem_fn);
|
||||
|
||||
return size - remaining;
|
||||
}
|
||||
|
||||
ZMappedCache::ZMappedCache()
: _tree(),
_entry_count(0),
_size_class_lists{},
_size(0),
_min(_size) {}

void ZMappedCache::insert(const ZVirtualMemory& vmem) {
|
||||
_size += vmem.size();
|
||||
|
||||
Tree::FindCursor current_cursor = _tree.find(vmem.start());
|
||||
Tree::FindCursor next_cursor = _tree.next(current_cursor);
|
||||
|
||||
const bool extends_left = current_cursor.found();
|
||||
const bool extends_right = next_cursor.is_valid() && next_cursor.found() &&
|
||||
ZMappedCacheEntry::cast_to_entry(next_cursor.node())->start() == vmem.end();
|
||||
|
||||
if (extends_left && extends_right) {
|
||||
ZMappedCacheEntry* next_entry = ZMappedCacheEntry::cast_to_entry(next_cursor.node());
|
||||
|
||||
const ZVirtualMemory left_vmem = ZMappedCacheEntry::cast_to_entry(current_cursor.node())->vmem();
|
||||
const ZVirtualMemory right_vmem = next_entry->vmem();
|
||||
assert(left_vmem.adjacent_to(vmem), "must be");
|
||||
assert(vmem.adjacent_to(right_vmem), "must be");
|
||||
|
||||
ZVirtualMemory new_vmem = left_vmem;
|
||||
new_vmem.grow_from_back(vmem.size());
|
||||
new_vmem.grow_from_back(right_vmem.size());
|
||||
|
||||
// Remove current (left vmem)
|
||||
tree_remove(current_cursor, left_vmem);
|
||||
|
||||
// And update next's start
|
||||
tree_update(next_entry, new_vmem);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (extends_left) {
|
||||
const ZVirtualMemory left_vmem = ZMappedCacheEntry::cast_to_entry(current_cursor.node())->vmem();
|
||||
assert(left_vmem.adjacent_to(vmem), "must be");
|
||||
|
||||
ZVirtualMemory new_vmem = left_vmem;
|
||||
new_vmem.grow_from_back(vmem.size());
|
||||
|
||||
tree_replace(current_cursor, new_vmem);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
if (extends_right) {
|
||||
ZMappedCacheEntry* next_entry = ZMappedCacheEntry::cast_to_entry(next_cursor.node());
|
||||
|
||||
const ZVirtualMemory right_vmem = next_entry->vmem();
|
||||
assert(vmem.adjacent_to(right_vmem), "must be");
|
||||
|
||||
ZVirtualMemory new_vmem = vmem;
|
||||
new_vmem.grow_from_back(right_vmem.size());
|
||||
|
||||
// Update next's start
|
||||
tree_update(next_entry, new_vmem);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
tree_insert(current_cursor, vmem);
|
||||
}
|
||||
|
||||
ZVirtualMemory ZMappedCache::remove_contiguous(size_t size) {
|
||||
precond(size > 0);
|
||||
precond(is_aligned(size, ZGranuleSize));
|
||||
|
||||
ZVirtualMemory result;
|
||||
|
||||
const auto select_size_fn = [&](size_t) {
|
||||
// We always select the size
|
||||
return size;
|
||||
};
|
||||
|
||||
const auto consume_vmem_fn = [&](ZVirtualMemory vmem) {
|
||||
assert(result.is_null(), "only consume once");
|
||||
assert(vmem.size() == size, "wrong size consumed");
|
||||
|
||||
result = vmem;
|
||||
|
||||
// Only require one vmem
|
||||
return true;
|
||||
};
|
||||
|
||||
if (size == ZPageSizeSmall) {
|
||||
// Small page allocations allocate at the lowest possible address
|
||||
scan_remove_vmem<RemovalStrategy::LowestAddress>(size, select_size_fn, consume_vmem_fn);
|
||||
} else {
|
||||
// Other sizes use approximate best-fit size classes first
|
||||
scan_remove_vmem<RemovalStrategy::SizeClasses>(size, select_size_fn, consume_vmem_fn);
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
size_t ZMappedCache::remove_discontiguous(size_t size, ZArray<ZVirtualMemory>* out) {
|
||||
return remove_discontiguous_with_strategy<RemovalStrategy::SizeClasses>(size, out);
|
||||
}
|
||||
|
||||
size_t ZMappedCache::reset_min() {
|
||||
const size_t old_min = _min;
|
||||
|
||||
_min = _size;
|
||||
|
||||
return old_min;
|
||||
}
|
||||
|
||||
size_t ZMappedCache::remove_from_min(size_t max_size, ZArray<ZVirtualMemory>* out) {
|
||||
const size_t size = MIN2(_min, max_size);
|
||||
|
||||
if (size == 0) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
return remove_discontiguous_with_strategy<RemovalStrategy::HighestAddress>(size, out);
|
||||
}
|
||||
|
||||
void ZMappedCache::print_on(outputStream* st) const {
// This may be called from error printing where we may not hold the lock, so
// values may be inconsistent. As such, we read _entry_count only once, and
// use is_empty_error_reporter_safe and size_error_reporter_safe on the size
// class lists.
|
||||
const size_t entry_count = Atomic::load(&_entry_count);
|
||||
|
||||
st->print("Cache");
|
||||
st->fill_to(17);
|
||||
st->print_cr("%zuM (%zu)", _size / M, entry_count);
|
||||
|
||||
if (entry_count == 0) {
|
||||
// Empty cache, skip printing size classes
|
||||
return;
|
||||
}
|
||||
|
||||
// Aggregate the number of size class entries
|
||||
size_t size_class_entry_count = 0;
|
||||
for (int index = 0; index < NumSizeClasses; ++index) {
|
||||
size_class_entry_count += _size_class_lists[index].size_error_reporter_safe();
|
||||
}
|
||||
|
||||
// Print information on size classes
|
||||
streamIndentor indentor(st, 1);
|
||||
|
||||
st->print("size classes");
|
||||
st->fill_to(17);
|
||||
|
||||
// Print the number of entries smaller than the min size class's size
|
||||
const size_t small_entry_size_count = entry_count - size_class_entry_count;
|
||||
bool first = true;
|
||||
if (small_entry_size_count != 0) {
|
||||
st->print(EXACTFMT " (%zu)", EXACTFMTARGS(ZGranuleSize), small_entry_size_count);
|
||||
first = false;
|
||||
}
|
||||
|
||||
for (int index = 0; index < NumSizeClasses; ++index) {
|
||||
const ZList<ZSizeClassListNode>& list = _size_class_lists[index];
|
||||
if (!list.is_empty_error_reporter_safe()) {
|
||||
const int shift = index + MinSizeClassShift + (int)ZGranuleSizeShift;
|
||||
const size_t size = (size_t)1 << shift;
|
||||
|
||||
st->print("%s" EXACTFMT " (%zu)", first ? "" : ", ", EXACTFMTARGS(size), list.size_error_reporter_safe());
|
||||
first = false;
|
||||
}
|
||||
}
|
||||
|
||||
st->cr();
|
||||
}
|
||||
|
||||
void ZMappedCache::print_extended_on(outputStream* st) const {
// Print the ranges and size of all nodes in the tree
for (ZMappedCache::TreeNode* node = _tree.first(); node != nullptr; node = node->next()) {
const ZVirtualMemory vmem = ZMappedCacheEntry::cast_to_entry(node)->vmem();

st->print_cr(PTR_FORMAT " " PTR_FORMAT " " EXACTFMT,
untype(vmem.start()), untype(vmem.end()), EXACTFMTARGS(vmem.size()));
}
}
src/hotspot/share/gc/z/zMappedCache.hpp (new file, 112 lines)
@ -0,0 +1,112 @@
|
||||
/*
|
||||
* Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZMAPPEDCACHE_HPP
|
||||
#define SHARE_GC_Z_ZMAPPEDCACHE_HPP
|
||||
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zArray.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zIntrusiveRBTree.hpp"
|
||||
#include "gc/z/zList.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
class ZMappedCacheEntry;
|
||||
class ZVirtualMemory;
|
||||
|
||||
class ZMappedCache {
|
||||
friend class ZMappedCacheEntry;
|
||||
|
||||
private:
|
||||
struct EntryCompare {
|
||||
int operator()(ZIntrusiveRBTreeNode* a, ZIntrusiveRBTreeNode* b);
|
||||
int operator()(zoffset key, ZIntrusiveRBTreeNode* node);
|
||||
};
|
||||
|
||||
struct ZSizeClassListNode {
|
||||
ZListNode<ZSizeClassListNode> _node;
|
||||
};
|
||||
|
||||
using Tree = ZIntrusiveRBTree<zoffset, EntryCompare>;
|
||||
using TreeNode = ZIntrusiveRBTreeNode;
|
||||
using SizeClassList = ZList<ZSizeClassListNode>;
|
||||
using SizeClassListNode = ZSizeClassListNode;
|
||||
|
||||
// Maintain size class lists from 4MB to 16GB
|
||||
static constexpr int MaxLongArraySizeClassShift = 3 /* 8 byte */ + 31 /* max length */;
|
||||
static constexpr int MinSizeClassShift = 1;
|
||||
static constexpr int MaxSizeClassShift = MaxLongArraySizeClassShift - ZGranuleSizeShift;
|
||||
static constexpr int NumSizeClasses = MaxSizeClassShift - MinSizeClassShift + 1;
|
||||
|
||||
Tree _tree;
|
||||
size_t _entry_count;
|
||||
SizeClassList _size_class_lists[NumSizeClasses];
|
||||
size_t _size;
|
||||
size_t _min;
|
||||
|
||||
static int size_class_index(size_t size);
|
||||
static int guaranteed_size_class_index(size_t size);
|
||||
|
||||
void tree_insert(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem);
|
||||
void tree_remove(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem);
|
||||
void tree_replace(const Tree::FindCursor& cursor, const ZVirtualMemory& vmem);
|
||||
void tree_update(ZMappedCacheEntry* entry, const ZVirtualMemory& vmem);
|
||||
|
||||
enum class RemovalStrategy {
|
||||
LowestAddress,
|
||||
HighestAddress,
|
||||
SizeClasses,
|
||||
};
|
||||
|
||||
template <RemovalStrategy strategy, typename SelectFunction>
|
||||
ZVirtualMemory remove_vmem(ZMappedCacheEntry* const entry, size_t min_size, SelectFunction select);
|
||||
|
||||
template <typename SelectFunction, typename ConsumeFunction>
|
||||
bool try_remove_vmem_size_class(size_t min_size, SelectFunction select, ConsumeFunction consume);
|
||||
|
||||
template <RemovalStrategy strategy, typename SelectFunction, typename ConsumeFunction>
|
||||
void scan_remove_vmem(size_t min_size, SelectFunction select, ConsumeFunction consume);
|
||||
|
||||
template <RemovalStrategy strategy, typename SelectFunction, typename ConsumeFunction>
|
||||
void scan_remove_vmem(SelectFunction select, ConsumeFunction consume);
|
||||
|
||||
template <RemovalStrategy strategy>
|
||||
size_t remove_discontiguous_with_strategy(size_t size, ZArray<ZVirtualMemory>* out);
|
||||
|
||||
public:
|
||||
ZMappedCache();
|
||||
|
||||
void insert(const ZVirtualMemory& vmem);
|
||||
|
||||
ZVirtualMemory remove_contiguous(size_t size);
|
||||
size_t remove_discontiguous(size_t size, ZArray<ZVirtualMemory>* out);
|
||||
|
||||
size_t reset_min();
|
||||
size_t remove_from_min(size_t max_size, ZArray<ZVirtualMemory>* out);
|
||||
|
||||
void print_on(outputStream* st) const;
|
||||
void print_extended_on(outputStream* st) const;
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZMAPPEDCACHE_HPP
|
||||
@ -1,284 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/z/zList.inline.hpp"
|
||||
#include "gc/z/zLock.inline.hpp"
|
||||
#include "gc/z/zMemory.inline.hpp"
|
||||
|
||||
void ZMemoryManager::shrink_from_front(ZMemory* area, size_t size) {
|
||||
if (_callbacks._shrink != nullptr) {
|
||||
const ZMemory* from = area;
|
||||
const ZMemory to(area->start() + size, area->size() - size);
|
||||
_callbacks._shrink(*from, to);
|
||||
}
|
||||
area->shrink_from_front(size);
|
||||
}
|
||||
|
||||
void ZMemoryManager::shrink_from_back(ZMemory* area, size_t size) {
|
||||
if (_callbacks._shrink != nullptr) {
|
||||
const ZMemory* from = area;
|
||||
const ZMemory to(area->start(), area->size() - size);
|
||||
_callbacks._shrink(*from, to);
|
||||
}
|
||||
area->shrink_from_back(size);
|
||||
}
|
||||
|
||||
void ZMemoryManager::grow_from_front(ZMemory* area, size_t size) {
|
||||
if (_callbacks._grow != nullptr) {
|
||||
const ZMemory* from = area;
|
||||
const ZMemory to(area->start() - size, area->size() + size);
|
||||
_callbacks._grow(*from, to);
|
||||
}
|
||||
area->grow_from_front(size);
|
||||
}
|
||||
|
||||
void ZMemoryManager::grow_from_back(ZMemory* area, size_t size) {
|
||||
if (_callbacks._grow != nullptr) {
|
||||
const ZMemory* from = area;
|
||||
const ZMemory to(area->start(), area->size() + size);
|
||||
_callbacks._grow(*from, to);
|
||||
}
|
||||
area->grow_from_back(size);
|
||||
}
|
||||
|
||||
ZMemoryManager::Callbacks::Callbacks()
|
||||
: _prepare_for_hand_out(nullptr),
|
||||
_prepare_for_hand_back(nullptr),
|
||||
_grow(nullptr),
|
||||
_shrink(nullptr) {}
|
||||
|
||||
ZMemoryManager::ZMemoryManager()
|
||||
: _freelist(),
|
||||
_callbacks() {}
|
||||
|
||||
bool ZMemoryManager::free_is_contiguous() const {
|
||||
return _freelist.size() == 1;
|
||||
}
|
||||
|
||||
void ZMemoryManager::register_callbacks(const Callbacks& callbacks) {
|
||||
_callbacks = callbacks;
|
||||
}
|
||||
|
||||
zoffset ZMemoryManager::peek_low_address() const {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
const ZMemory* const area = _freelist.first();
|
||||
if (area != nullptr) {
|
||||
return area->start();
|
||||
}
|
||||
|
||||
// Out of memory
|
||||
return zoffset(UINTPTR_MAX);
|
||||
}
|
||||
|
||||
zoffset_end ZMemoryManager::peak_high_address_end() const {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
const ZMemory* const area = _freelist.last();
|
||||
if (area != nullptr) {
|
||||
return area->end();
|
||||
}
|
||||
|
||||
// Out of memory
|
||||
return zoffset_end(UINTPTR_MAX);
|
||||
}
|
||||
|
||||
zoffset ZMemoryManager::alloc_low_address(size_t size) {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
ZListIterator<ZMemory> iter(&_freelist);
|
||||
for (ZMemory* area; iter.next(&area);) {
|
||||
if (area->size() >= size) {
|
||||
zoffset start;
|
||||
|
||||
if (area->size() == size) {
|
||||
// Exact match, remove area
|
||||
start = area->start();
|
||||
_freelist.remove(area);
|
||||
delete area;
|
||||
} else {
|
||||
// Larger than requested, shrink area
|
||||
start = area->start();
|
||||
shrink_from_front(area, size);
|
||||
}
|
||||
|
||||
if (_callbacks._prepare_for_hand_out != nullptr) {
|
||||
_callbacks._prepare_for_hand_out(ZMemory(start, size));
|
||||
}
|
||||
|
||||
return start;
|
||||
}
|
||||
}
|
||||
|
||||
// Out of memory
|
||||
return zoffset(UINTPTR_MAX);
|
||||
}
|
||||
|
||||
zoffset ZMemoryManager::alloc_low_address_at_most(size_t size, size_t* allocated) {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
ZMemory* const area = _freelist.first();
|
||||
if (area != nullptr) {
|
||||
const zoffset start = area->start();
|
||||
|
||||
if (area->size() <= size) {
|
||||
// Smaller than or equal to requested, remove area
|
||||
_freelist.remove(area);
|
||||
*allocated = area->size();
|
||||
delete area;
|
||||
} else {
|
||||
// Larger than requested, shrink area
|
||||
shrink_from_front(area, size);
|
||||
*allocated = size;
|
||||
}
|
||||
|
||||
if (_callbacks._prepare_for_hand_out != nullptr) {
|
||||
_callbacks._prepare_for_hand_out(ZMemory(start, *allocated));
|
||||
}
|
||||
|
||||
return start;
|
||||
}
|
||||
|
||||
// Out of memory
|
||||
*allocated = 0;
|
||||
return zoffset(UINTPTR_MAX);
|
||||
}
|
||||
|
||||
zoffset ZMemoryManager::alloc_high_address(size_t size) {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
ZListReverseIterator<ZMemory> iter(&_freelist);
|
||||
for (ZMemory* area; iter.next(&area);) {
|
||||
if (area->size() >= size) {
|
||||
zoffset start;
|
||||
|
||||
if (area->size() == size) {
|
||||
// Exact match, remove area
|
||||
start = area->start();
|
||||
_freelist.remove(area);
|
||||
delete area;
|
||||
} else {
|
||||
// Larger than requested, shrink area
|
||||
shrink_from_back(area, size);
|
||||
start = to_zoffset(area->end());
|
||||
}
|
||||
|
||||
if (_callbacks._prepare_for_hand_out != nullptr) {
|
||||
_callbacks._prepare_for_hand_out(ZMemory(start, size));
|
||||
}
|
||||
|
||||
return start;
|
||||
}
|
||||
}
|
||||
|
||||
// Out of memory
|
||||
return zoffset(UINTPTR_MAX);
|
||||
}
|
||||
|
||||
void ZMemoryManager::move_into(zoffset start, size_t size) {
|
||||
assert(start != zoffset(UINTPTR_MAX), "Invalid address");
|
||||
const zoffset_end end = to_zoffset_end(start, size);
|
||||
|
||||
ZListIterator<ZMemory> iter(&_freelist);
|
||||
for (ZMemory* area; iter.next(&area);) {
|
||||
if (start < area->start()) {
|
||||
ZMemory* const prev = _freelist.prev(area);
|
||||
if (prev != nullptr && start == prev->end()) {
|
||||
if (end == area->start()) {
|
||||
// Merge with prev and current area
|
||||
grow_from_back(prev, size + area->size());
|
||||
_freelist.remove(area);
|
||||
delete area;
|
||||
} else {
|
||||
// Merge with prev area
|
||||
grow_from_back(prev, size);
|
||||
}
|
||||
} else if (end == area->start()) {
|
||||
// Merge with current area
|
||||
grow_from_front(area, size);
|
||||
} else {
|
||||
// Insert new area before current area
|
||||
assert(end < area->start(), "Areas must not overlap");
|
||||
ZMemory* const new_area = new ZMemory(start, size);
|
||||
_freelist.insert_before(area, new_area);
|
||||
}
|
||||
|
||||
// Done
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Insert last
|
||||
ZMemory* const last = _freelist.last();
|
||||
if (last != nullptr && start == last->end()) {
|
||||
// Merge with last area
|
||||
grow_from_back(last, size);
|
||||
} else {
|
||||
// Insert new area last
|
||||
ZMemory* const new_area = new ZMemory(start, size);
|
||||
_freelist.insert_last(new_area);
|
||||
}
|
||||
}
|
||||
|
||||
void ZMemoryManager::free(zoffset start, size_t size) {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
if (_callbacks._prepare_for_hand_back != nullptr) {
|
||||
_callbacks._prepare_for_hand_back(ZMemory(start, size));
|
||||
}
|
||||
|
||||
move_into(start, size);
|
||||
}
|
||||
|
||||
void ZMemoryManager::register_range(zoffset start, size_t size) {
|
||||
// Note that there's no need to call the _prepare_for_hand_back callback when
// memory is added for the first time. We don't have to undo the effects of a
// previous _prepare_for_hand_out callback.
|
||||
|
||||
// No need to lock during initialization.
|
||||
|
||||
move_into(start, size);
|
||||
}
|
||||
|
||||
bool ZMemoryManager::unregister_first(zoffset* start_out, size_t* size_out) {
|
||||
// Note that this doesn't hand out memory to be used, so we don't call the
|
||||
// _prepare_for_hand_out callback.
|
||||
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
if (_freelist.is_empty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Don't invoke the _prepare_for_hand_out callback
|
||||
|
||||
ZMemory* const area = _freelist.remove_first();
|
||||
|
||||
// Return the range
|
||||
*start_out = area->start();
|
||||
*size_out = area->size();
|
||||
|
||||
delete area;
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -1,104 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZMEMORY_HPP
|
||||
#define SHARE_GC_Z_ZMEMORY_HPP
|
||||
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zList.hpp"
|
||||
#include "gc/z/zLock.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
|
||||
class ZMemory : public CHeapObj<mtGC> {
|
||||
friend class ZList<ZMemory>;
|
||||
|
||||
private:
|
||||
zoffset _start;
|
||||
zoffset_end _end;
|
||||
ZListNode<ZMemory> _node;
|
||||
|
||||
public:
|
||||
ZMemory(zoffset start, size_t size);
|
||||
|
||||
zoffset start() const;
|
||||
zoffset_end end() const;
|
||||
size_t size() const;
|
||||
|
||||
bool operator==(const ZMemory& other) const;
|
||||
bool operator!=(const ZMemory& other) const;
|
||||
|
||||
bool contains(const ZMemory& other) const;
|
||||
|
||||
void shrink_from_front(size_t size);
|
||||
void shrink_from_back(size_t size);
|
||||
void grow_from_front(size_t size);
|
||||
void grow_from_back(size_t size);
|
||||
};
|
||||
|
||||
class ZMemoryManager {
|
||||
friend class ZVirtualMemoryManagerTest;
|
||||
|
||||
public:
|
||||
typedef void (*CallbackPrepare)(const ZMemory& area);
|
||||
typedef void (*CallbackResize)(const ZMemory& from, const ZMemory& to);
|
||||
|
||||
struct Callbacks {
|
||||
CallbackPrepare _prepare_for_hand_out;
|
||||
CallbackPrepare _prepare_for_hand_back;
|
||||
CallbackResize _grow;
|
||||
CallbackResize _shrink;
|
||||
|
||||
Callbacks();
|
||||
};
|
||||
|
||||
private:
|
||||
mutable ZLock _lock;
|
||||
ZList<ZMemory> _freelist;
|
||||
Callbacks _callbacks;
|
||||
|
||||
void shrink_from_front(ZMemory* area, size_t size);
|
||||
void shrink_from_back(ZMemory* area, size_t size);
|
||||
void grow_from_front(ZMemory* area, size_t size);
|
||||
void grow_from_back(ZMemory* area, size_t size);
|
||||
|
||||
void move_into(zoffset start, size_t size);
|
||||
|
||||
public:
|
||||
ZMemoryManager();
|
||||
|
||||
bool free_is_contiguous() const;
|
||||
|
||||
void register_callbacks(const Callbacks& callbacks);
|
||||
|
||||
zoffset peek_low_address() const;
|
||||
zoffset_end peak_high_address_end() const;
|
||||
zoffset alloc_low_address(size_t size);
|
||||
zoffset alloc_low_address_at_most(size_t size, size_t* allocated);
|
||||
zoffset alloc_high_address(size_t size);
|
||||
|
||||
void free(zoffset start, size_t size);
|
||||
void register_range(zoffset start, size_t size);
|
||||
bool unregister_first(zoffset* start_out, size_t* size_out);
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZMEMORY_HPP
|
||||
@ -1,80 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZMEMORY_INLINE_HPP
|
||||
#define SHARE_GC_Z_ZMEMORY_INLINE_HPP
|
||||
|
||||
#include "gc/z/zMemory.hpp"
|
||||
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zList.inline.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
inline ZMemory::ZMemory(zoffset start, size_t size)
|
||||
: _start(start),
|
||||
_end(to_zoffset_end(start, size)) {}
|
||||
|
||||
inline zoffset ZMemory::start() const {
|
||||
return _start;
|
||||
}
|
||||
|
||||
inline zoffset_end ZMemory::end() const {
|
||||
return _end;
|
||||
}
|
||||
|
||||
inline size_t ZMemory::size() const {
|
||||
return end() - start();
|
||||
}
|
||||
|
||||
inline bool ZMemory::operator==(const ZMemory& other) const {
|
||||
return _start == other._start && _end == other._end;
|
||||
}
|
||||
|
||||
inline bool ZMemory::operator!=(const ZMemory& other) const {
|
||||
return !operator==(other);
|
||||
}
|
||||
|
||||
inline bool ZMemory::contains(const ZMemory& other) const {
|
||||
return _start <= other._start && other.end() <= end();
|
||||
}
|
||||
|
||||
inline void ZMemory::shrink_from_front(size_t size) {
|
||||
assert(this->size() > size, "Too small");
|
||||
_start += size;
|
||||
}
|
||||
|
||||
inline void ZMemory::shrink_from_back(size_t size) {
|
||||
assert(this->size() > size, "Too small");
|
||||
_end -= size;
|
||||
}
|
||||
|
||||
inline void ZMemory::grow_from_front(size_t size) {
|
||||
assert(size_t(start()) >= size, "Too big");
|
||||
_start -= size;
|
||||
}
|
||||
|
||||
inline void ZMemory::grow_from_back(size_t size) {
|
||||
_end += size;
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_Z_ZMEMORY_INLINE_HPP
|
||||
@ -24,7 +24,6 @@
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zNMT.hpp"
|
||||
#include "gc/z/zVirtualMemory.hpp"
|
||||
#include "nmt/memTag.hpp"
|
||||
#include "nmt/memTracker.hpp"
|
||||
#include "nmt/memoryFileTracker.hpp"
|
||||
@ -60,15 +59,15 @@ void ZNMT::unreserve(zaddress_unsafe start, size_t size) {
|
||||
}
|
||||
}
|
||||
|
||||
void ZNMT::commit(zoffset offset, size_t size) {
|
||||
void ZNMT::commit(zbacking_offset offset, size_t size) {
|
||||
MemTracker::allocate_memory_in(ZNMT::_device, untype(offset), size, CALLER_PC, mtJavaHeap);
|
||||
}
|
||||
|
||||
void ZNMT::uncommit(zoffset offset, size_t size) {
|
||||
void ZNMT::uncommit(zbacking_offset offset, size_t size) {
|
||||
MemTracker::free_memory_in(ZNMT::_device, untype(offset), size);
|
||||
}
|
||||
|
||||
void ZNMT::map(zaddress_unsafe addr, size_t size, zoffset offset) {
|
||||
void ZNMT::map(zaddress_unsafe addr, size_t size, zbacking_offset offset) {
|
||||
// NMT doesn't track mappings at the moment.
|
||||
}
|
||||
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,13 +26,10 @@
|
||||
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zMemory.hpp"
|
||||
#include "gc/z/zVirtualMemory.hpp"
|
||||
#include "memory/allStatic.hpp"
|
||||
#include "nmt/memTracker.hpp"
|
||||
#include "nmt/memoryFileTracker.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/nativeCallStack.hpp"
|
||||
|
||||
class ZNMT : public AllStatic {
|
||||
private:
|
||||
@ -44,10 +41,10 @@ public:
|
||||
static void reserve(zaddress_unsafe start, size_t size);
|
||||
static void unreserve(zaddress_unsafe start, size_t size);
|
||||
|
||||
static void commit(zoffset offset, size_t size);
|
||||
static void uncommit(zoffset offset, size_t size);
|
||||
static void commit(zbacking_offset offset, size_t size);
|
||||
static void uncommit(zbacking_offset offset, size_t size);
|
||||
|
||||
static void map(zaddress_unsafe addr, size_t size, zoffset offset);
|
||||
static void map(zaddress_unsafe addr, size_t size, zbacking_offset offset);
|
||||
static void unmap(zaddress_unsafe addr, size_t size);
|
||||
};
|
||||
|
||||
|
||||
@ -21,8 +21,10 @@
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/z/zNUMA.hpp"
|
||||
#include "gc/z/zNUMA.inline.hpp"
|
||||
#include "utilities/macros.hpp"
|
||||
|
||||
bool ZNUMA::_enabled;
|
||||
uint32_t ZNUMA::_count;
|
||||
@ -31,11 +33,20 @@ void ZNUMA::initialize() {
|
||||
pd_initialize();
|
||||
|
||||
log_info_p(gc, init)("NUMA Support: %s", to_string());
|
||||
|
||||
if (_enabled) {
|
||||
assert(!is_faked(), "Currently not supported");
|
||||
log_info_p(gc, init)("NUMA Nodes: %u", _count);
|
||||
|
||||
} else if (is_faked()) {
|
||||
log_info_p(gc, init)("Fake NUMA Nodes: %u", count());
|
||||
}
|
||||
}
|
||||
|
||||
const char* ZNUMA::to_string() {
|
||||
if (is_faked()) {
|
||||
return "Faked";
|
||||
}
|
||||
|
||||
return _enabled ? "Enabled" : "Disabled";
|
||||
}
|
||||
|
||||
@ -24,10 +24,15 @@
|
||||
#ifndef SHARE_GC_Z_ZNUMA_HPP
|
||||
#define SHARE_GC_Z_ZNUMA_HPP
|
||||
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "memory/allStatic.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
class ZNUMA : public AllStatic {
|
||||
friend class VMStructs;
|
||||
friend class ZNUMATest;
|
||||
friend class ZTest;
|
||||
|
||||
private:
|
||||
static bool _enabled;
|
||||
static uint32_t _count;
|
||||
@ -36,13 +41,17 @@ private:
|
||||
|
||||
public:
|
||||
static void initialize();
|
||||
|
||||
static bool is_enabled();
|
||||
static bool is_faked();
|
||||
|
||||
static uint32_t count();
|
||||
static uint32_t id();
|
||||
|
||||
static uint32_t memory_id(uintptr_t addr);
|
||||
|
||||
static size_t calculate_share(uint32_t numa_id, size_t total, size_t granule = ZGranuleSize, uint32_t ignore_count = 0);
|
||||
|
||||
static const char* to_string();
|
||||
};
|
||||
|
||||
|
||||
@ -26,12 +26,36 @@
|
||||
|
||||
#include "gc/z/zNUMA.hpp"
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
|
||||
inline bool ZNUMA::is_enabled() {
|
||||
return _enabled;
|
||||
}
|
||||
|
||||
inline bool ZNUMA::is_faked() {
|
||||
return ZFakeNUMA > 1;
|
||||
}
|
||||
|
||||
inline uint32_t ZNUMA::count() {
|
||||
return _count;
|
||||
}
|
||||
|
||||
inline size_t ZNUMA::calculate_share(uint32_t numa_id, size_t total, size_t granule, uint32_t ignore_count) {
|
||||
assert(total % granule == 0, "total must be divisible by granule");
|
||||
assert(ignore_count < count(), "must not ignore all nodes");
|
||||
assert(numa_id < count() - ignore_count, "numa_id must be in bounds");
|
||||
|
||||
const uint32_t num_nodes = count() - ignore_count;
|
||||
const size_t base_share = ((total / num_nodes) / granule) * granule;
|
||||
|
||||
const size_t extra_share_nodes = (total - base_share * num_nodes) / granule;
|
||||
if (numa_id < extra_share_nodes) {
|
||||
return base_share + granule;
|
||||
}
|
||||
|
||||
return base_share;
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_Z_ZNUMA_INLINE_HPP
|
||||
|
||||
@ -138,10 +138,10 @@ zaddress ZObjectAllocator::alloc_object_in_medium_page(size_t size,
|
||||
}
|
||||
|
||||
if (is_null(addr)) {
|
||||
// When a new medium page is required, we synchronize the allocation
|
||||
// of the new page using a lock. This is to avoid having multiple
|
||||
// threads requesting a medium page from the page cache when we know
|
||||
// only one of the will succeed in installing the page at this layer.
|
||||
// When a new medium page is required, we synchronize the allocation of the
|
||||
// new page using a lock. This is to avoid having multiple threads allocate
|
||||
// medium pages when we know only one of them will succeed in installing
|
||||
// the page at this layer.
|
||||
ZLocker<ZLock> locker(&_medium_page_alloc_lock);
|
||||
|
||||
// When holding the lock we can't allow the page allocator to stall,
|
||||
|
||||
@ -23,42 +23,47 @@
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/z/zGeneration.inline.hpp"
|
||||
#include "gc/z/zList.inline.hpp"
|
||||
#include "gc/z/zPage.inline.hpp"
|
||||
#include "gc/z/zPhysicalMemory.inline.hpp"
|
||||
#include "gc/z/zPageAge.hpp"
|
||||
#include "gc/z/zRememberedSet.inline.hpp"
|
||||
#include "gc/z/zVirtualMemory.inline.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
||||
ZPage::ZPage(ZPageType type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem)
|
||||
ZPage::ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, ZMultiPartitionTracker* multi_partition_tracker, uint32_t partition_id)
|
||||
: _type(type),
|
||||
_generation_id(ZGenerationId::young),
|
||||
_age(ZPageAge::eden),
|
||||
_numa_id((uint8_t)-1),
|
||||
_seqnum(0),
|
||||
_seqnum_other(0),
|
||||
_generation_id(/* set in reset */),
|
||||
_age(/* set in reset */),
|
||||
_seqnum(/* set in reset */),
|
||||
_seqnum_other(/* set in reset */),
|
||||
_single_partition_id(partition_id),
|
||||
_virtual(vmem),
|
||||
_top(to_zoffset_end(start())),
|
||||
_livemap(object_max_count()),
|
||||
_remembered_set(),
|
||||
_last_used(0),
|
||||
_physical(pmem),
|
||||
_node() {
|
||||
_multi_partition_tracker(multi_partition_tracker) {
|
||||
assert(!_virtual.is_null(), "Should not be null");
|
||||
assert(!_physical.is_null(), "Should not be null");
|
||||
assert(_virtual.size() == _physical.size(), "Virtual/Physical size mismatch");
|
||||
assert((_type == ZPageType::small && size() == ZPageSizeSmall) ||
|
||||
(_type == ZPageType::medium && size() == ZPageSizeMedium) ||
|
||||
(_type == ZPageType::large && is_aligned(size(), ZGranuleSize)),
|
||||
"Page type/size mismatch");
|
||||
reset(age);
|
||||
|
||||
if (is_old()) {
|
||||
remset_alloc();
|
||||
}
|
||||
}
|
||||
|
||||
ZPage* ZPage::clone_limited() const {
|
||||
ZPage::ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, uint32_t partition_id)
|
||||
: ZPage(type, age, vmem, nullptr /* multi_partition_tracker */, partition_id) {}
|
||||
|
||||
ZPage::ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, ZMultiPartitionTracker* multi_partition_tracker)
|
||||
: ZPage(type, age, vmem, multi_partition_tracker, -1u /* partition_id */) {}
|
||||
|
||||
ZPage* ZPage::clone_for_promotion() const {
|
||||
assert(_age != ZPageAge::old, "must be used for promotion");
|
||||
// Only copy type and memory layouts, and also update _top. Let the rest be
|
||||
// lazily reconstructed when needed.
|
||||
ZPage* const page = new ZPage(_type, _virtual, _physical);
|
||||
ZPage* const page = new ZPage(_type, ZPageAge::old, _virtual, _multi_partition_tracker, _single_partition_id);
|
||||
page->_top = _top;
|
||||
|
||||
return page;
|
||||
@ -85,19 +90,16 @@ void ZPage::remset_alloc() {
|
||||
_remembered_set.initialize(size());
|
||||
}
|
||||
|
||||
void ZPage::remset_delete() {
|
||||
_remembered_set.delete_all();
|
||||
}
|
||||
|
||||
void ZPage::reset(ZPageAge age) {
|
||||
ZPage* ZPage::reset(ZPageAge age) {
|
||||
_age = age;
|
||||
_last_used = 0;
|
||||
|
||||
_generation_id = age == ZPageAge::old
|
||||
? ZGenerationId::old
|
||||
: ZGenerationId::young;
|
||||
|
||||
reset_seqnum();
|
||||
|
||||
return this;
|
||||
}
|
||||
|
||||
void ZPage::reset_livemap() {
|
||||
@ -108,59 +110,6 @@ void ZPage::reset_top_for_allocation() {
|
||||
_top = to_zoffset_end(start());
|
||||
}
|
||||
|
||||
void ZPage::reset_type_and_size(ZPageType type) {
|
||||
_type = type;
|
||||
_livemap.resize(object_max_count());
|
||||
}
|
||||
|
||||
ZPage* ZPage::retype(ZPageType type) {
|
||||
assert(_type != type, "Invalid retype");
|
||||
reset_type_and_size(type);
|
||||
return this;
|
||||
}
|
||||
|
||||
ZPage* ZPage::split(size_t split_of_size) {
|
||||
return split(type_from_size(split_of_size), split_of_size);
|
||||
}
|
||||
|
||||
ZPage* ZPage::split_with_pmem(ZPageType type, const ZPhysicalMemory& pmem) {
|
||||
// Resize this page
|
||||
const ZVirtualMemory vmem = _virtual.split(pmem.size());
|
||||
assert(vmem.end() == _virtual.start(), "Should be consecutive");
|
||||
|
||||
reset_type_and_size(type_from_size(_virtual.size()));
|
||||
|
||||
log_trace(gc, page)("Split page [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT "]",
|
||||
untype(vmem.start()),
|
||||
untype(vmem.end()),
|
||||
untype(_virtual.end()));
|
||||
|
||||
// Create new page
|
||||
return new ZPage(type, vmem, pmem);
|
||||
}
|
||||
|
||||
ZPage* ZPage::split(ZPageType type, size_t split_of_size) {
|
||||
assert(_virtual.size() > split_of_size, "Invalid split");
|
||||
|
||||
const ZPhysicalMemory pmem = _physical.split(split_of_size);
|
||||
|
||||
return split_with_pmem(type, pmem);
|
||||
}
|
||||
|
||||
ZPage* ZPage::split_committed() {
|
||||
// Split any committed part of this page into a separate page,
|
||||
// leaving this page with only uncommitted physical memory.
|
||||
const ZPhysicalMemory pmem = _physical.split_committed();
|
||||
if (pmem.is_null()) {
|
||||
// Nothing committed
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
assert(!_physical.is_null(), "Should not be null");
|
||||
|
||||
return split_with_pmem(type_from_size(pmem.size()), pmem);
|
||||
}
|
||||
|
||||
class ZFindBaseOopClosure : public ObjectClosure {
|
||||
private:
|
||||
volatile zpointer* _p;
|
||||
@ -215,18 +164,19 @@ void* ZPage::remset_current() {
|
||||
return _remembered_set.current();
|
||||
}
|
||||
|
||||
void ZPage::print_on_msg(outputStream* out, const char* msg) const {
|
||||
out->print_cr(" %-6s " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s/%-4u %s%s%s",
|
||||
void ZPage::print_on_msg(outputStream* st, const char* msg) const {
|
||||
st->print_cr("%-6s " PTR_FORMAT " " PTR_FORMAT " " PTR_FORMAT " %s/%-4u %s%s%s%s",
|
||||
type_to_string(), untype(start()), untype(top()), untype(end()),
|
||||
is_young() ? "Y" : "O",
|
||||
seqnum(),
|
||||
is_allocating() ? " Allocating " : "",
|
||||
is_relocatable() ? " Relocatable" : "",
|
||||
msg == nullptr ? "" : msg);
|
||||
is_allocating() ? " Allocating" : "",
|
||||
is_allocating() && msg != nullptr ? " " : "",
|
||||
msg != nullptr ? msg : "");
|
||||
}
|
||||
|
||||
void ZPage::print_on(outputStream* out) const {
|
||||
print_on_msg(out, nullptr);
|
||||
void ZPage::print_on(outputStream* st) const {
|
||||
print_on_msg(st, nullptr);
|
||||
}
|
||||
|
||||
void ZPage::print() const {
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,38 +25,34 @@
|
||||
#define SHARE_GC_Z_ZPAGE_HPP
|
||||
|
||||
#include "gc/z/zGenerationId.hpp"
|
||||
#include "gc/z/zList.hpp"
|
||||
#include "gc/z/zLiveMap.hpp"
|
||||
#include "gc/z/zPageAge.hpp"
|
||||
#include "gc/z/zPageType.hpp"
|
||||
#include "gc/z/zPhysicalMemory.hpp"
|
||||
#include "gc/z/zRememberedSet.hpp"
|
||||
#include "gc/z/zVirtualMemory.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "oops/oopsHierarchy.hpp"
|
||||
|
||||
class ZGeneration;
|
||||
class ZMultiPartitionTracker;
|
||||
|
||||
class ZPage : public CHeapObj<mtGC> {
|
||||
friend class VMStructs;
|
||||
friend class ZList<ZPage>;
|
||||
friend class ZForwardingTest;
|
||||
|
||||
private:
|
||||
ZPageType _type;
|
||||
ZGenerationId _generation_id;
|
||||
ZPageAge _age;
|
||||
uint8_t _numa_id;
|
||||
uint32_t _seqnum;
|
||||
uint32_t _seqnum_other;
|
||||
ZVirtualMemory _virtual;
|
||||
volatile zoffset_end _top;
|
||||
ZLiveMap _livemap;
|
||||
ZRememberedSet _remembered_set;
|
||||
uint64_t _last_used;
|
||||
ZPhysicalMemory _physical;
|
||||
ZListNode<ZPage> _node;
|
||||
const ZPageType _type;
|
||||
ZGenerationId _generation_id;
|
||||
ZPageAge _age;
|
||||
uint32_t _seqnum;
|
||||
uint32_t _seqnum_other;
|
||||
const uint32_t _single_partition_id;
|
||||
const ZVirtualMemory _virtual;
|
||||
volatile zoffset_end _top;
|
||||
ZLiveMap _livemap;
|
||||
ZRememberedSet _remembered_set;
|
||||
ZMultiPartitionTracker* const _multi_partition_tracker;
|
||||
|
||||
ZPageType type_from_size(size_t size) const;
|
||||
const char* type_to_string() const;
|
||||
|
||||
BitMap::idx_t bit_index(zaddress addr) const;
|
||||
@ -71,12 +67,13 @@ private:
|
||||
|
||||
void reset_seqnum();
|
||||
|
||||
ZPage* split_with_pmem(ZPageType type, const ZPhysicalMemory& pmem);
|
||||
ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, ZMultiPartitionTracker* multi_partition_tracker, uint32_t partition_id);
|
||||
|
||||
public:
|
||||
ZPage(ZPageType type, const ZVirtualMemory& vmem, const ZPhysicalMemory& pmem);
|
||||
ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, uint32_t partition_id);
|
||||
ZPage(ZPageType type, ZPageAge age, const ZVirtualMemory& vmem, ZMultiPartitionTracker* multi_partition_tracker);
|
||||
|
||||
ZPage* clone_limited() const;
|
||||
ZPage* clone_for_promotion() const;
|
||||
|
||||
uint32_t object_max_count() const;
|
||||
size_t object_alignment_shift() const;
|
||||
@ -99,28 +96,20 @@ public:
|
||||
size_t used() const;
|
||||
|
||||
const ZVirtualMemory& virtual_memory() const;
|
||||
const ZPhysicalMemory& physical_memory() const;
|
||||
ZPhysicalMemory& physical_memory();
|
||||
|
||||
uint8_t numa_id();
|
||||
uint32_t single_partition_id() const;
|
||||
bool is_multi_partition() const;
|
||||
ZMultiPartitionTracker* multi_partition_tracker() const;
|
||||
|
||||
ZPageAge age() const;
|
||||
|
||||
uint32_t seqnum() const;
|
||||
bool is_allocating() const;
|
||||
bool is_relocatable() const;
|
||||
|
||||
uint64_t last_used() const;
|
||||
void set_last_used();
|
||||
|
||||
void reset(ZPageAge age);
|
||||
ZPage* reset(ZPageAge age);
|
||||
void reset_livemap();
|
||||
void reset_top_for_allocation();
|
||||
void reset_type_and_size(ZPageType type);
|
||||
|
||||
ZPage* retype(ZPageType type);
|
||||
ZPage* split(size_t split_of_size);
|
||||
ZPage* split(ZPageType type, size_t split_of_size);
|
||||
ZPage* split_committed();
|
||||
|
||||
bool is_in(zoffset offset) const;
|
||||
bool is_in(zaddress addr) const;
|
||||
@ -156,7 +145,6 @@ public:
|
||||
void swap_remset_bitmaps();
|
||||
|
||||
void remset_alloc();
|
||||
void remset_delete();
|
||||
|
||||
ZBitMap::ReverseIterator remset_reverse_iterator_previous();
|
||||
BitMap::Iterator remset_iterator_limited_current(uintptr_t l_offset, size_t size);
|
||||
@ -193,8 +181,8 @@ public:
|
||||
|
||||
void log_msg(const char* msg_format, ...) const ATTRIBUTE_PRINTF(2, 3);
|
||||
|
||||
void print_on_msg(outputStream* out, const char* msg) const;
|
||||
void print_on(outputStream* out) const;
|
||||
void print_on_msg(outputStream* st, const char* msg) const;
|
||||
void print_on(outputStream* st) const;
|
||||
void print() const;
|
||||
|
||||
// Verification
|
||||
|
||||
@ -30,28 +30,14 @@
|
||||
#include "gc/z/zGeneration.inline.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zLiveMap.inline.hpp"
|
||||
#include "gc/z/zNUMA.hpp"
|
||||
#include "gc/z/zPhysicalMemory.inline.hpp"
|
||||
#include "gc/z/zRememberedSet.inline.hpp"
|
||||
#include "gc/z/zUtils.inline.hpp"
|
||||
#include "gc/z/zVirtualMemory.inline.hpp"
|
||||
#include "logging/logStream.hpp"
|
||||
#include "runtime/atomic.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/checkedCast.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
inline ZPageType ZPage::type_from_size(size_t size) const {
|
||||
if (size == ZPageSizeSmall) {
|
||||
return ZPageType::small;
|
||||
} else if (size == ZPageSizeMedium) {
|
||||
return ZPageType::medium;
|
||||
} else {
|
||||
return ZPageType::large;
|
||||
}
|
||||
}
|
||||
|
||||
inline const char* ZPage::type_to_string() const {
|
||||
switch (type()) {
|
||||
case ZPageType::small:
|
||||
@ -170,20 +156,16 @@ inline const ZVirtualMemory& ZPage::virtual_memory() const {
|
||||
return _virtual;
|
||||
}
|
||||
|
||||
inline const ZPhysicalMemory& ZPage::physical_memory() const {
|
||||
return _physical;
|
||||
inline uint32_t ZPage::single_partition_id() const {
|
||||
return _single_partition_id;
|
||||
}
|
||||
|
||||
inline ZPhysicalMemory& ZPage::physical_memory() {
|
||||
return _physical;
|
||||
inline bool ZPage::is_multi_partition() const {
|
||||
return _multi_partition_tracker != nullptr;
|
||||
}
|
||||
|
||||
inline uint8_t ZPage::numa_id() {
|
||||
if (_numa_id == (uint8_t)-1) {
|
||||
_numa_id = checked_cast<uint8_t>(ZNUMA::memory_id(untype(ZOffset::address(start()))));
|
||||
}
|
||||
|
||||
return _numa_id;
|
||||
inline ZMultiPartitionTracker* ZPage::multi_partition_tracker() const {
|
||||
return _multi_partition_tracker;
|
||||
}
|
||||
|
||||
inline ZPageAge ZPage::age() const {
|
||||
@ -202,14 +184,6 @@ inline bool ZPage::is_relocatable() const {
|
||||
return _seqnum < generation()->seqnum();
|
||||
}
|
||||
|
||||
inline uint64_t ZPage::last_used() const {
|
||||
return _last_used;
|
||||
}
|
||||
|
||||
inline void ZPage::set_last_used() {
|
||||
_last_used = (uint64_t)ceil(os::elapsedTime());
|
||||
}
|
||||
|
||||
inline bool ZPage::is_in(zoffset offset) const {
|
||||
return offset >= start() && offset < top();
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -24,68 +24,72 @@
|
||||
#ifndef SHARE_GC_Z_ZPAGEALLOCATOR_HPP
|
||||
#define SHARE_GC_Z_ZPAGEALLOCATOR_HPP
|
||||
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zAllocationFlags.hpp"
|
||||
#include "gc/z/zArray.hpp"
|
||||
#include "gc/z/zGenerationId.hpp"
|
||||
#include "gc/z/zGranuleMap.hpp"
|
||||
#include "gc/z/zList.hpp"
|
||||
#include "gc/z/zLock.hpp"
|
||||
#include "gc/z/zMappedCache.hpp"
|
||||
#include "gc/z/zPage.hpp"
|
||||
#include "gc/z/zPageAge.hpp"
|
||||
#include "gc/z/zPageCache.hpp"
|
||||
#include "gc/z/zPageType.hpp"
|
||||
#include "gc/z/zPhysicalMemory.hpp"
|
||||
#include "gc/z/zPhysicalMemoryManager.hpp"
|
||||
#include "gc/z/zSafeDelete.hpp"
|
||||
#include "gc/z/zVirtualMemory.hpp"
|
||||
#include "gc/z/zUncommitter.hpp"
|
||||
#include "gc/z/zValue.hpp"
|
||||
#include "gc/z/zVirtualMemoryManager.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
class ThreadClosure;
|
||||
class ZGeneration;
|
||||
class ZMemoryAllocation;
|
||||
class ZMultiPartitionAllocation;
|
||||
class ZPageAllocation;
|
||||
class ZPageAllocator;
|
||||
class ZPageAllocatorStats;
|
||||
class ZSegmentStash;
|
||||
class ZSinglePartitionAllocation;
|
||||
class ZVirtualMemory;
|
||||
class ZWorkers;
|
||||
class ZUncommitter;
|
||||
class ZUnmapper;
|
||||
|
||||
class ZSafePageRecycle {
|
||||
class ZPartition {
|
||||
friend class VMStructs;
|
||||
friend class ZPageAllocator;
|
||||
|
||||
private:
|
||||
ZPageAllocator* _page_allocator;
|
||||
ZActivatedArray<ZPage> _unsafe_to_recycle;
|
||||
ZPageAllocator* const _page_allocator;
|
||||
ZMappedCache _cache;
|
||||
ZUncommitter _uncommitter;
|
||||
const size_t _min_capacity;
|
||||
const size_t _max_capacity;
|
||||
volatile size_t _current_max_capacity;
|
||||
volatile size_t _capacity;
|
||||
volatile size_t _claimed;
|
||||
size_t _used;
|
||||
double _last_commit;
|
||||
double _last_uncommit;
|
||||
size_t _to_uncommit;
|
||||
const uint32_t _numa_id;
|
||||
|
||||
const ZVirtualMemoryManager& virtual_memory_manager() const;
|
||||
ZVirtualMemoryManager& virtual_memory_manager();
|
||||
|
||||
const ZPhysicalMemoryManager& physical_memory_manager() const;
|
||||
ZPhysicalMemoryManager& physical_memory_manager();
|
||||
|
||||
void verify_virtual_memory_multi_partition_association(const ZVirtualMemory& vmem) const NOT_DEBUG_RETURN;
|
||||
void verify_virtual_memory_association(const ZVirtualMemory& vmem, bool check_multi_partition = false) const NOT_DEBUG_RETURN;
|
||||
void verify_virtual_memory_association(const ZArray<ZVirtualMemory>* vmems) const NOT_DEBUG_RETURN;
|
||||
void verify_memory_allocation_association(const ZMemoryAllocation* allocation) const NOT_DEBUG_RETURN;
|
||||
|
||||
public:
|
||||
ZSafePageRecycle(ZPageAllocator* page_allocator);
|
||||
ZPartition(uint32_t numa_id, ZPageAllocator* page_allocator);
|
||||
|
||||
void activate();
|
||||
void deactivate();
|
||||
uint32_t numa_id() const;
|
||||
|
||||
ZPage* register_and_clone_if_activated(ZPage* page);
|
||||
};
|
||||
|
||||
class ZPageAllocator {
|
||||
friend class VMStructs;
|
||||
friend class ZUnmapper;
|
||||
friend class ZUncommitter;
|
||||
|
||||
private:
|
||||
mutable ZLock _lock;
|
||||
ZPageCache _cache;
|
||||
ZVirtualMemoryManager _virtual;
|
||||
ZPhysicalMemoryManager _physical;
|
||||
const size_t _min_capacity;
|
||||
const size_t _initial_capacity;
|
||||
const size_t _max_capacity;
|
||||
volatile size_t _current_max_capacity;
|
||||
volatile size_t _capacity;
|
||||
volatile size_t _claimed;
|
||||
volatile size_t _used;
|
||||
size_t _used_generations[2];
|
||||
struct {
|
||||
size_t _used_high;
|
||||
size_t _used_low;
|
||||
} _collection_stats[2];
|
||||
ZList<ZPageAllocation> _stalled;
|
||||
ZUnmapper* _unmapper;
|
||||
ZUncommitter* _uncommitter;
|
||||
mutable ZSafeDelete<ZPage> _safe_destroy;
|
||||
mutable ZSafePageRecycle _safe_recycle;
|
||||
bool _initialized;
|
||||
size_t available() const;
|
||||
|
||||
size_t increase_capacity(size_t size);
|
||||
void decrease_capacity(size_t size, bool set_max_capacity);
|
||||
@@ -93,38 +97,146 @@ private:
|
||||
void increase_used(size_t size);
|
||||
void decrease_used(size_t size);
|
||||
|
||||
void increase_used_generation(ZGenerationId id, size_t size);
|
||||
void decrease_used_generation(ZGenerationId id, size_t size);
|
||||
void free_memory(const ZVirtualMemory& vmem);
|
||||
|
||||
bool commit_page(ZPage* page);
|
||||
void uncommit_page(ZPage* page);
|
||||
|
||||
void map_page(const ZPage* page) const;
|
||||
void unmap_page(const ZPage* page) const;
|
||||
|
||||
void destroy_page(ZPage* page);
|
||||
|
||||
bool should_defragment(const ZPage* page) const;
|
||||
ZPage* defragment_page(ZPage* page);
|
||||
|
||||
bool is_alloc_allowed(size_t size) const;
|
||||
|
||||
bool alloc_page_common_inner(ZPageType type, size_t size, ZList<ZPage>* pages);
|
||||
bool alloc_page_common(ZPageAllocation* allocation);
|
||||
bool alloc_page_stall(ZPageAllocation* allocation);
|
||||
bool alloc_page_or_stall(ZPageAllocation* allocation);
|
||||
bool is_alloc_satisfied(ZPageAllocation* allocation) const;
|
||||
ZPage* alloc_page_create(ZPageAllocation* allocation);
|
||||
ZPage* alloc_page_finalize(ZPageAllocation* allocation);
|
||||
void free_pages_alloc_failed(ZPageAllocation* allocation);
|
||||
|
||||
void satisfy_stalled();
|
||||
void claim_from_cache_or_increase_capacity(ZMemoryAllocation* allocation);
|
||||
bool claim_capacity(ZMemoryAllocation* allocation);
|
||||
|
||||
size_t uncommit(uint64_t* timeout);
|
||||
|
||||
void sort_segments_physical(const ZVirtualMemory& vmem);
|
||||
|
||||
void claim_physical(const ZVirtualMemory& vmem);
|
||||
void free_physical(const ZVirtualMemory& vmem);
|
||||
size_t commit_physical(const ZVirtualMemory& vmem);
|
||||
size_t uncommit_physical(const ZVirtualMemory& vmem);
|
||||
|
||||
void map_virtual(const ZVirtualMemory& vmem);
|
||||
void unmap_virtual(const ZVirtualMemory& vmem);
|
||||
|
||||
void map_virtual_from_multi_partition(const ZVirtualMemory& vmem);
|
||||
void unmap_virtual_from_multi_partition(const ZVirtualMemory& vmem);
|
||||
|
||||
ZVirtualMemory claim_virtual(size_t size);
|
||||
size_t claim_virtual(size_t size, ZArray<ZVirtualMemory>* vmems_out);
|
||||
void free_virtual(const ZVirtualMemory& vmem);
|
||||
|
||||
void free_and_claim_virtual_from_low_many(const ZVirtualMemory& vmem, ZArray<ZVirtualMemory>* vmems_out);
|
||||
ZVirtualMemory free_and_claim_virtual_from_low_exact_or_many(size_t size, ZArray<ZVirtualMemory>* vmems_in_out);
|
||||
|
||||
bool prime(ZWorkers* workers, size_t size);
|
||||
|
||||
ZVirtualMemory prepare_harvested_and_claim_virtual(ZMemoryAllocation* allocation);
|
||||
|
||||
void copy_physical_segments_to_partition(const ZVirtualMemory& at, const ZVirtualMemory& from);
|
||||
void copy_physical_segments_from_partition(const ZVirtualMemory& at, const ZVirtualMemory& to);
|
||||
|
||||
void commit_increased_capacity(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem);
|
||||
void map_memory(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem);
|
||||
|
||||
void free_memory_alloc_failed(ZMemoryAllocation* allocation);
|
||||
|
||||
void threads_do(ThreadClosure* tc) const;
|
||||
|
||||
void print_on(outputStream* st) const;
|
||||
void print_cache_on(outputStream* st) const;
|
||||
void print_extended_on_error(outputStream* st) const;
|
||||
};
|
||||

using ZPartitionIterator = ZPerNUMAIterator<ZPartition>;
using ZPartitionConstIterator = ZPerNUMAConstIterator<ZPartition>;

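For orientation, a minimal sketch (not part of this change) of how the per-partition iterator aliases above might be used together with the partition_iterator() accessors declared further down in ZPageAllocator. The print_partition_ids helper and the single-argument next() overload are assumptions, modeled on the two-argument ZPerNUMAIterator usage seen later in this commit in ZPhysicalMemoryManager's constructor.

// Illustrative sketch only, not HotSpot code: walk every partition via the
// ZPartitionConstIterator alias above and print its NUMA id.
static void print_partition_ids(const ZPageAllocator* allocator, outputStream* st) {
  ZPartitionConstIterator iter = allocator->partition_iterator();
  for (const ZPartition* partition; iter.next(&partition);) {
    st->print_cr("Partition %u", partition->numa_id());
  }
}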
class ZPageAllocator {
|
||||
friend class VMStructs;
|
||||
friend class ZMultiPartitionTracker;
|
||||
friend class ZPartition;
|
||||
friend class ZUncommitter;
|
||||
|
||||
private:
|
||||
mutable ZLock _lock;
|
||||
ZVirtualMemoryManager _virtual;
|
||||
ZPhysicalMemoryManager _physical;
|
||||
const size_t _min_capacity;
|
||||
const size_t _max_capacity;
|
||||
volatile size_t _used;
|
||||
volatile size_t _used_generations[2];
|
||||
struct {
|
||||
size_t _used_high;
|
||||
size_t _used_low;
|
||||
} _collection_stats[2];
|
||||
ZPerNUMA<ZPartition> _partitions;
|
||||
ZList<ZPageAllocation> _stalled;
|
||||
mutable ZSafeDelete<ZPage> _safe_destroy;
|
||||
bool _initialized;
|
||||
|
||||
bool alloc_page_stall(ZPageAllocation* allocation);
|
||||
ZPage* alloc_page_inner(ZPageAllocation* allocation);
|
||||
|
||||
bool claim_capacity_or_stall(ZPageAllocation* allocation);
|
||||
bool claim_capacity(ZPageAllocation* allocation);
|
||||
bool claim_capacity_single_partition(ZSinglePartitionAllocation* single_partition_allocation, uint32_t partition_id);
|
||||
void claim_capacity_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, uint32_t start_partition);
|
||||
|
||||
ZVirtualMemory satisfied_from_cache_vmem(const ZPageAllocation* allocation) const;
|
||||
|
||||
ZVirtualMemory claim_virtual_memory(ZPageAllocation* allocation);
|
||||
ZVirtualMemory claim_virtual_memory_single_partition(ZSinglePartitionAllocation* single_partition_allocation);
|
||||
ZVirtualMemory claim_virtual_memory_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation);
|
||||
|
||||
void copy_claimed_physical_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem);
|
||||
|
||||
void claim_physical_for_increased_capacity(ZPageAllocation* allocation, const ZVirtualMemory& vmem);
|
||||
void claim_physical_for_increased_capacity_single_partition(ZSinglePartitionAllocation* allocation, const ZVirtualMemory& vmem);
|
||||
void claim_physical_for_increased_capacity_multi_partition(const ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem);
|
||||
void claim_physical_for_increased_capacity(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem);
|
||||
|
||||
bool commit_and_map(ZPageAllocation* allocation, const ZVirtualMemory& vmem);
|
||||
bool commit_and_map_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem);
|
||||
bool commit_and_map_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem);
|
||||
|
||||
void commit(ZMemoryAllocation* allocation, const ZVirtualMemory& vmem);
|
||||
bool commit_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem);
|
||||
bool commit_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem);
|
||||
|
||||
void unmap_harvested_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation);
|
||||
|
||||
void map_committed_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem);
|
||||
void map_committed_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem);
|
||||
|
||||
void cleanup_failed_commit_single_partition(ZSinglePartitionAllocation* single_partition_allocation, const ZVirtualMemory& vmem);
|
||||
void cleanup_failed_commit_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation, const ZVirtualMemory& vmem);
|
||||
|
||||
void free_after_alloc_page_failed(ZPageAllocation* allocation);
|
||||
|
||||
void free_memory_alloc_failed(ZPageAllocation* allocation);
|
||||
void free_memory_alloc_failed_single_partition(ZSinglePartitionAllocation* single_partition_allocation);
|
||||
void free_memory_alloc_failed_multi_partition(ZMultiPartitionAllocation* multi_partition_allocation);
|
||||
void free_memory_alloc_failed(ZMemoryAllocation* allocation);
|
||||
|
||||
ZPage* create_page(ZPageAllocation* allocation, const ZVirtualMemory& vmem);
|
||||
|
||||
void prepare_memory_for_free(ZPage* page, ZArray<ZVirtualMemory>* vmems);
|
||||
void remap_and_defragment(const ZVirtualMemory& vmem, ZArray<ZVirtualMemory>* vmems_out);
|
||||
void free_memory(ZArray<ZVirtualMemory>* vmems);
|
||||
|
||||
void satisfy_stalled();
|
||||
|
||||
bool is_multi_partition_enabled() const;
|
||||
|
||||
const ZPartition& partition_from_partition_id(uint32_t partition_id) const;
|
||||
ZPartition& partition_from_partition_id(uint32_t partition_id);
|
||||
ZPartition& partition_from_vmem(const ZVirtualMemory& vmem);
|
||||
|
||||
size_t sum_available() const;
|
||||
|
||||
void increase_used(size_t size);
|
||||
void decrease_used(size_t size);
|
||||
|
||||
void notify_out_of_memory();
|
||||
void restart_gc() const;
|
||||
|
||||
void print_on_inner(outputStream* st) const;
|
||||
|
||||
public:
|
||||
ZPageAllocator(size_t min_capacity,
|
||||
size_t initial_capacity,
|
||||
@@ -135,56 +247,61 @@ public:
|
||||
|
||||
bool prime_cache(ZWorkers* workers, size_t size);
|
||||
|
||||
size_t initial_capacity() const;
|
||||
size_t min_capacity() const;
|
||||
size_t max_capacity() const;
|
||||
size_t soft_max_capacity() const;
|
||||
size_t current_max_capacity() const;
|
||||
size_t capacity() const;
|
||||
size_t used() const;
|
||||
size_t used_generation(ZGenerationId id) const;
|
||||
size_t unused() const;
|
||||
|
||||
void promote_used(size_t size);
|
||||
void increase_used_generation(ZGenerationId id, size_t size);
|
||||
void decrease_used_generation(ZGenerationId id, size_t size);
|
||||
|
||||
void promote_used(const ZPage* from, const ZPage* to);
|
||||
|
||||
ZPageAllocatorStats stats(ZGeneration* generation) const;
|
||||
|
||||
void reset_statistics(ZGenerationId id);
|
||||
|
||||
ZPage* alloc_page(ZPageType type, size_t size, ZAllocationFlags flags, ZPageAge age);
|
||||
ZPage* prepare_to_recycle(ZPage* page, bool allow_defragment);
|
||||
void recycle_page(ZPage* page);
|
||||
void safe_destroy_page(ZPage* page);
|
||||
void free_page(ZPage* page, bool allow_defragment);
|
||||
void free_pages(const ZArray<ZPage*>* pages);
|
||||
void free_page(ZPage* page);
|
||||
void free_pages(ZGenerationId id, const ZArray<ZPage*>* pages);
|
||||
|
||||
void enable_safe_destroy() const;
|
||||
void disable_safe_destroy() const;
|
||||
|
||||
void enable_safe_recycle() const;
|
||||
void disable_safe_recycle() const;
|
||||
|
||||
bool is_alloc_stalling() const;
|
||||
bool is_alloc_stalling_for_old() const;
|
||||
void handle_alloc_stalling_for_young();
|
||||
void handle_alloc_stalling_for_old(bool cleared_soft_refs);
|
||||
|
||||
ZPartitionConstIterator partition_iterator() const;
|
||||
ZPartitionIterator partition_iterator();
|
||||
|
||||
void threads_do(ThreadClosure* tc) const;
|
||||
|
||||
void print_on(outputStream* st) const;
|
||||
void print_extended_on_error(outputStream* st) const;
|
||||
void print_on_error(outputStream* st) const;
|
||||
};
|
||||
|
||||
class ZPageAllocatorStats {
|
||||
private:
|
||||
size_t _min_capacity;
|
||||
size_t _max_capacity;
|
||||
size_t _soft_max_capacity;
|
||||
size_t _capacity;
|
||||
size_t _used;
|
||||
size_t _used_high;
|
||||
size_t _used_low;
|
||||
size_t _used_generation;
|
||||
size_t _freed;
|
||||
size_t _promoted;
|
||||
size_t _compacted;
|
||||
size_t _allocation_stalls;
|
||||
const size_t _min_capacity;
|
||||
const size_t _max_capacity;
|
||||
const size_t _soft_max_capacity;
|
||||
const size_t _capacity;
|
||||
const size_t _used;
|
||||
const size_t _used_high;
|
||||
const size_t _used_low;
|
||||
const size_t _used_generation;
|
||||
const size_t _freed;
|
||||
const size_t _promoted;
|
||||
const size_t _compacted;
|
||||
const size_t _allocation_stalls;
|
||||
|
||||
public:
|
||||
ZPageAllocatorStats(size_t min_capacity,
|
||||
|
||||
@@ -1,332 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zList.inline.hpp"
|
||||
#include "gc/z/zNUMA.inline.hpp"
|
||||
#include "gc/z/zPage.inline.hpp"
|
||||
#include "gc/z/zPageCache.hpp"
|
||||
#include "gc/z/zStat.hpp"
|
||||
#include "gc/z/zValue.inline.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
|
||||
static const ZStatCounter ZCounterPageCacheHitL1("Memory", "Page Cache Hit L1", ZStatUnitOpsPerSecond);
|
||||
static const ZStatCounter ZCounterPageCacheHitL2("Memory", "Page Cache Hit L2", ZStatUnitOpsPerSecond);
|
||||
static const ZStatCounter ZCounterPageCacheHitL3("Memory", "Page Cache Hit L3", ZStatUnitOpsPerSecond);
|
||||
static const ZStatCounter ZCounterPageCacheMiss("Memory", "Page Cache Miss", ZStatUnitOpsPerSecond);
|
||||
|
||||
class ZPageCacheFlushClosure : public StackObj {
|
||||
friend class ZPageCache;
|
||||
|
||||
protected:
|
||||
const size_t _requested;
|
||||
size_t _flushed;
|
||||
|
||||
public:
|
||||
ZPageCacheFlushClosure(size_t requested);
|
||||
virtual bool do_page(const ZPage* page) = 0;
|
||||
};
|
||||
|
||||
ZPageCacheFlushClosure::ZPageCacheFlushClosure(size_t requested)
|
||||
: _requested(requested),
|
||||
_flushed(0) {}
|
||||
|
||||
ZPageCache::ZPageCache()
|
||||
: _small(),
|
||||
_medium(),
|
||||
_large(),
|
||||
_last_commit(0) {}
|
||||
|
||||
ZPage* ZPageCache::alloc_small_page() {
|
||||
const uint32_t numa_id = ZNUMA::id();
|
||||
const uint32_t numa_count = ZNUMA::count();
|
||||
|
||||
// Try NUMA local page cache
|
||||
ZPage* const l1_page = _small.get(numa_id).remove_first();
|
||||
if (l1_page != nullptr) {
|
||||
ZStatInc(ZCounterPageCacheHitL1);
|
||||
return l1_page;
|
||||
}
|
||||
|
||||
// Try NUMA remote page cache(s)
|
||||
uint32_t remote_numa_id = numa_id + 1;
|
||||
const uint32_t remote_numa_count = numa_count - 1;
|
||||
for (uint32_t i = 0; i < remote_numa_count; i++) {
|
||||
if (remote_numa_id == numa_count) {
|
||||
remote_numa_id = 0;
|
||||
}
|
||||
|
||||
ZPage* const l2_page = _small.get(remote_numa_id).remove_first();
|
||||
if (l2_page != nullptr) {
|
||||
ZStatInc(ZCounterPageCacheHitL2);
|
||||
return l2_page;
|
||||
}
|
||||
|
||||
remote_numa_id++;
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
ZPage* ZPageCache::alloc_medium_page() {
|
||||
ZPage* const page = _medium.remove_first();
|
||||
if (page != nullptr) {
|
||||
ZStatInc(ZCounterPageCacheHitL1);
|
||||
return page;
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
ZPage* ZPageCache::alloc_large_page(size_t size) {
|
||||
// Find a page with the right size
|
||||
ZListIterator<ZPage> iter(&_large);
|
||||
for (ZPage* page; iter.next(&page);) {
|
||||
if (size == page->size()) {
|
||||
// Page found
|
||||
_large.remove(page);
|
||||
ZStatInc(ZCounterPageCacheHitL1);
|
||||
return page;
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
ZPage* ZPageCache::alloc_oversized_medium_page(size_t size) {
|
||||
if (size <= ZPageSizeMedium) {
|
||||
return _medium.remove_first();
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
ZPage* ZPageCache::alloc_oversized_large_page(size_t size) {
|
||||
// Find a page that is large enough
|
||||
ZListIterator<ZPage> iter(&_large);
|
||||
for (ZPage* page; iter.next(&page);) {
|
||||
if (size <= page->size()) {
|
||||
// Page found
|
||||
_large.remove(page);
|
||||
return page;
|
||||
}
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
ZPage* ZPageCache::alloc_oversized_page(size_t size) {
|
||||
ZPage* page = alloc_oversized_large_page(size);
|
||||
if (page == nullptr) {
|
||||
page = alloc_oversized_medium_page(size);
|
||||
}
|
||||
|
||||
if (page != nullptr) {
|
||||
ZStatInc(ZCounterPageCacheHitL3);
|
||||
}
|
||||
|
||||
return page;
|
||||
}
|
||||
|
||||
ZPage* ZPageCache::alloc_page(ZPageType type, size_t size) {
|
||||
ZPage* page;
|
||||
|
||||
// Try allocate exact page
|
||||
if (type == ZPageType::small) {
|
||||
page = alloc_small_page();
|
||||
} else if (type == ZPageType::medium) {
|
||||
page = alloc_medium_page();
|
||||
} else {
|
||||
page = alloc_large_page(size);
|
||||
}
|
||||
|
||||
if (page == nullptr) {
|
||||
// Try allocate potentially oversized page
|
||||
ZPage* const oversized = alloc_oversized_page(size);
|
||||
if (oversized != nullptr) {
|
||||
if (size < oversized->size()) {
|
||||
// Split oversized page
|
||||
page = oversized->split(type, size);
|
||||
|
||||
// Cache remainder
|
||||
free_page(oversized);
|
||||
} else {
|
||||
// Re-type correctly sized page
|
||||
page = oversized->retype(type);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (page == nullptr) {
|
||||
ZStatInc(ZCounterPageCacheMiss);
|
||||
}
|
||||
|
||||
return page;
|
||||
}
|
||||
|
||||
void ZPageCache::free_page(ZPage* page) {
|
||||
const ZPageType type = page->type();
|
||||
if (type == ZPageType::small) {
|
||||
_small.get(page->numa_id()).insert_first(page);
|
||||
} else if (type == ZPageType::medium) {
|
||||
_medium.insert_first(page);
|
||||
} else {
|
||||
_large.insert_first(page);
|
||||
}
|
||||
}
|
||||
|
||||
bool ZPageCache::flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
|
||||
ZPage* const page = from->last();
|
||||
if (page == nullptr || !cl->do_page(page)) {
|
||||
// Don't flush page
|
||||
return false;
|
||||
}
|
||||
|
||||
// Flush page
|
||||
from->remove(page);
|
||||
to->insert_last(page);
|
||||
return true;
|
||||
}
|
||||
|
||||
void ZPageCache::flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to) {
|
||||
while (flush_list_inner(cl, from, to));
|
||||
}
|
||||
|
||||
void ZPageCache::flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to) {
|
||||
const uint32_t numa_count = ZNUMA::count();
|
||||
uint32_t numa_done = 0;
|
||||
uint32_t numa_next = 0;
|
||||
|
||||
// Flush lists round-robin
|
||||
while (numa_done < numa_count) {
|
||||
ZList<ZPage>* const numa_list = from->addr(numa_next);
|
||||
if (++numa_next == numa_count) {
|
||||
numa_next = 0;
|
||||
}
|
||||
|
||||
if (flush_list_inner(cl, numa_list, to)) {
|
||||
// Not done
|
||||
numa_done = 0;
|
||||
} else {
|
||||
// Done
|
||||
numa_done++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void ZPageCache::flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to) {
|
||||
// Prefer flushing large, then medium and last small pages
|
||||
flush_list(cl, &_large, to);
|
||||
flush_list(cl, &_medium, to);
|
||||
flush_per_numa_lists(cl, &_small, to);
|
||||
|
||||
if (cl->_flushed > cl->_requested) {
|
||||
// Overflushed, re-insert part of last page into the cache
|
||||
const size_t overflushed = cl->_flushed - cl->_requested;
|
||||
ZPage* const reinsert = to->last()->split(overflushed);
|
||||
free_page(reinsert);
|
||||
cl->_flushed -= overflushed;
|
||||
}
|
||||
}
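As a concrete illustration of the overflush handling above (numbers are illustrative only): if 6M was requested and the closure flushes a 2M page followed by an 8M page, _flushed ends up at 10M and overflushed is 4M; a 4M piece is split off the last flushed page and re-inserted into the cache, leaving _flushed at the requested 6M.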
|
||||
|
||||
class ZPageCacheFlushForAllocationClosure : public ZPageCacheFlushClosure {
|
||||
public:
|
||||
ZPageCacheFlushForAllocationClosure(size_t requested)
|
||||
: ZPageCacheFlushClosure(requested) {}
|
||||
|
||||
virtual bool do_page(const ZPage* page) {
|
||||
if (_flushed < _requested) {
|
||||
// Flush page
|
||||
_flushed += page->size();
|
||||
return true;
|
||||
}
|
||||
|
||||
// Don't flush page
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
void ZPageCache::flush_for_allocation(size_t requested, ZList<ZPage>* to) {
|
||||
ZPageCacheFlushForAllocationClosure cl(requested);
|
||||
flush(&cl, to);
|
||||
}
|
||||
|
||||
class ZPageCacheFlushForUncommitClosure : public ZPageCacheFlushClosure {
|
||||
private:
|
||||
const uint64_t _now;
|
||||
uint64_t* _timeout;
|
||||
|
||||
public:
|
||||
ZPageCacheFlushForUncommitClosure(size_t requested, uint64_t now, uint64_t* timeout)
|
||||
: ZPageCacheFlushClosure(requested),
|
||||
_now(now),
|
||||
_timeout(timeout) {
|
||||
// Set initial timeout
|
||||
*_timeout = ZUncommitDelay;
|
||||
}
|
||||
|
||||
virtual bool do_page(const ZPage* page) {
|
||||
const uint64_t expires = page->last_used() + ZUncommitDelay;
|
||||
if (expires > _now) {
|
||||
// Don't flush page, record shortest non-expired timeout
|
||||
*_timeout = MIN2(*_timeout, expires - _now);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (_flushed >= _requested) {
|
||||
// Don't flush page, requested amount flushed
|
||||
return false;
|
||||
}
|
||||
|
||||
// Flush page
|
||||
_flushed += page->size();
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
size_t ZPageCache::flush_for_uncommit(size_t requested, ZList<ZPage>* to, uint64_t* timeout) {
|
||||
const uint64_t now = (uint64_t)os::elapsedTime();
|
||||
const uint64_t expires = _last_commit + ZUncommitDelay;
|
||||
if (expires > now) {
|
||||
// Delay uncommit, set next timeout
|
||||
*timeout = expires - now;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (requested == 0) {
|
||||
// Nothing to flush, set next timeout
|
||||
*timeout = ZUncommitDelay;
|
||||
return 0;
|
||||
}
|
||||
|
||||
ZPageCacheFlushForUncommitClosure cl(requested, now, timeout);
|
||||
flush(&cl, to);
|
||||
|
||||
return cl._flushed;
|
||||
}
|
||||
|
||||
void ZPageCache::set_last_commit() {
|
||||
_last_commit = (uint64_t)ceil(os::elapsedTime());
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZPAGECACHE_HPP
|
||||
#define SHARE_GC_Z_ZPAGECACHE_HPP
|
||||
|
||||
#include "gc/z/zList.hpp"
|
||||
#include "gc/z/zPage.hpp"
|
||||
#include "gc/z/zPageType.hpp"
|
||||
#include "gc/z/zValue.hpp"
|
||||
|
||||
class ZPageCacheFlushClosure;
|
||||
|
||||
class ZPageCache {
|
||||
private:
|
||||
ZPerNUMA<ZList<ZPage> > _small;
|
||||
ZList<ZPage> _medium;
|
||||
ZList<ZPage> _large;
|
||||
uint64_t _last_commit;
|
||||
|
||||
ZPage* alloc_small_page();
|
||||
ZPage* alloc_medium_page();
|
||||
ZPage* alloc_large_page(size_t size);
|
||||
|
||||
ZPage* alloc_oversized_medium_page(size_t size);
|
||||
ZPage* alloc_oversized_large_page(size_t size);
|
||||
ZPage* alloc_oversized_page(size_t size);
|
||||
|
||||
bool flush_list_inner(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to);
|
||||
void flush_list(ZPageCacheFlushClosure* cl, ZList<ZPage>* from, ZList<ZPage>* to);
|
||||
void flush_per_numa_lists(ZPageCacheFlushClosure* cl, ZPerNUMA<ZList<ZPage> >* from, ZList<ZPage>* to);
|
||||
void flush(ZPageCacheFlushClosure* cl, ZList<ZPage>* to);
|
||||
|
||||
public:
|
||||
ZPageCache();
|
||||
|
||||
ZPage* alloc_page(ZPageType type, size_t size);
|
||||
void free_page(ZPage* page);
|
||||
|
||||
void flush_for_allocation(size_t requested, ZList<ZPage>* to);
|
||||
size_t flush_for_uncommit(size_t requested, ZList<ZPage>* to, uint64_t* timeout);
|
||||
|
||||
void set_last_commit();
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZPAGECACHE_HPP
|
||||
@@ -81,11 +81,9 @@ ZGenerationPagesParallelIterator::ZGenerationPagesParallelIterator(const ZPageTa
|
||||
_generation_id(id),
|
||||
_page_allocator(page_allocator) {
|
||||
_page_allocator->enable_safe_destroy();
|
||||
_page_allocator->enable_safe_recycle();
|
||||
}
|
||||
|
||||
ZGenerationPagesParallelIterator::~ZGenerationPagesParallelIterator() {
|
||||
_page_allocator->disable_safe_recycle();
|
||||
_page_allocator->disable_safe_destroy();
|
||||
}
|
||||
|
||||
@@ -94,10 +92,8 @@ ZGenerationPagesIterator::ZGenerationPagesIterator(const ZPageTable* page_table,
|
||||
_generation_id(id),
|
||||
_page_allocator(page_allocator) {
|
||||
_page_allocator->enable_safe_destroy();
|
||||
_page_allocator->enable_safe_recycle();
|
||||
}
|
||||
|
||||
ZGenerationPagesIterator::~ZGenerationPagesIterator() {
|
||||
_page_allocator->disable_safe_recycle();
|
||||
_page_allocator->disable_safe_destroy();
|
||||
}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@@ -103,11 +103,9 @@ inline bool ZGenerationPagesIterator::next(ZPage** page) {
|
||||
template <typename Function>
|
||||
inline void ZGenerationPagesIterator::yield(Function function) {
|
||||
_page_allocator->disable_safe_destroy();
|
||||
_page_allocator->disable_safe_recycle();
|
||||
|
||||
function();
|
||||
|
||||
_page_allocator->enable_safe_recycle();
|
||||
_page_allocator->enable_safe_destroy();
|
||||
}
|
||||
|
||||
|
||||
@@ -1,386 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zArray.inline.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zLargePages.inline.hpp"
|
||||
#include "gc/z/zList.inline.hpp"
|
||||
#include "gc/z/zNMT.hpp"
|
||||
#include "gc/z/zNUMA.inline.hpp"
|
||||
#include "gc/z/zPhysicalMemory.inline.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/init.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
ZPhysicalMemory::ZPhysicalMemory()
|
||||
: _segments() {}
|
||||
|
||||
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemorySegment& segment)
|
||||
: _segments() {
|
||||
_segments.append(segment);
|
||||
}
|
||||
|
||||
ZPhysicalMemory::ZPhysicalMemory(const ZPhysicalMemory& pmem)
|
||||
: _segments(pmem.nsegments()) {
|
||||
_segments.appendAll(&pmem._segments);
|
||||
}
|
||||
|
||||
const ZPhysicalMemory& ZPhysicalMemory::operator=(const ZPhysicalMemory& pmem) {
|
||||
// Check for self-assignment
|
||||
if (this == &pmem) {
|
||||
return *this;
|
||||
}
|
||||
|
||||
// Free and copy segments
|
||||
_segments.clear_and_deallocate();
|
||||
_segments.reserve(pmem.nsegments());
|
||||
_segments.appendAll(&pmem._segments);
|
||||
|
||||
return *this;
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemory::size() const {
|
||||
size_t size = 0;
|
||||
|
||||
for (int i = 0; i < _segments.length(); i++) {
|
||||
size += _segments.at(i).size();
|
||||
}
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
void ZPhysicalMemory::insert_segment(int index, zoffset start, size_t size, bool committed) {
|
||||
_segments.insert_before(index, ZPhysicalMemorySegment(start, size, committed));
|
||||
}
|
||||
|
||||
void ZPhysicalMemory::replace_segment(int index, zoffset start, size_t size, bool committed) {
|
||||
_segments.at_put(index, ZPhysicalMemorySegment(start, size, committed));
|
||||
}
|
||||
|
||||
void ZPhysicalMemory::remove_segment(int index) {
|
||||
_segments.remove_at(index);
|
||||
}
|
||||
|
||||
void ZPhysicalMemory::add_segments(const ZPhysicalMemory& pmem) {
|
||||
for (int i = 0; i < pmem.nsegments(); i++) {
|
||||
add_segment(pmem.segment(i));
|
||||
}
|
||||
}
|
||||
|
||||
void ZPhysicalMemory::remove_segments() {
|
||||
_segments.clear_and_deallocate();
|
||||
}
|
||||
|
||||
static bool is_mergable(const ZPhysicalMemorySegment& before, const ZPhysicalMemorySegment& after) {
|
||||
return before.end() == after.start() && before.is_committed() == after.is_committed();
|
||||
}
|
||||
|
||||
void ZPhysicalMemory::add_segment(const ZPhysicalMemorySegment& segment) {
|
||||
// Insert segments in address order, merge segments when possible
|
||||
for (int i = _segments.length(); i > 0; i--) {
|
||||
const int current = i - 1;
|
||||
|
||||
if (_segments.at(current).end() <= segment.start()) {
|
||||
if (is_mergable(_segments.at(current), segment)) {
|
||||
if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) {
|
||||
// Merge with end of current segment and start of next segment
|
||||
const zoffset start = _segments.at(current).start();
|
||||
const size_t size = _segments.at(current).size() + segment.size() + _segments.at(current + 1).size();
|
||||
replace_segment(current, start, size, segment.is_committed());
|
||||
remove_segment(current + 1);
|
||||
return;
|
||||
}
|
||||
|
||||
// Merge with end of current segment
|
||||
const zoffset start = _segments.at(current).start();
|
||||
const size_t size = _segments.at(current).size() + segment.size();
|
||||
replace_segment(current, start, size, segment.is_committed());
|
||||
return;
|
||||
} else if (current + 1 < _segments.length() && is_mergable(segment, _segments.at(current + 1))) {
|
||||
// Merge with start of next segment
|
||||
const zoffset start = segment.start();
|
||||
const size_t size = segment.size() + _segments.at(current + 1).size();
|
||||
replace_segment(current + 1, start, size, segment.is_committed());
|
||||
return;
|
||||
}
|
||||
|
||||
// Insert after current segment
|
||||
insert_segment(current + 1, segment.start(), segment.size(), segment.is_committed());
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (_segments.length() > 0 && is_mergable(segment, _segments.at(0))) {
|
||||
// Merge with start of first segment
|
||||
const zoffset start = segment.start();
|
||||
const size_t size = segment.size() + _segments.at(0).size();
|
||||
replace_segment(0, start, size, segment.is_committed());
|
||||
return;
|
||||
}
|
||||
|
||||
// Insert before first segment
|
||||
insert_segment(0, segment.start(), segment.size(), segment.is_committed());
|
||||
}
|
||||
|
||||
bool ZPhysicalMemory::commit_segment(int index, size_t size) {
|
||||
assert(size <= _segments.at(index).size(), "Invalid size");
|
||||
assert(!_segments.at(index).is_committed(), "Invalid state");
|
||||
|
||||
if (size == _segments.at(index).size()) {
|
||||
// Completely committed
|
||||
_segments.at(index).set_committed(true);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (size > 0) {
|
||||
// Partially committed, split segment
|
||||
insert_segment(index + 1, _segments.at(index).start() + size, _segments.at(index).size() - size, false /* committed */);
|
||||
replace_segment(index, _segments.at(index).start(), size, true /* committed */);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ZPhysicalMemory::uncommit_segment(int index, size_t size) {
|
||||
assert(size <= _segments.at(index).size(), "Invalid size");
|
||||
assert(_segments.at(index).is_committed(), "Invalid state");
|
||||
|
||||
if (size == _segments.at(index).size()) {
|
||||
// Completely uncommitted
|
||||
_segments.at(index).set_committed(false);
|
||||
return true;
|
||||
}
|
||||
|
||||
if (size > 0) {
|
||||
// Partially uncommitted, split segment
|
||||
insert_segment(index + 1, _segments.at(index).start() + size, _segments.at(index).size() - size, true /* committed */);
|
||||
replace_segment(index, _segments.at(index).start(), size, false /* committed */);
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
ZPhysicalMemory ZPhysicalMemory::split(size_t size) {
|
||||
ZPhysicalMemory pmem;
|
||||
int nsegments = 0;
|
||||
|
||||
for (int i = 0; i < _segments.length(); i++) {
|
||||
const ZPhysicalMemorySegment& segment = _segments.at(i);
|
||||
if (pmem.size() < size) {
|
||||
if (pmem.size() + segment.size() <= size) {
|
||||
// Transfer segment
|
||||
pmem.add_segment(segment);
|
||||
} else {
|
||||
// Split segment
|
||||
const size_t split_size = size - pmem.size();
|
||||
pmem.add_segment(ZPhysicalMemorySegment(segment.start(), split_size, segment.is_committed()));
|
||||
_segments.at_put(nsegments++, ZPhysicalMemorySegment(segment.start() + split_size, segment.size() - split_size, segment.is_committed()));
|
||||
}
|
||||
} else {
|
||||
// Keep segment
|
||||
_segments.at_put(nsegments++, segment);
|
||||
}
|
||||
}
|
||||
|
||||
_segments.trunc_to(nsegments);
|
||||
|
||||
return pmem;
|
||||
}
|
||||
|
||||
ZPhysicalMemory ZPhysicalMemory::split_committed() {
|
||||
ZPhysicalMemory pmem;
|
||||
int nsegments = 0;
|
||||
|
||||
for (int i = 0; i < _segments.length(); i++) {
|
||||
const ZPhysicalMemorySegment& segment = _segments.at(i);
|
||||
if (segment.is_committed()) {
|
||||
// Transfer segment
|
||||
pmem.add_segment(segment);
|
||||
} else {
|
||||
// Keep segment
|
||||
_segments.at_put(nsegments++, segment);
|
||||
}
|
||||
}
|
||||
|
||||
_segments.trunc_to(nsegments);
|
||||
|
||||
return pmem;
|
||||
}
|
||||
|
||||
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity)
|
||||
: _backing(max_capacity) {
|
||||
// Make the whole range free
|
||||
_manager.register_range(zoffset(0), max_capacity);
|
||||
}
|
||||
|
||||
bool ZPhysicalMemoryManager::is_initialized() const {
|
||||
return _backing.is_initialized();
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const {
|
||||
_backing.warn_commit_limits(max_capacity);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) {
|
||||
assert(!is_init_completed(), "Invalid state");
|
||||
|
||||
// If uncommit is not explicitly disabled, max capacity is greater than
|
||||
// min capacity, and uncommit is supported by the platform, then uncommit
|
||||
// will be enabled.
|
||||
if (!ZUncommit) {
|
||||
log_info_p(gc, init)("Uncommit: Disabled");
|
||||
return;
|
||||
}
|
||||
|
||||
if (max_capacity == min_capacity) {
|
||||
log_info_p(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)");
|
||||
FLAG_SET_ERGO(ZUncommit, false);
|
||||
return;
|
||||
}
|
||||
|
||||
// Test if uncommit is supported by the operating system by committing
|
||||
// and then uncommitting a granule.
|
||||
ZPhysicalMemory pmem(ZPhysicalMemorySegment(zoffset(0), ZGranuleSize, false /* committed */));
|
||||
if (!commit(pmem) || !uncommit(pmem)) {
|
||||
log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)");
|
||||
FLAG_SET_ERGO(ZUncommit, false);
|
||||
return;
|
||||
}
|
||||
|
||||
log_info_p(gc, init)("Uncommit: Enabled");
|
||||
log_info_p(gc, init)("Uncommit Delay: %zus", ZUncommitDelay);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::alloc(ZPhysicalMemory& pmem, size_t size) {
|
||||
assert(is_aligned(size, ZGranuleSize), "Invalid size");
|
||||
|
||||
// Allocate segments
|
||||
while (size > 0) {
|
||||
size_t allocated = 0;
|
||||
const zoffset start = _manager.alloc_low_address_at_most(size, &allocated);
|
||||
assert(start != zoffset(UINTPTR_MAX), "Allocation should never fail");
|
||||
pmem.add_segment(ZPhysicalMemorySegment(start, allocated, false /* committed */));
|
||||
size -= allocated;
|
||||
}
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::free(const ZPhysicalMemory& pmem) {
|
||||
// Free segments
|
||||
for (int i = 0; i < pmem.nsegments(); i++) {
|
||||
const ZPhysicalMemorySegment& segment = pmem.segment(i);
|
||||
_manager.free(segment.start(), segment.size());
|
||||
}
|
||||
}
|
||||
|
||||
bool ZPhysicalMemoryManager::commit(ZPhysicalMemory& pmem) {
|
||||
// Commit segments
|
||||
for (int i = 0; i < pmem.nsegments(); i++) {
|
||||
const ZPhysicalMemorySegment& segment = pmem.segment(i);
|
||||
if (segment.is_committed()) {
|
||||
// Segment already committed
|
||||
continue;
|
||||
}
|
||||
|
||||
// Commit segment
|
||||
const size_t committed = _backing.commit(segment.start(), segment.size());
|
||||
|
||||
// Register with NMT
|
||||
if (committed > 0) {
|
||||
ZNMT::commit(segment.start(), committed);
|
||||
}
|
||||
|
||||
// Register committed segment
|
||||
if (!pmem.commit_segment(i, committed)) {
|
||||
// Failed or partially failed
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Success
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ZPhysicalMemoryManager::uncommit(ZPhysicalMemory& pmem) {
|
||||
// Uncommit segments
|
||||
for (int i = 0; i < pmem.nsegments(); i++) {
|
||||
const ZPhysicalMemorySegment& segment = pmem.segment(i);
|
||||
if (!segment.is_committed()) {
|
||||
// Segment already uncommitted
|
||||
continue;
|
||||
}
|
||||
|
||||
// Uncommit segment
|
||||
const size_t uncommitted = _backing.uncommit(segment.start(), segment.size());
|
||||
|
||||
// Unregister with NMT
|
||||
if (uncommitted > 0) {
|
||||
ZNMT::uncommit(segment.start(), uncommitted);
|
||||
}
|
||||
|
||||
// Deregister uncommitted segment
|
||||
if (!pmem.uncommit_segment(i, uncommitted)) {
|
||||
// Failed or partially failed
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Success
|
||||
return true;
|
||||
}
|
||||
|
||||
// Map virtual memory to physical memory
|
||||
void ZPhysicalMemoryManager::map(zoffset offset, const ZPhysicalMemory& pmem) const {
|
||||
const zaddress_unsafe addr = ZOffset::address_unsafe(offset);
|
||||
|
||||
size_t size = 0;
|
||||
|
||||
// Map segments
|
||||
for (int i = 0; i < pmem.nsegments(); i++) {
|
||||
const ZPhysicalMemorySegment& segment = pmem.segment(i);
|
||||
_backing.map(addr + size, segment.size(), segment.start());
|
||||
size += segment.size();
|
||||
}
|
||||
|
||||
// Setup NUMA interleaving for large pages
|
||||
if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
|
||||
// To get granule-level NUMA interleaving when using large pages,
|
||||
// we simply let the kernel interleave the memory for us at page
|
||||
// fault time.
|
||||
os::numa_make_global((char*)addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
// Unmap virtual memory from physical memory
|
||||
void ZPhysicalMemoryManager::unmap(zoffset offset, size_t size) const {
|
||||
const zaddress_unsafe addr = ZOffset::address_unsafe(offset);
|
||||
|
||||
_backing.unmap(addr, size);
|
||||
}
|
||||
@@ -1,105 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZPHYSICALMEMORY_HPP
|
||||
#define SHARE_GC_Z_ZPHYSICALMEMORY_HPP
|
||||
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zArray.hpp"
|
||||
#include "gc/z/zMemory.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include OS_HEADER(gc/z/zPhysicalMemoryBacking)
|
||||
|
||||
class ZPhysicalMemorySegment : public CHeapObj<mtGC> {
|
||||
private:
|
||||
zoffset _start;
|
||||
zoffset_end _end;
|
||||
bool _committed;
|
||||
|
||||
public:
|
||||
ZPhysicalMemorySegment();
|
||||
ZPhysicalMemorySegment(zoffset start, size_t size, bool committed);
|
||||
|
||||
zoffset start() const;
|
||||
zoffset_end end() const;
|
||||
size_t size() const;
|
||||
|
||||
bool is_committed() const;
|
||||
void set_committed(bool committed);
|
||||
};
|
||||
|
||||
class ZPhysicalMemory {
|
||||
private:
|
||||
ZArray<ZPhysicalMemorySegment> _segments;
|
||||
|
||||
void insert_segment(int index, zoffset start, size_t size, bool committed);
|
||||
void replace_segment(int index, zoffset start, size_t size, bool committed);
|
||||
void remove_segment(int index);
|
||||
|
||||
public:
|
||||
ZPhysicalMemory();
|
||||
ZPhysicalMemory(const ZPhysicalMemorySegment& segment);
|
||||
ZPhysicalMemory(const ZPhysicalMemory& pmem);
|
||||
const ZPhysicalMemory& operator=(const ZPhysicalMemory& pmem);
|
||||
|
||||
bool is_null() const;
|
||||
size_t size() const;
|
||||
|
||||
int nsegments() const;
|
||||
const ZPhysicalMemorySegment& segment(int index) const;
|
||||
|
||||
void add_segments(const ZPhysicalMemory& pmem);
|
||||
void remove_segments();
|
||||
|
||||
void add_segment(const ZPhysicalMemorySegment& segment);
|
||||
bool commit_segment(int index, size_t size);
|
||||
bool uncommit_segment(int index, size_t size);
|
||||
|
||||
ZPhysicalMemory split(size_t size);
|
||||
ZPhysicalMemory split_committed();
|
||||
};
|
||||
|
||||
class ZPhysicalMemoryManager {
|
||||
private:
|
||||
ZPhysicalMemoryBacking _backing;
|
||||
ZMemoryManager _manager;
|
||||
|
||||
public:
|
||||
ZPhysicalMemoryManager(size_t max_capacity);
|
||||
|
||||
bool is_initialized() const;
|
||||
|
||||
void warn_commit_limits(size_t max_capacity) const;
|
||||
void try_enable_uncommit(size_t min_capacity, size_t max_capacity);
|
||||
|
||||
void alloc(ZPhysicalMemory& pmem, size_t size);
|
||||
void free(const ZPhysicalMemory& pmem);
|
||||
|
||||
bool commit(ZPhysicalMemory& pmem);
|
||||
bool uncommit(ZPhysicalMemory& pmem);
|
||||
|
||||
void map(zoffset offset, const ZPhysicalMemory& pmem) const;
|
||||
void unmap(zoffset offset, size_t size) const;
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZPHYSICALMEMORY_HPP
|
||||
@@ -1,74 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP
|
||||
#define SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP
|
||||
|
||||
#include "gc/z/zPhysicalMemory.hpp"
|
||||
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
inline ZPhysicalMemorySegment::ZPhysicalMemorySegment()
|
||||
: _start(zoffset(UINTPTR_MAX)),
|
||||
_end(zoffset_end(UINTPTR_MAX)),
|
||||
_committed(false) {}
|
||||
|
||||
inline ZPhysicalMemorySegment::ZPhysicalMemorySegment(zoffset start, size_t size, bool committed)
|
||||
: _start(start),
|
||||
_end(to_zoffset_end(start, size)),
|
||||
_committed(committed) {}
|
||||
|
||||
inline zoffset ZPhysicalMemorySegment::start() const {
|
||||
return _start;
|
||||
}
|
||||
|
||||
inline zoffset_end ZPhysicalMemorySegment::end() const {
|
||||
return _end;
|
||||
}
|
||||
|
||||
inline size_t ZPhysicalMemorySegment::size() const {
|
||||
return _end - _start;
|
||||
}
|
||||
|
||||
inline bool ZPhysicalMemorySegment::is_committed() const {
|
||||
return _committed;
|
||||
}
|
||||
|
||||
inline void ZPhysicalMemorySegment::set_committed(bool committed) {
|
||||
_committed = committed;
|
||||
}
|
||||
|
||||
inline bool ZPhysicalMemory::is_null() const {
|
||||
return _segments.length() == 0;
|
||||
}
|
||||
|
||||
inline int ZPhysicalMemory::nsegments() const {
|
||||
return _segments.length();
|
||||
}
|
||||
|
||||
inline const ZPhysicalMemorySegment& ZPhysicalMemory::segment(int index) const {
|
||||
return _segments.at(index);
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_Z_ZPHYSICALMEMORY_INLINE_HPP
|
||||
src/hotspot/share/gc/z/zPhysicalMemoryManager.cpp (new file, 376 lines)
@@ -0,0 +1,376 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zArray.inline.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zLargePages.inline.hpp"
|
||||
#include "gc/z/zList.inline.hpp"
|
||||
#include "gc/z/zNMT.hpp"
|
||||
#include "gc/z/zNUMA.inline.hpp"
|
||||
#include "gc/z/zPhysicalMemoryManager.hpp"
|
||||
#include "gc/z/zRangeRegistry.inline.hpp"
|
||||
#include "gc/z/zUtils.inline.hpp"
|
||||
#include "gc/z/zValue.inline.hpp"
|
||||
#include "gc/z/zVirtualMemory.inline.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "runtime/init.hpp"
|
||||
#include "runtime/os.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/powerOfTwo.hpp"
|
||||
|
||||
ZPhysicalMemoryManager::ZPhysicalMemoryManager(size_t max_capacity)
|
||||
: _backing(max_capacity),
|
||||
_physical_mappings(ZAddressOffsetMax) {
|
||||
assert(is_aligned(max_capacity, ZGranuleSize), "must be granule aligned");
|
||||
|
||||
// Setup backing storage limits
|
||||
ZBackingOffsetMax = max_capacity;
|
||||
ZBackingIndexMax = checked_cast<uint32_t>(max_capacity >> ZGranuleSizeShift);
|
||||
|
||||
// Install capacity into the per-partition registries
|
||||
const size_t num_segments_total = max_capacity >> ZGranuleSizeShift;
|
||||
zbacking_index_end next_index = zbacking_index_end::zero;
|
||||
uint32_t numa_id;
|
||||
ZPerNUMAIterator<ZBackingIndexRegistry> iter(&_partition_registries);
|
||||
for (ZBackingIndexRegistry* registry; iter.next(®istry, &numa_id);) {
|
||||
const size_t num_segments = ZNUMA::calculate_share(numa_id, num_segments_total, 1 /* granule */);
|
||||
|
||||
if (num_segments == 0) {
|
||||
// If the capacity consists of fewer granules than the number of partitions,
// some partitions will be empty.
|
||||
break;
|
||||
}
|
||||
|
||||
const zbacking_index index = to_zbacking_index(next_index);
|
||||
|
||||
// Insert the next num_segments segment indices into this partition's registry
|
||||
registry->insert({index, num_segments});
|
||||
|
||||
// Advance to next index by the inserted number of segment indices
|
||||
next_index += num_segments;
|
||||
}
|
||||
|
||||
assert(untype(next_index) == ZBackingIndexMax, "must insert all capacity");
|
||||
}
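To make the share calculation above concrete, here is a self-contained sketch of one way a per-partition granule share can behave so that every granule is handed out exactly once; it is an illustration only and not the actual ZNUMA::calculate_share implementation.

#include <cstddef>
#include <cstdint>

// Illustration only (not the real ZNUMA::calculate_share): split
// total_granules across `partitions` so the shares sum to the total and
// differ by at most one granule. With total_granules == 2 and
// partitions == 4 the trailing shares are 0, which is the "some partitions
// will be empty" case the constructor above breaks out of the loop on.
static size_t even_share(uint32_t partition, size_t total_granules, uint32_t partitions) {
  const size_t base = total_granules / partitions;
  const size_t extra = (partition < total_granules % partitions) ? 1 : 0;
  return base + extra;
}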
|
||||
|
||||
bool ZPhysicalMemoryManager::is_initialized() const {
|
||||
return _backing.is_initialized();
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::warn_commit_limits(size_t max_capacity) const {
|
||||
_backing.warn_commit_limits(max_capacity);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::try_enable_uncommit(size_t min_capacity, size_t max_capacity) {
|
||||
assert(!is_init_completed(), "Invalid state");
|
||||
|
||||
// If uncommit is not explicitly disabled, max capacity is greater than
|
||||
// min capacity, and uncommit is supported by the platform, then uncommit
|
||||
// will be enabled.
|
||||
if (!ZUncommit) {
|
||||
log_info_p(gc, init)("Uncommit: Disabled");
|
||||
return;
|
||||
}
|
||||
|
||||
if (max_capacity == min_capacity) {
|
||||
log_info_p(gc, init)("Uncommit: Implicitly Disabled (-Xms equals -Xmx)");
|
||||
FLAG_SET_ERGO(ZUncommit, false);
|
||||
return;
|
||||
}
|
||||
|
||||
// Test if uncommit is supported by the operating system by committing
|
||||
// and then uncommitting a granule.
|
||||
const ZVirtualMemory vmem(zoffset(0), ZGranuleSize);
|
||||
if (!commit(vmem, (uint32_t)-1) || !uncommit(vmem)) {
|
||||
log_info_p(gc, init)("Uncommit: Implicitly Disabled (Not supported by operating system)");
|
||||
FLAG_SET_ERGO(ZUncommit, false);
|
||||
return;
|
||||
}
|
||||
|
||||
log_info_p(gc, init)("Uncommit: Enabled");
|
||||
log_info_p(gc, init)("Uncommit Delay: %zus", ZUncommitDelay);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::alloc(const ZVirtualMemory& vmem, uint32_t numa_id) {
|
||||
zbacking_index* const pmem = _physical_mappings.addr(vmem.start());
|
||||
const size_t size = vmem.size();
|
||||
|
||||
assert(is_aligned(size, ZGranuleSize), "Invalid size");
|
||||
|
||||
size_t current_segment = 0;
|
||||
size_t remaining_segments = size >> ZGranuleSizeShift;
|
||||
|
||||
while (remaining_segments != 0) {
|
||||
// Allocate a range of backing segment indices
|
||||
ZBackingIndexRegistry& registry = _partition_registries.get(numa_id);
|
||||
const ZBackingIndexRange range = registry.remove_from_low_at_most(remaining_segments);
|
||||
assert(!range.is_null(), "Allocation should never fail");
|
||||
|
||||
const size_t num_allocated_segments = range.size();
|
||||
|
||||
// Insert backing segment indices in pmem
|
||||
const zbacking_index start_i = range.start();
|
||||
for (size_t i = 0; i < num_allocated_segments; i++) {
|
||||
pmem[current_segment + i] = start_i + i;
|
||||
}
|
||||
|
||||
// Advance by number of allocated segments
|
||||
remaining_segments -= num_allocated_segments;
|
||||
current_segment += num_allocated_segments;
|
||||
}
|
||||
}
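As an illustration of the loop above (all numbers hypothetical): a 6-granule vmem whose partition registry can only hand out 4 consecutive indices on the first call might receive indices 12..15 first and then 20..21 on the second iteration, so the pmem mapping array ends up holding a non-consecutive index sequence; sort_segments_physical() further down can later reorder such a mapping into ascending index order.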
|
||||
|
||||
template <typename ReturnType>
|
||||
struct IterateInvoker {
|
||||
template<typename Function>
|
||||
bool operator()(Function function, zbacking_offset segment_start, size_t segment_size) const {
|
||||
return function(segment_start, segment_size);
|
||||
}
|
||||
};
|
||||
|
||||
template<>
|
||||
struct IterateInvoker<void> {
|
||||
template<typename Function>
|
||||
bool operator()(Function function, zbacking_offset segment_start, size_t segment_size) const {
|
||||
function(segment_start, segment_size);
|
||||
return true;
|
||||
}
|
||||
};
|
||||
|
||||
template<typename Function>
|
||||
bool for_each_segment_apply(const zbacking_index* pmem, size_t size, Function function) {
|
||||
IterateInvoker<decltype(function(zbacking_offset{}, size_t{}))> invoker;
|
||||
|
||||
// Total number of segment indices
|
||||
const size_t num_segments = size >> ZGranuleSizeShift;
|
||||
|
||||
// Apply the function over all zbacking_offset ranges consisting of consecutive indices
|
||||
for (size_t i = 0; i < num_segments; i++) {
|
||||
const size_t start_i = i;
|
||||
|
||||
// Find index corresponding to the last index in the consecutive range starting at start_i
|
||||
while (i + 1 < num_segments && to_zbacking_index_end(pmem[i], 1) == pmem[i + 1]) {
|
||||
i++;
|
||||
}
|
||||
|
||||
const size_t last_i = i;
|
||||
|
||||
// [start_i, last_i] now forms a consecutive range of indices in pmem
|
||||
const size_t num_indicies = last_i - start_i + 1;
|
||||
const zbacking_offset start = to_zbacking_offset(pmem[start_i]);
|
||||
const size_t size = num_indicies * ZGranuleSize;
|
||||
|
||||
// Invoke function on zbacking_offset Range [start, start + size[
|
||||
if (!invoker(function, start, size)) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
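The IterateInvoker/for_each_segment_apply pair above dispatches on the callback's return type, so a bool-returning lambda can abort the walk while a void-returning lambda always continues. Below is a self-contained sketch of the same technique, with illustrative names that are not part of HotSpot.

#include <cstddef>

// Sketch of the return-type dispatch used by IterateInvoker above.
// A bool-returning callback decides whether to continue; a void-returning
// callback always continues. Names are illustrative, not HotSpot code.
template <typename ReturnType>
struct InvokerSketch {
  template <typename Function>
  bool operator()(Function function, size_t value) const {
    return function(value);   // bool-returning callback controls the walk
  }
};

template <>
struct InvokerSketch<void> {
  template <typename Function>
  bool operator()(Function function, size_t value) const {
    function(value);          // void-returning callback: always continue
    return true;
  }
};

template <typename Function>
bool for_each_sketch(const size_t* values, size_t count, Function function) {
  InvokerSketch<decltype(function(size_t{}))> invoker;
  for (size_t i = 0; i < count; i++) {
    if (!invoker(function, values[i])) {
      return false;           // aborted early by a bool-returning callback
    }
  }
  return true;
}

With this sketch, a callback like [](size_t v) { sum += v; } visits every element, while [](size_t v) { return v != 0; } stops the walk at the first zero, mirroring how commit() and uncommit() above stop on a partially committed or uncommitted segment.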
|
||||
|
||||
void ZPhysicalMemoryManager::free(const ZVirtualMemory& vmem, uint32_t numa_id) {
|
||||
zbacking_index* const pmem = _physical_mappings.addr(vmem.start());
|
||||
const size_t size = vmem.size();
|
||||
|
||||
// Free segments
|
||||
for_each_segment_apply(pmem, size, [&](zbacking_offset segment_start, size_t segment_size) {
|
||||
const size_t num_segments = segment_size >> ZGranuleSizeShift;
|
||||
const zbacking_index index = to_zbacking_index(segment_start);
|
||||
|
||||
// Insert the free segment indices
|
||||
_partition_registries.get(numa_id).insert({index, num_segments});
|
||||
});
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryManager::commit(const ZVirtualMemory& vmem, uint32_t numa_id) {
|
||||
zbacking_index* const pmem = _physical_mappings.addr(vmem.start());
|
||||
const size_t size = vmem.size();
|
||||
|
||||
size_t total_committed = 0;
|
||||
|
||||
// Commit segments
|
||||
for_each_segment_apply(pmem, size, [&](zbacking_offset segment_start, size_t segment_size) {
|
||||
// Commit segment
|
||||
const size_t committed = _backing.commit(segment_start, segment_size, numa_id);
|
||||
|
||||
total_committed += committed;
|
||||
|
||||
// Register with NMT
|
||||
if (committed > 0) {
|
||||
ZNMT::commit(segment_start, committed);
|
||||
}
|
||||
|
||||
return segment_size == committed;
|
||||
});
|
||||
|
||||
// Success
|
||||
return total_committed;
|
||||
}
|
||||
|
||||
size_t ZPhysicalMemoryManager::uncommit(const ZVirtualMemory& vmem) {
|
||||
zbacking_index* const pmem = _physical_mappings.addr(vmem.start());
|
||||
const size_t size = vmem.size();
|
||||
|
||||
size_t total_uncommitted = 0;
|
||||
|
||||
// Uncommit segments
|
||||
for_each_segment_apply(pmem, size, [&](zbacking_offset segment_start, size_t segment_size) {
|
||||
// Uncommit segment
|
||||
const size_t uncommitted = _backing.uncommit(segment_start, segment_size);
|
||||
|
||||
total_uncommitted += uncommitted;
|
||||
|
||||
// Unregister with NMT
|
||||
if (uncommitted > 0) {
|
||||
ZNMT::uncommit(segment_start, uncommitted);
|
||||
}
|
||||
|
||||
return segment_size == uncommitted;
|
||||
});
|
||||
|
||||
// Success
|
||||
return total_uncommitted;
|
||||
}
|
||||
|
||||
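The commit and uncommit paths above hand for_each_segment_apply a bool-returning lambda, so iteration stops after the first partially committed segment, while free and map use void lambdas and always visit every run; IterateInvoker picks the behavior from the lambda's return type. A standalone sketch of that return-type dispatch (names are illustrative, not the HotSpot ones):

#include <cstdio>
#include <vector>

template <typename ReturnType>
struct Invoker {
  template <typename Function>
  bool operator()(Function function, int value) const {
    return function(value);   // bool callback decides whether to continue
  }
};

template <>
struct Invoker<void> {
  template <typename Function>
  bool operator()(Function function, int value) const {
    function(value);          // void callback: always continue
    return true;
  }
};

template <typename Function>
void visit_all(const std::vector<int>& values, Function function) {
  Invoker<decltype(function(int{}))> invoker;
  for (int value : values) {
    if (!invoker(function, value)) {
      return;                 // stop early, mirroring a partial commit
    }
  }
}

int main() {
  const std::vector<int> values = {16, 8, 4};
  // void lambda: visits every element
  visit_all(values, [](int v) { std::printf("visit %d\n", v); });
  // bool lambda: stops once the "commit" comes back partial (v < 8 here)
  visit_all(values, [](int v) { std::printf("commit %d\n", v); return v >= 8; });
  return 0;
}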
// Map virtual memory to physical memory
|
||||
void ZPhysicalMemoryManager::map(const ZVirtualMemory& vmem, uint32_t numa_id) const {
|
||||
const zbacking_index* const pmem = _physical_mappings.addr(vmem.start());
|
||||
const zaddress_unsafe addr = ZOffset::address_unsafe(vmem.start());
|
||||
const size_t size = vmem.size();
|
||||
|
||||
size_t mapped = 0;
|
||||
|
||||
for_each_segment_apply(pmem, size, [&](zbacking_offset segment_start, size_t segment_size) {
|
||||
_backing.map(addr + mapped, segment_size, segment_start);
|
||||
mapped += segment_size;
|
||||
});
|
||||
|
||||
postcond(mapped == size);
|
||||
|
||||
// Setup NUMA preferred for large pages
|
||||
if (ZNUMA::is_enabled() && ZLargePages::is_explicit()) {
|
||||
os::numa_make_local((char*)addr, size, (int)numa_id);
|
||||
}
|
||||
}
|
||||
|
||||
// Unmap virtual memory from physical memory
|
||||
void ZPhysicalMemoryManager::unmap(const ZVirtualMemory& vmem) const {
|
||||
const zaddress_unsafe addr = ZOffset::address_unsafe(vmem.start());
|
||||
const size_t size = vmem.size();
|
||||
_backing.unmap(addr, size);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::copy_physical_segments(const ZVirtualMemory& to, const ZVirtualMemory& from) {
|
||||
assert(to.size() == from.size(), "must be of the same size");
|
||||
|
||||
zbacking_index* const dest = _physical_mappings.addr(to.start());
|
||||
const zbacking_index* const src = _physical_mappings.addr(from.start());
|
||||
const int granule_count = from.granule_count();
|
||||
|
||||
ZUtils::copy_disjoint(dest, src, granule_count);
|
||||
}
|
||||
|
||||
static void sort_zbacking_index_array(zbacking_index* array, int count) {
|
||||
ZUtils::sort(array, count, [](const zbacking_index* e1, const zbacking_index* e2) {
|
||||
return *e1 < *e2 ? -1 : 1;
|
||||
});
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::sort_segments_physical(const ZVirtualMemory& vmem) {
|
||||
zbacking_index* const pmem = _physical_mappings.addr(vmem.start());
|
||||
const int granule_count = vmem.granule_count();
|
||||
|
||||
// Sort physical segments
|
||||
sort_zbacking_index_array(pmem, granule_count);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::copy_to_stash(ZArraySlice<zbacking_index> stash, const ZVirtualMemory& vmem) const {
|
||||
zbacking_index* const dest = stash.adr_at(0);
|
||||
const zbacking_index* const src = _physical_mappings.addr(vmem.start());
|
||||
const int granule_count = vmem.granule_count();
|
||||
|
||||
// Check bounds
|
||||
assert(granule_count <= stash.length(), "Copy overflow %d <= %d", granule_count, stash.length());
|
||||
|
||||
// Copy to stash
|
||||
ZUtils::copy_disjoint(dest, src, granule_count);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::copy_from_stash(const ZArraySlice<const zbacking_index> stash, const ZVirtualMemory& vmem) {
|
||||
zbacking_index* const dest = _physical_mappings.addr(vmem.start());
|
||||
const zbacking_index* const src = stash.adr_at(0);
|
||||
const int granule_count = vmem.granule_count();
|
||||
|
||||
// Check bounds
|
||||
assert(granule_count <= stash.length(), "Copy overflow %d <= %d", granule_count, stash.length());
|
||||
|
||||
// Copy from stash
|
||||
ZUtils::copy_disjoint(dest, src, granule_count);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::stash_segments(const ZVirtualMemory& vmem, ZArray<zbacking_index>* stash_out) const {
|
||||
precond(stash_out->is_empty());
|
||||
|
||||
stash_out->at_grow(vmem.granule_count() - 1);
|
||||
copy_to_stash(*stash_out, vmem);
|
||||
sort_zbacking_index_array(stash_out->adr_at(0), stash_out->length());
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::restore_segments(const ZVirtualMemory& vmem, const ZArray<zbacking_index>& stash) {
|
||||
assert(vmem.granule_count() == stash.length(), "Must match stash size");
|
||||
|
||||
copy_from_stash(stash, vmem);
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::stash_segments(const ZArraySlice<const ZVirtualMemory>& vmems, ZArray<zbacking_index>* stash_out) const {
|
||||
precond(stash_out->is_empty());
|
||||
|
||||
int stash_index = 0;
|
||||
for (const ZVirtualMemory& vmem : vmems) {
|
||||
const int granule_count = vmem.granule_count();
|
||||
stash_out->at_grow(stash_index + vmem.granule_count() - 1);
|
||||
copy_to_stash(stash_out->slice_back(stash_index), vmem);
|
||||
stash_index += granule_count;
|
||||
}
|
||||
|
||||
sort_zbacking_index_array(stash_out->adr_at(0), stash_out->length());
|
||||
|
||||
}
|
||||
|
||||
void ZPhysicalMemoryManager::restore_segments(const ZArraySlice<const ZVirtualMemory>& vmems, const ZArray<zbacking_index>& stash) {
|
||||
int stash_index = 0;
|
||||
|
||||
for (const ZVirtualMemory& vmem : vmems) {
|
||||
copy_from_stash(stash.slice_back(stash_index), vmem);
|
||||
stash_index += vmem.granule_count();
|
||||
}
|
||||
|
||||
assert(stash_index == stash.length(), "Must have emptied the stash");
|
||||
}
|
||||
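stash_segments copies (and sorts) the backing indices behind one or more virtual ranges into a side array, and restore_segments later writes them back into the mapping table for the target ranges. A simplified round-trip sketch using plain vectors instead of ZGranuleMap/ZArray:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // Per-granule backing indices behind a virtual range of four granules
  std::vector<uint32_t> mappings = {7, 3, 9, 5};

  // Stash: copy the indices aside and sort them, so the restored range can
  // later be mapped with fewer, larger contiguous segments
  std::vector<uint32_t> stash(mappings);
  std::sort(stash.begin(), stash.end());

  // ... the virtual range is replaced here (e.g. harvested and remapped) ...
  std::vector<uint32_t> new_mappings(4, 0);

  // Restore: copy the stashed indices into the new range's mapping slots
  assert(stash.size() == new_mappings.size());
  std::copy(stash.begin(), stash.end(), new_mappings.begin());

  assert(new_mappings.front() == 3 && new_mappings.back() == 9);
  return 0;
}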
79
src/hotspot/share/gc/z/zPhysicalMemoryManager.hpp
Normal file
@ -0,0 +1,79 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZPHYSICALMEMORYMANAGER_HPP
|
||||
#define SHARE_GC_Z_ZPHYSICALMEMORYMANAGER_HPP
|
||||
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zArray.hpp"
|
||||
#include "gc/z/zGranuleMap.hpp"
|
||||
#include "gc/z/zRange.hpp"
|
||||
#include "gc/z/zRangeRegistry.hpp"
|
||||
#include "gc/z/zValue.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include OS_HEADER(gc/z/zPhysicalMemoryBacking)
|
||||
|
||||
class ZVirtualMemory;
|
||||
|
||||
using ZBackingIndexRange = ZRange<zbacking_index, zbacking_index_end>;
|
||||
|
||||
class ZPhysicalMemoryManager {
|
||||
private:
|
||||
using ZBackingIndexRegistry = ZRangeRegistry<ZBackingIndexRange>;
|
||||
|
||||
ZPhysicalMemoryBacking _backing;
|
||||
ZPerNUMA<ZBackingIndexRegistry> _partition_registries;
|
||||
ZGranuleMap<zbacking_index> _physical_mappings;
|
||||
|
||||
void copy_to_stash(ZArraySlice<zbacking_index> stash, const ZVirtualMemory& vmem) const;
|
||||
void copy_from_stash(const ZArraySlice<const zbacking_index> stash, const ZVirtualMemory& vmem);
|
||||
|
||||
public:
|
||||
ZPhysicalMemoryManager(size_t max_capacity);
|
||||
|
||||
bool is_initialized() const;
|
||||
|
||||
void warn_commit_limits(size_t max_capacity) const;
|
||||
void try_enable_uncommit(size_t min_capacity, size_t max_capacity);
|
||||
|
||||
void alloc(const ZVirtualMemory& vmem, uint32_t numa_id);
|
||||
void free(const ZVirtualMemory& vmem, uint32_t numa_id);
|
||||
|
||||
size_t commit(const ZVirtualMemory& vmem, uint32_t numa_id);
|
||||
size_t uncommit(const ZVirtualMemory& vmem);
|
||||
|
||||
void map(const ZVirtualMemory& vmem, uint32_t numa_id) const;
|
||||
void unmap(const ZVirtualMemory& vmem) const;
|
||||
|
||||
void copy_physical_segments(const ZVirtualMemory& to, const ZVirtualMemory& from);
|
||||
|
||||
void sort_segments_physical(const ZVirtualMemory& vmem);
|
||||
|
||||
void stash_segments(const ZVirtualMemory& vmem, ZArray<zbacking_index>* stash_out) const;
|
||||
void restore_segments(const ZVirtualMemory& vmem, const ZArray<zbacking_index>& stash);
|
||||
|
||||
void stash_segments(const ZArraySlice<const ZVirtualMemory>& vmems, ZArray<zbacking_index>* stash_out) const;
|
||||
void restore_segments(const ZArraySlice<const ZVirtualMemory>& vmems, const ZArray<zbacking_index>& stash);
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZPHYSICALMEMORYMANAGER_HPP
|
||||
76
src/hotspot/share/gc/z/zRange.hpp
Normal file
@ -0,0 +1,76 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZRANGE_HPP
|
||||
#define SHARE_GC_Z_ZRANGE_HPP
|
||||
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
template <typename Start, typename End>
|
||||
class ZRange {
|
||||
friend class VMStructs;
|
||||
|
||||
public:
|
||||
using offset = Start;
|
||||
using offset_end = End;
|
||||
|
||||
private:
|
||||
End _start;
|
||||
size_t _size;
|
||||
|
||||
// Used internally to create a ZRange.
|
||||
//
|
||||
// The end parameter is only used for verification and to distinguish
|
||||
// the constructors if End == Start.
|
||||
ZRange(End start, size_t size, End end);
|
||||
|
||||
public:
|
||||
ZRange();
|
||||
ZRange(Start start, size_t size);
|
||||
|
||||
bool is_null() const;
|
||||
|
||||
Start start() const;
|
||||
End end() const;
|
||||
|
||||
size_t size() const;
|
||||
|
||||
bool operator==(const ZRange& other) const;
|
||||
bool operator!=(const ZRange& other) const;
|
||||
|
||||
bool contains(const ZRange& other) const;
|
||||
|
||||
void grow_from_front(size_t size);
|
||||
void grow_from_back(size_t size);
|
||||
|
||||
ZRange shrink_from_front(size_t size);
|
||||
ZRange shrink_from_back(size_t size);
|
||||
|
||||
ZRange partition(size_t offset, size_t partition_size) const;
|
||||
ZRange first_part(size_t split_offset) const;
|
||||
ZRange last_part(size_t split_offset) const;
|
||||
|
||||
bool adjacent_to(const ZRange& other) const;
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZRANGE_HPP
|
||||
144
src/hotspot/share/gc/z/zRange.inline.hpp
Normal file
@ -0,0 +1,144 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZRANGE_INLINE_HPP
|
||||
#define SHARE_GC_Z_ZRANGE_INLINE_HPP
|
||||
|
||||
#include "gc/z/zRange.hpp"
|
||||
|
||||
#include "utilities/debug.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline ZRange<Start, End>::ZRange(End start, size_t size, End end)
|
||||
: _start(start),
|
||||
_size(size) {
|
||||
postcond(this->end() == end);
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline ZRange<Start, End>::ZRange()
|
||||
: _start(End::invalid),
|
||||
_size(0) {}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline ZRange<Start, End>::ZRange(Start start, size_t size)
|
||||
: _start(to_end_type(start, 0)),
|
||||
_size(size) {}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline bool ZRange<Start, End>::is_null() const {
|
||||
return _start == End::invalid;
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline Start ZRange<Start, End>::start() const {
|
||||
return to_start_type(_start);
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline End ZRange<Start, End>::end() const {
|
||||
return _start + _size;
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline size_t ZRange<Start, End>::size() const {
|
||||
return _size;
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline bool ZRange<Start, End>::operator==(const ZRange& other) const {
|
||||
precond(!is_null());
|
||||
precond(!other.is_null());
|
||||
|
||||
return _start == other._start && _size == other._size;
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline bool ZRange<Start, End>::operator!=(const ZRange& other) const {
|
||||
return !operator==(other);
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline bool ZRange<Start, End>::contains(const ZRange& other) const {
|
||||
precond(!is_null());
|
||||
precond(!other.is_null());
|
||||
|
||||
return _start <= other._start && other.end() <= end();
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline void ZRange<Start, End>::grow_from_front(size_t size) {
|
||||
precond(size_t(start()) >= size);
|
||||
|
||||
_start -= size;
|
||||
_size += size;
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline void ZRange<Start, End>::grow_from_back(size_t size) {
|
||||
_size += size;
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline ZRange<Start, End> ZRange<Start, End>::shrink_from_front(size_t size) {
|
||||
precond(this->size() >= size);
|
||||
|
||||
_start += size;
|
||||
_size -= size;
|
||||
|
||||
return ZRange(_start - size, size, _start);
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline ZRange<Start, End> ZRange<Start, End>::shrink_from_back(size_t size) {
|
||||
precond(this->size() >= size);
|
||||
|
||||
_size -= size;
|
||||
|
||||
return ZRange(end(), size, end() + size);
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline ZRange<Start, End> ZRange<Start, End>::partition(size_t offset, size_t partition_size) const {
|
||||
precond(size() - offset >= partition_size);
|
||||
|
||||
return ZRange(_start + offset, partition_size, _start + offset + partition_size);
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline ZRange<Start, End> ZRange<Start, End>::first_part(size_t split_offset) const {
|
||||
return partition(0, split_offset);
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline ZRange<Start, End> ZRange<Start, End>::last_part(size_t split_offset) const {
|
||||
return partition(split_offset, size() - split_offset);
|
||||
}
|
||||
|
||||
template <typename Start, typename End>
|
||||
inline bool ZRange<Start, End>::adjacent_to(const ZRange<Start, End>& other) const {
|
||||
return end() == other.start() || other.end() == start();
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_Z_ZRANGE_INLINE_HPP
|
||||
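The ZRange split operations above keep the two resulting parts adjacent and non-overlapping. A small arithmetic sketch of shrink_from_front using a plain (start, size) pair instead of ZRange's strongly typed offsets:

#include <cassert>
#include <cstddef>

struct Range {
  size_t start;
  size_t size;
  size_t end() const { return start + size; }
};

// Mirrors ZRange::shrink_from_front: the first 'size' bytes are split off and
// returned, the remainder stays in *range
static Range shrink_from_front(Range* range, size_t size) {
  assert(range->size >= size);
  const Range removed{range->start, size};
  range->start += size;
  range->size -= size;
  return removed;
}

int main() {
  const size_t granule = 2 * 1024 * 1024;   // assumed granule size
  Range r{0, 6 * granule};
  const Range head = shrink_from_front(&r, 2 * granule);
  assert(head.end() == r.start);            // the two parts stay adjacent
  assert(head.size + r.size == 6 * granule);
  return 0;
}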
150
src/hotspot/share/gc/z/zRangeRegistry.hpp
Normal file
@ -0,0 +1,150 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZRANGEREGISTRY_HPP
|
||||
#define SHARE_GC_Z_ZRANGEREGISTRY_HPP
|
||||
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zList.hpp"
|
||||
#include "gc/z/zLock.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
template <typename T>
|
||||
class ZArray;
|
||||
|
||||
template <typename Range>
|
||||
class ZRangeRegistry {
|
||||
friend class ZVirtualMemoryManagerTest;
|
||||
|
||||
private:
|
||||
// The node type for the list of Ranges
|
||||
class Node;
|
||||
|
||||
public:
|
||||
using offset = typename Range::offset;
|
||||
using offset_end = typename Range::offset_end;
|
||||
|
||||
typedef void (*CallbackPrepare)(const Range& range);
|
||||
typedef void (*CallbackResize)(const Range& from, const Range& to);
|
||||
|
||||
struct Callbacks {
|
||||
CallbackPrepare _prepare_for_hand_out;
|
||||
CallbackPrepare _prepare_for_hand_back;
|
||||
CallbackResize _grow;
|
||||
CallbackResize _shrink;
|
||||
|
||||
Callbacks();
|
||||
};
|
||||
|
||||
private:
|
||||
mutable ZLock _lock;
|
||||
ZList<Node> _list;
|
||||
Callbacks _callbacks;
|
||||
Range _limits;
|
||||
|
||||
void move_into(const Range& range);
|
||||
|
||||
void insert_inner(const Range& range);
|
||||
void register_inner(const Range& range);
|
||||
|
||||
void grow_from_front(Range* range, size_t size);
|
||||
void grow_from_back(Range* range, size_t size);
|
||||
|
||||
Range shrink_from_front(Range* range, size_t size);
|
||||
Range shrink_from_back(Range* range, size_t size);
|
||||
|
||||
Range remove_from_low_inner(size_t size);
|
||||
Range remove_from_low_at_most_inner(size_t size);
|
||||
|
||||
size_t remove_from_low_many_at_most_inner(size_t size, ZArray<Range>* out);
|
||||
|
||||
bool check_limits(const Range& range) const;
|
||||
|
||||
public:
|
||||
ZRangeRegistry();
|
||||
|
||||
void register_callbacks(const Callbacks& callbacks);
|
||||
|
||||
void register_range(const Range& range);
|
||||
bool unregister_first(Range* out);
|
||||
|
||||
bool is_empty() const;
|
||||
bool is_contiguous() const;
|
||||
|
||||
void anchor_limits();
|
||||
bool limits_contain(const Range& range) const;
|
||||
|
||||
offset peek_low_address() const;
|
||||
offset_end peak_high_address_end() const;
|
||||
|
||||
void insert(const Range& range);
|
||||
|
||||
void insert_and_remove_from_low_many(const Range& range, ZArray<Range>* out);
|
||||
Range insert_and_remove_from_low_exact_or_many(size_t size, ZArray<Range>* in_out);
|
||||
|
||||
Range remove_from_low(size_t size);
|
||||
Range remove_from_low_at_most(size_t size);
|
||||
size_t remove_from_low_many_at_most(size_t size, ZArray<Range>* out);
|
||||
Range remove_from_high(size_t size);
|
||||
|
||||
void transfer_from_low(ZRangeRegistry* other, size_t size);
|
||||
};
|
||||
|
||||
template <typename Range>
|
||||
class ZRangeRegistry<Range>::Node : public CHeapObj<mtGC> {
|
||||
friend class ZList<Node>;
|
||||
|
||||
private:
|
||||
using offset = typename Range::offset;
|
||||
using offset_end = typename Range::offset_end;
|
||||
|
||||
Range _range;
|
||||
ZListNode<Node> _node;
|
||||
|
||||
public:
|
||||
Node(offset start, size_t size)
|
||||
: _range(start, size),
|
||||
_node() {}
|
||||
|
||||
Node(const Range& other)
|
||||
: Node(other.start(), other.size()) {}
|
||||
|
||||
Range* range() {
|
||||
return &_range;
|
||||
}
|
||||
|
||||
offset start() const {
|
||||
return _range.start();
|
||||
}
|
||||
|
||||
offset_end end() const {
|
||||
return _range.end();
|
||||
}
|
||||
|
||||
size_t size() const {
|
||||
return _range.size();
|
||||
}
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZRANGEREGISTRY_HPP
|
||||
469
src/hotspot/share/gc/z/zRangeRegistry.inline.hpp
Normal file
@ -0,0 +1,469 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZRANGEREGISTRY_INLINE_HPP
|
||||
#define SHARE_GC_Z_ZRANGEREGISTRY_INLINE_HPP
|
||||
|
||||
#include "gc/z/zRangeRegistry.hpp"
|
||||
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zList.inline.hpp"
|
||||
#include "gc/z/zLock.inline.hpp"
|
||||
|
||||
template <typename Range>
|
||||
void ZRangeRegistry<Range>::move_into(const Range& range) {
|
||||
assert(!range.is_null(), "Invalid range");
|
||||
assert(check_limits(range), "Range outside limits");
|
||||
|
||||
const offset start = range.start();
|
||||
const offset_end end = range.end();
|
||||
const size_t size = range.size();
|
||||
|
||||
ZListIterator<Node> iter(&_list);
|
||||
for (Node* node; iter.next(&node);) {
|
||||
if (node->start() < start) {
|
||||
continue;
|
||||
}
|
||||
|
||||
Node* const prev = _list.prev(node);
|
||||
if (prev != nullptr && start == prev->end()) {
|
||||
if (end == node->start()) {
|
||||
// Merge with prev and current ranges
|
||||
grow_from_back(prev->range(), size);
|
||||
grow_from_back(prev->range(), node->size());
|
||||
_list.remove(node);
|
||||
delete node;
|
||||
} else {
|
||||
// Merge with prev range
|
||||
grow_from_back(prev->range(), size);
|
||||
}
|
||||
} else if (end == node->start()) {
|
||||
// Merge with current range
|
||||
grow_from_front(node->range(), size);
|
||||
} else {
|
||||
// Insert range before current range
|
||||
assert(end < node->start(), "Areas must not overlap");
|
||||
Node* const new_node = new Node(start, size);
|
||||
_list.insert_before(node, new_node);
|
||||
}
|
||||
|
||||
// Done
|
||||
return;
|
||||
}
|
||||
|
||||
// Insert last
|
||||
Node* const last = _list.last();
|
||||
if (last != nullptr && start == last->end()) {
|
||||
// Merge with last range
|
||||
grow_from_back(last->range(), size);
|
||||
} else {
|
||||
// Insert new node last
|
||||
Node* const new_node = new Node(start, size);
|
||||
_list.insert_last(new_node);
|
||||
}
|
||||
}
|
||||
|
||||
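move_into keeps the free list sorted by start address and coalesces an inserted range with its predecessor and/or successor when they are adjacent. A simplified sketch of the same merge cases, using std::map keyed by start offset instead of the sorted ZList:

#include <cassert>
#include <cstddef>
#include <iterator>
#include <map>

using FreeList = std::map<size_t, size_t>;   // start -> size

static void insert_coalesced(FreeList& list, size_t start, size_t size) {
  auto next = list.lower_bound(start);

  // Merge with the predecessor if it ends exactly where the new range starts
  if (next != list.begin()) {
    auto prev = std::prev(next);
    if (prev->first + prev->second == start) {
      start = prev->first;
      size += prev->second;
      list.erase(prev);
    }
  }

  // Merge with the successor if the new range ends exactly where it starts
  if (next != list.end() && start + size == next->first) {
    size += next->second;
    list.erase(next);
  }

  list[start] = size;
}

int main() {
  FreeList list;
  insert_coalesced(list, 0, 2);   // [0, 2)
  insert_coalesced(list, 4, 2);   // [0, 2) and [4, 6)
  insert_coalesced(list, 2, 2);   // bridges the gap, everything merges to [0, 6)
  assert(list.size() == 1 && list.begin()->second == 6);
  return 0;
}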
template <typename Range>
|
||||
void ZRangeRegistry<Range>::insert_inner(const Range& range) {
|
||||
if (_callbacks._prepare_for_hand_back != nullptr) {
|
||||
_callbacks._prepare_for_hand_back(range);
|
||||
}
|
||||
move_into(range);
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
void ZRangeRegistry<Range>::register_inner(const Range& range) {
|
||||
move_into(range);
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
void ZRangeRegistry<Range>::grow_from_front(Range* range, size_t size) {
|
||||
if (_callbacks._grow != nullptr) {
|
||||
const Range from = *range;
|
||||
const Range to = Range(from.start() - size, from.size() + size);
|
||||
_callbacks._grow(from, to);
|
||||
}
|
||||
range->grow_from_front(size);
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
void ZRangeRegistry<Range>::grow_from_back(Range* range, size_t size) {
|
||||
if (_callbacks._grow != nullptr) {
|
||||
const Range from = *range;
|
||||
const Range to = Range(from.start(), from.size() + size);
|
||||
_callbacks._grow(from, to);
|
||||
}
|
||||
range->grow_from_back(size);
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
Range ZRangeRegistry<Range>::shrink_from_front(Range* range, size_t size) {
|
||||
if (_callbacks._shrink != nullptr) {
|
||||
const Range from = *range;
|
||||
const Range to = from.last_part(size);
|
||||
_callbacks._shrink(from, to);
|
||||
}
|
||||
return range->shrink_from_front(size);
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
Range ZRangeRegistry<Range>::shrink_from_back(Range* range, size_t size) {
|
||||
if (_callbacks._shrink != nullptr) {
|
||||
const Range from = *range;
|
||||
const Range to = from.first_part(from.size() - size);
|
||||
_callbacks._shrink(from, to);
|
||||
}
|
||||
return range->shrink_from_back(size);
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
Range ZRangeRegistry<Range>::remove_from_low_inner(size_t size) {
|
||||
ZListIterator<Node> iter(&_list);
|
||||
for (Node* node; iter.next(&node);) {
|
||||
if (node->size() >= size) {
|
||||
Range range;
|
||||
|
||||
if (node->size() == size) {
|
||||
// Exact match, remove range
|
||||
_list.remove(node);
|
||||
range = *node->range();
|
||||
delete node;
|
||||
} else {
|
||||
// Larger than requested, shrink range
|
||||
range = shrink_from_front(node->range(), size);
|
||||
}
|
||||
|
||||
if (_callbacks._prepare_for_hand_out != nullptr) {
|
||||
_callbacks._prepare_for_hand_out(range);
|
||||
}
|
||||
|
||||
return range;
|
||||
}
|
||||
}
|
||||
|
||||
// Out of memory
|
||||
return Range();
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
Range ZRangeRegistry<Range>::remove_from_low_at_most_inner(size_t size) {
|
||||
Node* const node = _list.first();
|
||||
if (node == nullptr) {
|
||||
// List is empty
|
||||
return Range();
|
||||
}
|
||||
|
||||
Range range;
|
||||
|
||||
if (node->size() <= size) {
|
||||
// Smaller than or equal to requested, remove range
|
||||
_list.remove(node);
|
||||
range = *node->range();
|
||||
delete node;
|
||||
} else {
|
||||
// Larger than requested, shrink range
|
||||
range = shrink_from_front(node->range(), size);
|
||||
}
|
||||
|
||||
if (_callbacks._prepare_for_hand_out) {
|
||||
_callbacks._prepare_for_hand_out(range);
|
||||
}
|
||||
|
||||
return range;
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
size_t ZRangeRegistry<Range>::remove_from_low_many_at_most_inner(size_t size, ZArray<Range>* out) {
|
||||
size_t to_remove = size;
|
||||
|
||||
while (to_remove > 0) {
|
||||
const Range range = remove_from_low_at_most_inner(to_remove);
|
||||
|
||||
if (range.is_null()) {
|
||||
// The requested amount is not available
|
||||
return size - to_remove;
|
||||
}
|
||||
|
||||
to_remove -= range.size();
|
||||
out->append(range);
|
||||
}
|
||||
|
||||
return size;
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
ZRangeRegistry<Range>::Callbacks::Callbacks()
|
||||
: _prepare_for_hand_out(nullptr),
|
||||
_prepare_for_hand_back(nullptr),
|
||||
_grow(nullptr),
|
||||
_shrink(nullptr) {}
|
||||
|
||||
template <typename Range>
|
||||
ZRangeRegistry<Range>::ZRangeRegistry()
|
||||
: _list(),
|
||||
_callbacks(),
|
||||
_limits() {}
|
||||
|
||||
template <typename Range>
|
||||
void ZRangeRegistry<Range>::register_callbacks(const Callbacks& callbacks) {
|
||||
_callbacks = callbacks;
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
void ZRangeRegistry<Range>::register_range(const Range& range) {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
register_inner(range);
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
bool ZRangeRegistry<Range>::unregister_first(Range* out) {
|
||||
// Unregistering a range doesn't invoke the "prepare_for_hand_out" callback
|
||||
// because the range is unregistered and not handed out to be used.
|
||||
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
if (_list.is_empty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Don't invoke the "prepare_for_hand_out" callback
|
||||
|
||||
Node* const node = _list.remove_first();
|
||||
|
||||
// Return the range
|
||||
*out = *node->range();
|
||||
|
||||
delete node;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
inline bool ZRangeRegistry<Range>::is_empty() const {
|
||||
return _list.is_empty();
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
bool ZRangeRegistry<Range>::is_contiguous() const {
|
||||
return _list.size() == 1;
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
void ZRangeRegistry<Range>::anchor_limits() {
|
||||
assert(_limits.is_null(), "Should only anchor limits once");
|
||||
|
||||
if (_list.is_empty()) {
|
||||
return;
|
||||
}
|
||||
|
||||
const offset start = _list.first()->start();
|
||||
const size_t size = _list.last()->end() - start;
|
||||
|
||||
_limits = Range(start, size);
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
bool ZRangeRegistry<Range>::limits_contain(const Range& range) const {
|
||||
if (_limits.is_null() || range.is_null()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return range.start() >= _limits.start() && range.end() <= _limits.end();
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
bool ZRangeRegistry<Range>::check_limits(const Range& range) const {
|
||||
if (_limits.is_null()) {
|
||||
// Limits not anchored
|
||||
return true;
|
||||
}
|
||||
|
||||
// Otherwise, check that the range is within the limits
|
||||
return limits_contain(range);
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
typename ZRangeRegistry<Range>::offset ZRangeRegistry<Range>::peek_low_address() const {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
const Node* const node = _list.first();
|
||||
if (node != nullptr) {
|
||||
return node->start();
|
||||
}
|
||||
|
||||
// Out of memory
|
||||
return offset::invalid;
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
typename ZRangeRegistry<Range>::offset_end ZRangeRegistry<Range>::peak_high_address_end() const {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
const Node* const node = _list.last();
|
||||
if (node != nullptr) {
|
||||
return node->end();
|
||||
}
|
||||
|
||||
// Out of memory
|
||||
return offset_end::invalid;
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
void ZRangeRegistry<Range>::insert(const Range& range) {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
insert_inner(range);
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
void ZRangeRegistry<Range>::insert_and_remove_from_low_many(const Range& range, ZArray<Range>* out) {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
const size_t size = range.size();
|
||||
|
||||
// Insert the range
|
||||
insert_inner(range);
|
||||
|
||||
// Remove (hopefully) at a lower address
|
||||
const size_t removed = remove_from_low_many_at_most_inner(size, out);
|
||||
|
||||
// This should always succeed since we freed the same amount.
|
||||
assert(removed == size, "must succeed");
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
Range ZRangeRegistry<Range>::insert_and_remove_from_low_exact_or_many(size_t size, ZArray<Range>* in_out) {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
size_t inserted = 0;
|
||||
|
||||
// Insert everything
|
||||
ZArrayIterator<Range> iter(in_out);
|
||||
for (Range mem; iter.next(&mem);) {
|
||||
insert_inner(mem);
|
||||
inserted += mem.size();
|
||||
}
|
||||
|
||||
// Clear stored memory so that we can populate it below
|
||||
in_out->clear();
|
||||
|
||||
// Try to find and remove a contiguous chunk
|
||||
Range range = remove_from_low_inner(size);
|
||||
if (!range.is_null()) {
|
||||
return range;
|
||||
}
|
||||
|
||||
// Failed to find a contiguous chunk, split it up into smaller chunks and
|
||||
// only remove up to as much that has been inserted.
|
||||
size_t removed = remove_from_low_many_at_most_inner(inserted, in_out);
|
||||
assert(removed == inserted, "Should be able to get back as much as we previously inserted");
|
||||
return Range();
|
||||
}
|
||||
|
||||
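insert_and_remove_from_low_exact_or_many is a defragmentation attempt: hand back a set of fragments, try to take out one contiguous range of the combined size at a low address, and otherwise take back the same total amount in pieces. A toy model of that contract (unlike the real registry it does not coalesce adjacent blocks, so the contiguous request only succeeds if a single free block is large enough):

#include <cassert>
#include <cstddef>
#include <vector>

struct Block { size_t start; size_t size; };

// Take exactly 'size' from a single free block, if one is large enough
static bool remove_exact(std::vector<Block>& free_blocks, size_t size, Block* out) {
  for (auto it = free_blocks.begin(); it != free_blocks.end(); ++it) {
    if (it->size >= size) {
      *out = {it->start, size};
      it->start += size;
      it->size -= size;
      if (it->size == 0) {
        free_blocks.erase(it);
      }
      return true;
    }
  }
  return false;
}

// Take at most 'max_size' from the first free block
static bool remove_at_most(std::vector<Block>& free_blocks, size_t max_size, Block* out) {
  if (free_blocks.empty()) {
    return false;
  }
  Block& first = free_blocks.front();
  const size_t size = first.size < max_size ? first.size : max_size;
  *out = {first.start, size};
  first.start += size;
  first.size -= size;
  if (first.size == 0) {
    free_blocks.erase(free_blocks.begin());
  }
  return true;
}

int main() {
  std::vector<Block> free_blocks;
  std::vector<Block> fragments = {{0x0, 0x200000}, {0x600000, 0x200000}};

  // Hand the fragments back
  size_t inserted = 0;
  for (const Block& b : fragments) {
    free_blocks.push_back(b);
    inserted += b.size;
  }
  fragments.clear();

  // Try to get one contiguous block of the combined size
  Block contiguous;
  if (!remove_exact(free_blocks, inserted, &contiguous)) {
    // Fall back: reclaim the same total amount as multiple fragments
    size_t reclaimed = 0;
    for (Block piece; reclaimed < inserted && remove_at_most(free_blocks, inserted - reclaimed, &piece);) {
      fragments.push_back(piece);
      reclaimed += piece.size;
    }
    assert(reclaimed == inserted);
  }
  return 0;
}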
template <typename Range>
|
||||
Range ZRangeRegistry<Range>::remove_from_low(size_t size) {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
Range range = remove_from_low_inner(size);
|
||||
return range;
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
Range ZRangeRegistry<Range>::remove_from_low_at_most(size_t size) {
|
||||
ZLocker<ZLock> lock(&_lock);
|
||||
Range range = remove_from_low_at_most_inner(size);
|
||||
return range;
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
size_t ZRangeRegistry<Range>::remove_from_low_many_at_most(size_t size, ZArray<Range>* out) {
|
||||
ZLocker<ZLock> lock(&_lock);
|
||||
return remove_from_low_many_at_most_inner(size, out);
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
Range ZRangeRegistry<Range>::remove_from_high(size_t size) {
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
|
||||
ZListReverseIterator<Node> iter(&_list);
|
||||
for (Node* node; iter.next(&node);) {
|
||||
if (node->size() >= size) {
|
||||
Range range;
|
||||
|
||||
if (node->size() == size) {
|
||||
// Exact match, remove range
|
||||
_list.remove(node);
|
||||
range = *node->range();
|
||||
delete node;
|
||||
} else {
|
||||
// Larger than requested, shrink range
|
||||
range = shrink_from_back(node->range(), size);
|
||||
}
|
||||
|
||||
if (_callbacks._prepare_for_hand_out != nullptr) {
|
||||
_callbacks._prepare_for_hand_out(range);
|
||||
}
|
||||
|
||||
return range;
|
||||
}
|
||||
}
|
||||
|
||||
// Out of memory
|
||||
return Range();
|
||||
}
|
||||
|
||||
template <typename Range>
|
||||
void ZRangeRegistry<Range>::transfer_from_low(ZRangeRegistry* other, size_t size) {
|
||||
assert(other->_list.is_empty(), "Should only be used for initialization");
|
||||
|
||||
ZLocker<ZLock> locker(&_lock);
|
||||
size_t to_move = size;
|
||||
|
||||
ZListIterator<Node> iter(&_list);
|
||||
for (Node* node; iter.next(&node);) {
|
||||
Node* to_transfer;
|
||||
|
||||
if (node->size() <= to_move) {
|
||||
// Smaller than or equal to requested, remove range
|
||||
_list.remove(node);
|
||||
to_transfer = node;
|
||||
} else {
|
||||
// Larger than requested, shrink range
|
||||
const Range range = shrink_from_front(node->range(), to_move);
|
||||
to_transfer = new Node(range);
|
||||
}
|
||||
|
||||
// Insert into the other list
|
||||
//
|
||||
// The from list is sorted, the other list starts empty, and the inserts
|
||||
// come in sort order, so we can insert_last here.
|
||||
other->_list.insert_last(to_transfer);
|
||||
|
||||
to_move -= to_transfer->size();
|
||||
if (to_move == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert(to_move == 0, "Should have transferred requested size");
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_Z_ZRANGEREGISTRY_INLINE_HPP
|
||||
@ -410,7 +410,7 @@ static void retire_target_page(ZGeneration* generation, ZPage* page) {
|
||||
// relocate the remaining objects, leaving the target page empty when
|
||||
// relocation completed.
|
||||
if (page->used() == 0) {
|
||||
ZHeap::heap()->free_page(page, true /* allow_defragment */);
|
||||
ZHeap::heap()->free_page(page);
|
||||
}
|
||||
}
|
||||
|
||||
@ -841,14 +841,12 @@ private:
|
||||
const bool promotion = _forwarding->is_promotion();
|
||||
|
||||
// Promotions happen through a new cloned page
|
||||
ZPage* const to_page = promotion ? from_page->clone_limited() : from_page;
|
||||
ZPage* const to_page = promotion
|
||||
? from_page->clone_for_promotion()
|
||||
: from_page->reset(to_age);
|
||||
|
||||
// Reset page for in-place relocation
|
||||
to_page->reset(to_age);
|
||||
to_page->reset_top_for_allocation();
|
||||
if (promotion) {
|
||||
to_page->remset_alloc();
|
||||
}
|
||||
|
||||
// Verify that the inactive remset is clear when resetting the page for
|
||||
// in-place relocation.
|
||||
@ -1011,7 +1009,7 @@ public:
|
||||
page->log_msg(" (relocate page done normal)");
|
||||
|
||||
// Free page
|
||||
ZHeap::heap()->free_page(page, true /* allow_defragment */);
|
||||
ZHeap::heap()->free_page(page);
|
||||
}
|
||||
}
|
||||
};
|
||||
@ -1260,14 +1258,12 @@ public:
|
||||
prev_page->log_msg(promotion ? " (flip promoted)" : " (flip survived)");
|
||||
|
||||
// Setup to-space page
|
||||
ZPage* const new_page = promotion ? prev_page->clone_limited() : prev_page;
|
||||
ZPage* const new_page = promotion
|
||||
? prev_page->clone_for_promotion()
|
||||
: prev_page->reset(to_age);
|
||||
|
||||
// Reset page for flip aging
|
||||
new_page->reset(to_age);
|
||||
new_page->reset_livemap();
|
||||
if (promotion) {
|
||||
new_page->remset_alloc();
|
||||
}
|
||||
|
||||
if (promotion) {
|
||||
ZGeneration::young()->flip_promote(prev_page, new_page);
|
||||
|
||||
@ -473,11 +473,9 @@ public:
|
||||
_remset_table_iterator(remembered) {
|
||||
_mark->prepare_work();
|
||||
_remembered->_page_allocator->enable_safe_destroy();
|
||||
_remembered->_page_allocator->enable_safe_recycle();
|
||||
}
|
||||
|
||||
~ZRememberedScanMarkFollowTask() {
|
||||
_remembered->_page_allocator->disable_safe_recycle();
|
||||
_remembered->_page_allocator->disable_safe_destroy();
|
||||
_mark->finish_work();
|
||||
// We are done scanning the set of old pages.
|
||||
|
||||
@ -54,12 +54,6 @@ void ZRememberedSet::initialize(size_t page_size) {
|
||||
_bitmap[1].initialize(size_in_bits, true /* clear */);
|
||||
}
|
||||
|
||||
void ZRememberedSet::delete_all() {
|
||||
assert(is_initialized(), "precondition");
|
||||
_bitmap[0].resize(0);
|
||||
_bitmap[1].resize(0);
|
||||
}
|
||||
|
||||
bool ZRememberedSet::is_cleared_current() const {
|
||||
return current()->is_empty();
|
||||
}
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -114,7 +114,6 @@ public:
|
||||
|
||||
bool is_initialized() const;
|
||||
void initialize(size_t page_size);
|
||||
void delete_all();
|
||||
|
||||
bool at_current(uintptr_t offset) const;
|
||||
bool at_previous(uintptr_t offset) const;
|
||||
|
||||
@ -31,11 +31,12 @@
|
||||
|
||||
static const ZStatCounter ZCounterUncommit("Memory", "Uncommit", ZStatUnitBytesPerSecond);
|
||||
|
||||
ZUncommitter::ZUncommitter(ZPageAllocator* page_allocator)
|
||||
: _page_allocator(page_allocator),
|
||||
ZUncommitter::ZUncommitter(uint32_t id, ZPartition* partition)
|
||||
: _id(id),
|
||||
_partition(partition),
|
||||
_lock(),
|
||||
_stop(false) {
|
||||
set_name("ZUncommitter");
|
||||
set_name("ZUncommitter#%u", id);
|
||||
create_and_start();
|
||||
}
|
||||
|
||||
@ -46,7 +47,7 @@ bool ZUncommitter::wait(uint64_t timeout) const {
|
||||
}
|
||||
|
||||
if (!_stop && timeout > 0) {
|
||||
log_debug(gc, heap)("Uncommit Timeout: " UINT64_FORMAT "s", timeout);
|
||||
log_debug(gc, heap)("Uncommitter (%u) Timeout: " UINT64_FORMAT "s", _id, timeout);
|
||||
_lock.wait(timeout * MILLIUNITS);
|
||||
}
|
||||
|
||||
@ -63,27 +64,27 @@ void ZUncommitter::run_thread() {
|
||||
|
||||
while (wait(timeout)) {
|
||||
EventZUncommit event;
|
||||
size_t uncommitted = 0;
|
||||
size_t total_uncommitted = 0;
|
||||
|
||||
while (should_continue()) {
|
||||
// Uncommit chunk
|
||||
const size_t flushed = _page_allocator->uncommit(&timeout);
|
||||
if (flushed == 0) {
|
||||
const size_t uncommitted = _partition->uncommit(&timeout);
|
||||
if (uncommitted == 0) {
|
||||
// Done
|
||||
break;
|
||||
}
|
||||
|
||||
uncommitted += flushed;
|
||||
total_uncommitted += uncommitted;
|
||||
}
|
||||
|
||||
if (uncommitted > 0) {
|
||||
if (total_uncommitted > 0) {
|
||||
// Update statistics
|
||||
ZStatInc(ZCounterUncommit, uncommitted);
|
||||
log_info(gc, heap)("Uncommitted: %zuM(%.0f%%)",
|
||||
uncommitted / M, percent_of(uncommitted, ZHeap::heap()->max_capacity()));
|
||||
ZStatInc(ZCounterUncommit, total_uncommitted);
|
||||
log_info(gc, heap)("Uncommitter (%u) Uncommitted: %zuM(%.0f%%)",
|
||||
_id, total_uncommitted / M, percent_of(total_uncommitted, ZHeap::heap()->max_capacity()));
|
||||
|
||||
// Send event
|
||||
event.commit(uncommitted);
|
||||
event.commit(total_uncommitted);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2019, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,11 +27,12 @@
|
||||
#include "gc/z/zLock.hpp"
|
||||
#include "gc/z/zThread.hpp"
|
||||
|
||||
class ZPageAllocator;
|
||||
class ZPartition;
|
||||
|
||||
class ZUncommitter : public ZThread {
|
||||
private:
|
||||
ZPageAllocator* const _page_allocator;
|
||||
const uint32_t _id;
|
||||
ZPartition* const _partition;
|
||||
mutable ZConditionLock _lock;
|
||||
bool _stop;
|
||||
|
||||
@ -43,7 +44,7 @@ protected:
|
||||
virtual void terminate();
|
||||
|
||||
public:
|
||||
ZUncommitter(ZPageAllocator* page_allocator);
|
||||
ZUncommitter(uint32_t id, ZPartition* partition);
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZUNCOMMITTER_HPP
|
||||
|
||||
@ -1,129 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/z/zList.inline.hpp"
|
||||
#include "gc/z/zLock.inline.hpp"
|
||||
#include "gc/z/zPage.inline.hpp"
|
||||
#include "gc/z/zPageAllocator.hpp"
|
||||
#include "gc/z/zUnmapper.hpp"
|
||||
#include "jfr/jfrEvents.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
|
||||
ZUnmapper::ZUnmapper(ZPageAllocator* page_allocator)
|
||||
: _page_allocator(page_allocator),
|
||||
_lock(),
|
||||
_queue(),
|
||||
_enqueued_bytes(0),
|
||||
_warned_sync_unmapping(false),
|
||||
_stop(false) {
|
||||
set_name("ZUnmapper");
|
||||
create_and_start();
|
||||
}
|
||||
|
||||
ZPage* ZUnmapper::dequeue() {
|
||||
ZLocker<ZConditionLock> locker(&_lock);
|
||||
|
||||
for (;;) {
|
||||
if (_stop) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
ZPage* const page = _queue.remove_first();
|
||||
if (page != nullptr) {
|
||||
_enqueued_bytes -= page->size();
|
||||
return page;
|
||||
}
|
||||
|
||||
_lock.wait();
|
||||
}
|
||||
}
|
||||
|
||||
bool ZUnmapper::try_enqueue(ZPage* page) {
|
||||
// Enqueue for asynchronous unmap and destroy
|
||||
ZLocker<ZConditionLock> locker(&_lock);
|
||||
if (is_saturated()) {
|
||||
// The unmapper thread is lagging behind and is unable to unmap memory fast enough
|
||||
if (!_warned_sync_unmapping) {
|
||||
_warned_sync_unmapping = true;
|
||||
log_warning_p(gc)("WARNING: Encountered synchronous unmapping because asynchronous unmapping could not keep up");
|
||||
}
|
||||
log_debug(gc, unmap)("Synchronous unmapping %zuM page", page->size() / M);
|
||||
return false;
|
||||
}
|
||||
|
||||
log_trace(gc, unmap)("Asynchronous unmapping %zuM page (%zuM / %zuM enqueued)",
|
||||
page->size() / M, _enqueued_bytes / M, queue_capacity() / M);
|
||||
|
||||
_queue.insert_last(page);
|
||||
_enqueued_bytes += page->size();
|
||||
_lock.notify_all();
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t ZUnmapper::queue_capacity() const {
|
||||
return align_up((size_t)(_page_allocator->max_capacity() * ZAsyncUnmappingLimit / 100.0), ZGranuleSize);
|
||||
}
|
||||
|
||||
bool ZUnmapper::is_saturated() const {
|
||||
return _enqueued_bytes >= queue_capacity();
|
||||
}
|
||||
|
||||
void ZUnmapper::do_unmap_and_destroy_page(ZPage* page) const {
|
||||
EventZUnmap event;
|
||||
const size_t unmapped = page->size();
|
||||
|
||||
// Unmap and destroy
|
||||
_page_allocator->unmap_page(page);
|
||||
_page_allocator->destroy_page(page);
|
||||
|
||||
// Send event
|
||||
event.commit(unmapped);
|
||||
}
|
||||
|
||||
void ZUnmapper::unmap_and_destroy_page(ZPage* page) {
|
||||
if (!try_enqueue(page)) {
|
||||
// Synchronously unmap and destroy
|
||||
do_unmap_and_destroy_page(page);
|
||||
}
|
||||
}
|
||||
|
||||
void ZUnmapper::run_thread() {
|
||||
for (;;) {
|
||||
ZPage* const page = dequeue();
|
||||
if (page == nullptr) {
|
||||
// Stop
|
||||
return;
|
||||
}
|
||||
|
||||
do_unmap_and_destroy_page(page);
|
||||
}
|
||||
}
|
||||
|
||||
void ZUnmapper::terminate() {
|
||||
ZLocker<ZConditionLock> locker(&_lock);
|
||||
_stop = true;
|
||||
_lock.notify_all();
|
||||
}
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -47,6 +47,16 @@ public:
|
||||
|
||||
// Memory
|
||||
static void fill(uintptr_t* addr, size_t count, uintptr_t value);
|
||||
template <typename T>
|
||||
static void copy_disjoint(T* dest, const T* src, size_t count);
|
||||
template <typename T>
|
||||
static void copy_disjoint(T* dest, const T* src, int count);
|
||||
|
||||
// Sort
|
||||
template <typename T, typename Comparator>
|
||||
static void sort(T* array, size_t count, Comparator comparator);
|
||||
template <typename T, typename Comparator>
|
||||
static void sort(T* array, int count, Comparator comparator);
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZUTILS_HPP
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -69,4 +69,35 @@ inline void ZUtils::object_copy_conjoint(zaddress from, zaddress to, size_t size
|
||||
}
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void ZUtils::copy_disjoint(T* dest, const T* src, size_t count) {
|
||||
memcpy(dest, src, sizeof(T) * count);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
inline void ZUtils::copy_disjoint(T* dest, const T* src, int count) {
|
||||
assert(count >= 0, "must be non-negative %d", count);
|
||||
|
||||
copy_disjoint(dest, src, static_cast<size_t>(count));
|
||||
}
|
||||
|
||||
template <typename T, typename Comparator>
|
||||
inline void ZUtils::sort(T* array, size_t count, Comparator comparator) {
|
||||
using SortType = int(const void*, const void*);
|
||||
using ComparatorType = int(const T*, const T*);
|
||||
|
||||
static constexpr bool IsComparatorCompatible = std::is_assignable<ComparatorType*&, Comparator>::value;
|
||||
static_assert(IsComparatorCompatible, "Incompatible Comparator, must decay to plain function pointer");
|
||||
|
||||
// We rely on ABI compatibility between ComparatorType and SortType
|
||||
qsort(array, count, sizeof(T), reinterpret_cast<SortType*>(static_cast<ComparatorType*>(comparator)));
|
||||
}
|
||||
|
||||
template <typename T, typename Comparator>
|
||||
inline void ZUtils::sort(T* array, int count, Comparator comparator) {
|
||||
assert(count >= 0, "must be non-negative %d", count);
|
||||
|
||||
sort(array, static_cast<size_t>(count), comparator);
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_Z_ZUTILS_INLINE_HPP
|
||||
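ZUtils::sort statically requires the comparator to decay to a plain int(const T*, const T*) function pointer, so capture-less lambdas work but capturing ones are rejected. A standalone usage sketch mirroring sort_zbacking_index_array, with plain uint32_t standing in for zbacking_index:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Like the original, this relies on ABI compatibility between
// int(const T*, const T*) and qsort's int(const void*, const void*)
template <typename T, typename Comparator>
static void sort_like_zutils(T* array, size_t count, Comparator comparator) {
  using SortType = int(const void*, const void*);
  using ComparatorType = int(const T*, const T*);
  std::qsort(array, count, sizeof(T),
             reinterpret_cast<SortType*>(static_cast<ComparatorType*>(comparator)));
}

int main() {
  uint32_t indices[] = {9, 3, 7, 3};
  // Capture-less lambda: decays to int(const uint32_t*, const uint32_t*)
  sort_like_zutils(indices, 4, [](const uint32_t* a, const uint32_t* b) {
    return *a < *b ? -1 : (*a > *b ? 1 : 0);
  });
  assert(indices[0] == 3 && indices[3] == 9);
  return 0;
}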
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -76,8 +76,12 @@ public:
|
||||
// Value
|
||||
//
|
||||
|
||||
struct ZValueIdTagType {};
|
||||
|
||||
template <typename S, typename T>
|
||||
class ZValue : public CHeapObj<mtGC> {
|
||||
friend class VMStructs;
|
||||
|
||||
private:
|
||||
const uintptr_t _addr;
|
||||
|
||||
@ -86,6 +90,8 @@ private:
|
||||
public:
|
||||
ZValue();
|
||||
ZValue(const T& value);
|
||||
template <typename... Args>
|
||||
ZValue(ZValueIdTagType, Args&&... args);
|
||||
|
||||
const T* addr(uint32_t value_id = S::id()) const;
|
||||
T* addr(uint32_t value_id = S::id());
|
||||
@ -95,6 +101,8 @@ public:
|
||||
|
||||
void set(const T& value, uint32_t value_id = S::id());
|
||||
void set_all(const T& value);
|
||||
|
||||
uint32_t count() const;
|
||||
};
|
||||
|
||||
template <typename T> using ZContended = ZValue<ZContendedStorage, T>;
|
||||
@ -106,16 +114,23 @@ template <typename T> using ZPerWorker = ZValue<ZPerWorkerStorage, T>;
|
||||
// Iterator
|
||||
//
|
||||
|
||||
template<typename S, typename T>
|
||||
class ZValueConstIterator;
|
||||
|
||||
template <typename S, typename T>
|
||||
class ZValueIterator {
|
||||
friend class ZValueConstIterator<S, T>;
|
||||
|
||||
private:
|
||||
ZValue<S, T>* const _value;
|
||||
uint32_t _value_id;
|
||||
|
||||
public:
|
||||
ZValueIterator(ZValue<S, T>* value);
|
||||
ZValueIterator(const ZValueIterator&) = default;
|
||||
|
||||
bool next(T** value);
|
||||
bool next(T** value, uint32_t* value_id);
|
||||
};
|
||||
|
||||
template <typename T> using ZPerCPUIterator = ZValueIterator<ZPerCPUStorage, T>;
|
||||
@ -130,6 +145,8 @@ private:
|
||||
|
||||
public:
|
||||
ZValueConstIterator(const ZValue<S, T>* value);
|
||||
ZValueConstIterator(const ZValueIterator<S, T>& other);
|
||||
ZValueConstIterator(const ZValueConstIterator&) = default;
|
||||
|
||||
bool next(const T** value);
|
||||
};
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -30,7 +30,7 @@
|
||||
#include "gc/shared/workerThread.hpp"
|
||||
#include "gc/z/zCPU.inline.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zNUMA.hpp"
|
||||
#include "gc/z/zNUMA.inline.hpp"
|
||||
#include "gc/z/zUtils.inline.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
@ -142,6 +142,18 @@ inline ZValue<S, T>::ZValue(const T& value)
|
||||
}
|
||||
}
|
||||
|
||||
template <typename S, typename T>
|
||||
template <typename... Args>
|
||||
inline ZValue<S, T>::ZValue(ZValueIdTagType, Args&&... args)
|
||||
: _addr(S::alloc(sizeof(T))) {
|
||||
// Initialize all instances
|
||||
uint32_t value_id;
|
||||
ZValueIterator<S, T> iter(this);
|
||||
for (T* addr; iter.next(&addr, &value_id);) {
|
||||
::new (addr) T(value_id, args...);
|
||||
}
|
||||
}
|
||||
|
||||
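The ZValueIdTagType constructor placement-constructs one instance per value id and forwards the id as the first constructor argument, which is how per-NUMA objects such as the partition registries can learn their own id. A standalone sketch of the pattern with a toy storage policy (a fixed count and a byte buffer, not ZPerNUMA's storage):

#include <cstdint>
#include <cstdio>
#include <new>
#include <vector>

struct Partition {
  uint32_t _id;
  explicit Partition(uint32_t id) : _id(id) {}
};

struct IdTag {};

template <typename T>
class PerId {
private:
  std::vector<unsigned char> _storage;

public:
  // One T per id in [0, count), each constructed with its own id first
  // (destruction omitted for brevity)
  template <typename... Args>
  PerId(IdTag, uint32_t count, Args&&... args)
    : _storage(sizeof(T) * count) {
    for (uint32_t id = 0; id < count; id++) {
      ::new (addr(id)) T(id, args...);
    }
  }

  T* addr(uint32_t id) {
    return reinterpret_cast<T*>(_storage.data() + sizeof(T) * id);
  }
};

int main() {
  PerId<Partition> partitions(IdTag{}, 3);
  std::printf("partition 2 has id %u\n", partitions.addr(2)->_id);
  return 0;
}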
template <typename S, typename T>
|
||||
inline const T* ZValue<S, T>::addr(uint32_t value_id) const {
|
||||
return reinterpret_cast<const T*>(value_addr(value_id));
|
||||
@ -175,6 +187,11 @@ inline void ZValue<S, T>::set_all(const T& value) {
|
||||
}
|
||||
}
|
||||
|
||||
template <typename S, typename T>
|
||||
uint32_t ZValue<S, T>::count() const {
|
||||
return S::count();
|
||||
}
|
||||
|
||||
//
|
||||
// Iterator
|
||||
//
|
||||
@ -192,12 +209,26 @@ inline bool ZValueIterator<S, T>::next(T** value) {
|
||||
}
|
||||
return false;
|
||||
}
|
||||
template <typename S, typename T>
|
||||
inline bool ZValueIterator<S, T>::next(T** value, uint32_t* value_id) {
|
||||
if (_value_id < S::count()) {
|
||||
*value_id = _value_id;
|
||||
*value = _value->addr(_value_id++);
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
template <typename S, typename T>
|
||||
inline ZValueConstIterator<S, T>::ZValueConstIterator(const ZValue<S, T>* value)
|
||||
: _value(value),
|
||||
_value_id(0) {}
|
||||
|
||||
template <typename S, typename T>
|
||||
inline ZValueConstIterator<S, T>::ZValueConstIterator(const ZValueIterator<S, T>& other)
|
||||
: _value(other._value),
|
||||
_value_id(other._value_id) {}
|
||||
|
||||
template <typename S, typename T>
|
||||
inline bool ZValueConstIterator<S, T>::next(const T** value) {
|
||||
if (_value_id < S::count()) {
|
||||
|
||||
@ -1,257 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zAddressSpaceLimit.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zInitialize.hpp"
|
||||
#include "gc/z/zNMT.hpp"
|
||||
#include "gc/z/zVirtualMemory.inline.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity)
|
||||
: _manager(),
|
||||
_reserved(0),
|
||||
_initialized(false) {
|
||||
|
||||
assert(max_capacity <= ZAddressOffsetMax, "Too large max_capacity");
|
||||
|
||||
// Initialize platform specific parts before reserving address space
|
||||
pd_initialize_before_reserve();
|
||||
|
||||
// Register the Windows callbacks
|
||||
pd_register_callbacks(&_manager);
|
||||
|
||||
// Reserve address space
|
||||
if (!reserve(max_capacity)) {
|
||||
ZInitialize::error_d("Failed to reserve enough address space for Java heap");
|
||||
return;
|
||||
}
|
||||
|
||||
// Set ZAddressOffsetMax to the highest address end available after reservation
|
||||
ZAddressOffsetMax = untype(highest_available_address_end());
|
||||
|
||||
// Successfully initialized
|
||||
_initialized = true;
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
size_t ZVirtualMemoryManager::force_reserve_discontiguous(size_t size) {
|
||||
const size_t min_range = calculate_min_range(size);
|
||||
const size_t max_range = MAX2(align_down(size / ZForceDiscontiguousHeapReservations, ZGranuleSize), min_range);
|
||||
size_t reserved = 0;
|
||||
|
||||
// Try to reserve ZForceDiscontiguousHeapReservations number of virtual memory
|
||||
// ranges. Starting with higher addresses.
|
||||
uintptr_t end = ZAddressOffsetMax;
|
||||
while (reserved < size && end >= max_range) {
|
||||
const size_t remaining = size - reserved;
|
||||
const size_t reserve_size = MIN2(max_range, remaining);
|
||||
const uintptr_t reserve_start = end - reserve_size;
|
||||
|
||||
if (reserve_contiguous(to_zoffset(reserve_start), reserve_size)) {
|
||||
reserved += reserve_size;
|
||||
}
|
||||
|
||||
end -= reserve_size * 2;
|
||||
}
|
||||
|
||||
// If (reserved < size) attempt to reserve the rest via normal divide and conquer
|
||||
uintptr_t start = 0;
|
||||
while (reserved < size && start < ZAddressOffsetMax) {
|
||||
const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start);
|
||||
reserved += reserve_discontiguous(to_zoffset(start), remaining, min_range);
|
||||
start += remaining;
|
||||
}
|
||||
|
||||
return reserved;
|
||||
}
|
||||
#endif
|
||||
|
||||
size_t ZVirtualMemoryManager::reserve_discontiguous(zoffset start, size_t size, size_t min_range) {
|
||||
if (size < min_range) {
|
||||
// Too small
|
||||
return 0;
|
||||
}
|
||||
|
||||
assert(is_aligned(size, ZGranuleSize), "Misaligned");
|
||||
|
||||
if (reserve_contiguous(start, size)) {
|
||||
return size;
|
||||
}
|
||||
|
||||
const size_t half = size / 2;
|
||||
if (half < min_range) {
|
||||
// Too small
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Divide and conquer
|
||||
const size_t first_part = align_down(half, ZGranuleSize);
|
||||
const size_t second_part = size - first_part;
|
||||
const size_t first_size = reserve_discontiguous(start, first_part, min_range);
|
||||
const size_t second_size = reserve_discontiguous(start + first_part, second_part, min_range);
|
||||
return first_size + second_size;
|
||||
}
|
||||
|
||||
size_t ZVirtualMemoryManager::calculate_min_range(size_t size) {
|
||||
// Don't try to reserve address ranges smaller than 1% of the requested size.
|
||||
// This avoids an explosion of reservation attempts in case large parts of the
// address space are already occupied.
|
||||
return align_up(size / ZMaxVirtualReservations, ZGranuleSize);
|
||||
}
|
||||
|
||||
size_t ZVirtualMemoryManager::reserve_discontiguous(size_t size) {
|
||||
const size_t min_range = calculate_min_range(size);
|
||||
uintptr_t start = 0;
|
||||
size_t reserved = 0;
|
||||
|
||||
// Reserve size somewhere between [0, ZAddressOffsetMax)
|
||||
while (reserved < size && start < ZAddressOffsetMax) {
|
||||
const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start);
|
||||
reserved += reserve_discontiguous(to_zoffset(start), remaining, min_range);
|
||||
start += remaining;
|
||||
}
|
||||
|
||||
return reserved;
|
||||
}
|
||||
|
||||
bool ZVirtualMemoryManager::reserve_contiguous(zoffset start, size_t size) {
|
||||
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned 0x%zx", size);
|
||||
|
||||
// Reserve address views
|
||||
const zaddress_unsafe addr = ZOffset::address_unsafe(start);
|
||||
|
||||
// Reserve address space
|
||||
if (!pd_reserve(addr, size)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Register address views with native memory tracker
|
||||
ZNMT::reserve(addr, size);
|
||||
|
||||
// Make the address range free
|
||||
_manager.register_range(start, size);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ZVirtualMemoryManager::reserve_contiguous(size_t size) {
|
||||
// Allow at most 8192 attempts spread evenly across [0, ZAddressOffsetMax)
|
||||
const size_t unused = ZAddressOffsetMax - size;
|
||||
const size_t increment = MAX2(align_up(unused / 8192, ZGranuleSize), ZGranuleSize);
|
||||
|
||||
for (uintptr_t start = 0; start + size <= ZAddressOffsetMax; start += increment) {
|
||||
if (reserve_contiguous(to_zoffset(start), size)) {
|
||||
// Success
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Failed
|
||||
return false;
|
||||
}
|
||||
|
||||
bool ZVirtualMemoryManager::reserve(size_t max_capacity) {
|
||||
const size_t limit = MIN2(ZAddressOffsetMax, ZAddressSpaceLimit::heap());
|
||||
const size_t size = MIN2(max_capacity * ZVirtualToPhysicalRatio, limit);
|
||||
|
||||
auto do_reserve = [&]() {
|
||||
#ifdef ASSERT
|
||||
if (ZForceDiscontiguousHeapReservations > 0) {
|
||||
return force_reserve_discontiguous(size);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Prefer a contiguous address space
|
||||
if (reserve_contiguous(size)) {
|
||||
return size;
|
||||
}
|
||||
|
||||
// Fall back to a discontiguous address space
|
||||
return reserve_discontiguous(size);
|
||||
};
|
||||
|
||||
const size_t reserved = do_reserve();
|
||||
|
||||
const bool contiguous = _manager.free_is_contiguous();
|
||||
|
||||
log_info_p(gc, init)("Address Space Type: %s/%s/%s",
|
||||
(contiguous ? "Contiguous" : "Discontiguous"),
|
||||
(limit == ZAddressOffsetMax ? "Unrestricted" : "Restricted"),
|
||||
(reserved == size ? "Complete" : "Degraded"));
|
||||
log_info_p(gc, init)("Address Space Size: %zuM", reserved / M);
|
||||
|
||||
// Record reserved
|
||||
_reserved = reserved;
|
||||
|
||||
return reserved >= max_capacity;
|
||||
}
|
||||
|
||||
void ZVirtualMemoryManager::unreserve(zoffset start, size_t size) {
|
||||
const zaddress_unsafe addr = ZOffset::address_unsafe(start);
|
||||
|
||||
// Unregister the reserved memory from NMT
|
||||
ZNMT::unreserve(addr, size);
|
||||
|
||||
// Unreserve address space
|
||||
pd_unreserve(addr, size);
|
||||
}
|
||||
|
||||
void ZVirtualMemoryManager::unreserve_all() {
|
||||
zoffset start;
|
||||
size_t size;
|
||||
|
||||
while (_manager.unregister_first(&start, &size)) {
|
||||
unreserve(start, size);
|
||||
}
|
||||
}
|
||||
|
||||
bool ZVirtualMemoryManager::is_initialized() const {
|
||||
return _initialized;
|
||||
}
|
||||
|
||||
ZVirtualMemory ZVirtualMemoryManager::alloc(size_t size, bool force_low_address) {
|
||||
zoffset start;
|
||||
|
||||
// Small pages are allocated at low addresses, while medium/large pages
|
||||
// are allocated at high addresses (unless forced to be at a low address).
|
||||
if (force_low_address || size <= ZPageSizeSmall) {
|
||||
start = _manager.alloc_low_address(size);
|
||||
} else {
|
||||
start = _manager.alloc_high_address(size);
|
||||
}
|
||||
|
||||
if (start == zoffset(UINTPTR_MAX)) {
|
||||
return ZVirtualMemory();
|
||||
}
|
||||
|
||||
return ZVirtualMemory(start, size);
|
||||
}
|
||||
|
||||
void ZVirtualMemoryManager::free(const ZVirtualMemory& vmem) {
|
||||
_manager.free(vmem.start(), vmem.size());
|
||||
}
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,67 +25,16 @@
|
||||
#define SHARE_GC_Z_ZVIRTUALMEMORY_HPP
|
||||
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zMemory.hpp"
|
||||
|
||||
class ZVirtualMemory {
|
||||
friend class VMStructs;
|
||||
|
||||
private:
|
||||
zoffset _start;
|
||||
zoffset_end _end;
|
||||
#include "gc/z/zRange.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
class ZVirtualMemory : public ZRange<zoffset, zoffset_end> {
|
||||
public:
|
||||
ZVirtualMemory();
|
||||
ZVirtualMemory(zoffset start, size_t size);
|
||||
ZVirtualMemory(const ZRange<zoffset, zoffset_end>& range);
|
||||
|
||||
bool is_null() const;
|
||||
zoffset start() const;
|
||||
zoffset_end end() const;
|
||||
size_t size() const;
|
||||
|
||||
ZVirtualMemory split(size_t size);
|
||||
};
|
||||
|
||||
class ZVirtualMemoryManager {
|
||||
friend class ZMapperTest;
|
||||
friend class ZVirtualMemoryManagerTest;
|
||||
|
||||
private:
|
||||
static size_t calculate_min_range(size_t size);
|
||||
|
||||
ZMemoryManager _manager;
|
||||
size_t _reserved;
|
||||
bool _initialized;
|
||||
|
||||
// Platform specific implementation
|
||||
void pd_initialize_before_reserve();
|
||||
void pd_register_callbacks(ZMemoryManager* manager);
|
||||
bool pd_reserve(zaddress_unsafe addr, size_t size);
|
||||
void pd_unreserve(zaddress_unsafe addr, size_t size);
|
||||
|
||||
bool reserve_contiguous(zoffset start, size_t size);
|
||||
bool reserve_contiguous(size_t size);
|
||||
size_t reserve_discontiguous(zoffset start, size_t size, size_t min_range);
|
||||
size_t reserve_discontiguous(size_t size);
|
||||
bool reserve(size_t max_capacity);
|
||||
|
||||
void unreserve(zoffset start, size_t size);
|
||||
|
||||
DEBUG_ONLY(size_t force_reserve_discontiguous(size_t size);)
|
||||
|
||||
public:
|
||||
ZVirtualMemoryManager(size_t max_capacity);
|
||||
|
||||
bool is_initialized() const;
|
||||
|
||||
size_t reserved() const;
|
||||
zoffset lowest_available_address() const;
|
||||
zoffset_end highest_available_address_end() const;
|
||||
|
||||
ZVirtualMemory alloc(size_t size, bool force_low_address);
|
||||
void free(const ZVirtualMemory& vmem);
|
||||
|
||||
void unreserve_all();
|
||||
int granule_count() const;
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZVIRTUALMEMORY_HPP
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -26,47 +26,32 @@
|
||||
|
||||
#include "gc/z/zVirtualMemory.hpp"
|
||||
|
||||
#include "gc/z/zMemory.inline.hpp"
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zRange.inline.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
inline ZVirtualMemory::ZVirtualMemory()
|
||||
: _start(zoffset(UINTPTR_MAX)),
|
||||
_end(zoffset_end(UINTPTR_MAX)) {}
|
||||
: ZRange() {}
|
||||
|
||||
inline ZVirtualMemory::ZVirtualMemory(zoffset start, size_t size)
|
||||
: _start(start),
|
||||
_end(to_zoffset_end(start, size)) {}
|
||||
|
||||
inline bool ZVirtualMemory::is_null() const {
|
||||
return _start == zoffset(UINTPTR_MAX);
|
||||
: ZRange(start, size) {
|
||||
// ZVirtualMemory is only used for ZGranuleSize multiple ranges
|
||||
assert(is_aligned(untype(start), ZGranuleSize), "must be multiple of ZGranuleSize");
|
||||
assert(is_aligned(size, ZGranuleSize), "must be multiple of ZGranuleSize");
|
||||
}
|
||||
|
||||
inline zoffset ZVirtualMemory::start() const {
|
||||
return _start;
|
||||
}
|
||||
inline ZVirtualMemory::ZVirtualMemory(const ZRange<zoffset, zoffset_end>& range)
|
||||
: ZVirtualMemory(range.start(), range.size()) {}
|
||||
|
||||
inline zoffset_end ZVirtualMemory::end() const {
|
||||
return _end;
|
||||
}
|
||||
inline int ZVirtualMemory::granule_count() const {
|
||||
const size_t granule_count = size() >> ZGranuleSizeShift;
|
||||
|
||||
inline size_t ZVirtualMemory::size() const {
|
||||
return _end - _start;
|
||||
}
|
||||
assert(granule_count <= static_cast<size_t>(std::numeric_limits<int>::max()),
|
||||
"must not overflow an int %zu", granule_count);
|
||||
|
||||
inline ZVirtualMemory ZVirtualMemory::split(size_t size) {
|
||||
_start += size;
|
||||
return ZVirtualMemory(_start - size, size);
|
||||
}
|
||||
|
||||
inline size_t ZVirtualMemoryManager::reserved() const {
|
||||
return _reserved;
|
||||
}
|
||||
|
||||
inline zoffset ZVirtualMemoryManager::lowest_available_address() const {
|
||||
return _manager.peek_low_address();
|
||||
}
|
||||
|
||||
inline zoffset_end ZVirtualMemoryManager::highest_available_address_end() const {
|
||||
return _manager.peak_high_address_end();
|
||||
return static_cast<int>(granule_count);
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_Z_ZVIRTUALMEMORY_INLINE_HPP
|
||||
|
||||
357
src/hotspot/share/gc/z/zVirtualMemoryManager.cpp
Normal file
@ -0,0 +1,357 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#include "gc/shared/gc_globals.hpp"
|
||||
#include "gc/shared/gcLogPrecious.hpp"
|
||||
#include "gc/z/zAddress.inline.hpp"
|
||||
#include "gc/z/zAddressSpaceLimit.hpp"
|
||||
#include "gc/z/zArray.hpp"
|
||||
#include "gc/z/zGlobals.hpp"
|
||||
#include "gc/z/zInitialize.hpp"
|
||||
#include "gc/z/zNMT.hpp"
|
||||
#include "gc/z/zNUMA.inline.hpp"
|
||||
#include "gc/z/zValue.inline.hpp"
|
||||
#include "gc/z/zVirtualMemory.inline.hpp"
|
||||
#include "gc/z/zVirtualMemoryManager.inline.hpp"
|
||||
#include "utilities/align.hpp"
|
||||
#include "utilities/debug.hpp"
|
||||
|
||||
ZVirtualMemoryReserver::ZVirtualMemoryReserver(size_t size)
|
||||
: _registry(),
|
||||
_reserved(reserve(size)) {}
|
||||
|
||||
void ZVirtualMemoryReserver::initialize_partition_registry(ZVirtualMemoryRegistry* partition_registry, size_t size) {
|
||||
assert(partition_registry->is_empty(), "Should be empty when initializing");
|
||||
|
||||
// Registers the Windows callbacks
|
||||
pd_register_callbacks(partition_registry);
|
||||
|
||||
_registry.transfer_from_low(partition_registry, size);
|
||||
|
||||
// Set the limits according to the virtual memory given to this partition
|
||||
partition_registry->anchor_limits();
|
||||
}
|
||||
|
||||
void ZVirtualMemoryReserver::unreserve(const ZVirtualMemory& vmem) {
|
||||
const zaddress_unsafe addr = ZOffset::address_unsafe(vmem.start());
|
||||
|
||||
// Unregister the reserved memory from NMT
|
||||
ZNMT::unreserve(addr, vmem.size());
|
||||
|
||||
// Unreserve address space
|
||||
pd_unreserve(addr, vmem.size());
|
||||
}
|
||||
|
||||
void ZVirtualMemoryReserver::unreserve_all() {
|
||||
for (ZVirtualMemory vmem; _registry.unregister_first(&vmem);) {
|
||||
unreserve(vmem);
|
||||
}
|
||||
}
|
||||
|
||||
bool ZVirtualMemoryReserver::is_empty() const {
|
||||
return _registry.is_empty();
|
||||
}
|
||||
|
||||
bool ZVirtualMemoryReserver::is_contiguous() const {
|
||||
return _registry.is_contiguous();
|
||||
}
|
||||
|
||||
size_t ZVirtualMemoryReserver::reserved() const {
|
||||
return _reserved;
|
||||
}
|
||||
|
||||
zoffset_end ZVirtualMemoryReserver::highest_available_address_end() const {
|
||||
return _registry.peak_high_address_end();
|
||||
}
|
||||
|
||||
#ifdef ASSERT
|
||||
size_t ZVirtualMemoryReserver::force_reserve_discontiguous(size_t size) {
|
||||
const size_t min_range = calculate_min_range(size);
|
||||
const size_t max_range = MAX2(align_down(size / ZForceDiscontiguousHeapReservations, ZGranuleSize), min_range);
|
||||
size_t reserved = 0;
|
||||
|
||||
// Try to reserve ZForceDiscontiguousHeapReservations number of virtual memory
|
||||
// ranges. Starting with higher addresses.
|
||||
uintptr_t end = ZAddressOffsetMax;
|
||||
while (reserved < size && end >= max_range) {
|
||||
const size_t remaining = size - reserved;
|
||||
const size_t reserve_size = MIN2(max_range, remaining);
|
||||
const uintptr_t reserve_start = end - reserve_size;
|
||||
|
||||
if (reserve_contiguous(to_zoffset(reserve_start), reserve_size)) {
|
||||
reserved += reserve_size;
|
||||
}
|
||||
|
||||
end -= reserve_size * 2;
|
||||
}
|
||||
|
||||
// If (reserved < size) attempt to reserve the rest via normal divide and conquer
|
||||
uintptr_t start = 0;
|
||||
while (reserved < size && start < ZAddressOffsetMax) {
|
||||
const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start);
|
||||
reserved += reserve_discontiguous(to_zoffset(start), remaining, min_range);
|
||||
start += remaining;
|
||||
}
|
||||
|
||||
return reserved;
|
||||
}
|
||||
#endif
|
||||
|
||||
size_t ZVirtualMemoryReserver::reserve_discontiguous(zoffset start, size_t size, size_t min_range) {
|
||||
if (size < min_range) {
|
||||
// Too small
|
||||
return 0;
|
||||
}
|
||||
|
||||
assert(is_aligned(size, ZGranuleSize), "Misaligned");
|
||||
|
||||
if (reserve_contiguous(start, size)) {
|
||||
return size;
|
||||
}
|
||||
|
||||
const size_t half = size / 2;
|
||||
if (half < min_range) {
|
||||
// Too small
|
||||
return 0;
|
||||
}
|
||||
|
||||
// Divide and conquer
|
||||
const size_t first_part = align_down(half, ZGranuleSize);
|
||||
const size_t second_part = size - first_part;
|
||||
const size_t first_size = reserve_discontiguous(start, first_part, min_range);
|
||||
const size_t second_size = reserve_discontiguous(start + first_part, second_part, min_range);
|
||||
return first_size + second_size;
|
||||
}
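A standalone sketch of the same divide-and-conquer idea, assuming only a probe primitive that may fail for arbitrary ranges (everything below is illustrative and simplified; the real code additionally aligns the split point to ZGranuleSize):

#include <cstddef>
#include <cstdio>

// Hypothetical primitive: try to reserve [start, start + size); may fail.
static bool try_reserve(size_t start, size_t size) {
  // Pretend a blocker occupies [96, 160) so some probes fail.
  return start + size <= 96 || start >= 160;
}

// Reserve as much of [start, start + size) as possible, splitting in half on
// failure and giving up on pieces smaller than min_range.
static size_t reserve_recursive(size_t start, size_t size, size_t min_range) {
  if (size < min_range) {
    return 0;
  }
  if (try_reserve(start, size)) {
    return size;
  }
  const size_t half = size / 2;
  if (half < min_range) {
    return 0;
  }
  return reserve_recursive(start, half, min_range) +
         reserve_recursive(start + half, size - half, min_range);
}

int main() {
  const size_t reserved = reserve_recursive(0, 256, 16);
  std::printf("reserved %zu of 256 units\n", reserved); // 192, the blocker covers 64
  return 0;
}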
|
||||
|
||||
size_t ZVirtualMemoryReserver::calculate_min_range(size_t size) {
|
||||
// Don't try to reserve address ranges smaller than 1% of the requested size.
|
||||
// This avoids an explosion of reservation attempts in case large parts of the
// address space are already occupied.
|
||||
return align_up(size / ZMaxVirtualReservations, ZGranuleSize);
|
||||
}
|
||||
|
||||
size_t ZVirtualMemoryReserver::reserve_discontiguous(size_t size) {
|
||||
const size_t min_range = calculate_min_range(size);
|
||||
uintptr_t start = 0;
|
||||
size_t reserved = 0;
|
||||
|
||||
// Reserve size somewhere between [0, ZAddressOffsetMax)
|
||||
while (reserved < size && start < ZAddressOffsetMax) {
|
||||
const size_t remaining = MIN2(size - reserved, ZAddressOffsetMax - start);
|
||||
reserved += reserve_discontiguous(to_zoffset(start), remaining, min_range);
|
||||
start += remaining;
|
||||
}
|
||||
|
||||
return reserved;
|
||||
}
|
||||
|
||||
bool ZVirtualMemoryReserver::reserve_contiguous(zoffset start, size_t size) {
|
||||
assert(is_aligned(size, ZGranuleSize), "Must be granule aligned 0x%zx", size);
|
||||
|
||||
// Reserve address views
|
||||
const zaddress_unsafe addr = ZOffset::address_unsafe(start);
|
||||
|
||||
// Reserve address space
|
||||
if (!pd_reserve(addr, size)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Register address views with native memory tracker
|
||||
ZNMT::reserve(addr, size);
|
||||
|
||||
// Register the memory reservation
|
||||
_registry.register_range({start, size});
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ZVirtualMemoryReserver::reserve_contiguous(size_t size) {
|
||||
// Allow at most 8192 attempts spread evenly across [0, ZAddressOffsetMax)
|
||||
const size_t unused = ZAddressOffsetMax - size;
|
||||
const size_t increment = MAX2(align_up(unused / 8192, ZGranuleSize), ZGranuleSize);
|
||||
|
||||
for (uintptr_t start = 0; start + size <= ZAddressOffsetMax; start += increment) {
|
||||
if (reserve_contiguous(to_zoffset(start), size)) {
|
||||
// Success
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// Failed
|
||||
return false;
|
||||
}
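The probing loop above bounds the number of placement attempts by spacing them evenly over the offset range. A small sketch of that increment computation, using made-up sizes (the range, request, granule and budget here are illustrative, not values from the patch):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t offset_max = size_t(4) << 40;   // 4 TB offset range (assumed)
  const size_t request    = size_t(128) << 30; // 128 GB contiguous request
  const size_t granule    = size_t(2) << 20;   // 2 MB granule (assumed)
  const size_t attempts   = 8192;              // attempt budget, as in the loop above

  const size_t unused = offset_max - request;
  size_t increment = ((unused / attempts + granule - 1) / granule) * granule; // align_up
  if (increment < granule) {
    increment = granule;
  }

  size_t probes = 0;
  for (size_t start = 0; start + request <= offset_max; start += increment) {
    probes++;
  }
  std::printf("increment %zu MB, %zu probe positions\n", increment >> 20, probes);
  return 0;
}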
|
||||
|
||||
size_t ZVirtualMemoryReserver::reserve(size_t size) {
|
||||
// Register Windows callbacks
|
||||
pd_register_callbacks(&_registry);
|
||||
|
||||
// Reserve address space
|
||||
|
||||
#ifdef ASSERT
|
||||
if (ZForceDiscontiguousHeapReservations > 0) {
|
||||
return force_reserve_discontiguous(size);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Prefer a contiguous address space
|
||||
if (reserve_contiguous(size)) {
|
||||
return size;
|
||||
}
|
||||
|
||||
// Fall back to a discontiguous address space
|
||||
return reserve_discontiguous(size);
|
||||
}
|
||||
|
||||
ZVirtualMemoryManager::ZVirtualMemoryManager(size_t max_capacity)
|
||||
: _partition_registries(),
|
||||
_multi_partition_registry(),
|
||||
_is_multi_partition_enabled(false),
|
||||
_initialized(false) {
|
||||
|
||||
assert(max_capacity <= ZAddressOffsetMax, "Too large max_capacity");
|
||||
|
||||
ZAddressSpaceLimit::print_limits();
|
||||
|
||||
const size_t limit = MIN2(ZAddressOffsetMax, ZAddressSpaceLimit::heap());
|
||||
|
||||
const size_t desired_for_partitions = max_capacity * ZVirtualToPhysicalRatio;
|
||||
const size_t desired_for_multi_partition = ZNUMA::count() > 1 ? desired_for_partitions : 0;
|
||||
|
||||
const size_t desired = desired_for_partitions + desired_for_multi_partition;
|
||||
const size_t requested = desired <= limit
|
||||
? desired
|
||||
: MIN2(desired_for_partitions, limit);
|
||||
|
||||
// Reserve virtual memory for the heap
|
||||
ZVirtualMemoryReserver reserver(requested);
|
||||
|
||||
const size_t reserved = reserver.reserved();
|
||||
const bool is_contiguous = reserver.is_contiguous();
|
||||
|
||||
log_debug_p(gc, init)("Reserved Space: limit " EXACTFMT ", desired " EXACTFMT ", requested " EXACTFMT,
|
||||
EXACTFMTARGS(limit), EXACTFMTARGS(desired), EXACTFMTARGS(requested));
|
||||
|
||||
if (reserved < max_capacity) {
|
||||
ZInitialize::error_d("Failed to reserve " EXACTFMT " address space for Java heap", EXACTFMTARGS(max_capacity));
|
||||
return;
|
||||
}
|
||||
|
||||
// Set ZAddressOffsetMax to the highest address end available after reservation
|
||||
ZAddressOffsetMax = untype(reserver.highest_available_address_end());
|
||||
|
||||
const size_t size_for_partitions = MIN2(reserved, desired_for_partitions);
|
||||
|
||||
// Divide size_for_partitions virtual memory over the NUMA nodes
|
||||
initialize_partitions(&reserver, size_for_partitions);
|
||||
|
||||
// Set up multi-partition or unreserve the surplus memory
|
||||
if (desired_for_multi_partition > 0 && reserved == desired) {
// Enough left to set up the multi-partition memory reservation
reserver.initialize_partition_registry(&_multi_partition_registry, desired_for_multi_partition);
_is_multi_partition_enabled = true;
} else {
// Failed to reserve enough memory for multi-partition; unreserve unused memory
reserver.unreserve_all();
}
|
||||
|
||||
assert(reserver.is_empty(), "Must have handled all reserved memory");
|
||||
|
||||
log_info_p(gc, init)("Reserved Space Type: %s/%s/%s",
|
||||
(is_contiguous ? "Contiguous" : "Discontiguous"),
|
||||
(requested == desired ? "Unrestricted" : "Restricted"),
|
||||
(reserved == desired ? "Complete" : ((reserved < desired_for_partitions) ? "Degraded" : "NUMA-Degraded")));
|
||||
log_info_p(gc, init)("Reserved Space Size: " EXACTFMT, EXACTFMTARGS(reserved));
|
||||
|
||||
// Successfully initialized
|
||||
_initialized = true;
|
||||
}
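The sizing logic above asks for a per-partition slice of virtual address space plus, on NUMA machines, an equal extra slice for multi-partition allocations, and falls back to the partition part alone when the total does not fit under the address-space limit. A standalone sketch with made-up numbers (the ratio, capacity and limit below are assumptions, not values taken from the patch):

#include <cstddef>
#include <cstdio>

int main() {
  const size_t max_capacity = size_t(32) << 30;  // 32 GB heap (assumed)
  const size_t ratio        = 16;                // virtual-to-physical ratio (assumed)
  const size_t numa_nodes   = 2;
  const size_t limit        = size_t(1) << 42;   // 4 TB address-space limit (assumed)

  const size_t for_partitions      = max_capacity * ratio;
  const size_t for_multi_partition = numa_nodes > 1 ? for_partitions : 0;
  const size_t desired             = for_partitions + for_multi_partition;

  // Ask for everything if it fits, otherwise fall back to the partition part
  // alone, capped by the limit (mirrors the 'requested' computation above)
  const size_t requested = desired <= limit ? desired
                                            : (for_partitions < limit ? for_partitions : limit);

  std::printf("desired %zu GB, requested %zu GB\n", desired >> 30, requested >> 30);
  return 0;
}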
|
||||
|
||||
void ZVirtualMemoryManager::initialize_partitions(ZVirtualMemoryReserver* reserver, size_t size_for_partitions) {
|
||||
precond(is_aligned(size_for_partitions, ZGranuleSize));
|
||||
|
||||
// If the capacity consists of fewer granules than the number of partitions,
// some partitions will be empty. Distribute their shares over the non-empty
// partitions.
|
||||
const uint32_t first_empty_numa_id = MIN2(static_cast<uint32_t>(size_for_partitions >> ZGranuleSizeShift), ZNUMA::count());
|
||||
const uint32_t ignore_count = ZNUMA::count() - first_empty_numa_id;
|
||||
|
||||
// Install reserved memory into registry(s)
|
||||
uint32_t numa_id;
|
||||
ZPerNUMAIterator<ZVirtualMemoryRegistry> iter(&_partition_registries);
|
||||
for (ZVirtualMemoryRegistry* registry; iter.next(&registry, &numa_id);) {
|
||||
if (numa_id == first_empty_numa_id) {
|
||||
break;
|
||||
}
|
||||
|
||||
// Calculate how much reserved memory this partition gets
|
||||
const size_t reserved_for_partition = ZNUMA::calculate_share(numa_id, size_for_partitions, ZGranuleSize, ignore_count);
|
||||
|
||||
// Transfer reserved memory
|
||||
reserver->initialize_partition_registry(registry, reserved_for_partition);
|
||||
}
|
||||
}
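To make the share distribution concrete, here is a standalone sketch of dividing a granule-aligned budget over NUMA nodes. The exact policy of ZNUMA::calculate_share is not shown in this hunk, so the even-split-with-remainder-to-low-ids rule below is an assumption for illustration only:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Assumed share policy (illustrative): split 'total_granules' evenly over the
// nodes and give one extra granule to the lowest node ids until the remainder
// is used up. The real ZNUMA::calculate_share may differ.
static size_t share_for(uint32_t node, size_t total_granules, uint32_t nodes) {
  const size_t base      = total_granules / nodes;
  const size_t remainder = total_granules % nodes;
  return base + (node < remainder ? 1 : 0);
}

int main() {
  const size_t granule        = size_t(2) << 20; // 2 MB granule (assumed)
  const size_t total_granules = 10;              // 20 MB to distribute
  const uint32_t nodes        = 3;

  size_t sum = 0;
  for (uint32_t node = 0; node < nodes; node++) {
    const size_t share = share_for(node, total_granules, nodes);
    sum += share;
    std::printf("node %u gets %zu granules (%zu MB)\n", node, share, (share * granule) >> 20);
  }
  std::printf("distributed %zu of %zu granules\n", sum, total_granules);
  return 0;
}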
|
||||
|
||||
bool ZVirtualMemoryManager::is_initialized() const {
|
||||
return _initialized;
|
||||
}
|
||||
|
||||
ZVirtualMemoryRegistry& ZVirtualMemoryManager::registry(uint32_t partition_id) {
|
||||
return _partition_registries.get(partition_id);
|
||||
}
|
||||
|
||||
const ZVirtualMemoryRegistry& ZVirtualMemoryManager::registry(uint32_t partition_id) const {
|
||||
return _partition_registries.get(partition_id);
|
||||
}
|
||||
|
||||
zoffset ZVirtualMemoryManager::lowest_available_address(uint32_t partition_id) const {
|
||||
return registry(partition_id).peek_low_address();
|
||||
}
|
||||
|
||||
void ZVirtualMemoryManager::insert(const ZVirtualMemory& vmem, uint32_t partition_id) {
|
||||
assert(partition_id == lookup_partition_id(vmem), "wrong partition_id for vmem");
|
||||
registry(partition_id).insert(vmem);
|
||||
}
|
||||
|
||||
void ZVirtualMemoryManager::insert_multi_partition(const ZVirtualMemory& vmem) {
|
||||
_multi_partition_registry.insert(vmem);
|
||||
}
|
||||
|
||||
size_t ZVirtualMemoryManager::remove_from_low_many_at_most(size_t size, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_out) {
|
||||
return registry(partition_id).remove_from_low_many_at_most(size, vmems_out);
|
||||
}
|
||||
|
||||
ZVirtualMemory ZVirtualMemoryManager::remove_from_low(size_t size, uint32_t partition_id) {
|
||||
return registry(partition_id).remove_from_low(size);
|
||||
}
|
||||
|
||||
ZVirtualMemory ZVirtualMemoryManager::remove_from_low_multi_partition(size_t size) {
|
||||
return _multi_partition_registry.remove_from_low(size);
|
||||
}
|
||||
|
||||
void ZVirtualMemoryManager::insert_and_remove_from_low_many(const ZVirtualMemory& vmem, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_out) {
|
||||
registry(partition_id).insert_and_remove_from_low_many(vmem, vmems_out);
|
||||
}
|
||||
|
||||
ZVirtualMemory ZVirtualMemoryManager::insert_and_remove_from_low_exact_or_many(size_t size, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_in_out) {
|
||||
return registry(partition_id).insert_and_remove_from_low_exact_or_many(size, vmems_in_out);
|
||||
}
|
||||
109
src/hotspot/share/gc/z/zVirtualMemoryManager.hpp
Normal file
@ -0,0 +1,109 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_HPP
|
||||
#define SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_HPP
|
||||
|
||||
#include "gc/z/zAddress.hpp"
|
||||
#include "gc/z/zArray.hpp"
|
||||
#include "gc/z/zRangeRegistry.hpp"
|
||||
#include "gc/z/zValue.hpp"
|
||||
#include "gc/z/zVirtualMemory.hpp"
|
||||
|
||||
using ZVirtualMemoryRegistry = ZRangeRegistry<ZVirtualMemory>;
|
||||
|
||||
class ZVirtualMemoryReserver {
|
||||
friend class ZMapperTest;
|
||||
friend class ZVirtualMemoryManagerTest;
|
||||
|
||||
private:
|
||||
|
||||
ZVirtualMemoryRegistry _registry;
|
||||
const size_t _reserved;
|
||||
|
||||
static size_t calculate_min_range(size_t size);
|
||||
|
||||
// Platform specific implementation
|
||||
void pd_register_callbacks(ZVirtualMemoryRegistry* registry);
|
||||
bool pd_reserve(zaddress_unsafe addr, size_t size);
|
||||
void pd_unreserve(zaddress_unsafe addr, size_t size);
|
||||
|
||||
bool reserve_contiguous(zoffset start, size_t size);
|
||||
bool reserve_contiguous(size_t size);
|
||||
size_t reserve_discontiguous(zoffset start, size_t size, size_t min_range);
|
||||
size_t reserve_discontiguous(size_t size);
|
||||
|
||||
size_t reserve(size_t size);
|
||||
void unreserve(const ZVirtualMemory& vmem);
|
||||
|
||||
DEBUG_ONLY(size_t force_reserve_discontiguous(size_t size);)
|
||||
|
||||
public:
|
||||
ZVirtualMemoryReserver(size_t size);
|
||||
|
||||
void initialize_partition_registry(ZVirtualMemoryRegistry* partition_registry, size_t size);
|
||||
|
||||
void unreserve_all();
|
||||
|
||||
bool is_empty() const;
|
||||
bool is_contiguous() const;
|
||||
|
||||
size_t reserved() const;
|
||||
|
||||
zoffset_end highest_available_address_end() const;
|
||||
};
|
||||
|
||||
class ZVirtualMemoryManager {
|
||||
private:
|
||||
ZPerNUMA<ZVirtualMemoryRegistry> _partition_registries;
|
||||
ZVirtualMemoryRegistry _multi_partition_registry;
|
||||
bool _is_multi_partition_enabled;
|
||||
bool _initialized;
|
||||
|
||||
ZVirtualMemoryRegistry& registry(uint32_t partition_id);
|
||||
const ZVirtualMemoryRegistry& registry(uint32_t partition_id) const;
|
||||
|
||||
public:
|
||||
ZVirtualMemoryManager(size_t max_capacity);
|
||||
|
||||
void initialize_partitions(ZVirtualMemoryReserver* reserver, size_t size_for_partitions);
|
||||
|
||||
bool is_initialized() const;
|
||||
bool is_multi_partition_enabled() const;
|
||||
bool is_in_multi_partition(const ZVirtualMemory& vmem) const;
|
||||
|
||||
uint32_t lookup_partition_id(const ZVirtualMemory& vmem) const;
|
||||
zoffset lowest_available_address(uint32_t partition_id) const;
|
||||
|
||||
void insert(const ZVirtualMemory& vmem, uint32_t partition_id);
|
||||
void insert_multi_partition(const ZVirtualMemory& vmem);
|
||||
|
||||
size_t remove_from_low_many_at_most(size_t size, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_out);
|
||||
ZVirtualMemory remove_from_low(size_t size, uint32_t partition_id);
|
||||
ZVirtualMemory remove_from_low_multi_partition(size_t size);
|
||||
|
||||
void insert_and_remove_from_low_many(const ZVirtualMemory& vmem, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_out);
|
||||
ZVirtualMemory insert_and_remove_from_low_exact_or_many(size_t size, uint32_t partition_id, ZArray<ZVirtualMemory>* vmems_in_out);
|
||||
};
|
||||
|
||||
#endif // SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_HPP
|
||||
52
src/hotspot/share/gc/z/zVirtualMemoryManager.inline.hpp
Normal file
@ -0,0 +1,52 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_INLINE_HPP
|
||||
#define SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_INLINE_HPP
|
||||
|
||||
#include "gc/z/zVirtualMemoryManager.hpp"
|
||||
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "gc/z/zRangeRegistry.inline.hpp"
|
||||
|
||||
|
||||
inline bool ZVirtualMemoryManager::is_multi_partition_enabled() const {
|
||||
return _is_multi_partition_enabled;
|
||||
}
|
||||
|
||||
inline bool ZVirtualMemoryManager::is_in_multi_partition(const ZVirtualMemory& vmem) const {
|
||||
return _multi_partition_registry.limits_contain(vmem);
|
||||
}
|
||||
|
||||
inline uint32_t ZVirtualMemoryManager::lookup_partition_id(const ZVirtualMemory& vmem) const {
|
||||
const uint32_t num_partitions = _partition_registries.count();
|
||||
for (uint32_t partition_id = 0; partition_id < num_partitions; partition_id++) {
|
||||
if (registry(partition_id).limits_contain(vmem)) {
|
||||
return partition_id;
|
||||
}
|
||||
}
|
||||
|
||||
ShouldNotReachHere();
|
||||
}
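A small model of the lookup above: each partition registry is anchored to a fixed slice of the offset space, and a range is attributed to the partition whose limits contain it. Everything below is illustrative; the names and numbers are made up:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the anchored limits of a partition registry
struct Limits {
  size_t low;
  size_t high;
  bool contains(size_t start, size_t size) const {
    return start >= low && start + size <= high;
  }
};

int main() {
  const Limits partitions[] = {{0, 1024}, {1024, 2048}};
  const size_t start = 1536;
  const size_t size  = 128;

  for (uint32_t id = 0; id < 2; id++) {
    if (partitions[id].contains(start, size)) {
      std::printf("range [%zu, %zu) belongs to partition %u\n", start, start + size, id);
      return 0;
    }
  }
  std::printf("range not covered by any partition\n");
  return 1;
}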
|
||||
|
||||
#endif // SHARE_GC_Z_ZVIRTUALMEMORYMANAGER_INLINE_HPP
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -68,13 +68,6 @@
|
||||
product(bool, ZCollectionIntervalOnly, false, \
|
||||
"Only use timers for GC heuristics") \
|
||||
\
|
||||
product(double, ZAsyncUnmappingLimit, 100.0, DIAGNOSTIC, \
|
||||
"Specify the max amount (percentage of max heap size) of async " \
|
||||
"unmapping that can be in-flight before unmapping requests are " \
|
||||
"temporarily forced to be synchronous instead. " \
|
||||
"The default means after an amount of pages proportional to the " \
|
||||
"max capacity is enqueued, we resort to synchronous unmapping.") \
|
||||
\
|
||||
product(uint, ZStatisticsInterval, 10, DIAGNOSTIC, \
|
||||
"Time between statistics print outs (in seconds)") \
|
||||
range(1, (uint)-1) \
|
||||
@ -118,6 +111,11 @@
|
||||
develop(bool, ZVerifyOops, false, \
|
||||
"Verify accessed oops") \
|
||||
\
|
||||
develop(uint, ZFakeNUMA, 1, \
|
||||
"ZFakeNUMA is used to test the internal NUMA memory support " \
|
||||
"without the need for UseNUMA") \
|
||||
range(1, 16) \
|
||||
\
|
||||
develop(size_t, ZForceDiscontiguousHeapReservations, 0, \
|
||||
"The gc will attempt to split the heap reservation into this " \
|
||||
"many reservations, subject to available virtual address space " \
|
||||
|
||||
@ -1,7 +1,7 @@
|
||||
<?xml version="1.0" encoding="utf-8"?>
|
||||
|
||||
<!--
|
||||
Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
Copyright (c) 2012, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
|
||||
This code is free software; you can redistribute it and/or modify it
|
||||
@ -1155,12 +1155,14 @@
|
||||
</Event>
|
||||
|
||||
<Event name="ZPageAllocation" category="Java Virtual Machine, GC, Detailed" label="ZGC Page Allocation" description="Allocation of a ZPage" thread="true" stackTrace="true">
|
||||
<Field type="ZPageTypeType" name="type" label="Type" />
|
||||
<Field type="ulong" contentType="bytes" name="size" label="Size" />
|
||||
<Field type="ulong" contentType="bytes" name="flushed" label="Flushed" />
|
||||
<Field type="ulong" contentType="bytes" name="committed" label="Committed" />
|
||||
<Field type="uint" name="segments" label="Segments" />
|
||||
<Field type="boolean" name="nonBlocking" label="Non-blocking" />
|
||||
<Field type="ZPageTypeType" name="type" label="Type" />
|
||||
<Field type="ulong" contentType="bytes" name="size" label="Size" />
|
||||
<Field type="ulong" contentType="bytes" name="harvested" label="Harvested" />
|
||||
<Field type="ulong" contentType="bytes" name="committed" label="Committed" />
|
||||
<Field type="uint" name="numHarvested" label="Number of harvested vmems" />
|
||||
<Field type="boolean" name="multiPartition" label="Multi-partition allocation" />
|
||||
<Field type="boolean" name="successful" label="Successful allocation" />
|
||||
<Field type="boolean" name="nonBlocking" label="Non-blocking" />
|
||||
</Event>
|
||||
|
||||
<Event name="ZRelocationSet" category="Java Virtual Machine, GC, Detailed" label="ZGC Relocation Set" thread="true">
|
||||
@ -1198,10 +1200,6 @@
|
||||
<Field type="ulong" contentType="bytes" name="uncommitted" label="Uncommitted" />
|
||||
</Event>
|
||||
|
||||
<Event name="ZUnmap" category="Java Virtual Machine, GC, Detailed" label="ZGC Unmap" description="Unmapping of memory" thread="true">
|
||||
<Field type="ulong" contentType="bytes" name="unmapped" label="Unmapped" />
|
||||
</Event>
|
||||
|
||||
<Event name="ShenandoahHeapRegionStateChange" category="Java Virtual Machine, GC, Detailed" label="Shenandoah Heap Region State Change" description="Information about a Shenandoah heap region state change"
|
||||
startTime="false">
|
||||
<Field type="uint" name="index" label="Index" />
|
||||
|
||||
@ -328,6 +328,12 @@ void VMError::print_stack_trace(outputStream* st, JavaThread* jt,
|
||||
#endif // ZERO
|
||||
}
|
||||
|
||||
const char* VMError::get_filename_only() {
|
||||
char separator = os::file_separator()[0];
|
||||
const char* p = strrchr(_filename, separator);
|
||||
return p ? p + 1 : _filename;
|
||||
}
|
||||
|
||||
/**
|
||||
* Adds `value` to `list` iff it's not already present and there is sufficient
|
||||
* capacity (i.e. length(list) < `list_capacity`). The length of the list
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2003, 2024, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2022 SAP SE. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
@ -26,7 +26,9 @@
|
||||
#ifndef SHARE_UTILITIES_VMERROR_HPP
|
||||
#define SHARE_UTILITIES_VMERROR_HPP
|
||||
|
||||
#include "memory/allStatic.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
#include "utilities/ostream.hpp"
|
||||
|
||||
class Decoder;
|
||||
class frame;
|
||||
@ -108,11 +110,7 @@ class VMError : public AllStatic {
|
||||
static void print_stack_trace(outputStream* st, JavaThread* jt,
|
||||
char* buf, int buflen, bool verbose = false);
|
||||
|
||||
static const char* get_filename_only() {
|
||||
char separator = os::file_separator()[0];
|
||||
const char* p = strrchr(_filename, separator);
|
||||
return p ? p+1 : _filename;
|
||||
}
|
||||
static const char* get_filename_only();
|
||||
|
||||
static bool should_report_bug(unsigned int id) {
|
||||
return (id != OOM_MALLOC_ERROR) && (id != OOM_MMAP_ERROR);
|
||||
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -19,41 +19,34 @@
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_GC_Z_ZUNMAPPER_HPP
|
||||
#define SHARE_GC_Z_ZUNMAPPER_HPP
|
||||
package sun.jvm.hotspot.gc.z;
|
||||
|
||||
#include "gc/z/zList.hpp"
|
||||
#include "gc/z/zLock.hpp"
|
||||
#include "gc/z/zThread.hpp"
|
||||
import sun.jvm.hotspot.runtime.VM;
|
||||
import sun.jvm.hotspot.types.CIntegerField;
|
||||
import sun.jvm.hotspot.types.Type;
|
||||
import sun.jvm.hotspot.types.TypeDataBase;
|
||||
|
||||
class ZPage;
|
||||
class ZPageAllocator;
|
||||
// Mirror class for ZNUMA
|
||||
|
||||
class ZUnmapper : public ZThread {
|
||||
private:
|
||||
ZPageAllocator* const _page_allocator;
|
||||
ZConditionLock _lock;
|
||||
ZList<ZPage> _queue;
|
||||
size_t _enqueued_bytes;
|
||||
bool _warned_sync_unmapping;
|
||||
bool _stop;
|
||||
public class ZNUMA {
|
||||
|
||||
ZPage* dequeue();
|
||||
bool try_enqueue(ZPage* page);
|
||||
size_t queue_capacity() const;
|
||||
bool is_saturated() const;
|
||||
void do_unmap_and_destroy_page(ZPage* page) const;
|
||||
private static CIntegerField countField;
|
||||
|
||||
protected:
|
||||
virtual void run_thread();
|
||||
virtual void terminate();
|
||||
static {
|
||||
VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
|
||||
}
|
||||
|
||||
public:
|
||||
ZUnmapper(ZPageAllocator* page_allocator);
|
||||
private static synchronized void initialize(TypeDataBase db) {
|
||||
Type type = db.lookupType("ZNUMA");
|
||||
|
||||
void unmap_and_destroy_page(ZPage* page);
|
||||
};
|
||||
countField = type.getCIntegerField("_count");
|
||||
}
|
||||
|
||||
public static long count() {
|
||||
return countField.getValue();
|
||||
}
|
||||
}
|
||||
|
||||
#endif // SHARE_GC_Z_ZUNMAPPER_HPP
|
||||
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -27,6 +27,7 @@ package sun.jvm.hotspot.gc.z;
|
||||
import sun.jvm.hotspot.debugger.Address;
|
||||
import sun.jvm.hotspot.runtime.VM;
|
||||
import sun.jvm.hotspot.runtime.VMObject;
|
||||
import sun.jvm.hotspot.runtime.VMObjectFactory;
|
||||
import sun.jvm.hotspot.types.CIntegerField;
|
||||
import sun.jvm.hotspot.types.Type;
|
||||
import sun.jvm.hotspot.types.TypeDataBase;
|
||||
@ -36,8 +37,8 @@ import sun.jvm.hotspot.types.TypeDataBase;
|
||||
public class ZPageAllocator extends VMObject {
|
||||
|
||||
private static CIntegerField maxCapacityField;
|
||||
private static CIntegerField capacityField;
|
||||
private static CIntegerField usedField;
|
||||
private static long partitionsOffset;
|
||||
private static long numaCount;
|
||||
|
||||
static {
|
||||
VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
|
||||
@ -47,8 +48,13 @@ public class ZPageAllocator extends VMObject {
|
||||
Type type = db.lookupType("ZPageAllocator");
|
||||
|
||||
maxCapacityField = type.getCIntegerField("_max_capacity");
|
||||
capacityField = type.getCIntegerField("_capacity");
|
||||
usedField = type.getCIntegerField("_used");
|
||||
partitionsOffset = type.getAddressField("_partitions").getOffset();
|
||||
numaCount = ZNUMA.count();
|
||||
}
|
||||
|
||||
private ZPerNUMAZPartition partitions() {
|
||||
Address partitionsAddr = addr.addOffsetTo(partitionsOffset);
|
||||
return VMObjectFactory.newObject(ZPerNUMAZPartition.class, partitionsAddr);
|
||||
}
|
||||
|
||||
public long maxCapacity() {
|
||||
@ -56,11 +62,19 @@ public class ZPageAllocator extends VMObject {
|
||||
}
|
||||
|
||||
public long capacity() {
|
||||
return capacityField.getValue(addr);
|
||||
long total_capacity = 0;
|
||||
for (int id = 0; id < numaCount; id++) {
|
||||
total_capacity += partitions().value(id).capacity();
|
||||
}
|
||||
return total_capacity;
|
||||
}
|
||||
|
||||
public long used() {
|
||||
return usedField.getValue(addr);
|
||||
long total_used = 0;
|
||||
for (int id = 0; id < numaCount; id++) {
|
||||
total_used += partitions().value(id).used();
|
||||
}
|
||||
return total_used;
|
||||
}
|
||||
|
||||
public ZPageAllocator(Address addr) {
|
||||
|
||||
@ -0,0 +1,63 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
package sun.jvm.hotspot.gc.z;
|
||||
|
||||
import sun.jvm.hotspot.debugger.Address;
|
||||
import sun.jvm.hotspot.runtime.VM;
|
||||
import sun.jvm.hotspot.runtime.VMObject;
|
||||
import sun.jvm.hotspot.types.CIntegerField;
|
||||
import sun.jvm.hotspot.types.Type;
|
||||
import sun.jvm.hotspot.types.TypeDataBase;
|
||||
|
||||
// Mirror class for ZPartition
|
||||
|
||||
public class ZPartition extends VMObject {
|
||||
|
||||
private static CIntegerField capacityField;
|
||||
private static CIntegerField usedField;
|
||||
|
||||
static {
|
||||
VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
|
||||
}
|
||||
|
||||
private static synchronized void initialize(TypeDataBase db) {
|
||||
Type type = db.lookupType("ZPartition");
|
||||
|
||||
capacityField = type.getCIntegerField("_capacity");
|
||||
usedField = type.getCIntegerField("_used");
|
||||
}
|
||||
|
||||
public ZPartition(Address addr) {
|
||||
super(addr);
|
||||
}
|
||||
|
||||
public long capacity() {
|
||||
return capacityField.getValue(addr);
|
||||
}
|
||||
|
||||
public long used() {
|
||||
return usedField.getValue(addr);
|
||||
}
|
||||
}
|
||||
@ -0,0 +1,61 @@
|
||||
/*
|
||||
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
package sun.jvm.hotspot.gc.z;
|
||||
|
||||
import sun.jvm.hotspot.debugger.Address;
|
||||
import sun.jvm.hotspot.runtime.VM;
|
||||
import sun.jvm.hotspot.runtime.VMObject;
|
||||
import sun.jvm.hotspot.runtime.VMObjectFactory;
|
||||
import sun.jvm.hotspot.types.AddressField;
|
||||
import sun.jvm.hotspot.types.Type;
|
||||
import sun.jvm.hotspot.types.TypeDataBase;
|
||||
|
||||
// Mirror class for ZPerNUMA<ZPartition>
|
||||
|
||||
public class ZPerNUMAZPartition extends VMObject {
|
||||
|
||||
private static AddressField addrField;
|
||||
private static long valueOffset = 4096; // 4k
|
||||
|
||||
static {
|
||||
VM.registerVMInitializedObserver((o, d) -> initialize(VM.getVM().getTypeDataBase()));
|
||||
}
|
||||
|
||||
private static synchronized void initialize(TypeDataBase db) {
|
||||
Type type = db.lookupType("ZPerNUMAZPartition");
|
||||
addrField = type.getAddressField("_addr");
|
||||
}
|
||||
|
||||
public ZPartition value(long id) {
|
||||
Address valueArrayAddr = addrField.getValue(addr);
|
||||
Address partitionAddr = valueArrayAddr.addOffsetTo(id * valueOffset);
|
||||
return VMObjectFactory.newObject(ZPartition.class, partitionAddr);
|
||||
}
|
||||
|
||||
public ZPerNUMAZPartition(Address addr) {
|
||||
super(addr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -883,11 +883,6 @@
|
||||
<setting name="threshold">0 ms</setting>
|
||||
</event>
|
||||
|
||||
<event name="jdk.ZUnmap">
|
||||
<setting name="enabled">true</setting>
|
||||
<setting name="threshold">0 ms</setting>
|
||||
</event>
|
||||
|
||||
<event name="jdk.ZYoungGarbageCollection">
|
||||
<setting name="enabled">true</setting>
|
||||
<setting name="threshold">0 ms</setting>
|
||||
|
||||
@ -883,11 +883,6 @@
|
||||
<setting name="threshold">0 ms</setting>
|
||||
</event>
|
||||
|
||||
<event name="jdk.ZUnmap">
|
||||
<setting name="enabled">true</setting>
|
||||
<setting name="threshold">0 ms</setting>
|
||||
</event>
|
||||
|
||||
<event name="jdk.ZYoungGarbageCollection">
|
||||
<setting name="enabled">true</setting>
|
||||
<setting name="threshold">0 ms</setting>
|
||||
|
||||
@ -22,7 +22,9 @@
|
||||
*/
|
||||
|
||||
#include "gc/z/zArray.inline.hpp"
|
||||
#include "unittest.hpp"
|
||||
#include "zunittest.hpp"
|
||||
|
||||
class ZArrayTest : public ZTest {};
|
||||
|
||||
TEST(ZArray, sanity) {
|
||||
ZArray<int> a;
|
||||
@ -80,3 +82,116 @@ TEST(ZArray, iterator) {
|
||||
// Check count
|
||||
ASSERT_EQ(count, 10);
|
||||
}
|
||||
|
||||
TEST_F(ZArrayTest, slice) {
|
||||
ZArray<int> a0(0);
|
||||
ZArray<int> a10(10);
|
||||
ZArray<int> ar(10 + abs(random() % 10));
|
||||
|
||||
// Add elements
|
||||
for (int i = 0; i < ar.capacity(); ++i) {
|
||||
const auto append = [&](ZArray<int>& a) {
|
||||
if (i < a.capacity()) {
|
||||
a.append(i);
|
||||
}
|
||||
};
|
||||
|
||||
append(a0);
|
||||
append(a10);
|
||||
append(ar);
|
||||
}
|
||||
|
||||
{
|
||||
const auto reverse_test = [](const ZArray<int>& original) {
|
||||
ZArray<int> a(original.capacity());
|
||||
a.appendAll(&original);
|
||||
|
||||
const auto reverse = [](ZArraySlice<int> slice, auto reverse) -> ZArraySlice<int> {
|
||||
const auto swap_elements = [](ZArraySlice<int> s1, ZArraySlice<int> s2) {
|
||||
ASSERT_EQ(s1.length(), s2.length());
|
||||
for (int i = 0; i < s1.length(); ++i) {
|
||||
::swap(s1.at(i), s2.at(i));
|
||||
}
|
||||
};
|
||||
|
||||
const int length = slice.length();
|
||||
if (length > 1) {
|
||||
const int middle = length / 2;
|
||||
swap_elements(
|
||||
reverse(slice.slice_front(middle), reverse),
|
||||
reverse(slice.slice_back(length - middle), reverse)
|
||||
);
|
||||
}
|
||||
return slice;
|
||||
};
|
||||
|
||||
const auto check_reversed = [](ZArraySlice<const int> original, ZArraySlice<int> reversed) {
|
||||
ASSERT_EQ(original.length(), reversed.length());
|
||||
for (int e : original) {
|
||||
ASSERT_EQ(e, reversed.pop());
|
||||
}
|
||||
};
|
||||
|
||||
ZArraySlice<int> a_reversed = reverse(a, reverse);
|
||||
check_reversed(original, a_reversed);
|
||||
};
|
||||
|
||||
reverse_test(a0);
|
||||
reverse_test(a10);
|
||||
reverse_test(ar);
|
||||
}
|
||||
|
||||
{
|
||||
const auto sort_test = [&](const ZArray<int>& original) {
|
||||
ZArray<int> a(original.capacity());
|
||||
a.appendAll(&original);
|
||||
|
||||
const auto shuffle = [&](ZArraySlice<int> slice) {
|
||||
for (int i = 1; i < slice.length(); ++i) {
|
||||
const ptrdiff_t random_index = random() % (i + 1);
|
||||
::swap(slice.at(i), slice.at(random_index));
|
||||
}
|
||||
};
|
||||
|
||||
const auto qsort = [](ZArraySlice<int> slice, auto qsort) -> void {
|
||||
const auto partition = [](ZArraySlice<int> slice) {
|
||||
const int p = slice.last();
|
||||
int pi = 0;
|
||||
for (int i = 0; i < slice.length() - 1; ++i) {
|
||||
if (slice.at(i) < p) {
|
||||
::swap(slice.at(i), slice.at(pi++));
|
||||
}
|
||||
}
|
||||
::swap(slice.at(pi), slice.last());
|
||||
return pi;
|
||||
};
|
||||
|
||||
if (slice.length() > 1) {
|
||||
const int pi = partition(slice);
|
||||
qsort(slice.slice_front(pi), qsort);
|
||||
qsort(slice.slice_back(pi + 1), qsort);
|
||||
}
|
||||
};
|
||||
|
||||
const auto verify = [](ZArraySlice<const int> slice) {
|
||||
for (int i = 0; i < slice.length(); ++i) {
|
||||
int e = slice.at(i);
|
||||
for (int l : slice.slice_front(i)) {
|
||||
ASSERT_GE(e, l);
|
||||
}
|
||||
for (int g : slice.slice_back(i)) {
|
||||
ASSERT_LE(e, g);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
shuffle(a);
|
||||
qsort(a, qsort);
|
||||
verify(a);
|
||||
};
|
||||
|
||||
sort_test(a0);
|
||||
sort_test(a10);
|
||||
sort_test(ar);
|
||||
}
|
||||
}
|
||||
|
||||
@ -28,6 +28,7 @@
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.hpp"
#include "gc/z/zPage.inline.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"

@ -221,10 +222,7 @@ public:
  static void test(void (*function)(ZForwarding*), uint32_t size) {
    // Create page
    const ZVirtualMemory vmem(zoffset(_page_offset), ZPageSizeSmall);
    const ZPhysicalMemory pmem(ZPhysicalMemorySegment(zoffset(0), ZPageSizeSmall, true));
    ZPage page(ZPageType::small, vmem, pmem);

    page.reset(ZPageAge::eden);
    ZPage page(ZPageType::small, ZPageAge::eden, vmem, 0u);

    const size_t object_size = 16;
    const zaddress object = page.alloc_object(object_size);

379 test/hotspot/gtest/gc/z/test_zIntrusiveRBTree.cpp Normal file
@ -0,0 +1,379 @@
/*
 * Copyright (c) 2024, 2025, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "gc/z/zIntrusiveRBTree.inline.hpp"
#include "memory/allocation.hpp"
#include "memory/arena.hpp"
#include "nmt/memTag.hpp"
#include "unittest.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "zunittest.hpp"

#include <limits>

struct ZTestEntryCompare {
  int operator()(const ZIntrusiveRBTreeNode* a, const ZIntrusiveRBTreeNode* b);
  int operator()(int key, const ZIntrusiveRBTreeNode* entry);
};

class ZTestEntry : public ArenaObj {
  friend class ZIntrusiveRBTree<int, ZTestEntryCompare>;

public:
  using ZTree = ZIntrusiveRBTree<int, ZTestEntryCompare>;
private:
  const int _id;
  ZIntrusiveRBTreeNode _node;

public:
  ZTestEntry(int id)
    : _id(id),
      _node() {}

  int id() const {
    return _id;
  }

  static ZIntrusiveRBTreeNode* cast_to_inner(ZTestEntry* element) {
    return &element->_node;
  }
  static const ZTestEntry* cast_to_outer(const ZIntrusiveRBTreeNode* node) {
    return (ZTestEntry*)((uintptr_t)node - offset_of(ZTestEntry, _node));
  }

};

int ZTestEntryCompare::operator()(const ZIntrusiveRBTreeNode* a, const ZIntrusiveRBTreeNode* b) {
  return ZTestEntry::cast_to_outer(a)->id() - ZTestEntry::cast_to_outer(b)->id();
}
int ZTestEntryCompare::operator()(int key, const ZIntrusiveRBTreeNode* entry) {
  return key - ZTestEntry::cast_to_outer(entry)->id();
}

class ZTreeTest : public ZTest {
public:
  void shuffle_array(ZTestEntry** beg, ZTestEntry** end);
  void reverse_array(ZTestEntry** beg, ZTestEntry** end);
};

class ResettableArena : public Arena {
public:
  using Arena::Arena;

  void reset_arena() {
    if (_chunk != _first) {
      set_size_in_bytes(_chunk->length());
      Chunk::next_chop(_first);
    }
    _chunk = _first;
    _hwm = _chunk->bottom();
    _max = _chunk->top();
  }
};

TEST_F(ZTreeTest, test_random) {
  constexpr size_t sizes[] = {1, 2, 4, 8, 16, 1024, 1024 * 1024};
  constexpr size_t num_sizes = ARRAY_SIZE(sizes);
  constexpr size_t iterations_multiplier = 4;
  constexpr size_t max_allocation_size = sizes[num_sizes - 1] * iterations_multiplier * sizeof(ZTestEntry);
  ResettableArena arena{MemTag::mtTest, Arena::Tag::tag_other, max_allocation_size};
  for (size_t s : sizes) {
    ZTestEntry::ZTree tree;
    const size_t num_iterations = s * iterations_multiplier;
    for (size_t i = 0; i < num_iterations; i++) {
      if (i % s == 0) {
        tree.verify_tree();
      }
      int id = random() % s;
      auto cursor = tree.find(id);
      if (cursor.found()) {
        // Replace or Remove
        if (i % 2 == 0) {
          // Replace
          if (i % 4 == 0) {
            // Replace with new
            tree.replace(ZTestEntry::cast_to_inner(new (&arena) ZTestEntry(id)), cursor);
          } else {
            // Replace with same
            tree.replace(cursor.node(), cursor);
          }
        } else {
          // Remove
          tree.remove(cursor);
        }
      } else {
        // Insert
        tree.insert(ZTestEntry::cast_to_inner(new (&arena) ZTestEntry(id)), cursor);
      }
    }
    tree.verify_tree();
    arena.reset_arena();
  }
}

void ZTreeTest::reverse_array(ZTestEntry** beg, ZTestEntry** end) {
  if (beg == end) {
    return;
  }

  ZTestEntry** first = beg;
  ZTestEntry** last = end - 1;
  while (first < last) {
    ::swap(*first, *last);
    first++;
    last--;
  }
}

void ZTreeTest::shuffle_array(ZTestEntry** beg, ZTestEntry** end) {
  if (beg == end) {
    return;
  }

  for (ZTestEntry** first = beg + 1; first != end; first++) {
    const ptrdiff_t distance = first - beg;
    ASSERT_GE(distance, 0);
    const ptrdiff_t random_index = random() % (distance + 1);
    ::swap(*first, *(beg + random_index));
  }
}

TEST_F(ZTreeTest, test_insert) {
  Arena arena(MemTag::mtTest);
  constexpr size_t num_entries = 1024;
  ZTestEntry* forward[num_entries]{};
  ZTestEntry* reverse[num_entries]{};
  ZTestEntry* shuffle[num_entries]{};
  for (size_t i = 0; i < num_entries; i++) {
    const int id = static_cast<int>(i);
    forward[i] = new (&arena) ZTestEntry(id);
    reverse[i] = new (&arena) ZTestEntry(id);
    shuffle[i] = new (&arena) ZTestEntry(id);
  }
  reverse_array(reverse, reverse + num_entries);
  shuffle_array(shuffle, shuffle + num_entries);

  ZTestEntry::ZTree forward_tree;
  auto cursor = forward_tree.root_cursor();
  for (size_t i = 0; i < num_entries; i++) {
    ASSERT_TRUE(cursor.is_valid());
    ASSERT_FALSE(cursor.found());
    ZIntrusiveRBTreeNode* const new_node = ZTestEntry::cast_to_inner(forward[i]);
    forward_tree.insert(new_node, cursor);
    cursor = forward_tree.next_cursor(new_node);
  }
  forward_tree.verify_tree();

  ZTestEntry::ZTree reverse_tree;
  cursor = reverse_tree.root_cursor();
  for (size_t i = 0; i < num_entries; i++) {
    ASSERT_TRUE(cursor.is_valid());
    ASSERT_FALSE(cursor.found());
    ZIntrusiveRBTreeNode* const new_node = ZTestEntry::cast_to_inner(reverse[i]);
    reverse_tree.insert(new_node, cursor);
    cursor = reverse_tree.prev_cursor(new_node);
  }
  reverse_tree.verify_tree();

  ZTestEntry::ZTree shuffle_tree;
  for (size_t i = 0; i < num_entries; i++) {
    cursor = shuffle_tree.find(shuffle[i]->id());
    ASSERT_TRUE(cursor.is_valid());
    ASSERT_FALSE(cursor.found());
    ZIntrusiveRBTreeNode* const new_node = ZTestEntry::cast_to_inner(shuffle[i]);
    shuffle_tree.insert(new_node, cursor);
  }
  shuffle_tree.verify_tree();

  ZTestEntryCompare compare_fn;
  const ZIntrusiveRBTreeNode* forward_node = forward_tree.first();
  const ZIntrusiveRBTreeNode* reverse_node = reverse_tree.first();
  const ZIntrusiveRBTreeNode* shuffle_node = shuffle_tree.first();
  size_t count = 0;
  while (true) {
    count++;
    ASSERT_EQ(compare_fn(forward_node, reverse_node), 0);
    ASSERT_EQ(compare_fn(forward_node, shuffle_node), 0);
    ASSERT_EQ(compare_fn(reverse_node, shuffle_node), 0);
    const ZIntrusiveRBTreeNode* forward_next_node = forward_node->next();
    const ZIntrusiveRBTreeNode* reverse_next_node = reverse_node->next();
    const ZIntrusiveRBTreeNode* shuffle_next_node = shuffle_node->next();
    if (forward_next_node == nullptr) {
      ASSERT_EQ(forward_next_node, reverse_next_node);
      ASSERT_EQ(forward_next_node, shuffle_next_node);
      ASSERT_EQ(forward_node, forward_tree.last());
      ASSERT_EQ(reverse_node, reverse_tree.last());
      ASSERT_EQ(shuffle_node, shuffle_tree.last());
      break;
    }
    ASSERT_LT(compare_fn(forward_node, forward_next_node), 0);
    ASSERT_LT(compare_fn(reverse_node, reverse_next_node), 0);
    ASSERT_LT(compare_fn(shuffle_node, shuffle_next_node), 0);
    forward_node = forward_next_node;
    reverse_node = reverse_next_node;
    shuffle_node = shuffle_next_node;
  }
  ASSERT_EQ(count, num_entries);
}

TEST_F(ZTreeTest, test_replace) {
  Arena arena(MemTag::mtTest);
  constexpr size_t num_entries = 1024;
  ZTestEntry::ZTree tree;
  auto cursor = tree.root_cursor();
  for (size_t i = 0; i < num_entries; i++) {
    ASSERT_TRUE(cursor.is_valid());
    ASSERT_FALSE(cursor.found());
    const int id = static_cast<int>(i) * 2 + 1;
    ZIntrusiveRBTreeNode* const new_node = ZTestEntry::cast_to_inner(new (&arena) ZTestEntry(id));
    tree.insert(new_node, cursor);
    cursor = tree.next_cursor(new_node);
  }
  tree.verify_tree();

  size_t i = 0;
  for (auto it = tree.begin(), end = tree.end(); it != end; ++it) {
    auto& node = *it;
    if (i % (num_entries / 4)) {
      tree.verify_tree();
    }
    switch (i++ % 4) {
    case 0: {
      // Decrement
      ZTestEntry* new_entry = new (&arena) ZTestEntry(ZTestEntry::cast_to_outer(&node)->id() - 1);
      it.replace(ZTestEntry::cast_to_inner(new_entry));
    } break;
    case 1: break;
    case 2: {
      // Increment
      ZTestEntry* new_entry = new (&arena) ZTestEntry(ZTestEntry::cast_to_outer(&node)->id() + 1);
      it.replace(ZTestEntry::cast_to_inner(new_entry));
    } break;
    case 3: break;
    default:
      ShouldNotReachHere();
    }
  }
  tree.verify_tree();

  int last_id = std::numeric_limits<int>::min();
  for (auto& node : tree) {
    int id = ZTestEntry::cast_to_outer(&node)->id();
    ASSERT_LT(last_id, id);
    last_id = id;
  }
  tree.verify_tree();

  last_id = std::numeric_limits<int>::min();
  for (auto it = tree.begin(), end = tree.end(); it != end; ++it) {
    int id = ZTestEntry::cast_to_outer(&*it)->id();
    ASSERT_LT(last_id, id);
    last_id = id;
  }
  tree.verify_tree();

  last_id = std::numeric_limits<int>::min();
  for (auto it = tree.cbegin(), end = tree.cend(); it != end; ++it) {
    int id = ZTestEntry::cast_to_outer(&*it)->id();
    ASSERT_LT(last_id, id);
    last_id = id;
  }
  tree.verify_tree();

  last_id = std::numeric_limits<int>::max();
  for (auto it = tree.rbegin(), end = tree.rend(); it != end; ++it) {
    int id = ZTestEntry::cast_to_outer(&*it)->id();
    ASSERT_GT(last_id, id);
    last_id = id;
  }
  tree.verify_tree();

  last_id = std::numeric_limits<int>::max();
  for (auto it = tree.crbegin(), end = tree.crend(); it != end; ++it) {
    int id = ZTestEntry::cast_to_outer(&*it)->id();
    ASSERT_GT(last_id, id);
    last_id = id;
  }
  tree.verify_tree();
}

TEST_F(ZTreeTest, test_remove) {
  Arena arena(MemTag::mtTest);
  constexpr int num_entries = 1024;
  ZTestEntry::ZTree tree;
  int id = 0;
  tree.insert(ZTestEntry::cast_to_inner(new (&arena) ZTestEntry(++id)), tree.root_cursor());
  for (auto& node : tree) {
    if (ZTestEntry::cast_to_outer(&node)->id() == num_entries) {
      break;
    }
    auto cursor = tree.next_cursor(&node);
    ZIntrusiveRBTreeNode* const new_node = ZTestEntry::cast_to_inner(new (&arena) ZTestEntry(++id));
    tree.insert(new_node, cursor);
  }
  tree.verify_tree();
  ASSERT_EQ(ZTestEntry::cast_to_outer(tree.last())->id(), num_entries);

  int i = 0;
  int removed = 0;
  for (auto it = tree.begin(), end = tree.end(); it != end; ++it) {
    if (i++ % 2 == 0) {
      it.remove();
      ++removed;
    }
  }
  tree.verify_tree();

  int count = 0;
  for (auto it = tree.cbegin(), end = tree.cend(); it != end; ++it) {
    ++count;
  }
  ASSERT_EQ(count, num_entries - removed);
  tree.verify_tree();

  for (auto it = tree.rbegin(), end = tree.rend(); it != end; ++it) {
    if (i++ % 2 == 0) {
      it.remove();
      ++removed;
    }
  }
  tree.verify_tree();

  count = 0;
  for (auto it = tree.cbegin(), end = tree.cend(); it != end; ++it) {
    ++count;
  }
  ASSERT_EQ(count, num_entries - removed);
  tree.verify_tree();

  for (auto it = tree.begin(), end = tree.end(); it != end; ++it) {
    it.remove();
    removed++;
  }
  tree.verify_tree();

  ASSERT_EQ(removed, num_entries);
  ASSERT_EQ(tree.last(), nullptr);
  ASSERT_EQ(tree.first(), nullptr);
}
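ZTestEntry above embeds the tree node inside the entry and recovers the enclosing object with offset arithmetic (cast_to_outer()). Below is a minimal standalone sketch of that intrusive container-of pattern, using the standard offsetof in place of HotSpot's offset_of(); the names are illustrative only, not the ZGC API.

// Standalone sketch (not HotSpot code) of the intrusive-node pattern used by
// ZTestEntry: the tree links only the embedded node objects, and the enclosing
// entry is recovered by subtracting the node member's offset.
#include <cassert>
#include <cstddef>
#include <cstdint>

struct Node {
  Node* left = nullptr;
  Node* right = nullptr;
};

struct Entry {
  int id;
  Node node;   // embedded link node, no separate allocation per tree element

  static Node* to_inner(Entry* e) {
    return &e->node;
  }
  static Entry* to_outer(Node* n) {
    // container-of: step back from the member to the enclosing object
    return reinterpret_cast<Entry*>(
        reinterpret_cast<uintptr_t>(n) - offsetof(Entry, node));
  }
};

int main() {
  Entry e{42, {}};
  Node* inner = Entry::to_inner(&e);
  assert(Entry::to_outer(inner) == &e);
  assert(Entry::to_outer(inner)->id == 42);
  return 0;
}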
@ -23,13 +23,10 @@
#ifdef _WINDOWS

#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zList.inline.hpp"
#include "gc/z/zMapper_windows.hpp"
#include "gc/z/zMemory.inline.hpp"
#include "gc/z/zSyscall_windows.hpp"
#include "gc/z/zVirtualMemory.inline.hpp"
#include "gc/z/zVirtualMemoryManager.inline.hpp"
#include "runtime/os.hpp"
#include "zunittest.hpp"

@ -39,27 +36,19 @@ class ZMapperTest : public ZTest {
private:
  static constexpr size_t ReservationSize = 32 * M;

  ZVirtualMemoryManager* _vmm;
  ZMemoryManager* _va;
  ZVirtualMemoryReserver* _reserver;
  ZVirtualMemoryRegistry* _registry;

public:
  virtual void SetUp() {
    // Only run test on supported Windows versions
    if (!is_os_supported()) {
      GTEST_SKIP() << "Requires Windows version 1803 or later";
      GTEST_SKIP() << "OS not supported";
    }

    // Fake a ZVirtualMemoryManager
    _vmm = (ZVirtualMemoryManager*)os::malloc(sizeof(ZVirtualMemoryManager), mtTest);
    _vmm = ::new (_vmm) ZVirtualMemoryManager(ReservationSize);

    // Construct its internal ZMemoryManager
    _va = new (&_vmm->_manager) ZMemoryManager();

    // Reserve address space for the test
    if (_vmm->reserved() != ReservationSize) {
      GTEST_SKIP() << "Failed to reserve address space";
    }
    _reserver = (ZVirtualMemoryReserver*)os::malloc(sizeof(ZVirtualMemoryManager), mtTest);
    _reserver = ::new (_reserver) ZVirtualMemoryReserver(ReservationSize);
    _registry = &_reserver->_registry;
  }

  virtual void TearDown() {
@ -69,26 +58,26 @@ public:
  }

    // Best-effort cleanup
    _vmm->unreserve_all();
    _vmm->~ZVirtualMemoryManager();
    os::free(_vmm);
    _reserver->unreserve_all();
    _reserver->~ZVirtualMemoryReserver();
    os::free(_reserver);
  }

  void test_unreserve() {
    zoffset bottom = _va->alloc_low_address(ZGranuleSize);
    zoffset middle = _va->alloc_low_address(ZGranuleSize);
    zoffset top = _va->alloc_low_address(ZGranuleSize);
    ZVirtualMemory bottom = _registry->remove_from_low(ZGranuleSize);
    ZVirtualMemory middle = _registry->remove_from_low(ZGranuleSize);
    ZVirtualMemory top = _registry->remove_from_low(ZGranuleSize);

    ASSERT_EQ(bottom, zoffset(0));
    ASSERT_EQ(middle, bottom + 1 * ZGranuleSize);
    ASSERT_EQ(top, bottom + 2 * ZGranuleSize);
    ASSERT_EQ(bottom, ZVirtualMemory(bottom.start(), ZGranuleSize));
    ASSERT_EQ(middle, ZVirtualMemory(bottom.start() + 1 * ZGranuleSize, ZGranuleSize));
    ASSERT_EQ(top, ZVirtualMemory(bottom.start() + 2 * ZGranuleSize, ZGranuleSize));

    // Unreserve the middle part
    ZMapper::unreserve(ZOffset::address_unsafe(middle), ZGranuleSize);
    _reserver->unreserve(middle);

    // Make sure that we still can unreserve the memory before and after
    ZMapper::unreserve(ZOffset::address_unsafe(bottom), ZGranuleSize);
    ZMapper::unreserve(ZOffset::address_unsafe(top), ZGranuleSize);
    _reserver->unreserve(bottom);
    _reserver->unreserve(top);
  }
};
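For reference, a minimal standalone stand-in for the remove_from_low() behavior the rewritten test relies on: granule-sized ranges are carved from the low end of the reserved region, so three successive calls return offsets 0, 1 and 2 granules from the bottom. This is not the ZGC implementation; all names below are illustrative assumptions.

// Minimal stand-in (not the ZVirtualMemoryRegistry implementation) modeling
// only the allocate-from-the-low-end behavior exercised by test_unreserve.
#include <cassert>
#include <cstddef>

constexpr size_t GranuleSize = 2 * 1024 * 1024;  // stand-in for ZGranuleSize

struct Range {
  size_t start;
  size_t size;
  bool operator==(const Range& other) const {
    return start == other.start && size == other.size;
  }
};

class LowRegistry {
  size_t _next = 0;    // lowest unused offset
  size_t _limit;       // end of the reserved region

public:
  explicit LowRegistry(size_t reserved) : _limit(reserved) {}

  Range remove_from_low(size_t size) {
    assert(_next + size <= _limit);
    const Range r{_next, size};
    _next += size;
    return r;
  }
};

int main() {
  LowRegistry registry(32 * GranuleSize);
  const Range bottom = registry.remove_from_low(GranuleSize);
  const Range middle = registry.remove_from_low(GranuleSize);
  const Range top = registry.remove_from_low(GranuleSize);
  assert((bottom == Range{0 * GranuleSize, GranuleSize}));
  assert((middle == Range{1 * GranuleSize, GranuleSize}));
  assert((top == Range{2 * GranuleSize, GranuleSize}));
  return 0;
}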