8337217: Port VirtualMemoryTracker to use VMATree

Reviewed-by: jsjolen, gziemski
This commit is contained in:
Afshin Zafari 2025-06-18 11:37:48 +00:00
parent f07f5ce984
commit 547ce03016
27 changed files with 915 additions and 911 deletions

View File

@ -160,7 +160,7 @@ bool MemBaseline::baseline_allocation_sites() {
// Virtual memory allocation sites
VirtualMemoryAllocationWalker virtual_memory_walker;
if (!VirtualMemoryTracker::walk_virtual_memory(&virtual_memory_walker)) {
if (!VirtualMemoryTracker::Instance::walk_virtual_memory(&virtual_memory_walker)) {
return false;
}

View File

@ -157,7 +157,7 @@ public:
// Iterate all NMT virtual memory regions and fill this cache.
bool fill_from_nmt() {
return VirtualMemoryTracker::walk_virtual_memory(this);
return VirtualMemoryTracker::Instance::walk_virtual_memory(this);
}
};

View File

@ -22,6 +22,7 @@
*
*/
#include "cds/filemap.hpp"
#include "logging/log.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/mallocTracker.hpp"
@ -29,6 +30,8 @@
#include "nmt/memReporter.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/memoryFileTracker.hpp"
#include "nmt/regionsTree.hpp"
#include "nmt/regionsTree.inline.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "utilities/debug.hpp"
@ -432,34 +435,45 @@ void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion*
}
if (all_committed) {
CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
const CommittedMemoryRegion* committed_rgn = itr.next();
if (committed_rgn->size() == reserved_rgn->size() && committed_rgn->call_stack()->equals(*stack)) {
// One region spanning the entire reserved region, with the same stack trace.
// Don't print this regions because the "reserved and committed" line above
// already indicates that the region is committed.
assert(itr.next() == nullptr, "Unexpectedly more than one regions");
bool reserved_and_committed = false;
VirtualMemoryTracker::Instance::tree()->visit_committed_regions(*reserved_rgn,
[&](CommittedMemoryRegion& committed_rgn) {
if (committed_rgn.equals(*reserved_rgn)) {
// One region spanning the entire reserved region, with the same stack trace.
// Don't print this regions because the "reserved and committed" line above
// already indicates that the region is committed.
reserved_and_committed = true;
return false;
}
return true;
});
if (reserved_and_committed) {
return;
}
}
CommittedRegionIterator itr = reserved_rgn->iterate_committed_regions();
const CommittedMemoryRegion* committed_rgn;
while ((committed_rgn = itr.next()) != nullptr) {
auto print_committed_rgn = [&](const CommittedMemoryRegion& crgn) {
// Don't report if size is too small
if (amount_in_current_scale(committed_rgn->size()) == 0) continue;
stack = committed_rgn->call_stack();
if (amount_in_current_scale(crgn.size()) == 0) return;
stack = crgn.call_stack();
out->cr();
INDENT_BY(8,
print_virtual_memory_region("committed", committed_rgn->base(), committed_rgn->size());
print_virtual_memory_region("committed", crgn.base(), crgn.size());
if (stack->is_empty()) {
out->cr();
} else {
out->print_cr(" from");
INDENT_BY(4, stack->print_on(out);)
INDENT_BY(4, _stackprinter.print_stack(stack);)
}
)
}
};
VirtualMemoryTracker::Instance::tree()->visit_committed_regions(*reserved_rgn,
[&](CommittedMemoryRegion& crgn) {
print_committed_rgn(crgn);
return true;
});
}
void MemDetailReporter::report_memory_file_allocations() {

View File

@ -71,7 +71,7 @@ void MemTracker::initialize() {
_baseline.initialize();
if (!MallocTracker::initialize(level) ||
!MemoryFileTracker::Instance::initialize(level) ||
!VirtualMemoryTracker::initialize(level)) {
!VirtualMemoryTracker::Instance::initialize(level)) {
assert(false, "NMT initialization failed");
level = NMT_off;
log_warning(nmt)("NMT initialization failed. NMT disabled.");
@ -126,7 +126,7 @@ void MemTracker::final_report(outputStream* output) {
bool MemTracker::print_containing_region(const void* p, outputStream* out) {
return enabled() &&
(MallocTracker::print_pointer_information(p, out) ||
VirtualMemoryTracker::print_containing_region(p, out));
VirtualMemoryTracker::Instance::print_containing_region(p, out));
}
void MemTracker::report(bool summary_only, outputStream* output, size_t scale) {

View File

@ -132,7 +132,7 @@ class MemTracker : AllStatic {
if (!enabled()) return;
if (addr != nullptr) {
NmtVirtualMemoryLocker nvml;
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, mem_tag);
VirtualMemoryTracker::Instance::add_reserved_region((address)addr, size, stack, mem_tag);
}
}
@ -140,7 +140,7 @@ class MemTracker : AllStatic {
assert_post_init();
if (!enabled()) return;
if (addr != nullptr) {
VirtualMemoryTracker::remove_released_region((address)addr, size);
VirtualMemoryTracker::Instance::remove_released_region((address)addr, size);
}
}
@ -148,7 +148,7 @@ class MemTracker : AllStatic {
assert_post_init();
if (!enabled()) return;
if (addr != nullptr) {
VirtualMemoryTracker::remove_uncommitted_region((address)addr, size);
VirtualMemoryTracker::Instance::remove_uncommitted_region((address)addr, size);
}
}
@ -158,8 +158,8 @@ class MemTracker : AllStatic {
if (!enabled()) return;
if (addr != nullptr) {
NmtVirtualMemoryLocker nvml;
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, mem_tag);
VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
VirtualMemoryTracker::Instance::add_reserved_region((address)addr, size, stack, mem_tag);
VirtualMemoryTracker::Instance::add_committed_region((address)addr, size, stack);
}
}
@ -169,7 +169,7 @@ class MemTracker : AllStatic {
if (!enabled()) return;
if (addr != nullptr) {
NmtVirtualMemoryLocker nvml;
VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
VirtualMemoryTracker::Instance::add_committed_region((address)addr, size, stack);
}
}
@ -217,7 +217,7 @@ class MemTracker : AllStatic {
if (!enabled()) return;
if (addr != nullptr) {
NmtVirtualMemoryLocker nvml;
VirtualMemoryTracker::split_reserved_region((address)addr, size, split, mem_tag, split_tag);
VirtualMemoryTracker::Instance::split_reserved_region((address)addr, size, split, mem_tag, split_tag);
}
}
@ -230,7 +230,7 @@ class MemTracker : AllStatic {
if (!enabled()) return;
if (addr != nullptr) {
NmtVirtualMemoryLocker nvml;
VirtualMemoryTracker::set_reserved_region_type((address)addr, size, mem_tag);
VirtualMemoryTracker::Instance::set_reserved_region_tag((address)addr, size, mem_tag);
}
}

View File

@ -73,7 +73,7 @@ void MemoryFileTracker::print_report_on(const MemoryFile* file, outputStream* st
if (prev == nullptr) {
// Must be first node.
prev = current;
return;
return true;
}
#ifdef ASSERT
if (broken_start != nullptr && prev->val().out.mem_tag() != current->val().in.mem_tag()) {
@ -96,6 +96,7 @@ void MemoryFileTracker::print_report_on(const MemoryFile* file, outputStream* st
stream->cr();
}
prev = current;
return true;
});
#ifdef ASSERT
if (broken_start != nullptr) {

View File

@ -28,8 +28,8 @@
#include "memory/allocation.hpp"
#include "nmt/nmtCommon.hpp"
#include "nmt/nmtNativeCallStackStorage.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "nmt/vmatree.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/os.inline.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/nativeCallStack.hpp"

View File

@ -55,6 +55,7 @@ template<typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
class Treap {
friend class NMTVMATreeTest;
friend class NMTTreapTest;
friend class VMTWithVMATreeTest;
public:
class TreapNode {
friend Treap;
@ -212,12 +213,13 @@ private:
seen_count++;
if (last_seen == nullptr) {
last_seen = node;
return;
return true;
}
if (COMPARATOR::cmp(last_seen->key(), node->key()) > 0) {
failed = false;
}
last_seen = node;
return true;
});
assert(seen_count == _node_count, "the number of visited nodes do not match with the number of stored nodes");
assert(!failed, "keys was not monotonically strongly increasing when visiting in order");
@ -382,7 +384,9 @@ public:
head = head->left();
}
head = to_visit.pop();
f(head);
if (!f(head)) {
return;
}
head = head->right();
}
}
@ -409,7 +413,9 @@ public:
const int cmp_from = COMPARATOR::cmp(head->key(), from);
const int cmp_to = COMPARATOR::cmp(head->key(), to);
if (cmp_from >= 0 && cmp_to < 0) {
f(head);
if (!f(head)) {
return;
}
}
if (cmp_to < 0) {
head = head->right();

View File

@ -29,7 +29,6 @@
#include "nmt/nmtCommon.hpp"
#include "nmt/nmtUsage.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
// Enabled all options for snapshot.
const NMTUsageOptions NMTUsage::OptionsAll = { true, true, true };
@ -48,7 +47,7 @@ void NMTUsage::walk_thread_stacks() {
// much memory had been committed if they are backed by virtual memory. This
// needs to happen before we take the snapshot of the virtual memory since it
// will update this information.
VirtualMemoryTracker::snapshot_thread_stacks();
VirtualMemoryTracker::Instance::snapshot_thread_stacks();
}
void NMTUsage::update_malloc_usage() {

View File

@ -0,0 +1,57 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "nmt/regionsTree.hpp"
// Record a commit of [addr, addr + size) attributed to 'stack'. The tag
// argument is mtNone with "use tag inplace" set, so the range keeps whatever
// tag it already has in the tree. Returns the per-tag summary delta.
VMATree::SummaryDiff RegionsTree::commit_region(address addr, size_t size, const NativeCallStack& stack) {
  const VMATree::position pos = (VMATree::position)addr;
  const auto data = make_region_data(stack, mtNone);
  return commit_mapping(pos, size, data, /*use tag inplace*/ true);
}
// Record an uncommit of [addr, addr + size). An empty call stack and mtNone
// are used for the region data. Returns the per-tag summary delta.
VMATree::SummaryDiff RegionsTree::uncommit_region(address addr, size_t size) {
  const auto data = make_region_data(NativeCallStack::empty_stack(), mtNone);
  return uncommit_mapping((VMATree::position)addr, size, data);
}
#ifdef ASSERT
// Debug-only dump of a single tree node: its position followed by the
// in-state/in-tag and out-state/out-tag pair, separated by "<|>".
void RegionsTree::NodeHelper::print_on(outputStream* st) {
  // Two-letter abbreviation for a VMA state.
  auto st_str = [&](VMATree::StateType s) {
    if (s == VMATree::StateType::Released) {
      return "Rl";
    }
    if (s == VMATree::StateType::Reserved) {
      return "Rv";
    }
    return "Cm";
  };
  st->print_cr("pos: " INTPTR_FORMAT " "
               "%s, %s <|> %s, %s",
               p2i((address)position()),
               st_str(in_state()),
               NMTUtil::tag_to_name(in_tag()),
               st_str(out_state()),
               NMTUtil::tag_to_name(out_tag())
               );
}
// Debug-only dump of the whole tree: prints every node in address order.
void RegionsTree::print_on(outputStream* st) {
  visit_in_order([&](Node* node) {
    NodeHelper curr(node);
    curr.print_on(st);
    // Always continue: every node is printed.
    return true;
  });
}
#endif

View File

@ -0,0 +1,96 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef NMT_REGIONSTREE_HPP
#define NMT_REGIONSTREE_HPP
#include "logging/log.hpp"
#include "nmt/nmtCommon.hpp"
#include "nmt/vmatree.hpp"
class ReservedMemoryRegion;
class CommittedMemoryRegion;
// RegionsTree extends VMATree to add some more specific API and also defines a helper
// for processing the tree nodes in a shorter and more meaningful way.
// RegionsTree extends VMATree with NMT-specific operations (commit/uncommit
// bookkeeping, call-stack storage) and a NodeHelper wrapper that gives tree
// nodes a shorter, more meaningful accessor API.
class RegionsTree : public VMATree {
  // Backing storage for the native call stacks referenced by tree nodes.
  NativeCallStackStorage _ncs_storage;
  // When false, stacks are not kept and stack() hands out the empty stack.
  bool _with_storage;
public:
  RegionsTree(bool with_storage) : VMATree() , _ncs_storage(with_storage), _with_storage(with_storage) { }

  // Locate the reserved region containing 'addr'.
  ReservedMemoryRegion find_reserved_region(address addr);

  // Commit/uncommit [addr, addr + size); both return the per-tag summary delta.
  SummaryDiff commit_region(address addr, size_t size, const NativeCallStack& stack);
  SummaryDiff uncommit_region(address addr, size_t size);

  using Node = VMATree::TreapNode;

  // Thin, nullable wrapper around a tree node with named accessors for the
  // node's position, in/out states, tags and stack index.
  class NodeHelper {
    Node* _node;
  public:
    NodeHelper() : _node(nullptr) { }
    NodeHelper(Node* node) : _node(node) { }
    inline bool is_valid() const { return _node != nullptr; }
    inline void clear_node() { _node = nullptr; }
    inline VMATree::position position() const { return _node->key(); }
    // NOTE(review): these predicates rely on the numeric encoding of
    // VMATree::StateType (Committed carries bit 1, Reserved == 1,
    // Released == 0) — confirm against vmatree.hpp if the enum changes.
    inline bool is_committed_begin() const { return ((uint8_t)out_state() & (uint8_t)VMATree::StateType::Committed) >= 2; }
    inline bool is_released_begin() const { return out_state() == VMATree::StateType::Released; }
    inline bool is_reserved_begin() const { return ((uint8_t)out_state() & (uint8_t)VMATree::StateType::Reserved) == 1; }
    inline VMATree::StateType in_state() const { return _node->val().in.type(); }
    inline VMATree::StateType out_state() const { return _node->val().out.type(); }
    // Byte distance from 'other', which must lie strictly below this node.
    inline size_t distance_from(const NodeHelper& other) const {
      assert (position() > other.position(), "negative distance");
      return position() - other.position();
    }
    inline NativeCallStackStorage::StackIndex out_stack_index() const { return _node->val().out.reserved_stack(); }
    inline MemTag in_tag() const { return _node->val().in.mem_tag(); }
    inline MemTag out_tag() const { return _node->val().out.mem_tag(); }
    inline void set_in_tag(MemTag tag) { _node->val().in.set_tag(tag); }
    inline void set_out_tag(MemTag tag) { _node->val().out.set_tag(tag); }
    DEBUG_ONLY(void print_on(outputStream* st);)
  };
  DEBUG_ONLY(void print_on(outputStream* st);)

  // Invoke 'func' for each committed region inside 'rgn' / for each reserved
  // region in the tree. 'func' returns false to stop the visit early.
  // Definitions are in regionsTree.inline.hpp.
  template<typename F>
  void visit_committed_regions(const ReservedMemoryRegion& rgn, F func);
  template<typename F>
  void visit_reserved_regions(F func);

  // Intern 'ncs' in the stack storage and pair the resulting index with 'tag'.
  inline RegionData make_region_data(const NativeCallStack& ncs, MemTag tag) {
    return RegionData(_ncs_storage.push(ncs), tag);
  }

  // Resolve the call stack recorded at 'node'; empty when storage is disabled.
  inline const NativeCallStack stack(NodeHelper& node) {
    if (!_with_storage) {
      return NativeCallStack::empty_stack();
    }
    NativeCallStackStorage::StackIndex si = node.out_stack_index();
    return _ncs_storage.get(si);
  }
};
#endif // NMT_REGIONSTREE_HPP

View File

@ -0,0 +1,87 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_NMT_REGIONSTREE_INLINE_HPP
#define SHARE_NMT_REGIONSTREE_INLINE_HPP
#include "nmt/regionsTree.hpp"
#include "nmt/virtualMemoryTracker.hpp"
// Visit every committed sub-region of the reserved region 'rgn', invoking
// 'func' with a CommittedMemoryRegion for each. 'func' returns false to stop
// the visitation early.
// Fix: removed the local 'comm_size', which was declared but never used.
template<typename F>
void RegionsTree::visit_committed_regions(const ReservedMemoryRegion& rgn, F func) {
  position start = (position)rgn.base();
  // Upper bound for the range walk; the +1 presumably makes a node located
  // exactly at rgn.end() visible to the walk — TODO confirm against
  // visit_range_in_order's bound semantics.
  size_t end = reinterpret_cast<size_t>(rgn.end()) + 1;
  NodeHelper prev;
  visit_range_in_order(start, end, [&](Node* node) {
    NodeHelper curr(node);
    // A committed region spans from a node whose out-state is committed up to
    // the next node in the tree.
    if (prev.is_valid() && prev.is_committed_begin()) {
      CommittedMemoryRegion cmr((address)prev.position(), curr.distance_from(prev), stack(prev));
      if (!func(cmr)) {
        return false;
      }
    }
    prev = curr;
    return true;
  });
}
// Walk all nodes in address order and call 'func' with a ReservedMemoryRegion
// for each reserved region. A region ends at a node that either begins a
// Released range or carries a different out-tag than the region's first node;
// 'func' returns false to stop the walk early.
template<typename F>
void RegionsTree::visit_reserved_regions(F func) {
  NodeHelper begin_node, prev;  // begin_node: region start; prev: last node seen
  size_t rgn_size = 0;
  visit_in_order([&](Node* node) {
    NodeHelper curr(node);
    if (prev.is_valid()) {
      // Extend the current region up to this node.
      rgn_size += curr.distance_from(prev);
    } else {
      // First node after a released gap: start a new candidate region here.
      begin_node = curr;
      rgn_size = 0;
    }
    prev = curr;
    // Region boundary: the mapping is released from here on, or the tag
    // changes (adjacent ranges with different tags are reported separately).
    if (curr.is_released_begin() || begin_node.out_tag() != curr.out_tag()) {
      auto st = stack(begin_node);
      if (rgn_size == 0) {
        // Empty candidate region; restart accumulation at the next node.
        prev.clear_node();
        return true;
      }
      ReservedMemoryRegion rmr((address)begin_node.position(), rgn_size, st, begin_node.out_tag());
      if (!func(rmr)) {
        return false;
      }
      rgn_size = 0;
      if (!curr.is_released_begin()) {
        // Tag change only: this node begins the next region.
        begin_node = curr;
      } else {
        // Released: no active region until another node is seen.
        begin_node.clear_node();
        prev.clear_node();
      }
    }
    return true;
  });
}
#endif //SHARE_NMT_REGIONSTREE_INLINE_HPP

View File

@ -26,7 +26,6 @@
#include "nmt/memTracker.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/os.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
@ -52,7 +51,7 @@ void ThreadStackTracker::new_thread_stack(void* base, size_t size, const NativeC
align_thread_stack_boundaries_inward(base, size);
MemTracker::NmtVirtualMemoryLocker nvml;
VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack);
VirtualMemoryTracker::Instance::add_reserved_region((address)base, size, stack, mtThreadStack);
_thread_count++;
}
@ -62,7 +61,7 @@ void ThreadStackTracker::delete_thread_stack(void* base, size_t size) {
align_thread_stack_boundaries_inward(base, size);
MemTracker::NmtVirtualMemoryLocker nvml;
VirtualMemoryTracker::remove_released_region((address)base, size);
MemTracker::record_virtual_memory_release((address)base, size);
_thread_count--;
}

View File

@ -21,16 +21,16 @@
* questions.
*
*/
#include "logging/log.hpp"
#include "memory/metaspaceStats.hpp"
#include "memory/metaspaceUtils.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/nativeCallStackPrinter.hpp"
#include "nmt/threadStackTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "nmt/regionsTree.hpp"
#include "nmt/regionsTree.inline.hpp"
#include "runtime/os.hpp"
#include "utilities/ostream.hpp"
VirtualMemoryTracker* VirtualMemoryTracker::Instance::_tracker = nullptr;
VirtualMemorySnapshot VirtualMemorySummary::_snapshot;
void VirtualMemory::update_peak(size_t size) {
@ -47,553 +47,206 @@ void VirtualMemory::update_peak(size_t size) {
void VirtualMemorySummary::snapshot(VirtualMemorySnapshot* s) {
// Snapshot current thread stacks
VirtualMemoryTracker::snapshot_thread_stacks();
VirtualMemoryTracker::Instance::snapshot_thread_stacks();
as_snapshot()->copy_to(s);
}
SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* VirtualMemoryTracker::_reserved_regions;
int compare_committed_region(const CommittedMemoryRegion& r1, const CommittedMemoryRegion& r2) {
return r1.compare(r2);
}
int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2) {
return r1.compare(r2);
}
static bool is_mergeable_with(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
return rgn->adjacent_to(addr, size) && rgn->call_stack()->equals(stack);
}
static bool is_same_as(CommittedMemoryRegion* rgn, address addr, size_t size, const NativeCallStack& stack) {
// It would have made sense to use rgn->equals(...), but equals returns true for overlapping regions.
return rgn->same_region(addr, size) && rgn->call_stack()->equals(stack);
}
static LinkedListNode<CommittedMemoryRegion>* find_preceding_node_from(LinkedListNode<CommittedMemoryRegion>* from, address addr) {
LinkedListNode<CommittedMemoryRegion>* preceding = nullptr;
for (LinkedListNode<CommittedMemoryRegion>* node = from; node != nullptr; node = node->next()) {
CommittedMemoryRegion* rgn = node->data();
// We searched past the region start.
if (rgn->end() > addr) {
break;
}
preceding = node;
bool VirtualMemoryTracker::Instance::initialize(NMT_TrackingLevel level) {
assert(_tracker == nullptr, "only call once");
if (level >= NMT_summary) {
void* tracker = os::malloc(sizeof(VirtualMemoryTracker), mtNMT);
if (tracker == nullptr) return false;
_tracker = new (tracker) VirtualMemoryTracker(level == NMT_detail);
}
return preceding;
}
static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, address addr, size_t size, const NativeCallStack& stack) {
if (node != nullptr) {
CommittedMemoryRegion* rgn = node->data();
if (is_mergeable_with(rgn, addr, size, stack)) {
rgn->expand_region(addr, size);
return true;
}
}
return false;
}
static bool try_merge_with(LinkedListNode<CommittedMemoryRegion>* node, LinkedListNode<CommittedMemoryRegion>* other) {
if (other == nullptr) {
return false;
}
CommittedMemoryRegion* rgn = other->data();
return try_merge_with(node, rgn->base(), rgn->size(), *rgn->call_stack());
}
bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const NativeCallStack& stack) {
assert(addr != nullptr, "Invalid address");
assert(size > 0, "Invalid size");
assert(contain_region(addr, size), "Not contain this region");
// Find the region that fully precedes the [addr, addr + size) region.
LinkedListNode<CommittedMemoryRegion>* prev = find_preceding_node_from(_committed_regions.head(), addr);
LinkedListNode<CommittedMemoryRegion>* next = (prev != nullptr ? prev->next() : _committed_regions.head());
if (next != nullptr) {
// Ignore request if region already exists.
if (is_same_as(next->data(), addr, size, stack)) {
return true;
}
// The new region is after prev, and either overlaps with the
// next region (and maybe more regions), or overlaps with no region.
if (next->data()->overlap_region(addr, size)) {
// Remove _all_ overlapping regions, and parts of regions,
// in preparation for the addition of this new region.
remove_uncommitted_region(addr, size);
// The remove could have split a region into two and created a
// new prev region. Need to reset the prev and next pointers.
prev = find_preceding_node_from((prev != nullptr ? prev : _committed_regions.head()), addr);
next = (prev != nullptr ? prev->next() : _committed_regions.head());
}
}
// At this point the previous overlapping regions have been
// cleared, and the full region is guaranteed to be inserted.
VirtualMemorySummary::record_committed_memory(size, mem_tag());
// Try to merge with prev and possibly next.
if (try_merge_with(prev, addr, size, stack)) {
if (try_merge_with(prev, next)) {
// prev was expanded to contain the new region
// and next, need to remove next from the list
_committed_regions.remove_after(prev);
}
return true;
}
// Didn't merge with prev, try with next.
if (try_merge_with(next, addr, size, stack)) {
return true;
}
// Couldn't merge with any regions - create a new region.
return add_committed_region(CommittedMemoryRegion(addr, size, stack));
}
bool ReservedMemoryRegion::remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
address addr, size_t size) {
assert(addr != nullptr, "Invalid address");
assert(size > 0, "Invalid size");
CommittedMemoryRegion* rgn = node->data();
assert(rgn->contain_region(addr, size), "Has to be contained");
assert(!rgn->same_region(addr, size), "Can not be the same region");
if (rgn->base() == addr ||
rgn->end() == addr + size) {
rgn->exclude_region(addr, size);
return true;
} else {
// split this region
address top =rgn->end();
// use this region for lower part
size_t exclude_size = rgn->end() - addr;
rgn->exclude_region(addr, exclude_size);
// higher part
address high_base = addr + size;
size_t high_size = top - high_base;
CommittedMemoryRegion high_rgn(high_base, high_size, *rgn->call_stack());
LinkedListNode<CommittedMemoryRegion>* high_node = _committed_regions.add(high_rgn);
assert(high_node == nullptr || node->next() == high_node, "Should be right after");
return (high_node != nullptr);
}
return false;
}
bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
assert(addr != nullptr, "Invalid address");
assert(sz > 0, "Invalid size");
CommittedMemoryRegion del_rgn(addr, sz, *call_stack());
address end = addr + sz;
LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
LinkedListNode<CommittedMemoryRegion>* prev = nullptr;
CommittedMemoryRegion* crgn;
while (head != nullptr) {
crgn = head->data();
if (crgn->same_region(addr, sz)) {
VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag());
_committed_regions.remove_after(prev);
return true;
}
// del_rgn contains crgn
if (del_rgn.contain_region(crgn->base(), crgn->size())) {
VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag());
head = head->next();
_committed_regions.remove_after(prev);
continue; // don't update head or prev
}
// Found addr in the current crgn. There are 2 subcases:
if (crgn->contain_address(addr)) {
// (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
if (crgn->contain_address(end - 1)) {
VirtualMemorySummary::record_uncommitted_memory(sz, mem_tag());
return remove_uncommitted_region(head, addr, sz); // done!
} else {
// (2) Did not find del_rgn's end in crgn.
size_t size = crgn->end() - del_rgn.base();
crgn->exclude_region(addr, size);
VirtualMemorySummary::record_uncommitted_memory(size, mem_tag());
}
} else if (crgn->contain_address(end - 1)) {
// Found del_rgn's end, but not its base addr.
size_t size = del_rgn.end() - crgn->base();
crgn->exclude_region(crgn->base(), size);
VirtualMemorySummary::record_uncommitted_memory(size, mem_tag());
return true; // should be done if the list is sorted properly!
}
prev = head;
head = head->next();
}
return true;
}
void ReservedMemoryRegion::move_committed_regions(address addr, ReservedMemoryRegion& rgn) {
assert(addr != nullptr, "Invalid address");
// split committed regions
LinkedListNode<CommittedMemoryRegion>* head =
_committed_regions.head();
LinkedListNode<CommittedMemoryRegion>* prev = nullptr;
// Static facade: asserts the singleton exists and forwards the reservation.
void VirtualMemoryTracker::Instance::add_reserved_region(address base_addr, size_t size,
                                                         const NativeCallStack& stack, MemTag mem_tag) {
  assert(_tracker != nullptr, "Sanity check");
  _tracker->add_reserved_region(base_addr, size, stack, mem_tag);
}
while (head != nullptr) {
if (head->data()->base() >= addr) {
break;
// Record a reservation of [base_addr, base_addr + size) in the VMATree with
// the given call stack and tag, then apply the resulting per-tag deltas to
// the summary counters.
void VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
                                               const NativeCallStack& stack, MemTag mem_tag) {
  VMATree::SummaryDiff diff = tree()->reserve_mapping((size_t)base_addr, size, tree()->make_region_data(stack, mem_tag));
  apply_summary_diff(diff);
}
// Static facade: asserts the singleton exists and forwards the tag change.
void VirtualMemoryTracker::Instance::set_reserved_region_tag(address addr, size_t size, MemTag mem_tag) {
  assert(_tracker != nullptr, "Sanity check");
  _tracker->set_reserved_region_tag(addr, size, mem_tag);
}
// Re-tag the reserved range [addr, addr + size) in the tree and apply the
// resulting summary deltas.
void VirtualMemoryTracker::set_reserved_region_tag(address addr, size_t size, MemTag mem_tag) {
  VMATree::SummaryDiff diff = tree()->set_tag((VMATree::position) addr, size, mem_tag);
  apply_summary_diff(diff);
}
// Static facade: asserts the singleton exists and forwards the summary diff.
void VirtualMemoryTracker::Instance::apply_summary_diff(VMATree::SummaryDiff diff) {
  assert(_tracker != nullptr, "Sanity check");
  _tracker->apply_summary_diff(diff);
}
void VirtualMemoryTracker::apply_summary_diff(VMATree::SummaryDiff diff) {
VMATree::SingleDiff::delta reserve_delta, commit_delta;
size_t reserved, committed;
MemTag tag = mtNone;
auto print_err = [&](const char* str) {
#ifdef ASSERT
log_error(nmt)("summary mismatch, at %s, for %s,"
" diff-reserved: %ld"
" diff-committed: %ld"
" vms-reserved: %zu"
" vms-committed: %zu",
str, NMTUtil::tag_to_name(tag), (long)reserve_delta, (long)commit_delta, reserved, committed);
#endif
};
for (int i = 0; i < mt_number_of_tags; i++) {
reserve_delta = diff.tag[i].reserve;
commit_delta = diff.tag[i].commit;
tag = NMTUtil::index_to_tag(i);
reserved = VirtualMemorySummary::as_snapshot()->by_tag(tag)->reserved();
committed = VirtualMemorySummary::as_snapshot()->by_tag(tag)->committed();
if (reserve_delta != 0) {
if (reserve_delta > 0) {
VirtualMemorySummary::record_reserved_memory(reserve_delta, tag);
} else {
if ((size_t)-reserve_delta <= reserved) {
VirtualMemorySummary::record_released_memory(-reserve_delta, tag);
} else {
print_err("release");
}
}
}
prev = head;
head = head->next();
}
if (head != nullptr) {
if (prev != nullptr) {
prev->set_next(head->next());
} else {
_committed_regions.set_head(nullptr);
if (commit_delta != 0) {
if (commit_delta > 0) {
if ((size_t)commit_delta <= ((size_t)reserve_delta + reserved)) {
VirtualMemorySummary::record_committed_memory(commit_delta, tag);
}
else {
print_err("commit");
}
}
else {
if ((size_t)-commit_delta <= committed) {
VirtualMemorySummary::record_uncommitted_memory(-commit_delta, tag);
} else {
print_err("uncommit");
}
}
}
}
}
rgn._committed_regions.set_head(head);
// Static facade: asserts the singleton exists and forwards the commit.
void VirtualMemoryTracker::Instance::add_committed_region(address addr, size_t size,
                                                          const NativeCallStack& stack) {
  assert(_tracker != nullptr, "Sanity check");
  _tracker->add_committed_region(addr, size, stack);
}
// Record a commit of [addr, addr + size) attributed to 'stack' and apply the
// resulting summary deltas.
void VirtualMemoryTracker::add_committed_region(address addr, size_t size,
                                                const NativeCallStack& stack) {
  VMATree::SummaryDiff diff = tree()->commit_region(addr, size, stack);
  apply_summary_diff(diff);
}
// Static facade: asserts the singleton exists and forwards the uncommit.
void VirtualMemoryTracker::Instance::remove_uncommitted_region(address addr, size_t size) {
  assert(_tracker != nullptr, "Sanity check");
  _tracker->remove_uncommitted_region(addr, size);
}
// Uncommit [addr, addr + size). The caller must hold the NMT virtual-memory
// lock (asserted here); the resulting summary deltas are applied afterwards.
void VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  MemTracker::assert_locked();
  VMATree::SummaryDiff diff = tree()->uncommit_region(addr, size);
  apply_summary_diff(diff);
}
// Static facade: asserts the singleton exists and forwards the release.
void VirtualMemoryTracker::Instance::remove_released_region(address addr, size_t size) {
  assert(_tracker != nullptr, "Sanity check");
  _tracker->remove_released_region(addr, size);
}
// Release the mapping [addr, addr + size) from the tree and apply the
// resulting summary deltas.
void VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  VMATree::SummaryDiff diff = tree()->release_mapping((VMATree::position)addr, size);
  apply_summary_diff(diff);
}
// Static facade: asserts the singleton exists and forwards the split.
void VirtualMemoryTracker::Instance::split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_mem_tag) {
  assert(_tracker != nullptr, "Sanity check");
  _tracker->split_reserved_region(addr, size, split, mem_tag, split_mem_tag);
}
// Split the reserved region at 'addr + split' by re-reserving the two halves
// with their respective tags; both halves get an empty call stack.
// NOTE(review): relies on reserve_mapping overwriting the existing mapping
// for the covered range — confirm against VMATree::reserve_mapping.
void VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_mem_tag) {
  add_reserved_region(addr, split, NativeCallStack::empty_stack(), mem_tag);
  add_reserved_region(addr + split, size - split, NativeCallStack::empty_stack(), split_mem_tag);
}
// Static facade: asserts the singleton exists and forwards the query.
bool VirtualMemoryTracker::Instance::print_containing_region(const void* p, outputStream* st) {
  assert(_tracker != nullptr, "Sanity check");
  return _tracker->print_containing_region(p, st);
}
// If 'p' lies inside a tracked mmap'd region, print that region (and, at
// detail tracking level, its reserving call stack) to 'st'.
// Returns false when no tracked region contains 'p'.
bool VirtualMemoryTracker::print_containing_region(const void* p, outputStream* st) {
  ReservedMemoryRegion rmr = tree()->find_reserved_region((address)p);
  // The returned region may not actually cover p; verify before printing.
  if (!rmr.contain_address((address)p)) {
    return false;
  }
  st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s",
               p2i(p), p2i(rmr.base()), p2i(rmr.end()), NMTUtil::tag_to_enum_name(rmr.mem_tag()));
  if (MemTracker::tracking_level() == NMT_detail) {
    rmr.call_stack()->print_on(st);
  }
  st->cr();
  return true;
}
// Static facade: asserts the singleton exists and forwards the walk.
bool VirtualMemoryTracker::Instance::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_tracker != nullptr, "Sanity check");
  return _tracker->walk_virtual_memory(walker);
}
// Visit every reserved region under the NMT virtual-memory lock, handing
// each one to the walker. The visit stops early when the walker returns
// false; this function itself always reports true.
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  MemTracker::NmtVirtualMemoryLocker nvml;
  tree()->visit_reserved_regions([&](ReservedMemoryRegion& rgn) {
    // The walker's verdict doubles as the "keep visiting" flag.
    return walker->do_allocation_site(&rgn);
  });
  return true;
}
// Sum of the sizes of all committed sub-regions recorded for this
// reserved region.
size_t ReservedMemoryRegion::committed_size() const {
  size_t total = 0;
  for (LinkedListNode<CommittedMemoryRegion>* node = _committed_regions.head();
       node != nullptr;
       node = node->next()) {
    total += node->data()->size();
  }
  return total;
}
// NOTE(review): this span appears to be a diff-rendering artifact that
// interleaves two revisions: the body of a void set_mem_tag() and, from
// 'size_t result = 0;' onward, what looks like the tree-visiting body of a
// newer committed_size(). A 'return result;' inside a void function cannot
// compile — confirm against the original files before relying on this text.
void ReservedMemoryRegion::set_mem_tag(MemTag new_mem_tag) {
  // A tag may only be set once (or re-set to the same value).
  assert((mem_tag() == mtNone || mem_tag() == new_mem_tag),
         "Overwrite memory tag for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
         p2i(base()), p2i(end()), (unsigned)mem_tag(), (unsigned)new_mem_tag);
  if (mem_tag() != new_mem_tag) {
    // Move both reserved and committed accounting from the old tag to the new.
    VirtualMemorySummary::move_reserved_memory(mem_tag(), new_mem_tag, size());
    VirtualMemorySummary::move_committed_memory(mem_tag(), new_mem_tag, committed_size());
    _mem_tag = new_mem_tag;
  }
  // NOTE(review): the remainder looks like committed_size() summing the
  // committed sub-regions via the tree visitor — likely spliced in by the diff.
  size_t result = 0;
  VirtualMemoryTracker::Instance::tree()->visit_committed_regions(*this, [&](CommittedMemoryRegion& crgn) {
    result += crgn.size();
    return true;
  });
  return result;
}
// Returns the uncommitted bottom of a thread-stack region, above any
// committed guard pages.
// NOTE(review): this span interleaves two revisions of the function — an
// old linked-list while-loop and a newer tree-visitor lambda. The doubled
// 'committed_top' declarations, duplicated asserts, the 'return false;;'
// and the unmatched braces are diff-rendering artifacts; confirm against
// the original files before relying on this text.
address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
  assert(mem_tag() == mtThreadStack, "Only for thread stack");
  LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
  address bottom = base();
  address top = base() + size();
  while (head != nullptr) {
    address committed_top = head->data()->base() + head->data()->size();
  VirtualMemoryTracker::Instance::tree()->visit_committed_regions(*this, [&](CommittedMemoryRegion& crgn) {
    address committed_top = crgn.base() + crgn.size();
    if (committed_top < top) {
      // committed stack guard pages, skip them
      bottom = head->data()->base() + head->data()->size();
      head = head->next();
      bottom = crgn.base() + crgn.size();
    } else {
      assert(top == committed_top, "Sanity");
      break;
      assert(top == committed_top, "Sanity, top=" INTPTR_FORMAT " , com-top=" INTPTR_FORMAT, p2i(top), p2i(committed_top));
      return false;;
    }
  }
  return true;
  });
  return bottom;
}
// One-time initialization. Allocates the sorted region list only when NMT
// runs at summary level or above; returns false iff that allocation fails.
bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
  assert(_reserved_regions == nullptr, "only call once");
  if (level < NMT_summary) {
    // Nothing to set up below summary tracking.
    return true;
  }
  _reserved_regions = new (std::nothrow, mtNMT)
    SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>();
  return _reserved_regions != nullptr;
}
// Record a virtual-memory reservation [base_addr, base_addr + size) with its
// allocating call stack and memory tag.
// Handles several special overlap cases in a specific order: recursive
// reservation of the identical range (JDK-8198226), overlapping thread
// stacks left by undetached JNI threads, CDS archive / Java heap / code
// mappings that are sub-ranges of an already recorded region. Any other
// overlap is a fatal error (ShouldNotReachHere).
// Returns true on success; false only if adding to the list fails.
bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
  const NativeCallStack& stack, MemTag mem_tag) {
  assert(base_addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion rgn(base_addr, size, stack, mem_tag);
  // Look for a region already recorded at (or overlapping) this range.
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", %zu)",
                 rgn.mem_tag_name(), p2i(rgn.base()), rgn.size());
  if (reserved_rgn == nullptr) {
    // Common case: brand-new region. Account it and record it.
    VirtualMemorySummary::record_reserved_memory(size, mem_tag);
    return _reserved_regions->add(rgn) != nullptr;
  } else {
    // Deal with recursive reservation
    // os::reserve_memory() -> pd_reserve_memory() -> os::reserve_memory()
    // See JDK-8198226.
    if (reserved_rgn->same_region(base_addr, size) &&
        (reserved_rgn->mem_tag() == mem_tag || reserved_rgn->mem_tag() == mtNone)) {
      // Identical range re-reserved: just refresh stack and tag.
      reserved_rgn->set_call_stack(stack);
      reserved_rgn->set_mem_tag(mem_tag);
      return true;
    } else {
      assert(reserved_rgn->overlap_region(base_addr, size), "Must be");

      // Overlapped reservation.
      // It can happen when the regions are thread stacks, as JNI
      // thread does not detach from VM before exits, and leads to
      // leak JavaThread object
      if (reserved_rgn->mem_tag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region

        // Release old region
        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->mem_tag());
        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->mem_tag());

        // Add new region
        VirtualMemorySummary::record_reserved_memory(rgn.size(), mem_tag);

        *reserved_rgn = rgn;
        return true;
      }

      // CDS mapping region.
      // CDS reserves the whole region for mapping CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
      if (reserved_rgn->mem_tag() == mtClassShared) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", %zu)",
                       reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }

      // Mapped CDS string region.
      // The string region(s) is part of the java heap.
      if (reserved_rgn->mem_tag() == mtJavaHeap) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", %zu)",
                       reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }

      // Code mappings are sub-ranges of the reserved code region.
      if (reserved_rgn->mem_tag() == mtCode) {
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved code region should contain this mapping region");
        return true;
      }

      // Print some more details.
      stringStream ss;
      ss.print_cr("Error: old region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %s.\n"
                  " new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %s.",
                  p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), NMTUtil::tag_to_name(reserved_rgn->mem_tag()),
                  p2i(base_addr), p2i(base_addr + size), NMTUtil::tag_to_name(mem_tag));
      if (MemTracker::tracking_level() == NMT_detail) {
        ss.print_cr("Existing region allocated from:");
        reserved_rgn->call_stack()->print_on(&ss);
        ss.print_cr("New region allocated from:");
        stack.print_on(&ss);
      }
      log_debug(nmt)("%s", ss.freeze());
      // Any other overlap is a tracking inconsistency.
      ShouldNotReachHere();
      return false;
    }
  }
}
// Assign a memory tag to the reserved region containing addr, if one is
// recorded. Tags may only be set on untagged (mtNone) regions; setting the
// same tag again is a no-op. The size parameter is unused (kept for the
// caller-facing signature).
void VirtualMemoryTracker::set_reserved_region_type(address addr, size_t size, MemTag mem_tag) {
  assert(addr != nullptr, "Invalid address");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  // A one-byte probe region is enough for a containment lookup.
  ReservedMemoryRegion probe(addr, 1);
  ReservedMemoryRegion* found = _reserved_regions->find(probe);
  if (found == nullptr) {
    return;  // nothing recorded at this address
  }
  assert(found->contain_address(addr), "Containment");
  if (found->mem_tag() == mem_tag) {
    return;  // already tagged as requested
  }
  assert(found->mem_tag() == mtNone, "Overwrite memory tag (should be mtNone, is: \"%s\")",
         NMTUtil::tag_to_name(found->mem_tag()));
  found->set_mem_tag(mem_tag);
}
// Record that [addr, addr + size) was committed, attributing it to stack.
// The range must lie fully inside an already-recorded reserved region.
// Returns whether the committed region was recorded.
bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
  const NativeCallStack& stack) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion key(addr, size);
  ReservedMemoryRegion* owner = _reserved_regions->find(key);

  if (owner == nullptr) {
    // Log before the assert below fires so the failure is diagnosable.
    log_debug(nmt)("Add committed region \'%s\', No reserved region found for (" INTPTR_FORMAT ", %zu)",
                   key.mem_tag_name(), p2i(key.base()), key.size());
  }
  assert(owner != nullptr, "Add committed region, No reserved region found");
  assert(owner->contain_region(addr, size), "Not completely contained");
  const bool ok = owner->add_committed_region(addr, size, stack);
  log_debug(nmt)("Add committed region \'%s\'(" INTPTR_FORMAT ", %zu) %s",
                 owner->mem_tag_name(), p2i(key.base()), key.size(), (ok ? "Succeeded" : "Failed"));
  return ok;
}
// Record that [addr, addr + size) was uncommitted. The range must lie fully
// inside a recorded reserved region. Returns whether the removal succeeded.
bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion key(addr, size);
  ReservedMemoryRegion* owner = _reserved_regions->find(key);
  assert(owner != nullptr, "No reserved region (" INTPTR_FORMAT ", %zu)", p2i(addr), size);
  assert(owner->contain_region(addr, size), "Not completely contained");
  // Capture the tag name up front: after the removal the region's
  // information may no longer be complete.
  const char* tag_name = owner->mem_tag_name();
  const bool ok = owner->remove_uncommitted_region(addr, size);
  log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", %zu) %s",
                 tag_name, p2i(addr), size, (ok ? " Succeeded" : "Failed"));
  return ok;
}
// Release an entire recorded reserved region: first uncommit everything
// inside it, then account the release and drop it from the region list.
// Returns whether both steps succeeded.
bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) {
  assert(rgn != nullptr, "Sanity check");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  // Keep a copy for logging: rgn's data is modified/removed below.
  ReservedMemoryRegion saved(*rgn);
  // uncommit regions within the released region
  bool ok = rgn->remove_uncommitted_region(rgn->base(), rgn->size());
  log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", %zu) %s",
                 saved.mem_tag_name(), p2i(saved.base()), saved.size(), (ok ? "Succeeded" : "Failed"));
  if (!ok) {
    return false;
  }

  VirtualMemorySummary::record_released_memory(rgn->size(), rgn->mem_tag());
  ok = _reserved_regions->remove(*rgn);
  log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", %zu) from _reserved_regions %s" ,
                 saved.mem_tag_name(), p2i(saved.base()), saved.size(), (ok ? "Succeeded" : "Failed"));
  return ok;
}
// Record that [addr, addr + size) was released. Handles: exact release of a
// whole recorded region; CDS unmapping inside the shared region; a release
// spanning from the CDS archive space into the class space; trimming either
// end of a region; and punching a hole in the middle (which splits the
// region in two).
// Returns whether the tracking data was updated successfully.
bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
  assert(addr != nullptr, "Invalid address");
  assert(size > 0, "Invalid size");
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::assert_locked();

  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  if (reserved_rgn == nullptr) {
    // Log before the assert below so the failure is diagnosable in product-like logs.
    log_debug(nmt)("No reserved region found for (" INTPTR_FORMAT ", %zu)!",
                   p2i(rgn.base()), rgn.size());
  }
  assert(reserved_rgn != nullptr, "No reserved region");
  if (reserved_rgn->same_region(addr, size)) {
    // Whole-region release: delegate to the pointer overload.
    return remove_released_region(reserved_rgn);
  }

  // uncommit regions within the released region
  if (!reserved_rgn->remove_uncommitted_region(addr, size)) {
    return false;
  }

  if (reserved_rgn->mem_tag() == mtClassShared) {
    if (reserved_rgn->contain_region(addr, size)) {
      // This is an unmapped CDS region, which is part of the reserved shared
      // memory region.
      // See special handling in VirtualMemoryTracker::add_reserved_region also.
      return true;
    }

    if (size > reserved_rgn->size()) {
      // This is from release the whole region spanning from archive space to class space,
      // so we release them altogether.
      ReservedMemoryRegion class_rgn(addr + reserved_rgn->size(),
                                     (size - reserved_rgn->size()));
      ReservedMemoryRegion* cls_rgn = _reserved_regions->find(class_rgn);
      assert(cls_rgn != nullptr, "Class space region not recorded?");
      assert(cls_rgn->mem_tag() == mtClass, "Must be class mem tag");
      remove_released_region(reserved_rgn);
      remove_released_region(cls_rgn);
      return true;
    }
  }

  VirtualMemorySummary::record_released_memory(size, reserved_rgn->mem_tag());

  assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
  if (reserved_rgn->base() == addr ||
      reserved_rgn->end() == addr + size) {
    // Release trims one end of the region; shrink it in place.
    reserved_rgn->exclude_region(addr, size);
    return true;
  } else {
    // Release punches a hole in the middle: keep the low part in the
    // existing record and add a new record for the high part.
    address top = reserved_rgn->end();
    address high_base = addr + size;
    ReservedMemoryRegion high_rgn(high_base, top - high_base,
                                  *reserved_rgn->call_stack(), reserved_rgn->mem_tag());

    // use original region for lower region
    reserved_rgn->exclude_region(addr, top - addr);
    LinkedListNode<ReservedMemoryRegion>* new_rgn = _reserved_regions->add(high_rgn);
    if (new_rgn == nullptr) {
      return false;
    } else {
      // Committed sub-regions above the hole move to the new record.
      reserved_rgn->move_committed_regions(addr, *new_rgn->data());
      return true;
    }
  }
}
// Given an existing memory mapping registered with NMT, split the mapping in
// two. The newly created two mappings will be registered under the call
// stack and the memory tags of the original section.
// Split the recorded reserved region [addr, addr + size) into two regions
// at addr + split. Both new regions keep the original call stack; the low
// part gets mem_tag, the high part gets split_tag. The region must be
// identical to an existing record and must have nothing committed.
// Always returns true.
bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_tag) {
  ReservedMemoryRegion rgn(addr, size);
  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);

  // Fix: check for nullptr BEFORE dereferencing. The previous code asserted
  // reserved_rgn->same_region(...) first, reading through a possibly null
  // pointer before the nullptr assert could fire.
  assert(reserved_rgn != nullptr, "No reserved region");
  assert(reserved_rgn->same_region(addr, size), "Must be identical region");
  assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");

  // Capture everything we need before the record is removed.
  NativeCallStack original_stack = *reserved_rgn->call_stack();
  MemTag original_tag = reserved_rgn->mem_tag();
  const char* name = reserved_rgn->mem_tag_name();

  remove_released_region(reserved_rgn);
  log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", %zu) with size %zu",
                 name, p2i(rgn.base()), rgn.size(), split);
  // Now, create two new regions.
  add_reserved_region(addr, split, original_stack, mem_tag);
  add_reserved_region(addr + split, size - split, original_stack, split_tag);

  return true;
}
// Iterate the range, find committed region within its bound.
class RegionIterator : public StackObj {
private:
@ -645,7 +298,6 @@ public:
// Align the size to work with full pages (Alpine and AIX stack top is not page aligned)
size_t aligned_stack_size = align_up(stack_size, os::vm_page_size());
ReservedMemoryRegion* region = const_cast<ReservedMemoryRegion*>(rgn);
NativeCallStack ncs; // empty stack
RegionIterator itr(stack_bottom, aligned_stack_size);
@ -657,7 +309,7 @@ public:
if (stack_bottom + stack_size < committed_start + committed_size) {
committed_size = stack_bottom + stack_size - committed_start;
}
region->add_committed_region(committed_start, committed_size, ncs);
VirtualMemoryTracker::Instance::add_committed_region(committed_start, committed_size, ncs);
DEBUG_ONLY(found_stack = true;)
}
#ifdef ASSERT
@ -670,55 +322,24 @@ public:
}
};
// Walk all reserved regions with a SnapshotThreadStackWalker to record the
// current committed extent of thread stacks.
// NOTE(review): the two function headers below are a diff-rendering
// artifact (old VirtualMemoryTracker:: line followed by the new
// VirtualMemoryTracker::Instance:: line); only one belongs in the real
// file — confirm against the original sources.
void VirtualMemoryTracker::snapshot_thread_stacks() {
void VirtualMemoryTracker::Instance::snapshot_thread_stacks() {
  SnapshotThreadStackWalker walker;
  walk_virtual_memory(&walker);
}
// NOTE(review): this span interleaves two revisions — the old linked-list
// implementation of VirtualMemoryTracker::walk_virtual_memory() and the new
// RegionsTree::find_reserved_region() (which returns the reserved region
// containing addr, or a default-constructed one if none contains it). The
// stray 'head = head->next();' lines and the doubled 'return true;' are
// diff-rendering artifacts; confirm against the original files.
bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
  assert(_reserved_regions != nullptr, "Sanity check");
  MemTracker::NmtVirtualMemoryLocker nvml;
  // Check that the _reserved_regions haven't been deleted.
  if (_reserved_regions != nullptr) {
    LinkedListNode<ReservedMemoryRegion>* head = _reserved_regions->head();
    while (head != nullptr) {
      const ReservedMemoryRegion* rgn = head->peek();
      if (!walker->do_allocation_site(rgn)) {
ReservedMemoryRegion RegionsTree::find_reserved_region(address addr) {
  ReservedMemoryRegion rmr;
  // Stop at the first region that contains addr; copy it out.
  auto contain_region = [&](ReservedMemoryRegion& region_in_tree) {
    if (region_in_tree.contain_address(addr)) {
      rmr = region_in_tree;
      return false;
    }
      head = head->next();
    }
  }
  return true;
    return true;
  };
  visit_reserved_regions(contain_region);
  return rmr;
}
class PrintRegionWalker : public VirtualMemoryWalker {
private:
const address _p;
outputStream* _st;
NativeCallStackPrinter _stackprinter;
public:
PrintRegionWalker(const void* p, outputStream* st) :
_p((address)p), _st(st), _stackprinter(st) { }
bool do_allocation_site(const ReservedMemoryRegion* rgn) {
if (rgn->contain_address(_p)) {
_st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s",
p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::tag_to_enum_name(rgn->mem_tag()));
if (MemTracker::tracking_level() == NMT_detail) {
_stackprinter.print_stack(rgn->call_stack());
_st->cr();
}
return false;
}
return true;
}
};
// If p is contained within a known memory region, print information about it to the
// given stream and return true; false otherwise.
bool VirtualMemoryTracker::print_containing_region(const void* p, outputStream* st) {
  PrintRegionWalker walker(p, st);
  // The walk returns false exactly when the walker aborted, i.e. when a
  // containing region was found and printed.
  const bool walked_to_end = walk_virtual_memory(&walker);
  return !walked_to_end;
}
// A committed region "equals" a reserved region when it spans the same
// number of bytes and carries the same call stack.
bool CommittedMemoryRegion::equals(const ReservedMemoryRegion& rmr) const {
  const bool same_size = (size() == rmr.size());
  return same_size && call_stack()->equals(*rmr.call_stack());
}

View File

@ -25,16 +25,29 @@
#ifndef SHARE_NMT_VIRTUALMEMORYTRACKER_HPP
#define SHARE_NMT_VIRTUALMEMORYTRACKER_HPP
#include "memory/allocation.hpp"
#include "memory/metaspace.hpp" // For MetadataType
#include "memory/metaspaceStats.hpp"
#include "nmt/allocationSite.hpp"
#include "nmt/nmtCommon.hpp"
#include "nmt/vmatree.hpp"
#include "nmt/regionsTree.hpp"
#include "runtime/atomic.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/nativeCallStack.hpp"
#include "utilities/ostream.hpp"
// VirtualMemoryTracker (VMT) is an internal class of the MemTracker.
// All the Hotspot code use only the MemTracker interface to register the memory operations in NMT.
// Memory regions can be reserved/committed/uncommitted/released by calling MemTracker API which in turn call the corresponding functions in VMT.
// VMT uses RegionsTree to hold and manage the memory regions. Each region has two nodes that each one has address of the region (start/end) and
// state (reserved/released/committed) and MemTag of the regions before and after it.
//
// The memory operations of Reserve/Commit/Uncommit/Release are tracked by updating/inserting/deleting the nodes in the tree. When an operation
// changes nodes in the tree, the summary of the changes is returned back in a SummaryDiff struct. This struct shows that how much reserve/commit amount
// of any specific MemTag is changed. The summary of every operation is accumulated in VirtualMemorySummary class.
//
// Not all operations are valid in VMT. The following predicates are checked before the operation is applied to the tree and/or VirtualMemorySummary:
// - committed size of a MemTag should be <= of its reserved size
// - uncommitted size of a MemTag should be <= of its committed size
// - released size of a MemTag should be <= of its reserved size
/*
* Virtual memory counter
*/
@ -276,136 +289,124 @@ class CommittedMemoryRegion : public VirtualMemoryRegion {
NativeCallStack _stack;
public:
CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
VirtualMemoryRegion(addr, size), _stack(stack) { }
CommittedMemoryRegion()
: VirtualMemoryRegion((address)1, 1), _stack(NativeCallStack::empty_stack()) { }
CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack)
: VirtualMemoryRegion(addr, size), _stack(stack) { }
inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
inline const NativeCallStack* call_stack() const { return &_stack; }
bool equals(const ReservedMemoryRegion& other) const;
};
typedef LinkedListIterator<CommittedMemoryRegion> CommittedRegionIterator;
int compare_committed_region(const CommittedMemoryRegion&, const CommittedMemoryRegion&);
class ReservedMemoryRegion : public VirtualMemoryRegion {
private:
SortedLinkedList<CommittedMemoryRegion, compare_committed_region>
_committed_regions;
NativeCallStack _stack;
MemTag _mem_tag;
MemTag _mem_tag;
public:
bool is_valid() { return base() != (address)1 && size() != 1;}
ReservedMemoryRegion()
: VirtualMemoryRegion((address)1, 1), _stack(NativeCallStack::empty_stack()), _mem_tag(mtNone) { }
ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
MemTag mem_tag) :
VirtualMemoryRegion(base, size), _stack(stack), _mem_tag(mem_tag) { }
MemTag mem_tag = mtNone)
: VirtualMemoryRegion(base, size), _stack(stack), _mem_tag(mem_tag) { }
ReservedMemoryRegion(address base, size_t size) :
VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _mem_tag(mtNone) { }
ReservedMemoryRegion(address base, size_t size)
: VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _mem_tag(mtNone) { }
// Copy constructor
ReservedMemoryRegion(const ReservedMemoryRegion& rr) :
VirtualMemoryRegion(rr.base(), rr.size()) {
ReservedMemoryRegion(const ReservedMemoryRegion& rr)
: VirtualMemoryRegion(rr.base(), rr.size()) {
*this = rr;
}
inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
inline const NativeCallStack* call_stack() const { return &_stack; }
void set_mem_tag(MemTag mem_tag);
inline MemTag mem_tag() const { return _mem_tag; }
// uncommitted thread stack bottom, above guard pages if there is any.
address thread_stack_uncommitted_bottom() const;
bool add_committed_region(address addr, size_t size, const NativeCallStack& stack);
bool remove_uncommitted_region(address addr, size_t size);
size_t committed_size() const;
size_t committed_size() const;
// move committed regions that higher than specified address to
// the new region
void move_committed_regions(address addr, ReservedMemoryRegion& rgn);
CommittedRegionIterator iterate_committed_regions() const {
return CommittedRegionIterator(_committed_regions.head());
}
ReservedMemoryRegion& operator= (const ReservedMemoryRegion& other) {
set_base(other.base());
set_size(other.size());
_stack = *other.call_stack();
_stack = *other.call_stack();
_mem_tag = other.mem_tag();
_committed_regions.clear();
CommittedRegionIterator itr = other.iterate_committed_regions();
const CommittedMemoryRegion* rgn = itr.next();
while (rgn != nullptr) {
_committed_regions.add(*rgn);
rgn = itr.next();
}
return *this;
}
const char* mem_tag_name() const { return NMTUtil::tag_to_name(_mem_tag); }
private:
// The committed region contains the uncommitted region, subtract the uncommitted
// region from this committed region
bool remove_uncommitted_region(LinkedListNode<CommittedMemoryRegion>* node,
address addr, size_t sz);
bool add_committed_region(const CommittedMemoryRegion& rgn) {
assert(rgn.base() != nullptr, "Invalid base address");
assert(size() > 0, "Invalid size");
return _committed_regions.add(rgn) != nullptr;
}
const char* tag_name() const { return NMTUtil::tag_to_name(_mem_tag); }
};
int compare_reserved_region_base(const ReservedMemoryRegion& r1, const ReservedMemoryRegion& r2);
class VirtualMemoryWalker : public StackObj {
public:
virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
};
// Main class called from MemTracker to track virtual memory allocations, commits and releases.
class VirtualMemoryTracker : AllStatic {
friend class VirtualMemoryTrackerTest;
friend class CommittedVirtualMemoryTest;
class VirtualMemoryTracker {
RegionsTree _tree;
public:
static bool initialize(NMT_TrackingLevel level);
VirtualMemoryTracker(bool is_detailed_mode) : _tree(is_detailed_mode) { }
static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MemTag mem_tag);
static bool add_committed_region (address base_addr, size_t size, const NativeCallStack& stack);
static bool remove_uncommitted_region (address base_addr, size_t size);
static bool remove_released_region (address base_addr, size_t size);
static bool remove_released_region (ReservedMemoryRegion* rgn);
static void set_reserved_region_type (address addr, size_t size, MemTag mem_tag);
void add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MemTag mem_tag = mtNone);
void add_committed_region (address base_addr, size_t size, const NativeCallStack& stack);
void remove_uncommitted_region (address base_addr, size_t size);
void remove_released_region (address base_addr, size_t size);
void set_reserved_region_tag (address addr, size_t size, MemTag mem_tag);
// Given an existing memory mapping registered with NMT, split the mapping in
// two. The newly created two mappings will be registered under the call
// stack and the memory tag of the original section.
static bool split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_type);
// stack and the memory tags of the original section.
void split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_mem_tag);
// Walk virtual memory data structure for creating baseline, etc.
static bool walk_virtual_memory(VirtualMemoryWalker* walker);
bool walk_virtual_memory(VirtualMemoryWalker* walker);
// If p is contained within a known memory region, print information about it to the
// given stream and return true; false otherwise.
static bool print_containing_region(const void* p, outputStream* st);
bool print_containing_region(const void* p, outputStream* st);
// Snapshot current thread stacks
static void snapshot_thread_stacks();
void snapshot_thread_stacks();
void apply_summary_diff(VMATree::SummaryDiff diff);
RegionsTree* tree() { return &_tree; }
private:
static SortedLinkedList<ReservedMemoryRegion, compare_reserved_region_base>* _reserved_regions;
class Instance : public AllStatic {
friend class VirtualMemoryTrackerTest;
friend class CommittedVirtualMemoryTest;
static VirtualMemoryTracker* _tracker;
public:
using RegionData = VMATree::RegionData;
static bool initialize(NMT_TrackingLevel level);
static void add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MemTag mem_tag = mtNone);
static void add_committed_region (address base_addr, size_t size, const NativeCallStack& stack);
static void remove_uncommitted_region (address base_addr, size_t size);
static void remove_released_region (address base_addr, size_t size);
static void set_reserved_region_tag (address addr, size_t size, MemTag mem_tag);
static void split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_mem_tag);
static bool walk_virtual_memory(VirtualMemoryWalker* walker);
static bool print_containing_region(const void* p, outputStream* st);
static void snapshot_thread_stacks();
static void apply_summary_diff(VMATree::SummaryDiff diff);
static RegionsTree* tree() { return _tracker->tree(); }
};
};
#endif // SHARE_NMT_VIRTUALMEMORYTRACKER_HPP
#endif // SHARE_NMT_VIRTUALMEMORYTRACKER_HPP

View File

@ -82,8 +82,8 @@
const VMATree::RegionData VMATree::empty_regiondata{NativeCallStackStorage::invalid, mtNone};
const char* VMATree::statetype_strings[3] = {
"released", "reserved", "committed"
const char* VMATree::statetype_strings[4] = {
"released","reserved", "only-committed", "committed",
};
VMATree::SIndex VMATree::get_new_reserve_callstack(const SIndex es, const StateType ex, const RequestInfo& req) const {
@ -241,6 +241,7 @@ void VMATree::update_region(TreapNode* n1, TreapNode* n2, const RequestInfo& req
compute_summary_diff(region_size, existing_tag, existing_state, req, new_tag, diff);
}
VMATree::SummaryDiff VMATree::register_mapping(position _A, position _B, StateType state,
const RegionData& metadata, bool use_tag_inplace) {
@ -344,7 +345,7 @@ VMATree::SummaryDiff VMATree::register_mapping(position _A, position _B, StateTy
return false;
};
GrowableArrayCHeap<position, mtNMT> to_be_removed;
// update regions in [Y,W)
// update regions in range A to B
auto update_loop = [&]() {
TreapNode* prev = nullptr;
_tree.visit_range_in_order(_A + 1, _B + 1, [&](TreapNode* curr) {
@ -357,6 +358,7 @@ VMATree::SummaryDiff VMATree::register_mapping(position _A, position _B, StateTy
}
}
prev = curr;
return true;
});
};
// update region of [A,T)
@ -416,8 +418,7 @@ VMATree::SummaryDiff VMATree::register_mapping(position _A, position _B, StateTy
if ( X_eq_A && Y_exists && !Y_eq_W && !W_eq_B && U_exists) { row = 22; }
if ( X_eq_A && Y_exists && W_eq_B && U_exists) { row = 23; }
DEBUG_ONLY(print_case();)
switch(row) {
switch(row) {
// row 0: .........A..................B.....
case 0: {
update_A(B);
@ -652,6 +653,7 @@ void VMATree::print_on(outputStream* out) {
visit_in_order([&](TreapNode* current) {
out->print("%zu (%s) - %s [%d, %d]-> ", current->key(), NMTUtil::tag_to_name(out_state(current).mem_tag()),
statetype_to_string(out_state(current).type()), current->val().out.reserved_stack(), current->val().out.committed_stack());
return true;
});
out->cr();
}

View File

@ -26,6 +26,7 @@
#ifndef SHARE_NMT_VMATREE_HPP
#define SHARE_NMT_VMATREE_HPP
#include "nmt/memTag.hpp"
#include "nmt/memTag.hpp"
#include "nmt/nmtNativeCallStackStorage.hpp"
#include "nmt/nmtTreap.hpp"
@ -40,6 +41,7 @@
// The set of points is stored in a balanced binary tree for efficient querying and updating.
class VMATree {
friend class NMTVMATreeTest;
friend class VMTWithVMATreeTest;
// A position in memory.
public:
using position = size_t;
@ -56,16 +58,18 @@ public:
}
};
enum class StateType : uint8_t { Released, Reserved, Committed, LAST };
// Bit fields view: bit 0 for Reserved, bit 1 for Committed.
// Setting a region as Committed preserves the Reserved state.
enum class StateType : uint8_t { Reserved = 1, Committed = 3, Released = 0, st_number_of_states = 4 };
private:
static const char* statetype_strings[static_cast<uint8_t>(StateType::LAST)];
static const char* statetype_strings[static_cast<uint8_t>(StateType::st_number_of_states)];
public:
NONCOPYABLE(VMATree);
static const char* statetype_to_string(StateType type) {
assert(type != StateType::LAST, "must be");
assert(type < StateType::st_number_of_states, "must be");
return statetype_strings[static_cast<uint8_t>(type)];
}
@ -304,12 +308,8 @@ public:
return register_mapping(from, from + size, StateType::Reserved, metadata, true);
}
SummaryDiff release_mapping(position from, size size) {
return register_mapping(from, from + size, StateType::Released, VMATree::empty_regiondata);
}
VMATreap& tree() {
return _tree;
SummaryDiff release_mapping(position from, position sz) {
return register_mapping(from, from + sz, StateType::Released, VMATree::empty_regiondata);
}
public:
@ -321,7 +321,10 @@ public:
#ifdef ASSERT
void print_on(outputStream* out);
#endif
template<typename F>
void visit_range_in_order(const position& from, const position& to, F f) {
_tree.visit_range_in_order(from, to, f);
}
VMATreap& tree() { return _tree; }
};
#endif

View File

@ -105,5 +105,6 @@ void InlinePrinter::IPInlineSite::dump(outputStream* tty, int level) const {
_children.visit_in_order([=](auto* node) {
node->val().dump(tty, level + 1);
return true;
});
}

View File

@ -37,7 +37,6 @@
#include "memory/universe.hpp"
#include "nmt/mallocTracker.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"

View File

@ -55,6 +55,7 @@
class MemTracker;
class NativeCallStack : public StackObj {
friend class VMTWithVMATreeTest;
private:
address _stack[NMT_TrackingStackDepth];
static const NativeCallStack _empty_stack;

View File

@ -1,53 +0,0 @@
/*
* Copyright (c) 2023 SAP SE. All rights reserved.
* Copyright (c) 2023, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "nmt/nmtCommon.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"
// Tests the assignment operator of ReservedMemoryRegion
TEST_VM(NMT, ReservedRegionCopy) {
address dummy1 = (address)0x10000000;
NativeCallStack stack1(&dummy1, 1);
ReservedMemoryRegion region1(dummy1, os::vm_page_size(), stack1, mtThreadStack);
VirtualMemorySummary::record_reserved_memory(os::vm_page_size(), region1.mem_tag());
region1.add_committed_region(dummy1, os::vm_page_size(), stack1);
address dummy2 = (address)0x20000000;
NativeCallStack stack2(&dummy2, 1);
ReservedMemoryRegion region2(dummy2, os::vm_page_size(), stack2, mtCode);
VirtualMemorySummary::record_reserved_memory(os::vm_page_size(), region2.mem_tag());
region2.add_committed_region(dummy2, os::vm_page_size(), stack2);
region2 = region1;
CommittedRegionIterator itr = region2.iterate_committed_regions();
const CommittedMemoryRegion* rgn = itr.next();
ASSERT_EQ(rgn->base(), dummy1); // Now we should see dummy1
ASSERT_EQ(region2.mem_tag(), mtThreadStack); // Should be correct memory tag
ASSERT_EQ(region2.call_stack()->get_frame(0), dummy1); // Check the stack
rgn = itr.next();
ASSERT_EQ(rgn, (const CommittedMemoryRegion*)nullptr); // and nothing else
}

View File

@ -24,9 +24,9 @@
#include "memory/resourceArea.hpp"
#include "nmt/nmtTreap.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"
class NMTTreapTest : public testing::Test {
public:
struct Cmp {
@ -72,6 +72,7 @@ public:
treap.visit_in_order([&](TreapCHeap<int, int, Cmp>::TreapNode* node) {
nums_seen.at(node->key())++;
return true;
});
for (int i = 0; i < up_to; i++) {
EXPECT_EQ(1, nums_seen.at(i));
@ -161,6 +162,7 @@ TEST_VM_F(NMTTreapTest, TestVisitors) {
treap.visit_range_in_order(0, 100, [&](Node* x) {
EXPECT_TRUE(false) << "Empty treap has no nodes to visit";
return true;
});
// Single-element set
@ -168,12 +170,14 @@ TEST_VM_F(NMTTreapTest, TestVisitors) {
int count = 0;
treap.visit_range_in_order(0, 100, [&](Node* x) {
count++;
return true;
});
EXPECT_EQ(1, count);
count = 0;
treap.visit_in_order([&](Node* x) {
count++;
return true;
});
EXPECT_EQ(1, count);
@ -184,12 +188,14 @@ TEST_VM_F(NMTTreapTest, TestVisitors) {
count = 0;
treap.visit_range_in_order(0, 100, [&](Node* x) {
count++;
return true;
});
EXPECT_EQ(1, count);
count = 0;
treap.visit_in_order([&](Node* x) {
count++;
return true;
});
EXPECT_EQ(3, count);
@ -197,6 +203,7 @@ TEST_VM_F(NMTTreapTest, TestVisitors) {
treap.upsert(0, 0); // This node should not be visited.
treap.visit_range_in_order(0, 0, [&](Node* x) {
EXPECT_TRUE(false) << "Empty visiting range should not visit any node";
return true;
});
treap.remove_all();
@ -208,6 +215,7 @@ TEST_VM_F(NMTTreapTest, TestVisitors) {
GrowableArray<int> seen;
treap.visit_range_in_order(0, 10, [&](Node* x) {
seen.push(x->key());
return true;
});
EXPECT_EQ(10, seen.length());
for (int i = 0; i < 10; i++) {
@ -217,6 +225,7 @@ TEST_VM_F(NMTTreapTest, TestVisitors) {
seen.clear();
treap.visit_in_order([&](Node* x) {
seen.push(x->key());
return true;
});
EXPECT_EQ(11, seen.length());
for (int i = 0; i < 10; i++) {
@ -226,6 +235,7 @@ TEST_VM_F(NMTTreapTest, TestVisitors) {
seen.clear();
treap.visit_range_in_order(10, 12, [&](Node* x) {
seen.push(x->key());
return true;
});
EXPECT_EQ(1, seen.length());
EXPECT_EQ(10, seen.at(0));
@ -241,6 +251,7 @@ TEST_VM_F(NMTTreapTest, TestVisitors) {
GrowableArray<int> seen;
treap.visit_range_in_order(9, -1, [&](Node* x) {
seen.push(x->key());
return true;
});
EXPECT_EQ(10, seen.length());
for (int i = 0; i < 10; i++) {
@ -250,6 +261,7 @@ TEST_VM_F(NMTTreapTest, TestVisitors) {
treap.visit_in_order([&](Node* x) {
seen.push(x->key());
return true;
});
EXPECT_EQ(10, seen.length());
for (int i = 0; i < 10; i++) {

View File

@ -0,0 +1,131 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "memory/allocation.hpp"
#include "nmt/memTag.hpp"
#include "nmt/nmtNativeCallStackStorage.hpp"
#include "nmt/regionsTree.inline.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "nmt/vmatree.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"
// gtest fixture: gives every NMTRegionsTreeTest test case a fresh RegionsTree
// so tests do not observe each other's reservations/commits.
class NMTRegionsTreeTest : public testing::Test {
public:
// The tree under test, shared by all test bodies of this fixture via 'rt'.
RegionsTree rt;
// NOTE(review): the 'true' ctor argument presumably enables call-stack
// storage in the tree — confirm against the RegionsTree constructor.
NMTRegionsTreeTest() : rt(true) { }
};
// Re-reserving an already reserved range: a second reserve with the same tag
// must leave the reserved size unchanged but uncommit any committed portion;
// a reserve with a different tag must move the reserved size between tags.
// Each EXPECT checks the SummaryDiff returned by the immediately preceding call.
TEST_VM_F(NMTRegionsTreeTest, ReserveCommitTwice) {
NativeCallStack ncs;
VMATree::RegionData rd = rt.make_region_data(ncs, mtTest);
VMATree::RegionData rd2 = rt.make_region_data(ncs, mtGC);
VMATree::SummaryDiff diff;
// Initial reserve of [0, 100) under mtTest.
diff = rt.reserve_mapping(0, 100, rd);
EXPECT_EQ(100, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
// Commit the first half, then re-reserve the same range with the same tag:
// no net reserve change, but the 50 committed bytes are uncommitted.
diff = rt.commit_region(0, 50, ncs);
diff = rt.reserve_mapping(0, 100, rd);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(-50, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
// Re-reserve under a different tag: the 100 reserved bytes move mtTest -> mtGC.
diff = rt.reserve_mapping(0, 100, rd2);
EXPECT_EQ(-100, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(100, diff.tag[NMTUtil::tag_to_index(mtGC)].reserve);
// Committing within the (now mtGC) reservation affects commit only.
diff = rt.commit_region(0, 50, ncs);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtGC)].reserve);
EXPECT_EQ(50, diff.tag[NMTUtil::tag_to_index(mtGC)].commit);
// Committing an already-committed range is a no-op for mtTest accounting.
diff = rt.commit_region(0, 50, ncs);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(0, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
}
// Commit two disjoint sub-ranges of a reserved mapping, then uncommit the
// first one; each operation's SummaryDiff must report the exact delta for
// the mtTest tag (commit changes only, reserve untouched).
TEST_VM_F(NMTRegionsTreeTest, CommitUncommitRegion) {
  NativeCallStack stack;
  VMATree::RegionData data = rt.make_region_data(stack, mtTest);
  rt.reserve_mapping(0, 100, data);
  // Commit [0, 50): commit grows by 50, reserve stays put.
  VMATree::SummaryDiff delta = rt.commit_region(0, 50, stack);
  EXPECT_EQ(0, delta.tag[NMTUtil::tag_to_index(mtTest)].reserve);
  EXPECT_EQ(50, delta.tag[NMTUtil::tag_to_index(mtTest)].commit);
  // Commit the disjoint range [60, 70): commit grows by 10 more.
  delta = rt.commit_region((address)60, 10, stack);
  EXPECT_EQ(0, delta.tag[NMTUtil::tag_to_index(mtTest)].reserve);
  EXPECT_EQ(10, delta.tag[NMTUtil::tag_to_index(mtTest)].commit);
  // Uncommit [0, 50): exactly the committed amount comes back.
  delta = rt.uncommit_region(0, 50);
  EXPECT_EQ(0, delta.tag[NMTUtil::tag_to_index(mtTest)].reserve);
  EXPECT_EQ(-50, delta.tag[NMTUtil::tag_to_index(mtTest)].commit);
}
// find_reserved_region() must return the enclosing reservation for any
// address that falls inside one of several disjoint reserved ranges.
TEST_VM_F(NMTRegionsTreeTest, FindReservedRegion) {
  NativeCallStack stack;
  VMATree::RegionData data = rt.make_region_data(stack, mtTest);
  const size_t bases[] = { 1000, 1200, 1300, 1400 };
  for (size_t base : bases) {
    rt.reserve_mapping(base, 50, data);
  }
  // Probe an interior address (base + 5) of each reservation; the lookup is
  // read-only, so query order does not matter. The returned region must
  // start at that reservation's base.
  for (size_t base : bases) {
    ReservedMemoryRegion rmr = rt.find_reserved_region((address)(base + 5));
    EXPECT_EQ(rmr.base(), (address)base);
  }
}
// visit_reserved_regions() must enumerate every reservation, and each visited
// region must match what was reserved (base a multiple of 100, size 50).
TEST_VM_F(NMTRegionsTreeTest, VisitReservedRegions) {
  NativeCallStack ncs;
  VMATree::RegionData rd = rt.make_region_data(ncs, mtTest);
  rt.reserve_mapping(1000, 50, rd);
  rt.reserve_mapping(1200, 50, rd);
  rt.reserve_mapping(1300, 50, rd);
  rt.reserve_mapping(1400, 50, rd);
  size_t count = 0;
  rt.visit_reserved_regions([&](const ReservedMemoryRegion& rgn) {
    count++;
    EXPECT_EQ(((size_t)rgn.base()) % 100, 0UL);
    EXPECT_EQ(rgn.size(), 50UL);
    return true; // keep iterating
  });
  // Guard against a vacuous pass: without this check the test would succeed
  // even if the visitor were never invoked (cf. VisitCommittedRegions, which
  // already asserts its visit count).
  EXPECT_EQ(count, 4UL);
}
// visit_committed_regions() restricted to one reservation must enumerate, in
// address order, exactly the committed sub-ranges of that reservation and
// none from the other reservations.
TEST_VM_F(NMTRegionsTreeTest, VisitCommittedRegions) {
  NativeCallStack stack;
  VMATree::RegionData data = rt.make_region_data(stack, mtTest);
  rt.reserve_mapping(1000, 50, data);
  rt.reserve_mapping(1200, 50, data);
  rt.reserve_mapping(1300, 50, data);
  rt.reserve_mapping(1400, 50, data);
  // Four 5-byte commits at 1010, 1020, 1030, 1040 inside the first reservation.
  for (size_t offset = 10; offset <= 40; offset += 10) {
    rt.commit_region((address)(1000 + offset), 5UL, stack);
  }
  ReservedMemoryRegion reserved((address)1000, 50);
  size_t seen = 0;
  rt.visit_committed_regions(reserved, [&](CommittedMemoryRegion& committed) {
    seen++;
    // Visited in address order, so the k-th region's base ends in k*10.
    EXPECT_EQ((((size_t)committed.base()) % 100) / 10, seen);
    EXPECT_EQ(committed.size(), 5UL);
    return true; // keep iterating
  });
  EXPECT_EQ(seen, 4UL);
}

View File

@ -24,6 +24,7 @@
#include "memory/allocation.hpp"
#include "nmt/memTag.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/nmtNativeCallStackStorage.hpp"
#include "nmt/vmatree.hpp"
#include "runtime/os.hpp"
@ -82,6 +83,7 @@ public:
int count = 0;
treap(tree).visit_in_order([&](TNode* x) {
++count;
return true;
});
return count;
}
@ -143,6 +145,7 @@ public:
VMATree::StateType out = out_type_of(x);
EXPECT_TRUE((in == VMATree::StateType::Released && out == VMATree::StateType::Committed) ||
(in == VMATree::StateType::Committed && out == VMATree::StateType::Released));
return true;
});
EXPECT_EQ(2, count_nodes(tree));
}
@ -168,6 +171,7 @@ public:
found[i] = x->key();
}
i++;
return true;
});
ASSERT_EQ(4, i) << "0 - 50 - 75 - 100 nodes expected";
@ -256,7 +260,6 @@ public:
}
tree.tree().upsert((VMATree::position)et.nodes[i], st);
}
print_tree(et, line_no);
}
template <int N>
@ -302,7 +305,6 @@ public:
EXPECT_FALSE(r.end->val().in.has_committed_stack()) << for_this_node;
}
}
print_tree(et, line_no);
}
template<int N>
@ -346,6 +348,16 @@ TEST_VM_F(NMTVMATreeTest, OverlappingReservationsResultInTwoNodes) {
EXPECT_EQ(2, count_nodes(tree));
}
// Reserving the exact same range twice with the same RegionData must be
// idempotent tree-wise: still one region, i.e. two boundary nodes spanning
// the full 100 bytes.
TEST_VM_F(NMTVMATreeTest, DuplicateReserve) {
VMATree::RegionData rd{si[0], mtTest};
Tree tree;
tree.reserve_mapping(100, 100, rd);
tree.reserve_mapping(100, 100, rd);
// Two nodes: the region's start and end boundaries — no duplicates added.
EXPECT_EQ(2, count_nodes(tree));
// The range enclosing an interior address (110) spans the whole reservation.
VMATree::VMATreap::Range r = tree.tree().find_enclosing_range(110);
EXPECT_EQ(100, (int)(r.end->key() - r.start->key()));
}
TEST_VM_F(NMTVMATreeTest, UseTagInplace) {
Tree tree;
VMATree::RegionData rd_Test_cs0(si[0], mtTest);
@ -364,6 +376,7 @@ TEST_VM_F(NMTVMATreeTest, UseTagInplace) {
EXPECT_EQ(VMATree::StateType::Reserved, node->val().out.type());
}
}
return true;
});
}
@ -402,6 +415,7 @@ TEST_VM_F(NMTVMATreeTest, LowLevel) {
if (x->key() == 0) {
EXPECT_EQ(x->val().out.reserved_regiondata().mem_tag, mtTest);
}
return true;
});
EXPECT_EQ(2, count_nodes(tree));
@ -441,6 +455,7 @@ TEST_VM_F(NMTVMATreeTest, LowLevel) {
if (x->key() == 100) {
EXPECT_EQ(mtTest, x->val().in.reserved_regiondata().mem_tag);
}
return true;
});
}
@ -796,6 +811,18 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
}
}
// Reserving over a committed sub-range acts as an uncommit for summary
// accounting: the reserved size is unchanged, but the overlapped committed
// bytes are subtracted.
TEST_VM_F(NMTVMATreeTest, SummaryAccountingReserveAsUncommit) {
Tree tree;
Tree::RegionData rd(NCS::StackIndex(), mtTest);
// Reserve [1200, 1300) and commit [1210, 1260) within it.
VMATree::SummaryDiff diff1 = tree.reserve_mapping(1200, 100, rd);
VMATree::SummaryDiff diff2 = tree.commit_mapping(1210, 50, rd);
EXPECT_EQ(100, diff1.tag[NMTUtil::tag_to_index(mtTest)].reserve);
EXPECT_EQ(50, diff2.tag[NMTUtil::tag_to_index(mtTest)].commit);
// Re-reserving [1220, 1240), fully inside the committed part: 20 bytes are
// uncommitted while the reserved total stays the same.
VMATree::SummaryDiff diff3 = tree.reserve_mapping(1220, 20, rd);
EXPECT_EQ(-20, diff3.tag[NMTUtil::tag_to_index(mtTest)].commit);
EXPECT_EQ(0, diff3.tag[NMTUtil::tag_to_index(mtTest)].reserve);
}
// Exceedingly simple tracker for page-granular allocations
// Use it for testing consistency with VMATree.
struct SimpleVMATracker : public CHeapObj<mtTest> {

View File

@ -22,7 +22,8 @@
*/
#include "nmt/memTracker.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "nmt/regionsTree.hpp"
#include "nmt/regionsTree.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@ -37,18 +38,15 @@ public:
MemTracker::record_thread_stack(stack_end, stack_size);
VirtualMemoryTracker::add_reserved_region(stack_end, stack_size, CALLER_PC, mtThreadStack);
VirtualMemoryTracker::Instance::add_reserved_region(stack_end, stack_size, CALLER_PC, mtThreadStack);
// snapshot current stack usage
VirtualMemoryTracker::snapshot_thread_stacks();
VirtualMemoryTracker::Instance::snapshot_thread_stacks();
ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(stack_end, stack_size));
ASSERT_TRUE(rmr != nullptr);
ReservedMemoryRegion rmr_found = VirtualMemoryTracker::Instance::tree()->find_reserved_region(stack_end);
ASSERT_TRUE(rmr_found.is_valid());
ASSERT_EQ(rmr_found.base(), stack_end);
ASSERT_EQ(rmr->base(), stack_end);
ASSERT_EQ(rmr->size(), stack_size);
CommittedRegionIterator iter = rmr->iterate_committed_regions();
int i = 0;
address i_addr = (address)&i;
bool found_i_addr = false;
@ -56,24 +54,23 @@ public:
// stack grows downward
address stack_top = stack_end + stack_size;
bool found_stack_top = false;
for (const CommittedMemoryRegion* region = iter.next(); region != nullptr; region = iter.next()) {
if (region->base() + region->size() == stack_top) {
ASSERT_TRUE(region->size() <= stack_size);
VirtualMemoryTracker::Instance::tree()->visit_committed_regions(rmr_found, [&](const CommittedMemoryRegion& cmr) {
if (cmr.base() + cmr.size() == stack_top) {
EXPECT_TRUE(cmr.size() <= stack_size);
found_stack_top = true;
}
if(i_addr < stack_top && i_addr >= region->base()) {
if(i_addr < stack_top && i_addr >= cmr.base()) {
found_i_addr = true;
}
i++;
}
return true;
});
// stack and guard pages may be contiguous as one region
ASSERT_TRUE(i >= 1);
ASSERT_TRUE(found_stack_top);
ASSERT_TRUE(found_i_addr);
ASSERT_TRUE(found_stack_top);
}
static void check_covered_pages(address addr, size_t size, address base, size_t touch_pages, int* page_num) {
@ -100,28 +97,24 @@ public:
*touch_addr = 'a';
}
address frame = (address)0x1235;
NativeCallStack stack(&frame, 1);
VirtualMemoryTracker::add_reserved_region((address)base, size, stack, mtThreadStack);
// trigger the test
VirtualMemoryTracker::snapshot_thread_stacks();
VirtualMemoryTracker::Instance::snapshot_thread_stacks();
ReservedMemoryRegion rmr_found = VirtualMemoryTracker::Instance::tree()->find_reserved_region((address)base);
ASSERT_TRUE(rmr_found.is_valid());
ASSERT_EQ(rmr_found.base(), (address)base);
ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion((address)base, size));
ASSERT_TRUE(rmr != nullptr);
bool precise_tracking_supported = false;
CommittedRegionIterator iter = rmr->iterate_committed_regions();
for (const CommittedMemoryRegion* region = iter.next(); region != nullptr; region = iter.next()) {
if (region->size() == size) {
// platforms that do not support precise tracking.
ASSERT_TRUE(iter.next() == nullptr);
break;
VirtualMemoryTracker::Instance::tree()->visit_committed_regions(rmr_found, [&](const CommittedMemoryRegion& cmr){
if (cmr.size() == size) {
return false;
} else {
precise_tracking_supported = true;
check_covered_pages(region->base(), region->size(), (address)base, touch_pages, page_num);
check_covered_pages(cmr.base(), cmr.size(), (address)base, touch_pages, page_num);
}
}
return true;
});
if (precise_tracking_supported) {
// All touched pages should be committed
@ -132,10 +125,9 @@ public:
// Cleanup
os::disclaim_memory(base, size);
VirtualMemoryTracker::remove_released_region((address)base, size);
rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion((address)base, size));
ASSERT_TRUE(rmr == nullptr);
VirtualMemoryTracker::Instance::remove_released_region((address)base, size);
rmr_found = VirtualMemoryTracker::Instance::tree()->find_reserved_region((address)base);
ASSERT_TRUE(!rmr_found.is_valid());
}
static void test_committed_region() {
@ -233,8 +225,7 @@ public:
}
};
TEST_VM(CommittedVirtualMemoryTracker, test_committed_virtualmemory_region) {
TEST_VM(NMTCommittedVirtualMemoryTracker, test_committed_virtualmemory_region) {
// This tests the VM-global NMT facility. The test must *not* modify global state,
// since that interferes with other tests!
// The gtestLauncher are called with and without -XX:NativeMemoryTracking during jtreg-controlled
@ -251,7 +242,7 @@ TEST_VM(CommittedVirtualMemoryTracker, test_committed_virtualmemory_region) {
}
#if !defined(_WINDOWS) && !defined(_AIX)
TEST_VM(CommittedVirtualMemory, test_committed_in_range){
TEST_VM(NMTCommittedVirtualMemory, test_committed_in_range){
CommittedVirtualMemoryTest::test_committed_in_range(1024, 1024);
CommittedVirtualMemoryTest::test_committed_in_range(2, 1);
}

View File

@ -32,6 +32,7 @@
#include "memory/memoryReserver.hpp"
#include "nmt/memTracker.hpp"
#include "nmt/regionsTree.inline.hpp"
#include "nmt/virtualMemoryTracker.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@ -56,16 +57,15 @@ namespace {
check_inner((rmr), nullptr, 0, __FILE__, __LINE__); \
} while (false)
static void diagnostic_print(ReservedMemoryRegion* rmr) {
CommittedRegionIterator iter = rmr->iterate_committed_regions();
LOG("In reserved region " PTR_FORMAT ", size 0x%zx:", p2i(rmr->base()), rmr->size());
for (const CommittedMemoryRegion* region = iter.next(); region != nullptr; region = iter.next()) {
LOG(" committed region: " PTR_FORMAT ", size 0x%zx", p2i(region->base()), region->size());
}
static void diagnostic_print(const ReservedMemoryRegion& rmr) {
LOG("In reserved region " PTR_FORMAT ", size %X:", p2i(rmr.base()), rmr.size());
VirtualMemoryTracker::Instance::tree()->visit_committed_regions(rmr, [&](CommittedMemoryRegion& region) {
LOG(" committed region: " PTR_FORMAT ", size %X", p2i(region.base()), region.size());
return true;
});
}
static void check_inner(ReservedMemoryRegion* rmr, R* regions, size_t regions_size, const char* file, int line) {
CommittedRegionIterator iter = rmr->iterate_committed_regions();
static void check_inner(const ReservedMemoryRegion& rmr, R* regions, size_t regions_size, const char* file, int line) {
size_t i = 0;
size_t size = 0;
@ -74,16 +74,17 @@ static void check_inner(ReservedMemoryRegion* rmr, R* regions, size_t regions_si
#define WHERE " from " << file << ":" << line
for (const CommittedMemoryRegion* region = iter.next(); region != nullptr; region = iter.next()) {
VirtualMemoryTracker::Instance::tree()->visit_committed_regions(rmr, [&](CommittedMemoryRegion& region) {
EXPECT_LT(i, regions_size) << WHERE;
EXPECT_EQ(region->base(), regions[i]._addr) << WHERE;
EXPECT_EQ(region->size(), regions[i]._size) << WHERE;
size += region->size();
EXPECT_EQ(region.base(), regions[i]._addr) << WHERE;
EXPECT_EQ(region.size(), regions[i]._size) << WHERE;
size += region.size();
i++;
}
return true;
});
EXPECT_EQ(i, regions_size) << WHERE;
EXPECT_EQ(size, rmr->committed_size()) << WHERE;
EXPECT_EQ(size, rmr.committed_size()) << WHERE;
}
class VirtualMemoryTrackerTest {
@ -103,10 +104,11 @@ public:
NativeCallStack stack2(&frame2, 1);
// Fetch the added RMR for the space
ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(addr, size));
ReservedMemoryRegion rmr = VirtualMemoryTracker::Instance::tree()->find_reserved_region(addr);
RegionsTree* rtree = VirtualMemoryTracker::Instance::tree();
ASSERT_EQ(rmr->size(), size);
ASSERT_EQ(rmr->base(), addr);
ASSERT_EQ(rmr.size(), size);
ASSERT_EQ(rmr.base(), addr);
// Commit Size Granularity
const size_t cs = 0x1000;
@ -114,45 +116,45 @@ public:
// Commit adjacent regions with same stack
{ // Commit one region
rmr->add_committed_region(addr + cs, cs, stack);
rtree->commit_region(addr + cs, cs, stack);
R r[] = { {addr + cs, cs} };
check(rmr, r);
}
{ // Commit adjacent - lower address
rmr->add_committed_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit adjacent - higher address
rmr->add_committed_region(addr + 2 * cs, cs, stack);
rtree->commit_region(addr + 2 * cs, cs, stack);
R r[] = { {addr, 3 * cs} };
check(rmr, r);
}
// Cleanup
rmr->remove_uncommitted_region(addr, 3 * cs);
ASSERT_EQ(rmr->committed_size(), 0u);
rtree->uncommit_region(addr, 3 * cs);
ASSERT_EQ(rmr.committed_size(), 0u);
// Commit adjacent regions with different stacks
{ // Commit one region
rmr->add_committed_region(addr + cs, cs, stack);
rtree->commit_region(addr + cs, cs, stack);
R r[] = { {addr + cs, cs} };
check(rmr, r);
}
{ // Commit adjacent - lower address
rmr->add_committed_region(addr, cs, stack2);
rtree->commit_region(addr, cs, stack2);
R r[] = { {addr, cs},
{addr + cs, cs} };
check(rmr, r);
}
{ // Commit adjacent - higher address
rmr->add_committed_region(addr + 2 * cs, cs, stack2);
rtree->commit_region(addr + 2 * cs, cs, stack2);
R r[] = { {addr, cs},
{addr + cs, cs},
{addr + 2 * cs, cs} };
@ -160,11 +162,13 @@ public:
}
// Cleanup
rmr->remove_uncommitted_region(addr, 3 * cs);
ASSERT_EQ(rmr->committed_size(), 0u);
rtree->uncommit_region(addr, 3 * cs);
ASSERT_EQ(rmr.committed_size(), 0u);
}
static void test_add_committed_region_adjacent_overlapping() {
RegionsTree* rtree = VirtualMemoryTracker::Instance::tree();
rtree->tree().remove_all();
size_t size = 0x01000000;
ReservedSpace rs = MemoryReserver::reserve(size, mtTest);
@ -178,14 +182,11 @@ public:
NativeCallStack stack(&frame1, 1);
NativeCallStack stack2(&frame2, 1);
// Add the reserved memory
VirtualMemoryTracker::add_reserved_region(addr, size, stack, mtTest);
// Fetch the added RMR for the space
ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(addr, size));
ReservedMemoryRegion rmr = rtree->find_reserved_region(addr);
ASSERT_EQ(rmr->size(), size);
ASSERT_EQ(rmr->base(), addr);
ASSERT_EQ(rmr.size(), size);
ASSERT_EQ(rmr.base(), addr);
// Commit Size Granularity
const size_t cs = 0x1000;
@ -193,46 +194,46 @@ public:
// Commit adjacent and overlapping regions with same stack
{ // Commit two non-adjacent regions
rmr->add_committed_region(addr, 2 * cs, stack);
rmr->add_committed_region(addr + 3 * cs, 2 * cs, stack);
rtree->commit_region(addr, 2 * cs, stack);
rtree->commit_region(addr + 3 * cs, 2 * cs, stack);
R r[] = { {addr, 2 * cs},
{addr + 3 * cs, 2 * cs} };
check(rmr, r);
}
{ // Commit adjacent and overlapping
rmr->add_committed_region(addr + 2 * cs, 2 * cs, stack);
rtree->commit_region(addr + 2 * cs, 2 * cs, stack);
R r[] = { {addr, 5 * cs} };
check(rmr, r);
}
// revert to two non-adjacent regions
rmr->remove_uncommitted_region(addr + 2 * cs, cs);
ASSERT_EQ(rmr->committed_size(), 4 * cs);
rtree->uncommit_region(addr + 2 * cs, cs);
ASSERT_EQ(rmr.committed_size(), 4 * cs);
{ // Commit overlapping and adjacent
rmr->add_committed_region(addr + cs, 2 * cs, stack);
rtree->commit_region(addr + cs, 2 * cs, stack);
R r[] = { {addr, 5 * cs} };
check(rmr, r);
}
// Cleanup
rmr->remove_uncommitted_region(addr, 5 * cs);
ASSERT_EQ(rmr->committed_size(), 0u);
rtree->uncommit_region(addr, 5 * cs);
ASSERT_EQ(rmr.committed_size(), 0u);
// Commit adjacent and overlapping regions with different stacks
{ // Commit two non-adjacent regions
rmr->add_committed_region(addr, 2 * cs, stack);
rmr->add_committed_region(addr + 3 * cs, 2 * cs, stack);
rtree->commit_region(addr, 2 * cs, stack);
rtree->commit_region(addr + 3 * cs, 2 * cs, stack);
R r[] = { {addr, 2 * cs},
{addr + 3 * cs, 2 * cs} };
check(rmr, r);
}
{ // Commit adjacent and overlapping
rmr->add_committed_region(addr + 2 * cs, 2 * cs, stack2);
rtree->commit_region(addr + 2 * cs, 2 * cs, stack2);
R r[] = { {addr, 2 * cs},
{addr + 2 * cs, 2 * cs},
{addr + 4 * cs, cs} };
@ -240,12 +241,12 @@ public:
}
// revert to two non-adjacent regions
rmr->add_committed_region(addr, 5 * cs, stack);
rmr->remove_uncommitted_region(addr + 2 * cs, cs);
ASSERT_EQ(rmr->committed_size(), 4 * cs);
rtree->commit_region(addr, 5 * cs, stack);
rtree->uncommit_region(addr + 2 * cs, cs);
ASSERT_EQ(rmr.committed_size(), 4 * cs);
{ // Commit overlapping and adjacent
rmr->add_committed_region(addr + cs, 2 * cs, stack2);
rtree->commit_region(addr + cs, 2 * cs, stack2);
R r[] = { {addr, cs},
{addr + cs, 2 * cs},
{addr + 3 * cs, 2 * cs} };
@ -254,6 +255,8 @@ public:
}
static void test_add_committed_region_overlapping() {
RegionsTree* rtree = VirtualMemoryTracker::Instance::tree();
rtree->tree().remove_all();
size_t size = 0x01000000;
@ -269,10 +272,11 @@ public:
NativeCallStack stack2(&frame2, 1);
// Fetch the added RMR for the space
ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(addr, size));
ReservedMemoryRegion rmr = rtree->find_reserved_region(addr);
ASSERT_EQ(rmr->size(), size);
ASSERT_EQ(rmr->base(), addr);
ASSERT_EQ(rmr.size(), size);
ASSERT_EQ(rmr.base(), addr);
// Commit Size Granularity
const size_t cs = 0x1000;
@ -280,77 +284,77 @@ public:
// With same stack
{ // Commit one region
rmr->add_committed_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack);
R r[] = { {addr, cs} };
check(rmr, r);
}
{ // Commit the same region
rmr->add_committed_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack);
R r[] = { {addr, cs} };
check(rmr, r);
}
{ // Commit a succeeding region
rmr->add_committed_region(addr + cs, cs, stack);
rtree->commit_region(addr + cs, cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit over two regions
rmr->add_committed_region(addr, 2 * cs, stack);
rtree->commit_region(addr, 2 * cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{// Commit first part of a region
rmr->add_committed_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit second part of a region
rmr->add_committed_region(addr + cs, cs, stack);
rtree->commit_region(addr + cs, cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit a third part
rmr->add_committed_region(addr + 2 * cs, cs, stack);
rtree->commit_region(addr + 2 * cs, cs, stack);
R r[] = { {addr, 3 * cs} };
check(rmr, r);
}
{ // Commit in the middle of a region
rmr->add_committed_region(addr + 1 * cs, cs, stack);
rtree->commit_region(addr + 1 * cs, cs, stack);
R r[] = { {addr, 3 * cs} };
check(rmr, r);
}
// Cleanup
rmr->remove_uncommitted_region(addr, 3 * cs);
ASSERT_EQ(rmr->committed_size(), 0u);
rtree->uncommit_region(addr, 3 * cs);
ASSERT_EQ(rmr.committed_size(), 0u);
// With preceding region
rmr->add_committed_region(addr, cs, stack);
rmr->add_committed_region(addr + 2 * cs, 3 * cs, stack);
rtree->commit_region(addr, cs, stack);
rtree->commit_region(addr + 2 * cs, 3 * cs, stack);
rmr->add_committed_region(addr + 2 * cs, cs, stack);
rtree->commit_region(addr + 2 * cs, cs, stack);
{
R r[] = { {addr, cs},
{addr + 2 * cs, 3 * cs} };
check(rmr, r);
}
rmr->add_committed_region(addr + 3 * cs, cs, stack);
rtree->commit_region(addr + 3 * cs, cs, stack);
{
R r[] = { {addr, cs},
{addr + 2 * cs, 3 * cs} };
check(rmr, r);
}
rmr->add_committed_region(addr + 4 * cs, cs, stack);
rtree->commit_region(addr + 4 * cs, cs, stack);
{
R r[] = { {addr, cs},
{addr + 2 * cs, 3 * cs} };
@ -358,57 +362,57 @@ public:
}
// Cleanup
rmr->remove_uncommitted_region(addr, 5 * cs);
ASSERT_EQ(rmr->committed_size(), 0u);
rtree->uncommit_region(addr, 5 * cs);
ASSERT_EQ(rmr.committed_size(), 0u);
// With different stacks
{ // Commit one region
rmr->add_committed_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack);
R r[] = { {addr, cs} };
check(rmr, r);
}
{ // Commit the same region
rmr->add_committed_region(addr, cs, stack2);
rtree->commit_region(addr, cs, stack2);
R r[] = { {addr, cs} };
check(rmr, r);
}
{ // Commit a succeeding region
rmr->add_committed_region(addr + cs, cs, stack);
rtree->commit_region(addr + cs, cs, stack);
R r[] = { {addr, cs},
{addr + cs, cs} };
check(rmr, r);
}
{ // Commit over two regions
rmr->add_committed_region(addr, 2 * cs, stack);
rtree->commit_region(addr, 2 * cs, stack);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{// Commit first part of a region
rmr->add_committed_region(addr, cs, stack2);
rtree->commit_region(addr, cs, stack2);
R r[] = { {addr, cs},
{addr + cs, cs} };
check(rmr, r);
}
{ // Commit second part of a region
rmr->add_committed_region(addr + cs, cs, stack2);
rtree->commit_region(addr + cs, cs, stack2);
R r[] = { {addr, 2 * cs} };
check(rmr, r);
}
{ // Commit a third part
rmr->add_committed_region(addr + 2 * cs, cs, stack2);
rtree->commit_region(addr + 2 * cs, cs, stack2);
R r[] = { {addr, 3 * cs} };
check(rmr, r);
}
{ // Commit in the middle of a region
rmr->add_committed_region(addr + 1 * cs, cs, stack);
rtree->commit_region(addr + 1 * cs, cs, stack);
R r[] = { {addr, cs},
{addr + cs, cs},
{addr + 2 * cs, cs} };
@ -428,6 +432,8 @@ public:
}
static void test_remove_uncommitted_region() {
RegionsTree* rtree = VirtualMemoryTracker::Instance::tree();
rtree->tree().remove_all();
size_t size = 0x01000000;
ReservedSpace rs = MemoryReserver::reserve(size, mtTest);
@ -442,114 +448,114 @@ public:
NativeCallStack stack2(&frame2, 1);
// Fetch the added RMR for the space
ReservedMemoryRegion* rmr = VirtualMemoryTracker::_reserved_regions->find(ReservedMemoryRegion(addr, size));
ReservedMemoryRegion rmr = rtree->find_reserved_region(addr);
ASSERT_EQ(rmr->size(), size);
ASSERT_EQ(rmr->base(), addr);
ASSERT_EQ(rmr.size(), size);
ASSERT_EQ(rmr.base(), addr);
// Commit Size Granularity
const size_t cs = 0x1000;
{ // Commit regions
rmr->add_committed_region(addr, 3 * cs, stack);
rtree->commit_region(addr, 3 * cs, stack);
R r[] = { {addr, 3 * cs} };
check(rmr, r);
// Remove only existing
rmr->remove_uncommitted_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs);
check_empty(rmr);
}
{
rmr->add_committed_region(addr + 0 * cs, cs, stack);
rmr->add_committed_region(addr + 2 * cs, cs, stack);
rmr->add_committed_region(addr + 4 * cs, cs, stack);
rtree->commit_region(addr + 0 * cs, cs, stack);
rtree->commit_region(addr + 2 * cs, cs, stack);
rtree->commit_region(addr + 4 * cs, cs, stack);
{ // Remove first
rmr->remove_uncommitted_region(addr, cs);
rtree->uncommit_region(addr, cs);
R r[] = { {addr + 2 * cs, cs},
{addr + 4 * cs, cs} };
check(rmr, r);
}
// add back
rmr->add_committed_region(addr, cs, stack);
rtree->commit_region(addr, cs, stack);
{ // Remove middle
rmr->remove_uncommitted_region(addr + 2 * cs, cs);
rtree->uncommit_region(addr + 2 * cs, cs);
R r[] = { {addr + 0 * cs, cs},
{addr + 4 * cs, cs} };
check(rmr, r);
}
// add back
rmr->add_committed_region(addr + 2 * cs, cs, stack);
rtree->commit_region(addr + 2 * cs, cs, stack);
{ // Remove end
rmr->remove_uncommitted_region(addr + 4 * cs, cs);
rtree->uncommit_region(addr + 4 * cs, cs);
R r[] = { {addr + 0 * cs, cs},
{addr + 2 * cs, cs} };
check(rmr, r);
}
rmr->remove_uncommitted_region(addr, 5 * cs);
rtree->uncommit_region(addr, 5 * cs);
check_empty(rmr);
}
{ // Remove larger region
rmr->add_committed_region(addr + 1 * cs, cs, stack);
rmr->remove_uncommitted_region(addr, 3 * cs);
rtree->commit_region(addr + 1 * cs, cs, stack);
rtree->uncommit_region(addr, 3 * cs);
check_empty(rmr);
}
{ // Remove smaller region - in the middle
rmr->add_committed_region(addr, 3 * cs, stack);
rmr->remove_uncommitted_region(addr + 1 * cs, cs);
rtree->commit_region(addr, 3 * cs, stack);
rtree->uncommit_region(addr + 1 * cs, cs);
R r[] = { { addr + 0 * cs, cs},
{ addr + 2 * cs, cs} };
check(rmr, r);
rmr->remove_uncommitted_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs);
check_empty(rmr);
}
{ // Remove smaller region - at the beginning
rmr->add_committed_region(addr, 3 * cs, stack);
rmr->remove_uncommitted_region(addr + 0 * cs, cs);
rtree->commit_region(addr, 3 * cs, stack);
rtree->uncommit_region(addr + 0 * cs, cs);
R r[] = { { addr + 1 * cs, 2 * cs} };
check(rmr, r);
rmr->remove_uncommitted_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs);
check_empty(rmr);
}
{ // Remove smaller region - at the end
rmr->add_committed_region(addr, 3 * cs, stack);
rmr->remove_uncommitted_region(addr + 2 * cs, cs);
rtree->commit_region(addr, 3 * cs, stack);
rtree->uncommit_region(addr + 2 * cs, cs);
R r[] = { { addr, 2 * cs} };
check(rmr, r);
rmr->remove_uncommitted_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs);
check_empty(rmr);
}
{ // Remove smaller, overlapping region - at the beginning
rmr->add_committed_region(addr + 1 * cs, 4 * cs, stack);
rmr->remove_uncommitted_region(addr, 2 * cs);
rtree->commit_region(addr + 1 * cs, 4 * cs, stack);
rtree->uncommit_region(addr, 2 * cs);
R r[] = { { addr + 2 * cs, 3 * cs} };
check(rmr, r);
rmr->remove_uncommitted_region(addr + 1 * cs, 4 * cs);
rtree->uncommit_region(addr + 1 * cs, 4 * cs);
check_empty(rmr);
}
{ // Remove smaller, overlapping region - at the end
rmr->add_committed_region(addr, 3 * cs, stack);
rmr->remove_uncommitted_region(addr + 2 * cs, 2 * cs);
rtree->commit_region(addr, 3 * cs, stack);
rtree->uncommit_region(addr + 2 * cs, 2 * cs);
R r[] = { { addr, 2 * cs} };
check(rmr, r);
rmr->remove_uncommitted_region(addr, 3 * cs);
rtree->uncommit_region(addr, 3 * cs);
check_empty(rmr);
}
}
@ -569,4 +575,4 @@ TEST_VM(NMT_VirtualMemoryTracker, remove_uncommitted_region) {
} else {
tty->print_cr("skipped.");
}
}
}

View File

@ -169,14 +169,17 @@ public class TestAlwaysPreTouchStacks {
}
long expected_delta = numThreads * (max_stack_usage_with_pretouch - min_stack_usage_with_pretouch);
long actual_delta = pretouch_committed - no_pretouch_committed;
if (pretouch_committed <= (no_pretouch_committed + expected_delta)) {
throw new RuntimeException("Expected a higher amount of committed with pretouch stacks" +
"PreTouch amount: " + pretouch_committed +
"NoPreTouch amount: " + (no_pretouch_committed + expected_delta));
}
if (actual_delta < expected_delta) {
throw new RuntimeException("Expected a higher delta between stack committed of with and without pretouch." +
"Expected: " + expected_delta + " Actual: " + actual_delta);
if (((double)pretouch_committed) / ((double)no_pretouch_committed) < 1.20) {
if (pretouch_committed <= (no_pretouch_committed + expected_delta)) {
throw new RuntimeException("Expected a higher amount of committed with pretouch stacks" +
" PreTouch amount: " + pretouch_committed +
" NoPreTouch amount: " + no_pretouch_committed +
" Expected delta: " + expected_delta);
}
if (actual_delta < expected_delta) {
throw new RuntimeException("Expected a higher delta between stack committed of with and without pretouch." +
" Expected: " + expected_delta + " Actual: " + actual_delta);
}
}
}
}