8366363: MemBaseline accesses VMT without using lock

Co-authored-by: Casper Norrbin <cnorrbin@openjdk.org>
Reviewed-by: azafari, cnorrbin
Johan Sjölen 2025-09-09 07:31:14 +00:00
parent 680bf75898
commit ecfba66d3d
12 changed files with 185 additions and 65 deletions
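In short: the detail baseline used to walk the VirtualMemoryTracker's live data without holding the NMT lock. With this change, MemBaseline takes MemTracker::NmtVirtualMemoryLocker just long enough to deep-copy the tracker's RegionsTree, and later detail reporting iterates that private copy lock-free. To make the copy possible, RegionsTree, VMATree, RBTree and NativeCallStackStorage gain copy support. A condensed sketch of the new pattern, lifted from the MemBaseline::baseline_allocation_sites hunk below:

{
  MemTracker::NmtVirtualMemoryLocker locker;   // hold the NMT lock only for the copy
  _vma_allocations = new (mtNMT, std::nothrow)
      RegionsTree(*VirtualMemoryTracker::Instance::tree());
}
// Later, iterate the private copy without touching the tracker again:
_vma_allocations->visit_reserved_regions([&](ReservedMemoryRegion& rgn) {
  // use rgn.size(), rgn.call_stack(), committed sizes, ...
  return true;  // keep visiting
});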


@@ -27,8 +27,7 @@
#include "memory/metaspaceUtils.hpp"
#include "nmt/memBaseline.hpp"
#include "nmt/memTracker.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/safepoint.hpp"
#include "nmt/regionsTree.inline.hpp"
/*
* Sizes are sorted in descending order for reporting
@@ -104,38 +103,6 @@ class MallocAllocationSiteWalker : public MallocSiteWalker {
}
};
// Walk all virtual memory regions for baselining
class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
private:
typedef LinkedListImpl<ReservedMemoryRegion, AnyObj::C_HEAP, mtNMT,
AllocFailStrategy::RETURN_NULL> EntryList;
EntryList _virtual_memory_regions;
DEBUG_ONLY(address _last_base;)
public:
VirtualMemoryAllocationWalker() {
DEBUG_ONLY(_last_base = nullptr);
}
bool do_allocation_site(const ReservedMemoryRegion* rgn) {
assert(rgn->base() >= _last_base, "region unordered?");
DEBUG_ONLY(_last_base = rgn->base());
if (rgn->size() > 0) {
if (_virtual_memory_regions.add(*rgn) != nullptr) {
return true;
} else {
return false;
}
} else {
// Ignore empty sites.
return true;
}
}
LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
return &_virtual_memory_regions;
}
};
void MemBaseline::baseline_summary() {
MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
@@ -158,14 +125,15 @@ bool MemBaseline::baseline_allocation_sites() {
// The malloc sites are collected in size order
_malloc_sites_order = by_size;
// Virtual memory allocation sites
VirtualMemoryAllocationWalker virtual_memory_walker;
if (!VirtualMemoryTracker::Instance::walk_virtual_memory(&virtual_memory_walker)) {
return false;
}
assert(_vma_allocations == nullptr, "must");
// Virtual memory allocations are collected in call stack order
_virtual_memory_allocations.move(virtual_memory_walker.virtual_memory_allocations());
{
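// Hold the NMT lock only long enough to deep-copy the tracker's region tree.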
MemTracker::NmtVirtualMemoryLocker locker;
_vma_allocations = new (mtNMT, std::nothrow) RegionsTree(*VirtualMemoryTracker::Instance::tree());
if (_vma_allocations == nullptr) {
return false;
}
}
if (!aggregate_virtual_memory_allocation_sites()) {
return false;
@@ -202,20 +170,28 @@ int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site> allocation_sites;
VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
const ReservedMemoryRegion* rgn;
VirtualMemoryAllocationSite* site;
while ((rgn = itr.next()) != nullptr) {
VirtualMemoryAllocationSite tmp(*rgn->call_stack(), rgn->mem_tag());
bool failed_oom = false;
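// Set from inside the visitor below: returning false from the lambda only stops the iteration, so an allocation failure has to be reported through this flag.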
_vma_allocations->visit_reserved_regions([&](ReservedMemoryRegion& rgn) {
VirtualMemoryAllocationSite tmp(*rgn.call_stack(), rgn.mem_tag());
site = allocation_sites.find(tmp);
if (site == nullptr) {
LinkedListNode<VirtualMemoryAllocationSite>* node =
allocation_sites.add(tmp);
if (node == nullptr) return false;
if (node == nullptr) {
failed_oom = true;
return false;
}
site = node->data();
}
site->reserve_memory(rgn->size());
site->commit_memory(VirtualMemoryTracker::Instance::committed_size(rgn));
site->reserve_memory(rgn.size());
site->commit_memory(_vma_allocations->committed_size(rgn));
return true;
});
if (failed_oom) {
return false;
}
_virtual_memory_sites.move(&allocation_sites);


@@ -35,7 +35,6 @@
typedef LinkedListIterator<MallocSite> MallocSiteIterator;
typedef LinkedListIterator<VirtualMemoryAllocationSite> VirtualMemorySiteIterator;
typedef LinkedListIterator<ReservedMemoryRegion> VirtualMemoryAllocationIterator;
/*
* Baseline a memory snapshot
@@ -71,7 +70,7 @@ class MemBaseline {
LinkedListImpl<MallocSite> _malloc_sites;
// All virtual memory allocations
LinkedListImpl<ReservedMemoryRegion> _virtual_memory_allocations;
RegionsTree* _vma_allocations;
// Virtual memory allocations by allocation sites, always in by_address
// order
@@ -86,6 +85,7 @@ class MemBaseline {
// create a memory baseline
MemBaseline():
_instance_class_count(0), _array_class_count(0), _thread_count(0),
_vma_allocations(nullptr),
_baseline_type(Not_baselined) {
}
@@ -110,9 +110,9 @@ class MemBaseline {
// Virtual memory allocation iterator always returns in virtual memory
// base address order.
VirtualMemoryAllocationIterator virtual_memory_allocations() {
assert(!_virtual_memory_allocations.is_empty(), "Not detail baseline");
return VirtualMemoryAllocationIterator(_virtual_memory_allocations.head());
RegionsTree* virtual_memory_allocations() {
assert(_vma_allocations != nullptr, "Not detail baseline");
return _vma_allocations;
}
// Total reserved memory = total malloc'd memory + total reserved virtual
@@ -185,7 +185,8 @@ class MemBaseline {
_malloc_sites.clear();
_virtual_memory_sites.clear();
_virtual_memory_allocations.clear();
delete _vma_allocations;
_vma_allocations = nullptr;
}
private:


@@ -394,13 +394,11 @@ int MemDetailReporter::report_virtual_memory_allocation_sites() {
void MemDetailReporter::report_virtual_memory_map() {
// Virtual memory map always in base address order
VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
const ReservedMemoryRegion* rgn;
output()->print_cr("Virtual memory map:");
while ((rgn = itr.next()) != nullptr) {
report_virtual_memory_region(rgn);
}
_baseline.virtual_memory_allocations()->visit_reserved_regions([&](ReservedMemoryRegion& rgn) {
report_virtual_memory_region(&rgn);
return true;
});
}
void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {


@@ -57,3 +57,21 @@ NativeCallStackStorage::NativeCallStackStorage(bool is_detailed_mode, int table_
NativeCallStackStorage::~NativeCallStackStorage() {
FREE_C_HEAP_ARRAY(LinkPtr, _table);
}
NativeCallStackStorage::NativeCallStackStorage(const NativeCallStackStorage& other)
: _table_size(other._table_size),
_table(nullptr),
_stacks(),
_is_detailed_mode(other._is_detailed_mode),
_fake_stack(other._fake_stack) {
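// The lookup table is only allocated in detail mode, so it is only copied there; the stacks array below is copied in either mode.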
if (_is_detailed_mode) {
_table = NEW_C_HEAP_ARRAY(TableEntryIndex, _table_size, mtNMT);
for (int i = 0; i < _table_size; i++) {
_table[i] = other._table[i];
}
}
_stacks.reserve(other._stacks.length());
for (int i = 0; i < other._stacks.length(); i++) {
_stacks.at_grow(i) = other._stacks.at(i);
}
}


@@ -95,7 +95,8 @@ public:
}
NativeCallStackStorage(bool is_detailed_mode, int table_size = default_table_size);
NativeCallStackStorage(const NativeCallStackStorage& other);
NativeCallStackStorage& operator=(const NativeCallStackStorage& other) = delete;
~NativeCallStackStorage();
};


@@ -22,6 +22,8 @@
*
*/
#include "nmt/regionsTree.hpp"
#include "nmt/regionsTree.inline.hpp"
#include "nmt/virtualMemoryTracker.hpp"
VMATree::SummaryDiff RegionsTree::commit_region(address addr, size_t size, const NativeCallStack& stack) {
return commit_mapping((VMATree::position)addr, size, make_region_data(stack, mtNone), /*use tag inplace*/ true);
@@ -54,4 +56,13 @@ void RegionsTree::print_on(outputStream* st) {
return true;
});
}
#endif
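// Total committed size within a reserved region: the sum of its committed sub-regions.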
size_t RegionsTree::committed_size(ReservedMemoryRegion& rgn) {
size_t result = 0;
visit_committed_regions(rgn, [&](CommittedMemoryRegion& crgn) {
result += crgn.size();
return true;
});
return result;
}


@@ -40,6 +40,12 @@ class RegionsTree : public VMATree {
public:
RegionsTree(bool with_storage) : VMATree() , _ncs_storage(with_storage), _with_storage(with_storage) { }
RegionsTree(const RegionsTree& other)
: VMATree(other),
_ncs_storage(other._ncs_storage),
_with_storage(other._with_storage) {}
RegionsTree& operator=(const RegionsTree& other) = delete;
ReservedMemoryRegion find_reserved_region(address addr);
SummaryDiff commit_region(address addr, size_t size, const NativeCallStack& stack);
@@ -91,6 +97,8 @@ class RegionsTree : public VMATree {
NativeCallStackStorage::StackIndex si = node.out_stack_index();
return _ncs_storage.get(si);
}
size_t committed_size(ReservedMemoryRegion& rgn);
};
#endif // NMT_REGIONSTREE_HPP


@@ -744,3 +744,10 @@ void VMATree::SummaryDiff::print_on(outputStream* out) {
}
}
#endif
void VMATree::clear() {
_tree.remove_all();
};
bool VMATree::is_empty() {
return _tree.size() == 0;
};


@@ -30,6 +30,7 @@
#include "nmt/nmtNativeCallStackStorage.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include "utilities/rbTree.hpp"
#include "utilities/rbTree.inline.hpp"
#include <cstdint>
@@ -39,7 +40,7 @@
// For example, the state may go from released memory to committed memory,
// or from committed memory of a certain MemTag to committed memory of a different MemTag.
// The set of points is stored in a balanced binary tree for efficient querying and updating.
class VMATree {
class VMATree : public CHeapObjBase {
friend class NMTVMATreeTest;
friend class VMTWithVMATreeTest;
// A position in memory.
@@ -65,7 +66,6 @@ private:
static const char* statetype_strings[static_cast<uint8_t>(StateType::st_number_of_states)];
public:
NONCOPYABLE(VMATree);
static const char* statetype_to_string(StateType type) {
assert(type < StateType::st_number_of_states, "must be");
@@ -226,6 +226,10 @@ private:
public:
VMATree() : _tree() {}
VMATree(const VMATree& other) : _tree() {
// The copy must run in product builds too, so do not hide the call inside the assert.
bool copied = other._tree.copy_into(_tree);
assert(copied, "VMATree dies on OOM");
}
VMATree& operator=(VMATree const&) = delete;
struct SingleDiff {
using delta = int64_t;
@@ -329,5 +333,8 @@ public:
_tree.visit_range_in_order(from, to, f);
}
VMARBTree& tree() { return _tree; }
void clear();
bool is_empty();
};
#endif


@@ -428,6 +428,7 @@ public:
template <typename F>
void visit_in_order(F f);
// Visit all RBNodes in ascending order whose keys are in range [from, to], calling f on each node.
// If f returns `true` the iteration continues, otherwise it is stopped at the current node.
template <typename F>
@@ -475,6 +476,7 @@ class RBTree : public AbstractRBTree<K, RBNode<K, V>, COMPARATOR> {
public:
RBTree() : BaseType(), _allocator() {}
NONCOPYABLE(RBTree);
~RBTree() { remove_all(); }
RBTree(const RBTree& other) : BaseType(), _allocator() {
assert(std::is_copy_constructible<V>(), "Value type must be copy-constructible");
@@ -485,6 +487,8 @@ public:
}
RBTree& operator=(const RBTree& other) = delete;
bool copy_into(RBTree& other) const;
typedef typename BaseType::Cursor Cursor;
using BaseType::cursor;
using BaseType::insert_at_cursor;
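A minimal usage sketch of the new copy_into (the RBTreeInt helper type and upsert come from the gtest at the end of this change): the destination must be empty, and a false return means a node allocation failed, in which case the destination may hold a partial copy.

RBTreeInt source;
RBTreeInt copy;
source.upsert(17, 42);           // populate the source
if (!source.copy_into(copy)) {   // deep-copies every node
  // allocation failed; 'copy' may be partially filled
}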


@@ -753,4 +753,53 @@ void AbstractRBTree<K, NodeType, COMPARATOR>::print_on(outputStream* st, const P
}
}
template<typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
bool RBTree<K, V, COMPARATOR, ALLOCATOR>::copy_into(RBTree& other) const {
assert(other.size() == 0, "You can only copy into an empty RBTree");
assert(std::is_copy_constructible<K>::value, "Key type must be copy-constructible when copying a RBTree");
assert(std::is_copy_constructible<V>::value, "Value type must be copy-constructible when copying a RBTree");
enum class Dir { Left, Right };
struct node_pair { const IntrusiveRBNode* current; IntrusiveRBNode* other_parent; Dir dir; };
struct stack {
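// 64 entries are ample: the stack grows by roughly one entry per tree level, and a red-black tree of n nodes has height at most 2*log2(n+1).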
node_pair s[64];
int idx = 0;
stack() : idx(0) {}
node_pair pop() { idx--; return s[idx]; };
void push(node_pair n) { s[idx] = n; idx++; };
bool is_empty() { return idx == 0; };
};
stack visit_stack;
if (this->_root == nullptr) {
return true;
}
RBNode<K, V>* root = static_cast<RBNode<K, V>*>(this->_root);
other._root = other.allocate_node(root->key(), root->val());
if (other._root == nullptr) return false;
visit_stack.push({this->_root->_left, other._root, Dir::Left});
visit_stack.push({this->_root->_right, other._root, Dir::Right});
while (!visit_stack.is_empty()) {
node_pair n = visit_stack.pop();
const RBNode<K, V>* current = static_cast<const RBNode<K, V>*>(n.current);
if (current == nullptr) continue;
RBNode<K, V>* new_node = other.allocate_node(current->key(), current->val());
if (new_node == nullptr) {
return false;
}
if (n.dir == Dir::Left) {
n.other_parent->_left = new_node;
} else {
n.other_parent->_right = new_node;
}
new_node->set_parent(n.other_parent);
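// Copy the node's color, which is packed into the low bit of the parent pointer.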
new_node->_parent |= n.current->_parent & 0x1;
visit_stack.push({n.current->_left, new_node, Dir::Left});
visit_stack.push({n.current->_right, new_node, Dir::Right});
}
other._num_nodes = this->_num_nodes;
DEBUG_ONLY(other._expected_visited = this->_expected_visited);
return true;
}
#endif // SHARE_UTILITIES_RBTREE_INLINE_HPP


@@ -1200,6 +1200,46 @@ TEST_VM_F(RBTreeTest, VerifyItThroughStressTest) {
}
}
TEST_VM_F(RBTreeTest, TestCopyInto) {
{
RBTreeInt rbtree1;
RBTreeInt rbtree2;
rbtree1.copy_into(rbtree2);
rbtree2.verify_self();
}
RBTreeInt rbtree1;
RBTreeInt rbtree2;
int size = 1000;
for (int i = 0; i < size; i++) {
rbtree1.upsert(i, i);
}
rbtree1.copy_into(rbtree2);
rbtree2.verify_self();
ResourceMark rm;
GrowableArray<int> allocations(size);
int size1 = 0;
rbtree1.visit_in_order([&](RBTreeIntNode* node) {
size1++;
allocations.append(node->key());
return true;
});
int size2 = 0;
rbtree2.visit_in_order([&](RBTreeIntNode* node) {
EXPECT_EQ(node->key(), allocations.at(size2++));
return true;
});
EXPECT_EQ(size1, size2);
EXPECT_EQ(rbtree1.size(), rbtree2.size());
EXPECT_EQ(size2, static_cast<int>(rbtree2.size()));
}
struct OomAllocator {
void* allocate(size_t sz) {
return nullptr;