From ecfba66d3d7c1fef755f0824f342189d0f231007 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Johan=20Sj=C3=B6len?=
Date: Tue, 9 Sep 2025 07:31:14 +0000
Subject: [PATCH] 8366363: MemBaseline accesses VMT without using lock

Co-authored-by: Casper Norrbin
Reviewed-by: azafari, cnorrbin
---
 src/hotspot/share/nmt/memBaseline.cpp          | 72 +++++++------------
 src/hotspot/share/nmt/memBaseline.hpp          | 13 ++--
 src/hotspot/share/nmt/memReporter.cpp          | 10 ++-
 .../share/nmt/nmtNativeCallStackStorage.cpp    | 18 +++++
 .../share/nmt/nmtNativeCallStackStorage.hpp    |  3 +-
 src/hotspot/share/nmt/regionsTree.cpp          | 13 +++-
 src/hotspot/share/nmt/regionsTree.hpp          | 10 ++-
 src/hotspot/share/nmt/vmatree.cpp              |  7 ++
 src/hotspot/share/nmt/vmatree.hpp              | 11 ++-
 src/hotspot/share/utilities/rbTree.hpp         |  4 ++
 src/hotspot/share/utilities/rbTree.inline.hpp  | 49 +++++++++++++
 test/hotspot/gtest/utilities/test_rbtree.cpp   | 40 +++++++++++
 12 files changed, 185 insertions(+), 65 deletions(-)

diff --git a/src/hotspot/share/nmt/memBaseline.cpp b/src/hotspot/share/nmt/memBaseline.cpp
index 35dff0d8646..9b796ce0c41 100644
--- a/src/hotspot/share/nmt/memBaseline.cpp
+++ b/src/hotspot/share/nmt/memBaseline.cpp
@@ -27,8 +27,7 @@
 #include "memory/metaspaceUtils.hpp"
 #include "nmt/memBaseline.hpp"
 #include "nmt/memTracker.hpp"
-#include "runtime/javaThread.hpp"
-#include "runtime/safepoint.hpp"
+#include "nmt/regionsTree.inline.hpp"
 /*
  * Sizes are sorted in descenting order for reporting
  */
@@ -104,38 +103,6 @@ class MallocAllocationSiteWalker : public MallocSiteWalker {
   }
 };
-// Walk all virtual memory regions for baselining
-class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
- private:
-  typedef LinkedListImpl<ReservedMemoryRegion> EntryList;
-  EntryList _virtual_memory_regions;
-  DEBUG_ONLY(address _last_base;)
- public:
-  VirtualMemoryAllocationWalker() {
-    DEBUG_ONLY(_last_base = nullptr);
-  }
-
-  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
-    assert(rgn->base() >= _last_base, "region unordered?");
-    DEBUG_ONLY(_last_base = rgn->base());
-    if (rgn->size() > 0) {
-      if (_virtual_memory_regions.add(*rgn) != nullptr) {
-        return true;
-      } else {
-        return false;
-      }
-    } else {
-      // Ignore empty sites.
-      return true;
-    }
-  }
-
-  LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
-    return &_virtual_memory_regions;
-  }
-};
-
 void MemBaseline::baseline_summary() {
   MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
   VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
@@ -158,14 +125,15 @@ bool MemBaseline::baseline_allocation_sites() {
   // The malloc sites are collected in size order
   _malloc_sites_order = by_size;
-  // Virtual memory allocation sites
-  VirtualMemoryAllocationWalker virtual_memory_walker;
-  if (!VirtualMemoryTracker::Instance::walk_virtual_memory(&virtual_memory_walker)) {
-    return false;
-  }
+  assert(_vma_allocations == nullptr, "must");
-  // Virtual memory allocations are collected in call stack order
-  _virtual_memory_allocations.move(virtual_memory_walker.virtual_memory_allocations());
+  {
+    MemTracker::NmtVirtualMemoryLocker locker;
+    _vma_allocations = new (mtNMT, std::nothrow) RegionsTree(*VirtualMemoryTracker::Instance::tree());
+    if (_vma_allocations == nullptr) {
+      return false;
+    }
+  }
   if (!aggregate_virtual_memory_allocation_sites()) {
     return false;
@@ -202,20 +170,28 @@ int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
 bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
   SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site> allocation_sites;
-  VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
-  const ReservedMemoryRegion* rgn;
   VirtualMemoryAllocationSite* site;
-  while ((rgn = itr.next()) != nullptr) {
-    VirtualMemoryAllocationSite tmp(*rgn->call_stack(), rgn->mem_tag());
+  bool failed_oom = false;
+  _vma_allocations->visit_reserved_regions([&](ReservedMemoryRegion& rgn) {
+    VirtualMemoryAllocationSite tmp(*rgn.call_stack(), rgn.mem_tag());
     site = allocation_sites.find(tmp);
     if (site == nullptr) {
       LinkedListNode<VirtualMemoryAllocationSite>* node = allocation_sites.add(tmp);
-      if (node == nullptr) return false;
+      if (node == nullptr) {
+        failed_oom = true;
+        return false;
+      }
       site = node->data();
     }
-    site->reserve_memory(rgn->size());
-    site->commit_memory(VirtualMemoryTracker::Instance::committed_size(rgn));
+    site->reserve_memory(rgn.size());
+
+    site->commit_memory(_vma_allocations->committed_size(rgn));
+    return true;
+  });
+
+  if (failed_oom) {
+    return false;
   }
   _virtual_memory_sites.move(&allocation_sites);
diff --git a/src/hotspot/share/nmt/memBaseline.hpp b/src/hotspot/share/nmt/memBaseline.hpp
index 2fff4cc666c..3f1ea46d815 100644
--- a/src/hotspot/share/nmt/memBaseline.hpp
+++ b/src/hotspot/share/nmt/memBaseline.hpp
@@ -35,7 +35,6 @@
 typedef LinkedListIterator<MallocSite> MallocSiteIterator;
 typedef LinkedListIterator<VirtualMemoryAllocationSite> VirtualMemorySiteIterator;
-typedef LinkedListIterator<ReservedMemoryRegion> VirtualMemoryAllocationIterator;
 /*
  * Baseline a memory snapshot
@@ -71,7 +70,7 @@ class MemBaseline {
   LinkedListImpl<MallocSite> _malloc_sites;
   // All virtual memory allocations
-  LinkedListImpl<ReservedMemoryRegion> _virtual_memory_allocations;
+  RegionsTree* _vma_allocations;
   // Virtual memory allocations by allocation sites, always in by_address
   // order
@@ -86,6 +85,7 @@ class MemBaseline {
   // create a memory baseline
   MemBaseline():
     _instance_class_count(0), _array_class_count(0), _thread_count(0),
+    _vma_allocations(nullptr),
     _baseline_type(Not_baselined) {
   }
@@ -110,9 +110,9 @@ class MemBaseline {
   // Virtual memory allocation iterator always returns in virtual memory
   // base address order.
-  VirtualMemoryAllocationIterator virtual_memory_allocations() {
-    assert(!_virtual_memory_allocations.is_empty(), "Not detail baseline");
-    return VirtualMemoryAllocationIterator(_virtual_memory_allocations.head());
+  RegionsTree* virtual_memory_allocations() {
+    assert(_vma_allocations != nullptr, "Not detail baseline");
+    return _vma_allocations;
   }
   // Total reserved memory = total malloc'd memory + total reserved virtual
@@ -185,7 +185,8 @@ class MemBaseline {
     _malloc_sites.clear();
     _virtual_memory_sites.clear();
-    _virtual_memory_allocations.clear();
+    delete _vma_allocations;
+    _vma_allocations = nullptr;
   }
 private:
diff --git a/src/hotspot/share/nmt/memReporter.cpp b/src/hotspot/share/nmt/memReporter.cpp
index 65d4d76942b..84f0f90f6e5 100644
--- a/src/hotspot/share/nmt/memReporter.cpp
+++ b/src/hotspot/share/nmt/memReporter.cpp
@@ -394,13 +394,11 @@ int MemDetailReporter::report_virtual_memory_allocation_sites() {
 void MemDetailReporter::report_virtual_memory_map() {
   // Virtual memory map always in base address order
-  VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
-  const ReservedMemoryRegion* rgn;
-
   output()->print_cr("Virtual memory map:");
-  while ((rgn = itr.next()) != nullptr) {
-    report_virtual_memory_region(rgn);
-  }
+  _baseline.virtual_memory_allocations()->visit_reserved_regions([&](ReservedMemoryRegion& rgn) {
+    report_virtual_memory_region(&rgn);
+    return true;
+  });
 }
 void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {
diff --git a/src/hotspot/share/nmt/nmtNativeCallStackStorage.cpp b/src/hotspot/share/nmt/nmtNativeCallStackStorage.cpp
index 3e5c1d2f0ea..9a2ecd57ecc 100644
--- a/src/hotspot/share/nmt/nmtNativeCallStackStorage.cpp
+++ b/src/hotspot/share/nmt/nmtNativeCallStackStorage.cpp
@@ -57,3 +57,21 @@ NativeCallStackStorage::NativeCallStackStorage(bool is_detailed_mode, int table_
 NativeCallStackStorage::~NativeCallStackStorage() {
   FREE_C_HEAP_ARRAY(LinkPtr, _table);
 }
+
+NativeCallStackStorage::NativeCallStackStorage(const NativeCallStackStorage& other)
+  : _table_size(other._table_size),
+    _table(nullptr),
+    _stacks(),
+    _is_detailed_mode(other._is_detailed_mode),
+    _fake_stack(other._fake_stack) {
+  if (_is_detailed_mode) {
+    _table = NEW_C_HEAP_ARRAY(TableEntryIndex, _table_size, mtNMT);
+    for (int i = 0; i < _table_size; i++) {
+      _table[i] = other._table[i];
+    }
+  }
+  _stacks.reserve(other._stacks.length());
+  for (int i = 0; i < other._stacks.length(); i++) {
+    _stacks.at_grow(i) = other._stacks.at(i);
+  }
+}
diff --git a/src/hotspot/share/nmt/nmtNativeCallStackStorage.hpp b/src/hotspot/share/nmt/nmtNativeCallStackStorage.hpp
index 6f194cfa5a1..6ead8f49248 100644
--- a/src/hotspot/share/nmt/nmtNativeCallStackStorage.hpp
+++ b/src/hotspot/share/nmt/nmtNativeCallStackStorage.hpp
@@ -95,7 +95,8 @@ public:
   }
   NativeCallStackStorage(bool is_detailed_mode, int table_size = default_table_size);
-
+  NativeCallStackStorage(const NativeCallStackStorage& other);
+  NativeCallStackStorage& operator=(const NativeCallStackStorage& other) = delete;
   ~NativeCallStackStorage();
 };
diff --git a/src/hotspot/share/nmt/regionsTree.cpp b/src/hotspot/share/nmt/regionsTree.cpp
index 370c69a2485..8644a2d4731 100644
--- a/src/hotspot/share/nmt/regionsTree.cpp
+++ b/src/hotspot/share/nmt/regionsTree.cpp
@@ -22,6 +22,8 @@
  *
  */
 #include "nmt/regionsTree.hpp"
+#include "nmt/regionsTree.inline.hpp"
+#include "nmt/virtualMemoryTracker.hpp"
 VMATree::SummaryDiff RegionsTree::commit_region(address addr, size_t size, const NativeCallStack& stack) {
   return commit_mapping((VMATree::position)addr, size, make_region_data(stack, mtNone), /*use tag inplace*/ true);
@@ -54,4 +56,13 @@ void RegionsTree::print_on(outputStream* st) {
     return true;
   });
 }
-#endif
\ No newline at end of file
+#endif
+
+size_t RegionsTree::committed_size(ReservedMemoryRegion& rgn) {
+  size_t result = 0;
+  visit_committed_regions(rgn, [&](CommittedMemoryRegion& crgn) {
+    result += crgn.size();
+    return true;
+  });
+  return result;
+}
diff --git a/src/hotspot/share/nmt/regionsTree.hpp b/src/hotspot/share/nmt/regionsTree.hpp
index bf2ab711b2d..b0c5d928bab 100644
--- a/src/hotspot/share/nmt/regionsTree.hpp
+++ b/src/hotspot/share/nmt/regionsTree.hpp
@@ -40,6 +40,12 @@ class RegionsTree : public VMATree {
 public:
   RegionsTree(bool with_storage) : VMATree() , _ncs_storage(with_storage), _with_storage(with_storage) { }
+  RegionsTree(const RegionsTree& other)
+    : VMATree(other),
+      _ncs_storage(other._ncs_storage),
+      _with_storage(other._with_storage) {}
+  RegionsTree& operator=(const RegionsTree& other) = delete;
+
   ReservedMemoryRegion find_reserved_region(address addr);
   SummaryDiff commit_region(address addr, size_t size, const NativeCallStack& stack);
@@ -91,6 +97,8 @@ class RegionsTree {
     NativeCallStackStorage::StackIndex si = node.out_stack_index();
     return _ncs_storage.get(si);
   }
+
+  size_t committed_size(ReservedMemoryRegion& rgn);
 };
-#endif // NMT_REGIONSTREE_HPP
\ No newline at end of file
+#endif // NMT_REGIONSTREE_HPP
diff --git a/src/hotspot/share/nmt/vmatree.cpp b/src/hotspot/share/nmt/vmatree.cpp
index 4f6f8e12185..69887068cb2 100644
--- a/src/hotspot/share/nmt/vmatree.cpp
+++ b/src/hotspot/share/nmt/vmatree.cpp
@@ -744,3 +744,10 @@ void VMATree::SummaryDiff::print_on(outputStream* out) {
   }
 }
 #endif
+
+void VMATree::clear() {
+  _tree.remove_all();
+};
+bool VMATree::is_empty() {
+  return _tree.size() == 0;
+};
diff --git a/src/hotspot/share/nmt/vmatree.hpp b/src/hotspot/share/nmt/vmatree.hpp
index 1b5729054e4..88b680a045c 100644
--- a/src/hotspot/share/nmt/vmatree.hpp
+++ b/src/hotspot/share/nmt/vmatree.hpp
@@ -30,6 +30,7 @@
 #include "nmt/nmtNativeCallStackStorage.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/ostream.hpp"
+#include "utilities/rbTree.hpp"
 #include "utilities/rbTree.inline.hpp"
 #include 
@@ -39,7 +40,7 @@
 // For example, the state may go from released memory to committed memory,
 // or from committed memory of a certain MemTag to committed memory of a different MemTag.
 // The set of points is stored in a balanced binary tree for efficient querying and updating.
-class VMATree {
+class VMATree : public CHeapObjBase {
   friend class NMTVMATreeTest;
   friend class VMTWithVMATreeTest;
   // A position in memory.
@@ -65,7 +66,6 @@ private:
   static const char* statetype_strings[static_cast<int>(StateType::st_number_of_states)];
 public:
-  NONCOPYABLE(VMATree);
   static const char* statetype_to_string(StateType type) {
     assert(type < StateType::st_number_of_states, "must be");
@@ -226,6 +226,10 @@ private:
 public:
   VMATree() : _tree() {}
+  VMATree(const VMATree& other) : _tree() {
+    assert(other._tree.copy_into(_tree), "VMATree dies on OOM");
+  }
+  VMATree& operator=(VMATree const&) = delete;
   struct SingleDiff {
     using delta = int64_t;
@@ -329,5 +333,8 @@ public:
     _tree.visit_range_in_order(from, to, f);
   }
   VMARBTree& tree() { return _tree; }
+
+  void clear();
+  bool is_empty();
 };
 #endif
diff --git a/src/hotspot/share/utilities/rbTree.hpp b/src/hotspot/share/utilities/rbTree.hpp
index 4c358b53ff0..fd0e25051f5 100644
--- a/src/hotspot/share/utilities/rbTree.hpp
+++ b/src/hotspot/share/utilities/rbTree.hpp
@@ -428,6 +428,7 @@ public:
   template <typename F>
   void visit_in_order(F f);
+  // Visit all RBNodes in ascending order whose keys are in range [from, to], calling f on each node.
   // If f returns `true` the iteration continues, otherwise it is stopped at the current node.
   template <typename F>
@@ -475,6 +476,7 @@ class RBTree : public AbstractRBTree<K, RBNode<K, V>, COMPARATOR> {
 public:
   RBTree() : BaseType(), _allocator() {}
+  NONCOPYABLE(RBTree);
   ~RBTree() { remove_all(); }
   RBTree(const RBTree& other) : BaseType(), _allocator() {
     assert(std::is_copy_constructible<V>(), "Value type must be copy-constructible");
@@ -485,6 +487,8 @@
   }
   RBTree& operator=(const RBTree& other) = delete;
+  bool copy_into(RBTree& other) const;
+
   typedef typename BaseType::Cursor Cursor;
   using BaseType::cursor;
   using BaseType::insert_at_cursor;
diff --git a/src/hotspot/share/utilities/rbTree.inline.hpp b/src/hotspot/share/utilities/rbTree.inline.hpp
index f28923eb867..b7de0a7ac4c 100644
--- a/src/hotspot/share/utilities/rbTree.inline.hpp
+++ b/src/hotspot/share/utilities/rbTree.inline.hpp
@@ -753,4 +753,53 @@ void AbstractRBTree::print_on(outputStream* st, const P
   }
 }
+template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
+bool RBTree<K, V, COMPARATOR, ALLOCATOR>::copy_into(RBTree& other) const {
+  assert(other.size() == 0, "You can only copy into an empty RBTree");
+  assert(std::is_copy_constructible<K>::value, "Key type must be copy-constructible when copying a RBTree");
+  assert(std::is_copy_constructible<V>::value, "Value type must be copy-constructible when copying a RBTree");
+  enum class Dir { Left, Right };
+  struct node_pair { const IntrusiveRBNode* current; IntrusiveRBNode* other_parent; Dir dir; };
+  struct stack {
+    node_pair s[64];
+    int idx = 0;
+    stack() : idx(0) {}
+    node_pair pop() { idx--; return s[idx]; };
+    void push(node_pair n) { s[idx] = n; idx++; };
+    bool is_empty() { return idx == 0; };
+  };
+
+  stack visit_stack;
+  if (this->_root == nullptr) {
+    return true;
+  }
+  RBNode<K, V>* root = static_cast<RBNode<K, V>*>(this->_root);
+  other._root = other.allocate_node(root->key(), root->val());
+  if (other._root == nullptr) return false;
+
+  visit_stack.push({this->_root->_left, other._root, Dir::Left});
+  visit_stack.push({this->_root->_right, other._root, Dir::Right});
+  while (!visit_stack.is_empty()) {
+    node_pair n = visit_stack.pop();
+    const RBNode<K, V>* current = static_cast<const RBNode<K, V>*>(n.current);
+    if (current == nullptr) continue;
+    RBNode<K, V>* new_node = other.allocate_node(current->key(), current->val());
+    if (new_node == nullptr) {
+      return false;
+    }
+    if (n.dir == Dir::Left) {
+      n.other_parent->_left = new_node;
+    } else {
+      n.other_parent->_right = new_node;
+    }
+    new_node->set_parent(n.other_parent);
+    new_node->_parent |= n.current->_parent & 0x1;
+    visit_stack.push({n.current->_left, new_node, Dir::Left});
+    visit_stack.push({n.current->_right, new_node, Dir::Right});
+  }
+  other._num_nodes = this->_num_nodes;
+  DEBUG_ONLY(other._expected_visited = this->_expected_visited);
+  return true;
+}
+
 #endif // SHARE_UTILITIES_RBTREE_INLINE_HPP
diff --git a/test/hotspot/gtest/utilities/test_rbtree.cpp b/test/hotspot/gtest/utilities/test_rbtree.cpp
index ff234dd764a..a351e2141e8 100644
--- a/test/hotspot/gtest/utilities/test_rbtree.cpp
+++ b/test/hotspot/gtest/utilities/test_rbtree.cpp
@@ -1200,6 +1200,46 @@ TEST_VM_F(RBTreeTest, VerifyItThroughStressTest) {
   }
 }
+TEST_VM_F(RBTreeTest, TestCopyInto) {
+  {
+    RBTreeInt rbtree1;
+    RBTreeInt rbtree2;
+
+    rbtree1.copy_into(rbtree2);
+    rbtree2.verify_self();
+  }
+
+  RBTreeInt rbtree1;
+  RBTreeInt rbtree2;
+
+  int size = 1000;
+  for (int i = 0; i < size; i++) {
+    rbtree1.upsert(i, i);
+  }
+
+  rbtree1.copy_into(rbtree2);
+  rbtree2.verify_self();
+
+  ResourceMark rm;
+  GrowableArray<int> allocations(size);
+  int size1 = 0;
+  rbtree1.visit_in_order([&](RBTreeIntNode* node) {
+    size1++;
+    allocations.append(node->key());
+    return true;
+  });
+
+  int size2 = 0;
+  rbtree2.visit_in_order([&](RBTreeIntNode* node) {
+    EXPECT_EQ(node->key(), allocations.at(size2++));
+    return true;
+  });
+
+  EXPECT_EQ(size1, size2);
+  EXPECT_EQ(rbtree1.size(), rbtree2.size());
+  EXPECT_EQ(size2, static_cast<int>(rbtree2.size()));
+}
+
 struct OomAllocator {
   void* allocate(size_t sz) {
     return nullptr;