diff --git a/src/hotspot/share/nmt/memBaseline.cpp b/src/hotspot/share/nmt/memBaseline.cpp
index 9b796ce0c41..35dff0d8646 100644
--- a/src/hotspot/share/nmt/memBaseline.cpp
+++ b/src/hotspot/share/nmt/memBaseline.cpp
@@ -27,7 +27,8 @@
 #include "memory/metaspaceUtils.hpp"
 #include "nmt/memBaseline.hpp"
 #include "nmt/memTracker.hpp"
-#include "nmt/regionsTree.inline.hpp"
+#include "runtime/javaThread.hpp"
+#include "runtime/safepoint.hpp"
 
 /*
  * Sizes are sorted in descenting order for reporting
@@ -103,6 +104,38 @@ class MallocAllocationSiteWalker : public MallocSiteWalker {
   }
 };
 
+// Walk all virtual memory regions for baselining
+class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
+ private:
+  typedef LinkedListImpl<ReservedMemoryRegion, AnyObj::C_HEAP, mtNMT,
+                         AllocFailStrategy::RETURN_NULL> EntryList;
+  EntryList _virtual_memory_regions;
+  DEBUG_ONLY(address _last_base;)
+ public:
+  VirtualMemoryAllocationWalker() {
+    DEBUG_ONLY(_last_base = nullptr);
+  }
+
+  bool do_allocation_site(const ReservedMemoryRegion* rgn) {
+    assert(rgn->base() >= _last_base, "region unordered?");
+    DEBUG_ONLY(_last_base = rgn->base());
+    if (rgn->size() > 0) {
+      if (_virtual_memory_regions.add(*rgn) != nullptr) {
+        return true;
+      } else {
+        return false;
+      }
+    } else {
+      // Ignore empty sites.
+      return true;
+    }
+  }
+
+  LinkedList<ReservedMemoryRegion>* virtual_memory_allocations() {
+    return &_virtual_memory_regions;
+  }
+};
+
 void MemBaseline::baseline_summary() {
   MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
   VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
@@ -125,16 +158,15 @@ bool MemBaseline::baseline_allocation_sites() {
   // The malloc sites are collected in size order
   _malloc_sites_order = by_size;
 
-  assert(_vma_allocations == nullptr, "must");
-
-  {
-    MemTracker::NmtVirtualMemoryLocker locker;
-    _vma_allocations = new (mtNMT, std::nothrow) RegionsTree(*VirtualMemoryTracker::Instance::tree());
-    if (_vma_allocations == nullptr) {
-      return false;
-    }
+  // Virtual memory allocation sites
+  VirtualMemoryAllocationWalker virtual_memory_walker;
+  if (!VirtualMemoryTracker::Instance::walk_virtual_memory(&virtual_memory_walker)) {
+    return false;
   }
 
+  // Virtual memory allocations are collected in call stack order
+  _virtual_memory_allocations.move(virtual_memory_walker.virtual_memory_allocations());
+
   if (!aggregate_virtual_memory_allocation_sites()) {
     return false;
   }
@@ -170,28 +202,20 @@ int compare_allocation_site(const VirtualMemoryAllocationSite& s1,
 bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
   SortedLinkedList<VirtualMemoryAllocationSite, compare_allocation_site> allocation_sites;
 
+  VirtualMemoryAllocationIterator itr = virtual_memory_allocations();
+  const ReservedMemoryRegion* rgn;
   VirtualMemoryAllocationSite* site;
-  bool failed_oom = false;
-  _vma_allocations->visit_reserved_regions([&](ReservedMemoryRegion& rgn) {
-    VirtualMemoryAllocationSite tmp(*rgn.call_stack(), rgn.mem_tag());
+  while ((rgn = itr.next()) != nullptr) {
+    VirtualMemoryAllocationSite tmp(*rgn->call_stack(), rgn->mem_tag());
     site = allocation_sites.find(tmp);
     if (site == nullptr) {
       LinkedListNode<VirtualMemoryAllocationSite>* node = allocation_sites.add(tmp);
-      if (node == nullptr) {
-        failed_oom = true;
-        return false;
-      }
+      if (node == nullptr) return false;
       site = node->data();
     }
-    site->reserve_memory(rgn.size());
-
-    site->commit_memory(_vma_allocations->committed_size(rgn));
-    return true;
-  });
-
-  if (failed_oom) {
-    return false;
+    site->reserve_memory(rgn->size());
+    site->commit_memory(VirtualMemoryTracker::Instance::committed_size(rgn));
   }
 
   _virtual_memory_sites.move(&allocation_sites);
diff --git a/src/hotspot/share/nmt/memBaseline.hpp b/src/hotspot/share/nmt/memBaseline.hpp
index 3f1ea46d815..2fff4cc666c 100644
--- a/src/hotspot/share/nmt/memBaseline.hpp
+++ b/src/hotspot/share/nmt/memBaseline.hpp
@@ -35,6 +35,7 @@
 
 typedef LinkedListIterator<MallocSite> MallocSiteIterator;
 typedef LinkedListIterator<VirtualMemoryAllocationSite> VirtualMemorySiteIterator;
+typedef LinkedListIterator<ReservedMemoryRegion> VirtualMemoryAllocationIterator;
 
 /*
  * Baseline a memory snapshot
@@ -70,7 +71,7 @@ class MemBaseline {
   LinkedListImpl<MallocSite> _malloc_sites;
 
   // All virtual memory allocations
-  RegionsTree* _vma_allocations;
+  LinkedListImpl<ReservedMemoryRegion> _virtual_memory_allocations;
 
   // Virtual memory allocations by allocation sites, always in by_address
   // order
@@ -85,7 +86,6 @@ class MemBaseline {
   // create a memory baseline
   MemBaseline():
     _instance_class_count(0), _array_class_count(0), _thread_count(0),
-    _vma_allocations(nullptr),
     _baseline_type(Not_baselined) {
   }
 
@@ -110,9 +110,9 @@ class MemBaseline {
 
   // Virtual memory allocation iterator always returns in virtual memory
   // base address order.
-  RegionsTree* virtual_memory_allocations() {
-    assert(_vma_allocations != nullptr, "Not detail baseline");
-    return _vma_allocations;
+  VirtualMemoryAllocationIterator virtual_memory_allocations() {
+    assert(!_virtual_memory_allocations.is_empty(), "Not detail baseline");
+    return VirtualMemoryAllocationIterator(_virtual_memory_allocations.head());
   }
 
   // Total reserved memory = total malloc'd memory + total reserved virtual
@@ -185,8 +185,7 @@ class MemBaseline {
 
     _malloc_sites.clear();
     _virtual_memory_sites.clear();
-    delete _vma_allocations;
-    _vma_allocations = nullptr;
+    _virtual_memory_allocations.clear();
   }
 
  private:
diff --git a/src/hotspot/share/nmt/memReporter.cpp b/src/hotspot/share/nmt/memReporter.cpp
index 84f0f90f6e5..65d4d76942b 100644
--- a/src/hotspot/share/nmt/memReporter.cpp
+++ b/src/hotspot/share/nmt/memReporter.cpp
@@ -394,11 +394,13 @@ int MemDetailReporter::report_virtual_memory_allocation_sites() {
 
 void MemDetailReporter::report_virtual_memory_map() {
   // Virtual memory map always in base address order
+  VirtualMemoryAllocationIterator itr = _baseline.virtual_memory_allocations();
+  const ReservedMemoryRegion* rgn;
+
   output()->print_cr("Virtual memory map:");
-  _baseline.virtual_memory_allocations()->visit_reserved_regions([&](ReservedMemoryRegion& rgn) {
-    report_virtual_memory_region(&rgn);
-    return true;
-  });
+  while ((rgn = itr.next()) != nullptr) {
+    report_virtual_memory_region(rgn);
+  }
 }
 
 void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion* reserved_rgn) {
diff --git a/src/hotspot/share/nmt/nmtNativeCallStackStorage.cpp b/src/hotspot/share/nmt/nmtNativeCallStackStorage.cpp
index 9a2ecd57ecc..3e5c1d2f0ea 100644
--- a/src/hotspot/share/nmt/nmtNativeCallStackStorage.cpp
+++ b/src/hotspot/share/nmt/nmtNativeCallStackStorage.cpp
@@ -57,21 +57,3 @@ NativeCallStackStorage::NativeCallStackStorage(bool is_detailed_mode, int table_
 NativeCallStackStorage::~NativeCallStackStorage() {
   FREE_C_HEAP_ARRAY(LinkPtr, _table);
 }
-
-NativeCallStackStorage::NativeCallStackStorage(const NativeCallStackStorage& other)
-  : _table_size(other._table_size),
-    _table(nullptr),
-    _stacks(),
-    _is_detailed_mode(other._is_detailed_mode),
-    _fake_stack(other._fake_stack) {
-  if (_is_detailed_mode) {
-    _table = NEW_C_HEAP_ARRAY(TableEntryIndex, _table_size, mtNMT);
-    for (int i = 0; i < _table_size; i++) {
-      _table[i] = other._table[i];
-    }
-  }
-  _stacks.reserve(other._stacks.length());
-  for (int i = 0; i < other._stacks.length(); i++) {
-    _stacks.at_grow(i) = other._stacks.at(i);
-  }
-}
diff --git a/src/hotspot/share/nmt/nmtNativeCallStackStorage.hpp b/src/hotspot/share/nmt/nmtNativeCallStackStorage.hpp
index 6ead8f49248..6f194cfa5a1 100644
--- a/src/hotspot/share/nmt/nmtNativeCallStackStorage.hpp
+++ b/src/hotspot/share/nmt/nmtNativeCallStackStorage.hpp
@@ -95,8 +95,7 @@ public:
   }
 
   NativeCallStackStorage(bool is_detailed_mode, int table_size = default_table_size);
-  NativeCallStackStorage(const NativeCallStackStorage& other);
-  NativeCallStackStorage& operator=(const NativeCallStackStorage& other) = delete;
+
   ~NativeCallStackStorage();
 };
 
diff --git a/src/hotspot/share/nmt/regionsTree.cpp b/src/hotspot/share/nmt/regionsTree.cpp
index 8644a2d4731..370c69a2485 100644
--- a/src/hotspot/share/nmt/regionsTree.cpp
+++ b/src/hotspot/share/nmt/regionsTree.cpp
@@ -22,8 +22,6 @@
  *
  */
 #include "nmt/regionsTree.hpp"
-#include "nmt/regionsTree.inline.hpp"
-#include "nmt/virtualMemoryTracker.hpp"
 
 VMATree::SummaryDiff RegionsTree::commit_region(address addr, size_t size, const NativeCallStack& stack) {
   return commit_mapping((VMATree::position)addr, size, make_region_data(stack, mtNone), /*use tag inplace*/ true);
@@ -56,13 +54,4 @@ void RegionsTree::print_on(outputStream* st) {
     return true;
   });
 }
-#endif
-
-size_t RegionsTree::committed_size(ReservedMemoryRegion& rgn) {
-  size_t result = 0;
-  visit_committed_regions(rgn, [&](CommittedMemoryRegion& crgn) {
-    result += crgn.size();
-    return true;
-  });
-  return result;
-}
+#endif
\ No newline at end of file
diff --git a/src/hotspot/share/nmt/regionsTree.hpp b/src/hotspot/share/nmt/regionsTree.hpp
index b0c5d928bab..bf2ab711b2d 100644
--- a/src/hotspot/share/nmt/regionsTree.hpp
+++ b/src/hotspot/share/nmt/regionsTree.hpp
@@ -40,12 +40,6 @@ class RegionsTree : public VMATree {
  public:
   RegionsTree(bool with_storage) : VMATree() , _ncs_storage(with_storage), _with_storage(with_storage) { }
 
-  RegionsTree(const RegionsTree& other)
-    : VMATree(other),
-      _ncs_storage(other._ncs_storage),
-      _with_storage(other._with_storage) {}
-  RegionsTree& operator=(const RegionsTree& other) = delete;
-
  ReservedMemoryRegion find_reserved_region(address addr);
 
  SummaryDiff commit_region(address addr, size_t size, const NativeCallStack& stack);
@@ -97,8 +91,6 @@ class RegionsTree : public VMATree {
     NativeCallStackStorage::StackIndex si = node.out_stack_index();
     return _ncs_storage.get(si);
   }
-
-  size_t committed_size(ReservedMemoryRegion& rgn);
 };
 
-#endif // NMT_REGIONSTREE_HPP
+#endif // NMT_REGIONSTREE_HPP
\ No newline at end of file
diff --git a/src/hotspot/share/nmt/vmatree.cpp b/src/hotspot/share/nmt/vmatree.cpp
index 69887068cb2..4f6f8e12185 100644
--- a/src/hotspot/share/nmt/vmatree.cpp
+++ b/src/hotspot/share/nmt/vmatree.cpp
@@ -744,10 +744,3 @@ void VMATree::SummaryDiff::print_on(outputStream* out) {
   }
 }
 #endif
-
-void VMATree::clear() {
-  _tree.remove_all();
-};
-bool VMATree::is_empty() {
-  return _tree.size() == 0;
-};
diff --git a/src/hotspot/share/nmt/vmatree.hpp b/src/hotspot/share/nmt/vmatree.hpp
index 88b680a045c..1b5729054e4 100644
--- a/src/hotspot/share/nmt/vmatree.hpp
+++ b/src/hotspot/share/nmt/vmatree.hpp
@@ -30,7 +30,6 @@
 #include "nmt/nmtNativeCallStackStorage.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/ostream.hpp"
-#include "utilities/rbTree.hpp"
 #include "utilities/rbTree.inline.hpp"
 
 #include <cstdint>
@@ -40,7 +39,7 @@
 // For example, the state may go from released memory to committed memory,
 // or from committed memory of a certain MemTag to committed memory of a different MemTag.
 // The set of points is stored in a balanced binary tree for efficient querying and updating.
-class VMATree : public CHeapObjBase {
+class VMATree {
   friend class NMTVMATreeTest;
   friend class VMTWithVMATreeTest;
   // A position in memory.
@@ -66,6 +65,7 @@ private:
   static const char* statetype_strings[static_cast<uint8_t>(StateType::st_number_of_states)];
 
 public:
+  NONCOPYABLE(VMATree);
 
   static const char* statetype_to_string(StateType type) {
     assert(type < StateType::st_number_of_states, "must be");
@@ -226,10 +226,6 @@ private:
 public:
   VMATree() : _tree() {}
 
-  VMATree(const VMATree& other) : _tree() {
-    assert(other._tree.copy_into(_tree), "VMATree dies on OOM");
-  }
-  VMATree& operator=(VMATree const&) = delete;
 
   struct SingleDiff {
     using delta = int64_t;
@@ -333,8 +329,5 @@ public:
     _tree.visit_range_in_order(from, to, f);
   }
   VMARBTree& tree() { return _tree; }
-
-  void clear();
-  bool is_empty();
 };
 #endif
diff --git a/src/hotspot/share/utilities/rbTree.hpp b/src/hotspot/share/utilities/rbTree.hpp
index fd0e25051f5..4c358b53ff0 100644
--- a/src/hotspot/share/utilities/rbTree.hpp
+++ b/src/hotspot/share/utilities/rbTree.hpp
@@ -428,7 +428,6 @@ public:
   template <typename F>
   void visit_in_order(F f);
 
-
   // Visit all RBNodes in ascending order whose keys are in range [from, to], calling f on each node.
   // If f returns `true` the iteration continues, otherwise it is stopped at the current node.
   template <typename F>
@@ -476,7 +475,6 @@ class RBTree : public AbstractRBTree<K, RBNode<K, V>, COMPARATOR> {
 public:
   RBTree() : BaseType(), _allocator() {}
-  NONCOPYABLE(RBTree);
   ~RBTree() { remove_all(); }
 
   RBTree(const RBTree& other) : BaseType(), _allocator() {
     assert(std::is_copy_constructible<V>(), "Value type must be copy-constructible");
@@ -487,8 +485,6 @@
   }
   RBTree& operator=(const RBTree& other) = delete;
 
-  bool copy_into(RBTree& other) const;
-
   typedef typename BaseType::Cursor Cursor;
   using BaseType::cursor;
   using BaseType::insert_at_cursor;
diff --git a/src/hotspot/share/utilities/rbTree.inline.hpp b/src/hotspot/share/utilities/rbTree.inline.hpp
index b7de0a7ac4c..f28923eb867 100644
--- a/src/hotspot/share/utilities/rbTree.inline.hpp
+++ b/src/hotspot/share/utilities/rbTree.inline.hpp
@@ -753,53 +753,4 @@ void AbstractRBTree::print_on(outputStream* st, const P
   }
 }
 
-template <typename K, typename V, typename COMPARATOR, typename ALLOCATOR>
-bool RBTree<K, V, COMPARATOR, ALLOCATOR>::copy_into(RBTree& other) const {
-  assert(other.size() == 0, "You can only copy into an empty RBTree");
-  assert(std::is_copy_constructible<K>::value, "Key type must be copy-constructible when copying a RBTree");
-  assert(std::is_copy_constructible<V>::value, "Value type must be copy-constructible when copying a RBTree");
-  enum class Dir { Left, Right };
-  struct node_pair { const IntrusiveRBNode* current; IntrusiveRBNode* other_parent; Dir dir; };
-  struct stack {
-    node_pair s[64];
-    int idx = 0;
-    stack() : idx(0) {}
-    node_pair pop() { idx--; return s[idx]; };
-    void push(node_pair n) { s[idx] = n; idx++; };
-    bool is_empty() { return idx == 0; };
-  };
-
-  stack visit_stack;
-  if (this->_root == nullptr) {
-    return true;
-  }
-  RBNode<K, V>* root = static_cast<RBNode<K, V>*>(this->_root);
-  other._root = other.allocate_node(root->key(), root->val());
-  if (other._root == nullptr) return false;
-
-  visit_stack.push({this->_root->_left, other._root, Dir::Left});
-  visit_stack.push({this->_root->_right, other._root, Dir::Right});
-  while (!visit_stack.is_empty()) {
-    node_pair n = visit_stack.pop();
-    const RBNode<K, V>* current = static_cast<const RBNode<K, V>*>(n.current);
-    if (current == nullptr) continue;
-    RBNode<K, V>* new_node = other.allocate_node(current->key(), current->val());
-    if (new_node == nullptr) {
-      return false;
-    }
-    if (n.dir == Dir::Left) {
-      n.other_parent->_left = new_node;
-    } else {
-      n.other_parent->_right = new_node;
-    }
-    new_node->set_parent(n.other_parent);
-    new_node->_parent |= n.current->_parent & 0x1;
-    visit_stack.push({n.current->_left, new_node, Dir::Left});
-    visit_stack.push({n.current->_right, new_node, Dir::Right});
-  }
-  other._num_nodes = this->_num_nodes;
-  DEBUG_ONLY(other._expected_visited = this->_expected_visited);
-  return true;
-}
-
 #endif // SHARE_UTILITIES_RBTREE_INLINE_HPP
diff --git a/test/hotspot/gtest/utilities/test_rbtree.cpp b/test/hotspot/gtest/utilities/test_rbtree.cpp
index a351e2141e8..ff234dd764a 100644
--- a/test/hotspot/gtest/utilities/test_rbtree.cpp
+++ b/test/hotspot/gtest/utilities/test_rbtree.cpp
@@ -1200,46 +1200,6 @@ TEST_VM_F(RBTreeTest, VerifyItThroughStressTest) {
   }
 }
 
-TEST_VM_F(RBTreeTest, TestCopyInto) {
-  {
-    RBTreeInt rbtree1;
-    RBTreeInt rbtree2;
-
-    rbtree1.copy_into(rbtree2);
-    rbtree2.verify_self();
-  }
-
-  RBTreeInt rbtree1;
-  RBTreeInt rbtree2;
-
-  int size = 1000;
-  for (int i = 0; i < size; i++) {
-    rbtree1.upsert(i, i);
-  }
-
-  rbtree1.copy_into(rbtree2);
-  rbtree2.verify_self();
-
-  ResourceMark rm;
-  GrowableArray<int> allocations(size);
-  int size1 = 0;
-  rbtree1.visit_in_order([&](RBTreeIntNode* node) {
-    size1++;
-    allocations.append(node->key());
-    return true;
-  });
-
-  int size2 = 0;
-  rbtree2.visit_in_order([&](RBTreeIntNode* node) {
-    EXPECT_EQ(node->key(), allocations.at(size2++));
-    return true;
-  });
-
-  EXPECT_EQ(size1, size2);
-  EXPECT_EQ(rbtree1.size(), rbtree2.size());
-  EXPECT_EQ(size2, static_cast<int>(rbtree2.size()));
-}
-
 struct OomAllocator {
   void* allocate(size_t sz) { return nullptr;