8366658: Add missing locks when accessing the VirtualMemoryTracker instance in tests and MemMapPrinter
Reviewed-by: azafari, phubner
commit a7dc011ac4 (parent 1cb1267ce8)
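Every hunk below applies the same fix: code that reads or updates the VirtualMemoryTracker instance either holds MemTracker::NmtVirtualMemoryLocker for the duration of the access, or goes through the new MemTracker::walk_virtual_memory() wrapper, which takes that lock itself. A minimal sketch of the caller-side pattern (the helper name is hypothetical; the locker and tracker calls are the ones used in the diff):

// Sketch only: query the tracker under the NMT virtual-memory lock and
// copy the result out before using it outside the critical section.
static ReservedMemoryRegion find_region_locked(address base) {  // hypothetical helper
  MemTracker::NmtVirtualMemoryLocker nvml;  // serializes against concurrent NMT virtual-memory updates
  return VirtualMemoryTracker::Instance::tree()->find_reserved_region(base);  // returns a copy by value
}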
@@ -138,9 +138,9 @@ class VirtualMemoryAllocationWalker : public VirtualMemoryWalker {
 void MemBaseline::baseline_summary() {
   MallocMemorySummary::snapshot(&_malloc_memory_snapshot);
-  VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
   {
     MemTracker::NmtVirtualMemoryLocker nvml;
+    VirtualMemorySummary::snapshot(&_virtual_memory_snapshot);
     MemoryFileTracker::Instance::summary_snapshot(&_virtual_memory_snapshot);
   }
@@ -160,7 +160,7 @@ bool MemBaseline::baseline_allocation_sites() {

   // Virtual memory allocation sites
   VirtualMemoryAllocationWalker virtual_memory_walker;
-  if (!VirtualMemoryTracker::Instance::walk_virtual_memory(&virtual_memory_walker)) {
+  if (!MemTracker::walk_virtual_memory(&virtual_memory_walker)) {
     return false;
   }
@@ -157,7 +157,7 @@ public:

   // Iterate all NMT virtual memory regions and fill this cache.
   bool fill_from_nmt() {
-    return VirtualMemoryTracker::Instance::walk_virtual_memory(this);
+    return MemTracker::walk_virtual_memory(this);
   }
 };
@@ -173,6 +173,13 @@ class MemTracker : AllStatic {
     }
   }

+  static inline bool walk_virtual_memory(VirtualMemoryWalker* walker) {
+    assert_post_init();
+    if (!enabled()) return false;
+    MemTracker::NmtVirtualMemoryLocker nvml;
+    return VirtualMemoryTracker::Instance::walk_virtual_memory(walker);
+  }
+
   static inline MemoryFileTracker::MemoryFile* register_file(const char* descriptive_name) {
     assert_post_init();
     if (!enabled()) return nullptr;
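The wrapper above follows the existing MemTracker entry-point shape (assert_post_init(), enabled() check, then lock and delegate), which is why the MemBaseline and fill_from_nmt() hunks above drop their direct VirtualMemoryTracker::Instance calls. A hedged usage sketch (the walker class is illustrative; VirtualMemoryWalker and the wrapper are the names used in this change):

// Sketch only: a walker driven through the locked MemTracker entry point.
class RegionCounter : public VirtualMemoryWalker {  // hypothetical walker
public:
  int _count = 0;
  virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) {
    _count++;        // count every reserved region visited
    return true;     // returning false would stop the walk early
  }
};

RegionCounter counter;
MemTracker::walk_virtual_memory(&counter);  // takes NmtVirtualMemoryLocker internally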
@@ -29,6 +29,7 @@
 #include "nmt/nmtCommon.hpp"
 #include "nmt/nmtUsage.hpp"
 #include "nmt/threadStackTracker.hpp"
+#include "runtime/mutexLocker.hpp"

 // Enabled all options for snapshot.
 const NMTUsageOptions NMTUsage::OptionsAll = { true, true, true };
@@ -47,7 +48,9 @@ void NMTUsage::walk_thread_stacks() {
   // much memory had been committed if they are backed by virtual memory. This
   // needs to happen before we take the snapshot of the virtual memory since it
   // will update this information.
+  MemTracker::NmtVirtualMemoryLocker locker;
   VirtualMemoryTracker::Instance::snapshot_thread_stacks();
+
 }

 void NMTUsage::update_malloc_usage() {
@@ -208,14 +208,15 @@ bool VirtualMemoryTracker::Instance::walk_virtual_memory(VirtualMemoryWalker* wa
 }

 bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
-  MemTracker::NmtVirtualMemoryLocker nvml;
+  bool ret = true;
   tree()->visit_reserved_regions([&](ReservedMemoryRegion& rgn) {
     if (!walker->do_allocation_site(&rgn)) {
+      ret = false;
       return false;
     }
     return true;
   });
-  return true;
+  return ret;
 }

 size_t VirtualMemoryTracker::committed_size(const ReservedMemoryRegion* rmr) {
@@ -350,4 +351,4 @@ ReservedMemoryRegion RegionsTree::find_reserved_region(address addr) {

 bool CommittedMemoryRegion::equals(const ReservedMemoryRegion& rmr) const {
   return size() == rmr.size() && call_stack()->equals(*(rmr.call_stack()));
-}
+}
@@ -345,7 +345,7 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {

 class VirtualMemoryWalker : public StackObj {
  public:
-  virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
+  virtual bool do_allocation_site(const ReservedMemoryRegion* rgn) { return false; }
 };

@@ -409,4 +409,4 @@ class VirtualMemoryTracker {
   };
 };

-#endif // SHARE_NMT_VIRTUALMEMORYTRACKER_HPP
+#endif // SHARE_NMT_VIRTUALMEMORYTRACKER_HPP
@@ -38,10 +38,12 @@ public:

   MemTracker::record_thread_stack(stack_end, stack_size);

-  VirtualMemoryTracker::Instance::add_reserved_region(stack_end, stack_size, CALLER_PC, mtThreadStack);
-
-  // snapshot current stack usage
-  VirtualMemoryTracker::Instance::snapshot_thread_stacks();
+  {
+    MemTracker::NmtVirtualMemoryLocker nvml;
+    VirtualMemoryTracker::Instance::add_reserved_region(stack_end, stack_size, CALLER_PC, mtThreadStack);
+    // snapshot current stack usage
+    VirtualMemoryTracker::Instance::snapshot_thread_stacks();
+  }

   ReservedMemoryRegion rmr_found;
   {
@@ -106,23 +108,29 @@ public:
   }

   // trigger the test
-  VirtualMemoryTracker::Instance::snapshot_thread_stacks();
-
-  ReservedMemoryRegion rmr_found = VirtualMemoryTracker::Instance::tree()->find_reserved_region((address)base);
+  ReservedMemoryRegion rmr_found;
+  {
+    MemTracker::NmtVirtualMemoryLocker nvml;
+    VirtualMemoryTracker::Instance::snapshot_thread_stacks();
+    rmr_found = VirtualMemoryTracker::Instance::tree()->find_reserved_region((address)base);
+  }
   ASSERT_TRUE(rmr_found.is_valid());
   ASSERT_EQ(rmr_found.base(), (address)base);

   bool precise_tracking_supported = false;
-  VirtualMemoryTracker::Instance::tree()->visit_committed_regions(rmr_found, [&](const CommittedMemoryRegion& cmr){
-    if (cmr.size() == size) {
-      return false;
-    } else {
-      precise_tracking_supported = true;
-      check_covered_pages(cmr.base(), cmr.size(), (address)base, touch_pages, page_num);
-    }
-    return true;
-  });
+  {
+    MemTracker::NmtVirtualMemoryLocker nvml;
+    VirtualMemoryTracker::Instance::tree()->visit_committed_regions(rmr_found, [&](const CommittedMemoryRegion& cmr){
+      if (cmr.size() == size) {
+        return false;
+      } else {
+        precise_tracking_supported = true;
+        check_covered_pages(cmr.base(), cmr.size(), (address)base, touch_pages, page_num);
+      }
+      return true;
+    });
+  }

   if (precise_tracking_supported) {
     // All touched pages should be committed
@@ -133,8 +141,11 @@ public:

   // Cleanup
   os::disclaim_memory(base, size);
-  VirtualMemoryTracker::Instance::remove_released_region((address)base, size);
-  rmr_found = VirtualMemoryTracker::Instance::tree()->find_reserved_region((address)base);
+  {
+    MemTracker::NmtVirtualMemoryLocker nvml;
+    VirtualMemoryTracker::Instance::remove_released_region((address)base, size);
+    rmr_found = VirtualMemoryTracker::Instance::tree()->find_reserved_region((address)base);
+  }
   ASSERT_TRUE(!rmr_found.is_valid());
 }
@@ -59,7 +59,7 @@ namespace {

 static void diagnostic_print(VirtualMemoryTracker& vmt, const ReservedMemoryRegion& rmr) {
   LOG("In reserved region " PTR_FORMAT ", size %X:", p2i(rmr.base()), rmr.size());
-  VirtualMemoryTracker::Instance::tree()->visit_committed_regions(rmr, [&](CommittedMemoryRegion& region) {
+  vmt.tree()->visit_committed_regions(rmr, [&](CommittedMemoryRegion& region) {
     LOG("  committed region: " PTR_FORMAT ", size %X", p2i(region.base()), region.size());
     return true;
   });