8277990: NMT: Remove NMT shutdown capability

Reviewed-by: stuefe, shade
Zhengyu Gu 2021-12-07 17:42:08 +00:00
parent 7217cb7878
commit 5a036ace01
20 changed files with 116 additions and 431 deletions

View File

@ -1028,7 +1028,7 @@ static char* mmap_create_shared(size_t size) {
//
static void unmap_shared(char* addr, size_t bytes) {
int res;
- if (MemTracker::tracking_level() > NMT_minimal) {
+ if (MemTracker::enabled()) {
// Note: Tracker contains a ThreadCritical.
Tracker tkr(Tracker::release);
res = ::munmap(addr, bytes);

View File

@ -1834,7 +1834,7 @@ void PerfMemory::detach(char* addr, size_t bytes) {
return;
}
- if (MemTracker::tracking_level() > NMT_minimal) {
+ if (MemTracker::enabled()) {
// it does not go through os api, the operation has to record from here
Tracker tkr(Tracker::release);
remove_file_mapping(addr);

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -282,7 +282,7 @@ void ZPhysicalMemoryManager::nmt_commit(uintptr_t offset, size_t size) const {
}
void ZPhysicalMemoryManager::nmt_uncommit(uintptr_t offset, size_t size) const {
- if (MemTracker::tracking_level() > NMT_minimal) {
+ if (MemTracker::enabled()) {
const uintptr_t addr = ZAddress::marked0(offset);
Tracker tracker(Tracker::uncommit);
tracker.record((address)addr, size);

View File

@ -703,37 +703,6 @@ WB_ENTRY(void, WB_NMTReleaseMemory(JNIEnv* env, jobject o, jlong addr, jlong siz
os::release_memory((char *)(uintptr_t)addr, size);
WB_END
- WB_ENTRY(jboolean, WB_NMTChangeTrackingLevel(JNIEnv* env))
- // Test that we can downgrade NMT levels but not upgrade them.
- if (MemTracker::tracking_level() == NMT_off) {
- MemTracker::transition_to(NMT_off);
- return MemTracker::tracking_level() == NMT_off;
- } else {
- assert(MemTracker::tracking_level() == NMT_detail, "Should start out as detail tracking");
- MemTracker::transition_to(NMT_summary);
- assert(MemTracker::tracking_level() == NMT_summary, "Should be summary now");
- // Can't go to detail once NMT is set to summary.
- MemTracker::transition_to(NMT_detail);
- assert(MemTracker::tracking_level() == NMT_summary, "Should still be summary now");
- // Shutdown sets tracking level to minimal.
- MemTracker::shutdown();
- assert(MemTracker::tracking_level() == NMT_minimal, "Should be minimal now");
- // Once the tracking level is minimal, we cannot increase to summary.
- // The code ignores this request instead of asserting because if the malloc site
- // table overflows in another thread, it tries to change the code to summary.
- MemTracker::transition_to(NMT_summary);
- assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");
- // Really can never go up to detail, verify that the code would never do this.
- MemTracker::transition_to(NMT_detail);
- assert(MemTracker::tracking_level() == NMT_minimal, "Should still be minimal now");
- return MemTracker::tracking_level() == NMT_minimal;
- }
- WB_END
WB_ENTRY(jint, WB_NMTGetHashSize(JNIEnv* env, jobject o))
int hash_size = MallocSiteTable::hash_buckets();
assert(hash_size > 0, "NMT hash_size should be > 0");
@ -2567,7 +2536,6 @@ static JNINativeMethod methods[] = {
{CC"NMTCommitMemory", CC"(JJ)V", (void*)&WB_NMTCommitMemory },
{CC"NMTUncommitMemory", CC"(JJ)V", (void*)&WB_NMTUncommitMemory },
{CC"NMTReleaseMemory", CC"(JJ)V", (void*)&WB_NMTReleaseMemory },
{CC"NMTChangeTrackingLevel", CC"()Z", (void*)&WB_NMTChangeTrackingLevel},
{CC"NMTGetHashSize", CC"()I", (void*)&WB_NMTGetHashSize },
{CC"NMTNewArena", CC"(J)J", (void*)&WB_NMTNewArena },
{CC"NMTFreeArena", CC"(J)V", (void*)&WB_NMTFreeArena },

View File

@ -1786,7 +1786,7 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
bool res;
- if (MemTracker::tracking_level() > NMT_minimal) {
+ if (MemTracker::enabled()) {
Tracker tkr(Tracker::uncommit);
res = pd_uncommit_memory(addr, bytes, executable);
if (res) {
@ -1800,7 +1800,7 @@ bool os::uncommit_memory(char* addr, size_t bytes, bool executable) {
bool os::release_memory(char* addr, size_t bytes) {
bool res;
- if (MemTracker::tracking_level() > NMT_minimal) {
+ if (MemTracker::enabled()) {
// Note: Tracker contains a ThreadCritical.
Tracker tkr(Tracker::release);
res = pd_release_memory(addr, bytes);
@ -1869,7 +1869,7 @@ char* os::remap_memory(int fd, const char* file_name, size_t file_offset,
bool os::unmap_memory(char *addr, size_t bytes) {
bool result;
- if (MemTracker::tracking_level() > NMT_minimal) {
+ if (MemTracker::enabled()) {
Tracker tkr(Tracker::release);
result = pd_unmap_memory(addr, bytes);
if (result) {
@ -1905,7 +1905,7 @@ char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size
bool os::release_memory_special(char* addr, size_t bytes) {
bool res;
- if (MemTracker::tracking_level() > NMT_minimal) {
+ if (MemTracker::enabled()) {
// Note: Tracker contains a ThreadCritical.
Tracker tkr(Tracker::release);
res = pd_release_memory_special(addr, bytes);

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,12 +33,6 @@ MallocSiteHashtableEntry* MallocSiteTable::_table[MallocSiteTable::table_size];
const NativeCallStack* MallocSiteTable::_hash_entry_allocation_stack = NULL;
const MallocSiteHashtableEntry* MallocSiteTable::_hash_entry_allocation_site = NULL;
- // concurrent access counter
- volatile int MallocSiteTable::_access_count = 0;
- // Tracking hashtable contention
- NOT_PRODUCT(int MallocSiteTable::_peak_count = 0;)
/*
* Initialize malloc site table.
* Hashtable entry is malloc'd, so it can cause infinite recursion.
@ -202,123 +196,82 @@ void MallocSiteTable::delete_linked_list(MallocSiteHashtableEntry* head) {
}
}
- void MallocSiteTable::shutdown() {
- AccessLock locker(&_access_count);
- locker.exclusiveLock();
- reset();
- }
bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) {
assert(walker != NULL, "NuLL walker");
- AccessLock locker(&_access_count);
- if (locker.sharedLock()) {
- NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
- return walk(walker);
- }
- return false;
- }
- void MallocSiteTable::AccessLock::exclusiveLock() {
- int target;
- int val;
- assert(_lock_state != ExclusiveLock, "Can only call once");
- assert(*_lock >= 0, "Can not content exclusive lock");
- // make counter negative to block out shared locks
- do {
- val = *_lock;
- target = _MAGIC_ + *_lock;
- } while (Atomic::cmpxchg(_lock, val, target) != val);
- // wait for all readers to exit
- while (*_lock != _MAGIC_) {
- #ifdef _WINDOWS
- os::naked_short_sleep(1);
- #else
- os::naked_yield();
- #endif
- }
- _lock_state = ExclusiveLock;
+ return walk(walker);
}
void MallocSiteTable::print_tuning_statistics(outputStream* st) {
- AccessLock locker(&_access_count);
- if (locker.sharedLock()) {
// Total number of allocation sites, include empty sites
int total_entries = 0;
// Number of allocation sites that have all memory freed
int empty_entries = 0;
// Number of captured call stack distribution
int stack_depth_distribution[NMT_TrackingStackDepth + 1] = { 0 };
// Chain lengths
int lengths[table_size] = { 0 };
for (int i = 0; i < table_size; i ++) {
int this_chain_length = 0;
const MallocSiteHashtableEntry* head = _table[i];
while (head != NULL) {
total_entries ++;
this_chain_length ++;
if (head->size() == 0) {
empty_entries ++;
}
const int callstack_depth = head->peek()->call_stack()->frames();
assert(callstack_depth >= 0 && callstack_depth <= NMT_TrackingStackDepth,
"Sanity (%d)", callstack_depth);
stack_depth_distribution[callstack_depth] ++;
head = head->next();
}
lengths[i] = this_chain_length;
}
st->print_cr("Malloc allocation site table:");
st->print_cr("\tTotal entries: %d", total_entries);
st->print_cr("\tEmpty entries: %d (%2.2f%%)", empty_entries, ((float)empty_entries * 100) / total_entries);
st->cr();
// We report the hash distribution (chain length distribution) of the n shortest chains
// - under the assumption that this usually contains all lengths. Reporting threshold
// is 20, and the expected avg chain length is 5..6 (see table size).
static const int chain_length_threshold = 20;
int chain_length_distribution[chain_length_threshold] = { 0 };
int over_threshold = 0;
int longest_chain_length = 0;
for (int i = 0; i < table_size; i ++) {
if (lengths[i] >= chain_length_threshold) {
over_threshold ++;
} else {
chain_length_distribution[lengths[i]] ++;
}
longest_chain_length = MAX2(longest_chain_length, lengths[i]);
}
st->print_cr("Hash distribution:");
if (chain_length_distribution[0] == 0) {
st->print_cr("no empty buckets.");
} else {
st->print_cr("%d buckets are empty.", chain_length_distribution[0]);
}
for (int len = 1; len < MIN2(longest_chain_length + 1, chain_length_threshold); len ++) {
st->print_cr("%2d %s: %d.", len, (len == 1 ? " entry" : "entries"), chain_length_distribution[len]);
}
if (longest_chain_length >= chain_length_threshold) {
st->print_cr(">=%2d entries: %d.", chain_length_threshold, over_threshold);
}
st->print_cr("most entries: %d.", longest_chain_length);
st->cr();
st->print_cr("Call stack depth distribution:");
for (int i = 0; i <= NMT_TrackingStackDepth; i ++) {
st->print_cr("\t%d: %d", i, stack_depth_distribution[i]);
}
st->cr();
- } // lock
}
bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
return Atomic::replace_if_null(&_next, entry);
}
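For context, the only synchronization the call-site table still relies on after the AccessLock removal is the lock-free tail insert shown just above (Atomic::replace_if_null). A minimal, self-contained sketch of the same idea using std::atomic, with hypothetical names:

#include <atomic>

struct Entry {
  std::atomic<Entry*> next{nullptr};

  // Try to hang 'e' off this node; succeeds only if 'next' is still null,
  // mirroring Atomic::replace_if_null(&_next, entry) in the hunk above.
  bool atomic_insert(Entry* e) {
    Entry* expected = nullptr;
    return next.compare_exchange_strong(expected, e);
  }
};

int main() {
  Entry head, node;
  return head.atomic_insert(&node) ? 0 : 1;  // 0: the insert succeeded
}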

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -118,54 +118,8 @@ class MallocSiteTable : AllStatic {
// in the malloc header can hold.
STATIC_ASSERT(table_size <= MAX_MALLOCSITE_TABLE_SIZE);
- // This is a very special lock, that allows multiple shared accesses (sharedLock), but
- // once exclusive access (exclusiveLock) is requested, all shared accesses are
- // rejected forever.
- class AccessLock : public StackObj {
- enum LockState {
- NoLock,
- SharedLock,
- ExclusiveLock
- };
- private:
- // A very large negative number. The only possibility to "overflow"
- // this number is when there are more than -min_jint threads in
- // this process, which is not going to happen in foreseeable future.
- const static int _MAGIC_ = min_jint;
- LockState _lock_state;
- volatile int* _lock;
- public:
- AccessLock(volatile int* lock) :
- _lock_state(NoLock), _lock(lock) {
- }
- ~AccessLock() {
- if (_lock_state == SharedLock) {
- Atomic::dec(_lock);
- }
- }
- // Acquire shared lock.
- // Return true if shared access is granted.
- inline bool sharedLock() {
- jint res = Atomic::add(_lock, 1);
- if (res < 0) {
- Atomic::dec(_lock);
- return false;
- }
- _lock_state = SharedLock;
- return true;
- }
- // Acquire exclusive lock
- void exclusiveLock();
- };
public:
static bool initialize();
- static void shutdown();
- NOT_PRODUCT(static int access_peak_count() { return _peak_count; })
// Number of hash buckets
static inline int hash_buckets() { return (int)table_size; }
@ -174,14 +128,10 @@ class MallocSiteTable : AllStatic {
// acquired before access the entry.
static inline bool access_stack(NativeCallStack& stack, size_t bucket_idx,
size_t pos_idx) {
- AccessLock locker(&_access_count);
- if (locker.sharedLock()) {
- NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
MallocSite* site = malloc_site(bucket_idx, pos_idx);
if (site != NULL) {
stack = *site->call_stack();
return true;
}
- }
return false;
}
@ -195,27 +145,18 @@ class MallocSiteTable : AllStatic {
// 2. overflow hash bucket
static inline bool allocation_at(const NativeCallStack& stack, size_t size,
size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) {
- AccessLock locker(&_access_count);
- if (locker.sharedLock()) {
- NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
MallocSite* site = lookup_or_add(stack, bucket_idx, pos_idx, flags);
if (site != NULL) site->allocate(size);
return site != NULL;
- }
- return false;
}
// Record memory deallocation. bucket_idx and pos_idx indicate where the allocation
// information was recorded.
static inline bool deallocation_at(size_t size, size_t bucket_idx, size_t pos_idx) {
- AccessLock locker(&_access_count);
- if (locker.sharedLock()) {
- NOT_PRODUCT(_peak_count = MAX2(_peak_count, _access_count);)
MallocSite* site = malloc_site(bucket_idx, pos_idx);
if (site != NULL) {
site->deallocate(size);
return true;
}
- }
return false;
}
@ -251,17 +192,11 @@ class MallocSiteTable : AllStatic {
}
private:
- // Counter for counting concurrent access
- static volatile int _access_count;
// The callsite hashtable. It has to be a static table,
// since malloc call can come from C runtime linker.
static MallocSiteHashtableEntry* _table[table_size];
static const NativeCallStack* _hash_entry_allocation_stack;
static const MallocSiteHashtableEntry* _hash_entry_allocation_site;
- NOT_PRODUCT(static int _peak_count;)
};
#endif // INCLUDE_NMT

View File

@ -113,8 +113,7 @@ void MallocHeader::mark_block_as_dead() {
}
void MallocHeader::release() {
- // Tracking already shutdown, no housekeeping is needed anymore
- if (MemTracker::tracking_level() <= NMT_minimal) return;
+ assert(MemTracker::enabled(), "Sanity");
check_block_integrity();
@ -222,15 +221,7 @@ void MallocHeader::check_block_integrity() const {
bool MallocHeader::record_malloc_site(const NativeCallStack& stack, size_t size,
size_t* bucket_idx, size_t* pos_idx, MEMFLAGS flags) const {
- bool ret = MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx, flags);
- // Something went wrong, could be OOM or overflow malloc site table.
- // We want to keep tracking data under OOM circumstance, so transition to
- // summary tracking.
- if (!ret) {
- MemTracker::transition_to(NMT_summary);
- }
- return ret;
+ return MallocSiteTable::allocation_at(stack, size, bucket_idx, pos_idx, flags);
}
bool MallocHeader::get_stack(NativeCallStack& stack) const {
@ -248,18 +239,6 @@ bool MallocTracker::initialize(NMT_TrackingLevel level) {
return true;
}
- bool MallocTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
- assert(from != NMT_off, "Can not transition from off state");
- assert(to != NMT_off, "Can not transition to off state");
- assert (from != NMT_minimal, "cannot transition from minimal state");
- if (from == NMT_detail) {
- assert(to == NMT_minimal || to == NMT_summary, "Just check");
- MallocSiteTable::shutdown();
- }
- return true;
- }
// Record a malloc memory allocation
void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags,
const NativeCallStack& stack, NMT_TrackingLevel level) {
@ -281,7 +260,7 @@ void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flag
assert(((size_t)memblock & (sizeof(size_t) * 2 - 1)) == 0, "Alignment check");
#ifdef ASSERT
- if (level > NMT_minimal) {
+ if (level > NMT_off) {
// Read back
assert(get_size(memblock) == size, "Wrong size");
assert(get_flags(memblock) == flags, "Wrong flags");

View File

@ -330,13 +330,8 @@ class MallocHeader {
public:
MallocHeader(size_t size, MEMFLAGS flags, const NativeCallStack& stack, NMT_TrackingLevel level) {
assert(size < max_reasonable_malloc_size, "Too large allocation size?");
- if (level == NMT_minimal) {
- return;
- }
_flags = NMTUtil::flag_to_index(flags);
set_size(size);
if (level == NMT_detail) {
@ -386,8 +381,6 @@ class MallocTracker : AllStatic {
// Initialize malloc tracker for specific tracking level
static bool initialize(NMT_TrackingLevel level);
- static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
// malloc tracking header size for specific tracking level
static inline size_t malloc_header_size(NMT_TrackingLevel level) {
return (level == NMT_off) ? 0 : sizeof(MallocHeader);

View File

@ -108,41 +108,9 @@ void Tracker::record(address addr, size_t size) {
}
}
- // Shutdown can only be issued via JCmd, and NMT JCmd is serialized by lock
- void MemTracker::shutdown() {
- // We can only shutdown NMT to minimal tracking level if it is ever on.
- if (tracking_level() > NMT_minimal) {
- transition_to(NMT_minimal);
- }
- }
- bool MemTracker::transition_to(NMT_TrackingLevel level) {
- NMT_TrackingLevel current_level = tracking_level();
- assert(level != NMT_off || current_level == NMT_off, "Cannot transition NMT to off");
- if (current_level == level) {
- return true;
- } else if (current_level > level) {
- // Downgrade tracking level, we want to lower the tracking level first
- _tracking_level = level;
- // Make _tracking_level visible immediately.
- OrderAccess::fence();
- VirtualMemoryTracker::transition(current_level, level);
- MallocTracker::transition(current_level, level);
- ThreadStackTracker::transition(current_level, level);
- } else {
- // Upgrading tracking level is not supported and has never been supported.
- // Allocating and deallocating malloc tracking structures is not thread safe and
- // leads to inconsistencies unless a lot coarser locks are added.
- }
- return true;
- }
// Report during error reporting.
void MemTracker::error_report(outputStream* output) {
- if (tracking_level() >= NMT_summary) {
+ if (enabled()) {
report(true, output, MemReporterBase::default_scale); // just print summary for error case.
output->print("Preinit state:");
NMTPreInit::print_state(output);
@ -157,11 +125,8 @@ void MemTracker::final_report(outputStream* output) {
// printing the final report during normal VM exit, it should not print
// the final report again. In addition, it should be guarded from
// recursive calls in case NMT reporting itself crashes.
- if (Atomic::cmpxchg(&g_final_report_did_run, false, true) == false) {
- NMT_TrackingLevel level = tracking_level();
- if (level >= NMT_summary) {
- report(level == NMT_summary, output, 1);
- }
+ if (enabled() && Atomic::cmpxchg(&g_final_report_did_run, false, true) == false) {
+ report(tracking_level() == NMT_summary, output, 1);
}
}
@ -189,7 +154,6 @@ void MemTracker::tuning_statistics(outputStream* out) {
out->print_cr("State: %s", NMTUtil::tracking_level_to_string(_tracking_level));
out->print_cr("Malloc allocation site table size: %d", MallocSiteTable::hash_buckets());
out->print_cr(" Tracking stack depth: %d", NMT_TrackingStackDepth);
- NOT_PRODUCT(out->print_cr("Peak concurrent access: %d", MallocSiteTable::access_peak_count());)
out->cr();
MallocSiteTable::print_tuning_statistics(out);
out->cr();
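Aside: the final_report() hunk above keeps the existing run-once guard and merely folds the enabled() check into it. A standalone sketch of that guard pattern (std::atomic stands in for HotSpot's Atomic::cmpxchg; names are illustrative):

#include <atomic>
#include <cstdio>

static std::atomic<bool> g_final_report_did_run{false};

void final_report_sketch() {
  bool expected = false;
  // Only the first caller flips the flag and prints; a later caller, or a
  // recursive call from a crash during reporting, falls through silently.
  if (g_final_report_did_run.compare_exchange_strong(expected, true)) {
    std::puts("NMT final report would be printed here");
  }
}

int main() {
  final_report_sketch();
  final_report_sketch();  // second call is a no-op
  return 0;
}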

View File

@ -49,7 +49,7 @@ class Tracker : public StackObj {
class MemTracker : AllStatic {
public:
static inline NMT_TrackingLevel tracking_level() { return NMT_off; }
- static inline void shutdown() { }
+ static inline bool enabled() { return false; }
static inline void init() { }
static bool check_launcher_nmt_support(const char* value) { return true; }
static bool verify_nmt_option() { return true; }
@ -137,14 +137,9 @@ class MemTracker : AllStatic {
return _tracking_level;
}
- // Shutdown native memory tracking.
- // This transitions the tracking level:
- // summary -> minimal
- // detail -> minimal
- static void shutdown();
- // Transition the tracking level to specified level
- static bool transition_to(NMT_TrackingLevel level);
+ static inline bool enabled() {
+ return _tracking_level > NMT_off;
+ }
static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
const NativeCallStack& stack, NMT_TrackingLevel level) {
@ -180,20 +175,20 @@ class MemTracker : AllStatic {
// Record creation of an arena
static inline void record_new_arena(MEMFLAGS flag) {
- if (tracking_level() < NMT_summary) return;
+ if (!enabled()) return;
MallocTracker::record_new_arena(flag);
}
// Record destruction of an arena
static inline void record_arena_free(MEMFLAGS flag) {
- if (tracking_level() < NMT_summary) return;
+ if (!enabled()) return;
MallocTracker::record_arena_free(flag);
}
// Record arena size change. Arena size is the size of all arena
// chuncks that backing up the arena.
static inline void record_arena_size_change(ssize_t diff, MEMFLAGS flag) {
- if (tracking_level() < NMT_summary) return;
+ if (!enabled()) return;
MallocTracker::record_arena_size_change(diff, flag);
}
@ -203,11 +198,9 @@ class MemTracker : AllStatic {
static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
MEMFLAGS flag = mtNone) {
assert_post_init();
- if (tracking_level() < NMT_summary) return;
+ if (!enabled()) return;
if (addr != NULL) {
ThreadCritical tc;
- // Recheck to avoid potential racing during NMT shutdown
- if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
}
}
@ -215,10 +208,9 @@ class MemTracker : AllStatic {
static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
assert_post_init();
- if (tracking_level() < NMT_summary) return;
+ if (!enabled()) return;
if (addr != NULL) {
ThreadCritical tc;
- if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
}
@ -227,10 +219,9 @@ class MemTracker : AllStatic {
static inline void record_virtual_memory_commit(void* addr, size_t size,
const NativeCallStack& stack) {
assert_post_init();
- if (tracking_level() < NMT_summary) return;
+ if (!enabled()) return;
if (addr != NULL) {
ThreadCritical tc;
- if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
}
}
@ -243,28 +234,25 @@ class MemTracker : AllStatic {
// memory flags of the original region.
static inline void record_virtual_memory_split_reserved(void* addr, size_t size, size_t split) {
assert_post_init();
- if (tracking_level() < NMT_summary) return;
+ if (!enabled()) return;
if (addr != NULL) {
ThreadCritical tc;
- // Recheck to avoid potential racing during NMT shutdown
- if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::split_reserved_region((address)addr, size, split);
}
}
static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
assert_post_init();
- if (tracking_level() < NMT_summary) return;
+ if (!enabled()) return;
if (addr != NULL) {
ThreadCritical tc;
- if (tracking_level() < NMT_summary) return;
VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
}
}
static void record_thread_stack(void* addr, size_t size) {
assert_post_init();
- if (tracking_level() < NMT_summary) return;
+ if (!enabled()) return;
if (addr != NULL) {
ThreadStackTracker::new_thread_stack((address)addr, size, CALLER_PC);
}
@ -272,7 +260,7 @@ class MemTracker : AllStatic {
static inline void release_thread_stack(void* addr, size_t size) {
assert_post_init();
- if (tracking_level() < NMT_summary) return;
+ if (!enabled()) return;
if (addr != NULL) {
ThreadStackTracker::delete_thread_stack((address)addr, size);
}
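The memTracker.hpp hunks above all end up with the same shape: one early enabled() check, and no second check under ThreadCritical, because the tracking level can no longer drop at runtime once shutdown is gone. A compilable sketch of that shape, with hypothetical names (a plain mutex stands in for ThreadCritical):

#include <cstddef>
#include <mutex>

static bool g_nmt_enabled = true;   // decided once at VM start, never lowered again
static std::mutex g_tracker_lock;   // stands in for ThreadCritical

static bool enabled() { return g_nmt_enabled; }

void record_region_sketch(void* addr, size_t size) {
  if (!enabled()) return;           // single early-out; no re-check needed later
  if (addr != nullptr) {
    std::lock_guard<std::mutex> lock(g_tracker_lock);
    // ... bookkeeping for [addr, addr + size) would go here ...
    (void)size;
  }
}

int main() {
  int dummy = 0;
  record_region_sketch(&dummy, sizeof(dummy));
  return 0;
}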

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,11 +28,14 @@
#define MEMORY_TYPE_DECLARE_NAME(type, human_readable) \
human_readable,
+ STATIC_ASSERT(NMT_off > NMT_unknown);
+ STATIC_ASSERT(NMT_summary > NMT_off);
+ STATIC_ASSERT(NMT_detail > NMT_summary);
const char* NMTUtil::_memory_type_names[] = {
MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_NAME)
};
const char* NMTUtil::scale_name(size_t scale) {
switch(scale) {
case 1: return "";
@ -64,7 +67,6 @@ const char* NMTUtil::tracking_level_to_string(NMT_TrackingLevel lvl) {
switch(lvl) {
case NMT_unknown: return "unknown"; break;
case NMT_off: return "off"; break;
- case NMT_minimal: return "minimal"; break;
case NMT_summary: return "summary"; break;
case NMT_detail: return "detail"; break;
default: return "invalid"; break;

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,10 +41,6 @@
// - nothing is tracked
// - no malloc headers are used
//
// "minimal": after shutdown - NMT had been on at some point but has been switched off
// - nothing is tracked
// - malloc headers are allocated but not initialized not used
//
// "summary": after initialization with NativeMemoryTracking=summary - NMT in summary mode
// - category summaries per tag are tracked
// - thread stacks are tracked
@ -59,25 +55,16 @@
// - malloc headers are used
// - malloc call site table is allocated and used
//
- // Valid state transitions:
- //
- //  unknown ----> off
- //                 |
- //                 |--> summary --
- //                 |              |
- //                 |--> detail --+--> minimal
- //
// Please keep relation of numerical values!
- // unknown < off < minimal < summary < detail
+ // unknown < off < summary < detail
//
enum NMT_TrackingLevel {
- NMT_unknown = 0,
- NMT_off = 1,
- NMT_minimal = 2,
- NMT_summary = 3,
- NMT_detail = 4
+ NMT_unknown,
+ NMT_off,
+ NMT_summary,
+ NMT_detail
};
// Number of stack frames to capture. This is a
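The enum hunk above, together with the "Please keep relation of numerical values!" comment and the STATIC_ASSERTs added in nmtCommon.cpp, pins down the one invariant that survives the removal of NMT_minimal: the levels must stay monotonically ordered, because call sites compare them with '<' and '>'. A minimal sketch of that invariant in plain C++ (names mirror the patch, the helper is illustrative):

enum NMT_TrackingLevel {
  NMT_unknown,
  NMT_off,
  NMT_summary,
  NMT_detail
};

// Same checks as the new STATIC_ASSERTs, written with plain static_assert.
static_assert(NMT_off     > NMT_unknown, "tracking levels must stay ordered");
static_assert(NMT_summary > NMT_off,     "tracking levels must stay ordered");
static_assert(NMT_detail  > NMT_summary, "tracking levels must stay ordered");

// MemTracker::enabled() relies on exactly this ordering:
inline bool enabled(NMT_TrackingLevel level) { return level > NMT_off; }

int main() { return enabled(NMT_summary) ? 0 : 1; }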

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2019, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -42,19 +42,6 @@ bool ThreadStackTracker::initialize(NMT_TrackingLevel level) {
return true;
}
- bool ThreadStackTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
- assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
- if (to == NMT_minimal) {
- assert(from == NMT_summary || from == NMT_detail, "Just check");
- ThreadCritical tc;
- if (_simple_thread_stacks != NULL) {
- delete _simple_thread_stacks;
- _simple_thread_stacks = NULL;
- }
- }
- return true;
- }
int ThreadStackTracker::compare_thread_stack_base(const SimpleThreadStackSite& s1, const SimpleThreadStackSite& s2) {
return s1.base() - s2.base();
}

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2019, 2021, Red Hat, Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -72,7 +72,6 @@ private:
static SortedLinkedList<SimpleThreadStackSite, compare_thread_stack_base>* _simple_thread_stacks;
public:
static bool initialize(NMT_TrackingLevel level);
- static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
static void new_thread_stack(void* base, size_t size, const NativeCallStack& stack);
static void delete_thread_stack(void* base, size_t size);

View File

@ -671,20 +671,3 @@ bool VirtualMemoryTracker::walk_virtual_memory(VirtualMemoryWalker* walker) {
}
return true;
}
- // Transition virtual memory tracking level.
- bool VirtualMemoryTracker::transition(NMT_TrackingLevel from, NMT_TrackingLevel to) {
- assert (from != NMT_minimal, "cannot convert from the lowest tracking level to anything");
- if (to == NMT_minimal) {
- assert(from == NMT_summary || from == NMT_detail, "Just check");
- // Clean up virtual memory tracking data structures.
- ThreadCritical tc;
- // Check for potential race with other thread calling transition
- if (_reserved_regions != NULL) {
- delete _reserved_regions;
- _reserved_regions = NULL;
- }
- }
- return true;
- }

View File

@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -387,8 +387,6 @@ class VirtualMemoryTracker : AllStatic {
// Walk virtual memory data structure for creating baseline, etc.
static bool walk_virtual_memory(VirtualMemoryWalker* walker);
- static bool transition(NMT_TrackingLevel from, NMT_TrackingLevel to);
// Snapshot current thread stacks
static void snapshot_thread_stacks();

View File

@ -1,49 +0,0 @@
- /*
- * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- */
- /*
- * @test
- * @bug 8059100
- * @summary Test that you can decrease NMT tracking level but not increase it.
- * @modules java.base/jdk.internal.misc
- * @library /test/lib
- * @build sun.hotspot.WhiteBox
- * @run driver jdk.test.lib.helpers.ClassFileInstaller sun.hotspot.WhiteBox
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:NativeMemoryTracking=detail ChangeTrackingLevel
- */
- import sun.hotspot.WhiteBox;
- public class ChangeTrackingLevel {
- public static WhiteBox wb = WhiteBox.getWhiteBox();
- public static void main(String args[]) throws Exception {
- boolean testChangeLevel = wb.NMTChangeTrackingLevel();
- if (testChangeLevel) {
- System.out.println("NMT level change test passed.");
- } else {
- // it also fails if the VM asserts.
- throw new RuntimeException("NMT level change test failed");
- }
- }
- };

View File

@ -225,7 +225,6 @@ public class WhiteBox {
public native void NMTReleaseMemory(long addr, long size);
public native long NMTMallocWithPseudoStack(long size, int index);
public native long NMTMallocWithPseudoStackAndType(long size, int index, int type);
- public native boolean NMTChangeTrackingLevel();
public native int NMTGetHashSize();
public native long NMTNewArena(long initSize);
public native void NMTFreeArena(long arena);

View File

@ -226,7 +226,6 @@ public class WhiteBox {
public native void NMTReleaseMemory(long addr, long size);
public native long NMTMallocWithPseudoStack(long size, int index);
public native long NMTMallocWithPseudoStackAndType(long size, int index, int type);
- public native boolean NMTChangeTrackingLevel();
public native int NMTGetHashSize();
public native long NMTNewArena(long initSize);
public native void NMTFreeArena(long arena);