8243535: NMT may show wrong numbers for CDS and CCS
Reviewed-by: zgu, iklam
commit 8dc66431d4
parent dc91b06661
@@ -370,6 +370,10 @@ void os::split_reserved_memory(char *base, size_t size, size_t split) {
   assert(split > 0, "Sanity");
   assert(is_aligned(base, os::vm_allocation_granularity()), "Sanity");
   assert(is_aligned(split_address, os::vm_allocation_granularity()), "Sanity");
+
+  // NMT: tell NMT to track both parts individually from now on.
+  MemTracker::record_virtual_memory_split_reserved(base, size, split);
+
 }
 
 int os::vsnprintf(char* buf, size_t len, const char* fmt, va_list args) {
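With the hunk above, a split is reported to NMT automatically. For illustration, a caller-side sketch inside HotSpot (not part of this commit; os::reserve_memory arguments abbreviated) of splitting one reservation and then tagging each half separately:

    // Reserve one region; NMT tracks it as a single mapping.
    char* base = os::reserve_memory(total_size);
    // Split it; NMT now tracks [base, base+split) and
    // [base+split, base+total_size) as two regions.
    os::split_reserved_memory(base, total_size, split);
    // Each part can now carry its own memory flag.
    MemTracker::record_virtual_memory_type((address)base, mtClassShared);
    MemTracker::record_virtual_memory_type((address)(base + split), mtClass);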
@@ -3237,6 +3237,10 @@ void os::split_reserved_memory(char *base, size_t size, size_t split) {
   reserve_memory(split, base);
   reserve_memory(size - split, split_address);
 
+  // NMT: nothing to do here. Since Windows implements the split by
+  // releasing and re-reserving memory, the parts are already registered
+  // as individual mappings with NMT.
+
 }
 
 // Multiple threads can race in this code but it's not possible to unmap small sections of
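Background for the comment above: VirtualFree(MEM_RELEASE) can only release a VirtualAlloc reservation as a whole, so the Windows split works roughly like this sketch (not the actual HotSpot code, which also handles errors):

    #include <windows.h>

    // Sketch: split an existing reservation [base, base+size) at 'split'
    // by releasing it and re-reserving the two halves separately.
    void split_by_rereserve(char* base, size_t size, size_t split) {
      VirtualFree(base, 0, MEM_RELEASE);  // release works only on whole reservations
      VirtualAlloc(base,         split,        MEM_RESERVE, PAGE_READWRITE);
      VirtualAlloc(base + split, size - split, MEM_RESERVE, PAGE_READWRITE);
    }

Because each half goes through the normal reserve path again, NMT registers them as individual mappings for free. The window between release and re-reserve is also why the trailing context line warns that multiple threads can race in this code.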
@@ -1499,11 +1499,6 @@ MapArchiveResult FileMapInfo::map_region(int i, intx addr_delta, char* mapped_ba
     si->set_read_only(false); // Need to patch the pointers
   }
 
-  if (rs.is_reserved()) {
-    assert(rs.contains(requested_addr) && rs.contains(requested_addr + size - 1), "must be");
-    MemTracker::record_virtual_memory_type((address)requested_addr, mtClassShared);
-  }
-
   if (MetaspaceShared::use_windows_memory_mapping() && rs.is_reserved()) {
     // This is the second time we try to map the archive(s). We have already created a ReservedSpace
     // that covers all the FileMapRegions to ensure all regions can be mapped. However, Windows
@@ -1515,9 +1510,12 @@ MapArchiveResult FileMapInfo::map_region(int i, intx addr_delta, char* mapped_ba
       return MAP_ARCHIVE_OTHER_FAILURE; // oom or I/O error.
     }
   } else {
+    // Note that this may either be a "fresh" mapping into unreserved address
+    // space (Windows, first mapping attempt), or a mapping into pre-reserved
+    // space (Posix). See also comment in MetaspaceShared::map_archives().
     char* base = os::map_memory(_fd, _full_path, si->file_offset(),
                                 requested_addr, size, si->read_only(),
-                                si->allow_exec());
+                                si->allow_exec(), mtClassShared);
     if (base != requested_addr) {
       log_info(cds)("Unable to map %s shared space at " INTPTR_FORMAT,
                     shared_region_name[i], p2i(requested_addr));
@@ -1528,14 +1526,6 @@ MapArchiveResult FileMapInfo::map_region(int i, intx addr_delta, char* mapped_ba
     }
     si->set_mapped_base(requested_addr);
 
-    if (!rs.is_reserved()) {
-      // When mapping on Windows for the first attempt, we don't reserve the address space for the regions
-      // (Windows can't mmap into a ReservedSpace). In this case, NMT requires we call it after
-      // os::map_memory has succeeded.
-      assert(MetaspaceShared::use_windows_memory_mapping(), "Windows memory mapping only");
-      MemTracker::record_virtual_memory_type((address)requested_addr, mtClassShared);
-    }
-
     if (VerifySharedSpaces && !verify_region_checksum(i)) {
       return MAP_ARCHIVE_OTHER_FAILURE;
     }
@@ -1552,7 +1542,7 @@ char* FileMapInfo::map_bitmap_region() {
   bool read_only = true, allow_exec = false;
   char* requested_addr = NULL; // allow OS to pick any location
   char* bitmap_base = os::map_memory(_fd, _full_path, si->file_offset(),
-                                     requested_addr, si->used_aligned(), read_only, allow_exec);
+                                     requested_addr, si->used_aligned(), read_only, allow_exec, mtClassShared);
   if (bitmap_base == NULL) {
     log_error(cds)("failed to map relocation bitmap");
     return NULL;
@@ -2538,6 +2538,10 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
   assert(is_aligned(class_space_rs.base(), class_space_alignment), "Sanity");
   assert(is_aligned(class_space_rs.size(), class_space_alignment), "Sanity");
 
+  // NMT: fix up the space tags
+  MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
+  MemTracker::record_virtual_memory_type(class_space_rs.base(), mtClass);
+
   return archive_space_rs.base();
 
 #else
@@ -78,6 +78,8 @@ class ReservedSpace {
   // This splits the space into two spaces, the first part of which will be returned.
   // If split==true, the resulting two spaces can be released independently from each other.
-  // This may cause the original space to loose its content.
+  // They also will be tracked individually by NMT and can be tagged with different flags.
+  // Note that this may cause the original space to loose its content.
   // If split==false, the resulting space will be just a hotspot-internal representation
   // of a sub section of the underlying mapping.
   ReservedSpace first_part(size_t partition_size, size_t alignment, bool split = false);
@@ -1763,10 +1763,10 @@ void os::pretouch_memory(void* start, void* end, size_t page_size) {
 
 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
                      char *addr, size_t bytes, bool read_only,
-                     bool allow_exec) {
+                     bool allow_exec, MEMFLAGS flags) {
   char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
   if (result != NULL) {
-    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC);
+    MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flags);
   }
   return result;
 }
@@ -326,7 +326,8 @@ class os: AllStatic {
   // Both base and split point must be aligned to allocation granularity; split point shall
   // be >0 and <size.
   // Splitting guarantees that the resulting two memory regions can be released independently
-  // from each other using os::release_memory().
+  // from each other using os::release_memory(). It also means NMT will track these regions
+  // individually, allowing different tags to be set.
   static void split_reserved_memory(char *base, size_t size, size_t split);
 
   static bool commit_memory(char* addr, size_t bytes, bool executable);
@@ -368,7 +369,7 @@ class os: AllStatic {
 
   static char* map_memory(int fd, const char* file_name, size_t file_offset,
                           char *addr, size_t bytes, bool read_only = false,
-                          bool allow_exec = false);
+                          bool allow_exec = false, MEMFLAGS flags = mtNone);
   static char* remap_memory(int fd, const char* file_name, size_t file_offset,
                             char *addr, size_t bytes, bool read_only,
                             bool allow_exec);
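With the new parameter the NMT tag is applied inside os::map_memory itself, which is what lets the filemap.cpp call sites above drop their separate record_virtual_memory_type calls. A hedged usage sketch (variable names hypothetical):

    // Mapping is registered with NMT under mtClassShared right away;
    // no follow-up MemTracker::record_virtual_memory_type() call needed.
    char* p = os::map_memory(fd, path, offset, req_addr, bytes,
                             true /*read_only*/, false /*allow_exec*/,
                             mtClassShared);

Callers that do not pass a flag keep the previous behavior via the mtNone default.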
@@ -68,6 +68,7 @@ class MemTracker : AllStatic {
     MEMFLAGS flag = mtNone) { }
   static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
     const NativeCallStack& stack, MEMFLAGS flag = mtNone) { }
+  static inline void record_virtual_memory_split_reserved(void* addr, size_t size, size_t split) { }
   static inline void record_virtual_memory_commit(void* addr, size_t size, const NativeCallStack& stack) { }
   static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) { }
   static inline void record_thread_stack(void* addr, size_t size) { }
@@ -238,6 +239,22 @@ class MemTracker : AllStatic {
     }
   }
 
+  // Given an existing memory mapping registered with NMT and a splitting
+  // address, split the mapping in two. The memory region is supposed to
+  // be fully uncommitted.
+  //
+  // The two new memory regions will be both registered under stack and
+  // memory flags of the original region.
+  static inline void record_virtual_memory_split_reserved(void* addr, size_t size, size_t split) {
+    if (tracking_level() < NMT_summary) return;
+    if (addr != NULL) {
+      ThreadCritical tc;
+      // Recheck to avoid potential racing during NMT shutdown
+      if (tracking_level() < NMT_summary) return;
+      VirtualMemoryTracker::split_reserved_region((address)addr, size, split);
+    }
+  }
+
   static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
     if (tracking_level() < NMT_summary) return;
     if (addr != NULL) {
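The recheck of tracking_level() under ThreadCritical follows the double-checked pattern used elsewhere in MemTracker: test once without the lock for the fast path, then re-test under the lock in case NMT shut down concurrently. A standalone model of the same shape (std::mutex and std::atomic standing in for ThreadCritical and the NMT tracking level):

    #include <atomic>
    #include <cstddef>
    #include <mutex>

    enum Level { NMT_off, NMT_summary, NMT_detail };
    std::atomic<Level> g_level{NMT_detail};
    std::mutex g_lock;  // stands in for ThreadCritical

    void record_split(void* addr, std::size_t size, std::size_t split) {
      if (g_level.load() < NMT_summary) return;    // fast path, no lock taken
      if (addr != nullptr) {
        std::lock_guard<std::mutex> guard(g_lock);
        if (g_level.load() < NMT_summary) return;  // recheck: level may have dropped
        // ... perform the split bookkeeping under the lock ...
      }
    }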
@@ -348,12 +348,9 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
       reserved_rgn->set_call_stack(stack);
       reserved_rgn->set_flag(flag);
       return true;
-    } else if (reserved_rgn->adjacent_to(base_addr, size)) {
-      VirtualMemorySummary::record_reserved_memory(size, flag);
-      reserved_rgn->expand_region(base_addr, size);
-      reserved_rgn->set_call_stack(stack);
-      return true;
     } else {
       assert(reserved_rgn->overlap_region(base_addr, size), "Must be");
 
       // Overlapped reservation.
       // It can happen when the regions are thread stacks, as JNI
       // thread does not detach from VM before exits, and leads to
@@ -491,6 +488,30 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
   }
 }
 
+// Given an existing memory mapping registered with NMT, split the mapping in
+// two. The newly created two mappings will be registered under the call
+// stack and the memory flags of the original section.
+bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split) {
+
+  ReservedMemoryRegion rgn(addr, size);
+  ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
+  assert(reserved_rgn != NULL, "No reserved region");
+  assert(reserved_rgn->same_region(addr, size), "Must be identical region");
+  assert(reserved_rgn->committed_size() == 0, "Splitting committed region?");
+
+  NativeCallStack original_stack = *reserved_rgn->call_stack();
+  MEMFLAGS original_flags = reserved_rgn->flag();
+
+  _reserved_regions->remove(rgn);
+
+  // Now, create two new regions.
+  add_reserved_region(addr, split, original_stack, original_flags);
+  add_reserved_region(addr + split, size - split, original_stack, original_flags);
+
+  return true;
+}
+
 
 // Iterate the range, find committed region within its bound.
 class RegionIterator : public StackObj {
 private:
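The body above is plain set surgery: look up the original region, remember its call stack and flag, remove it, and re-add the two halves with the inherited attributes. A self-contained model of the same bookkeeping, with std::map standing in for NMT's sorted region list:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <map>
    #include <string>

    struct Region { std::size_t size; std::string flag; std::string stack; };
    std::map<std::uintptr_t, Region> reserved;  // keyed by base address

    bool split_reserved(std::uintptr_t addr, std::size_t size, std::size_t split) {
      auto it = reserved.find(addr);
      assert(it != reserved.end() && it->second.size == size);
      Region original = it->second;   // keep the stack and flag of the original
      reserved.erase(it);
      // Re-register as two regions inheriting the original attributes.
      reserved[addr]         = {split,        original.flag, original.stack};
      reserved[addr + split] = {size - split, original.flag, original.stack};
      return true;
    }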
@@ -210,11 +210,8 @@ class VirtualMemoryRegion {
   inline bool overlap_region(address addr, size_t sz) const {
     assert(sz > 0, "Invalid size");
     assert(size() > 0, "Invalid size");
-    VirtualMemoryRegion rgn(addr, sz);
     return contain_address(addr) ||
-           contain_address(addr + sz - 1) ||
-           rgn.contain_address(base()) ||
-           rgn.contain_address(end() - 1);
+           contain_address(addr + sz - 1);
   }
 
   inline bool adjacent_to(address addr, size_t sz) const {
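The simplified predicate only tests the two endpoints of the argument range against this region. That covers identical, adjacent and partially overlapping regions; an argument region that strictly encloses this one is not detected, though the compare() added below would trip its sanity assert in that situation. A tiny standalone check of the simplified form:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Does [base, base+size) contain either endpoint of the closed
    // byte range [addr, addr+sz-1]?
    bool overlaps(std::uintptr_t base, std::size_t size,
                  std::uintptr_t addr, std::size_t sz) {
      auto contains = [&](std::uintptr_t a) { return a >= base && a < base + size; };
      return contains(addr) || contains(addr + sz - 1);
    }

    int main() {
      assert( overlaps(100, 50, 100, 50));   // identical regions
      assert( overlaps(100, 50, 120, 100));  // partial overlap at the top
      assert(!overlaps(100, 50, 150, 10));   // adjacent regions do not overlap
      assert(!overlaps(100, 50,  50, 200));  // strict enclosure is not seen
      return 0;
    }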
@@ -240,6 +237,24 @@ class VirtualMemoryRegion {
     set_size(size() + sz);
   }
 
+  // Returns 0 if regions overlap; 1 if this region follows rgn;
+  // -1 if this region precedes rgn.
+  inline int compare(const VirtualMemoryRegion& rgn) const {
+    if (overlap_region(rgn.base(), rgn.size())) {
+      return 0;
+    } else if (base() >= rgn.end()) {
+      return 1;
+    } else {
+      assert(rgn.base() >= end(), "Sanity");
+      return -1;
+    }
+  }
+
+  // Returns true if regions overlap, false otherwise.
+  inline bool equals(const VirtualMemoryRegion& rgn) const {
+    return compare(rgn) == 0;
+  }
+
 protected:
   void set_base(address base) {
     assert(base != NULL, "Sanity check");
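compare() gives every region type a single address ordering in which any two overlapping regions count as equal; this is the lookup contract NMT's sorted region list relies on, and hoisting it into the base class is what lets the two near-identical per-subclass copies below be deleted. A standalone illustration with toy types (not HotSpot code):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct Rgn {
      std::uintptr_t base; std::size_t size;
      std::uintptr_t end() const { return base + size; }
      bool overlap(const Rgn& o) const {   // endpoint test as in overlap_region
        auto in = [&](std::uintptr_t a) { return a >= base && a < end(); };
        return in(o.base) || in(o.end() - 1);
      }
      // 0 if regions overlap, 1 if this follows o, -1 if this precedes o.
      int compare(const Rgn& o) const {
        if (overlap(o)) return 0;
        if (base >= o.end()) return 1;
        assert(o.base >= end());
        return -1;
      }
    };

    int main() {
      Rgn a{100, 50}, b{150, 50}, c{120, 10};
      assert(a.compare(b) == -1);  // a precedes b (adjacent, no overlap)
      assert(b.compare(a) ==  1);  // b follows a
      assert(a.compare(c) ==  0);  // c lies inside a: equal for lookup purposes
      return 0;
    }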
@@ -261,24 +276,6 @@ class CommittedMemoryRegion : public VirtualMemoryRegion {
   CommittedMemoryRegion(address addr, size_t size, const NativeCallStack& stack) :
     VirtualMemoryRegion(addr, size), _stack(stack) { }
 
-  inline int compare(const CommittedMemoryRegion& rgn) const {
-    if (overlap_region(rgn.base(), rgn.size())) {
-      return 0;
-    } else {
-      if (base() == rgn.base()) {
-        return 0;
-      } else if (base() > rgn.base()) {
-        return 1;
-      } else {
-        return -1;
-      }
-    }
-  }
-
-  inline bool equals(const CommittedMemoryRegion& rgn) const {
-    return compare(rgn) == 0;
-  }
-
   inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; }
   inline const NativeCallStack* call_stack() const { return &_stack; }
 };
@@ -316,24 +313,6 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {
   void set_flag(MEMFLAGS flag);
   inline MEMFLAGS flag() const { return _flag; }
 
-  inline int compare(const ReservedMemoryRegion& rgn) const {
-    if (overlap_region(rgn.base(), rgn.size())) {
-      return 0;
-    } else {
-      if (base() == rgn.base()) {
-        return 0;
-      } else if (base() > rgn.base()) {
-        return 1;
-      } else {
-        return -1;
-      }
-    }
-  }
-
-  inline bool equals(const ReservedMemoryRegion& rgn) const {
-    return compare(rgn) == 0;
-  }
-
   // uncommitted thread stack bottom, above guard pages if there is any.
   address thread_stack_uncommitted_bottom() const;
@@ -405,6 +384,11 @@ class VirtualMemoryTracker : AllStatic {
   static bool remove_released_region (address base_addr, size_t size);
   static void set_reserved_region_type (address addr, MEMFLAGS flag);
 
+  // Given an existing memory mapping registered with NMT, split the mapping in
+  // two. The newly created two mappings will be registered under the call
+  // stack and the memory flags of the original section.
+  static bool split_reserved_region(address addr, size_t size, size_t split);
+
   // Walk virtual memory data structure for creating baseline, etc.
   static bool walk_virtual_memory(VirtualMemoryWalker* walker);