diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index d4699567733..609317df45f 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -4602,7 +4602,7 @@ static void workaround_expand_exec_shield_cs_limit() {
     return; // No matter, we tried, best effort.
   }
 
-  MemTracker::record_virtual_memory_type((address)codebuf, mtInternal);
+  MemTracker::record_virtual_memory_tag((address)codebuf, mtInternal);
 
   log_info(os)("[CS limit NX emulation work-around, exec code at: %p]", codebuf);
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index 26bff6c8bd4..60efdeb2ef5 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -367,7 +367,7 @@ bool os::dir_is_empty(const char* path) {
   return result;
 }
 
-static char* reserve_mmapped_memory(size_t bytes, char* requested_addr, MEMFLAGS flag) {
+static char* reserve_mmapped_memory(size_t bytes, char* requested_addr, MemTag mem_tag) {
   char * addr;
   int flags = MAP_PRIVATE NOT_AIX( | MAP_NORESERVE ) | MAP_ANONYMOUS;
   if (requested_addr != nullptr) {
@@ -382,7 +382,7 @@ static char* reserve_mmapped_memory(size_t bytes, char* requested_addr, MEMFLAGS
                        flags, -1, 0);
 
   if (addr != MAP_FAILED) {
-    MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC, flag);
+    MemTracker::record_virtual_memory_reserve((address)addr, bytes, CALLER_PC, mem_tag);
     return addr;
   }
   return nullptr;
@@ -495,7 +495,7 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
   return chop_extra_memory(size, alignment, extra_base, extra_size);
 }
 
-char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_desc, MEMFLAGS flag) {
+char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_desc, MemTag mem_tag) {
   size_t extra_size = calculate_aligned_extra_size(size, alignment);
   // For file mapping, we do not call os:map_memory_to_file(size,fd) since:
   // - we later chop away parts of the mapping using os::release_memory and that could fail if the
@@ -503,7 +503,7 @@ char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int file_des
   // - The memory API os::reserve_memory uses is an implementation detail. It may (and usually is)
   //   mmap but it also may System V shared memory which cannot be uncommitted as a whole, so
   //   chopping off and unmapping excess bits back and front (see below) would not work.
-  char* extra_base = reserve_mmapped_memory(extra_size, nullptr, flag);
+  char* extra_base = reserve_mmapped_memory(extra_size, nullptr, mem_tag);
   if (extra_base == nullptr) {
     return nullptr;
   }
diff --git a/src/hotspot/os/windows/os_windows.cpp b/src/hotspot/os/windows/os_windows.cpp
index a1e0a78837f..1acce2540a8 100644
--- a/src/hotspot/os/windows/os_windows.cpp
+++ b/src/hotspot/os/windows/os_windows.cpp
@@ -3428,7 +3428,7 @@ char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, in
 // Multiple threads can race in this code but it's not possible to unmap small sections of
 // virtual space to get requested alignment, like posix-like os's.
 // Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
-static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc, MEMFLAGS flag = mtNone) {
+static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc, MemTag mem_tag = mtNone) {
   assert(is_aligned(alignment, os::vm_allocation_granularity()),
          "Alignment must be a multiple of allocation granularity (page size)");
   assert(is_aligned(size, os::vm_allocation_granularity()),
@@ -3441,8 +3441,8 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
   static const int max_attempts = 20;
   for (int attempt = 0; attempt < max_attempts && aligned_base == nullptr; attempt ++) {
-    char* extra_base = file_desc != -1 ? os::map_memory_to_file(extra_size, file_desc, flag) :
-                                         os::reserve_memory(extra_size, false, flag);
+    char* extra_base = file_desc != -1 ? os::map_memory_to_file(extra_size, file_desc, mem_tag) :
+                                         os::reserve_memory(extra_size, false, mem_tag);
     if (extra_base == nullptr) {
       return nullptr;
     }
@@ -3458,8 +3458,8 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
     // Attempt to map, into the just vacated space, the slightly smaller aligned area.
     // Which may fail, hence the loop.
-    aligned_base = file_desc != -1 ? os::attempt_map_memory_to_file_at(aligned_base, size, file_desc, flag) :
-                                     os::attempt_reserve_memory_at(aligned_base, size, false, flag);
+    aligned_base = file_desc != -1 ? os::attempt_map_memory_to_file_at(aligned_base, size, file_desc, mem_tag) :
+                                     os::attempt_reserve_memory_at(aligned_base, size, false, mem_tag);
   }
 
   assert(aligned_base != nullptr,
@@ -3473,8 +3473,8 @@ char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
   return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */);
 }
 
-char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MEMFLAGS flag) {
-  return map_or_reserve_memory_aligned(size, alignment, fd, flag);
+char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MemTag mem_tag) {
+  return map_or_reserve_memory_aligned(size, alignment, fd, mem_tag);
 }
 
 char* os::pd_reserve_memory(size_t bytes, bool exec) {
diff --git a/src/hotspot/share/cds/filemap.cpp b/src/hotspot/share/cds/filemap.cpp
index b86118c6868..7bbb9688015 100644
--- a/src/hotspot/share/cds/filemap.cpp
+++ b/src/hotspot/share/cds/filemap.cpp
@@ -1716,10 +1716,10 @@ void FileMapInfo::close() {
  */
 static char* map_memory(int fd, const char* file_name, size_t file_offset,
                         char *addr, size_t bytes, bool read_only,
-                        bool allow_exec, MEMFLAGS flags = mtNone) {
+                        bool allow_exec, MemTag mem_tag = mtNone) {
   char* mem = os::map_memory(fd, file_name, file_offset, addr, bytes,
                              AlwaysPreTouch ? false : read_only,
-                             allow_exec, flags);
+                             allow_exec, mem_tag);
   if (mem != nullptr && AlwaysPreTouch) {
     os::pretouch_memory(mem, mem + bytes);
   }
@@ -2178,7 +2178,7 @@ bool FileMapInfo::map_heap_region_impl() {
 
   _mapped_heap_memregion = MemRegion(start, word_size);
 
-  // Map the archived heap data. No need to call MemTracker::record_virtual_memory_type()
+  // Map the archived heap data. No need to call MemTracker::record_virtual_memory_tag()
   // for mapped region as it is part of the reserved java heap, which is already recorded.
   char* addr = (char*)_mapped_heap_memregion.start();
   char* base;
 
diff --git a/src/hotspot/share/cds/metaspaceShared.cpp b/src/hotspot/share/cds/metaspaceShared.cpp
index 4d978a7ad88..c66398cefac 100644
--- a/src/hotspot/share/cds/metaspaceShared.cpp
+++ b/src/hotspot/share/cds/metaspaceShared.cpp
@@ -1299,7 +1299,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
       assert(base_address == nullptr ||
              (address)archive_space_rs.base() == base_address, "Sanity");
       // Register archive space with NMT.
-      MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
+      MemTracker::record_virtual_memory_tag(archive_space_rs.base(), mtClassShared);
       return archive_space_rs.base();
     }
     return nullptr;
@@ -1361,8 +1361,8 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
       return nullptr;
     }
     // NMT: fix up the space tags
-    MemTracker::record_virtual_memory_type(archive_space_rs.base(), mtClassShared);
-    MemTracker::record_virtual_memory_type(class_space_rs.base(), mtClass);
+    MemTracker::record_virtual_memory_tag(archive_space_rs.base(), mtClassShared);
+    MemTracker::record_virtual_memory_tag(class_space_rs.base(), mtClass);
   } else {
     if (use_archive_base_addr && base_address != nullptr) {
       total_space_rs = ReservedSpace(total_range_size, base_address_alignment,
diff --git a/src/hotspot/share/gc/g1/g1BatchedTask.hpp b/src/hotspot/share/gc/g1/g1BatchedTask.hpp
index aa16f4ddfd4..020fda634e4 100644
--- a/src/hotspot/share/gc/g1/g1BatchedTask.hpp
+++ b/src/hotspot/share/gc/g1/g1BatchedTask.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,7 +29,7 @@
 #include "gc/shared/workerThread.hpp"
 #include "memory/allocation.hpp"
 
-template <typename E, MEMFLAGS F>
+template <typename E, MemTag MT>
 class GrowableArrayCHeap;
 
 // G1AbstractSubTask represents a task to be performed either within a
diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.cpp b/src/hotspot/share/gc/g1/g1MonotonicArena.cpp
index 81748d277cf..b2706d7a946 100644
--- a/src/hotspot/share/gc/g1/g1MonotonicArena.cpp
+++ b/src/hotspot/share/gc/g1/g1MonotonicArena.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,22 +29,22 @@
 #include "runtime/vmOperations.hpp"
 #include "utilities/globalCounter.inline.hpp"
 
-G1MonotonicArena::Segment::Segment(uint slot_size, uint num_slots, Segment* next, MEMFLAGS flag) :
+G1MonotonicArena::Segment::Segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag) :
   _slot_size(slot_size),
   _num_slots(num_slots),
   _next(next),
   _next_allocate(0),
-  _mem_flag(flag) {
+  _mem_tag(mem_tag) {
   _bottom = ((char*) this) + header_size();
 }
 
 G1MonotonicArena::Segment* G1MonotonicArena::Segment::create_segment(uint slot_size,
                                                                      uint num_slots,
                                                                      Segment* next,
-                                                                     MEMFLAGS mem_flag) {
+                                                                     MemTag mem_tag) {
   size_t block_size = size_in_bytes(slot_size, num_slots);
-  char* alloc_block = NEW_C_HEAP_ARRAY(char, block_size, mem_flag);
-  return new (alloc_block) Segment(slot_size, num_slots, next, mem_flag);
+  char* alloc_block = NEW_C_HEAP_ARRAY(char, block_size, mem_tag);
+  return new (alloc_block) Segment(slot_size, num_slots, next, mem_tag);
 }
 
 void G1MonotonicArena::Segment::delete_segment(Segment* segment) {
@@ -54,7 +54,7 @@ void G1MonotonicArena::Segment::delete_segment(Segment* segment) {
     GlobalCounter::write_synchronize();
   }
   segment->~Segment();
-  FREE_C_HEAP_ARRAY(_mem_flag, segment);
+  FREE_C_HEAP_ARRAY(_mem_tag, segment);
 }
 
 void G1MonotonicArena::SegmentFreeList::bulk_add(Segment& first,
@@ -108,7 +108,7 @@ G1MonotonicArena::Segment* G1MonotonicArena::new_segment(Segment* const prev) {
     uint prev_num_slots = (prev != nullptr) ? prev->num_slots() : 0;
     uint num_slots = _alloc_options->next_num_slots(prev_num_slots);
 
-    next = Segment::create_segment(slot_size(), num_slots, prev, _alloc_options->mem_flag());
+    next = Segment::create_segment(slot_size(), num_slots, prev, _alloc_options->mem_tag());
   } else {
     assert(slot_size() == next->slot_size() ,
            "Mismatch %d != %d", slot_size(), next->slot_size());
diff --git a/src/hotspot/share/gc/g1/g1MonotonicArena.hpp b/src/hotspot/share/gc/g1/g1MonotonicArena.hpp
index bf46e4a3351..b51f3e37db1 100644
--- a/src/hotspot/share/gc/g1/g1MonotonicArena.hpp
+++ b/src/hotspot/share/gc/g1/g1MonotonicArena.hpp
@@ -27,7 +27,7 @@
 #define SHARE_GC_G1_G1MONOTONICARENA_HPP
 
 #include "gc/shared/freeListAllocator.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/lockFreeStack.hpp"
 
@@ -120,7 +120,7 @@ class G1MonotonicArena::Segment {
   // to _num_slots (can be larger because we atomically increment this value and
   // check only afterwards if the allocation has been successful).
   uint volatile _next_allocate;
-  const MEMFLAGS _mem_flag;
+  const MemTag _mem_tag;
 
   char* _bottom;  // Actual data.
   // Do not add class member variables beyond this point
@@ -136,7 +136,7 @@ class G1MonotonicArena::Segment {
 
   NONCOPYABLE(Segment);
 
-  Segment(uint slot_size, uint num_slots, Segment* next, MEMFLAGS flag);
+  Segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
   ~Segment() = default;
 public:
   Segment* volatile* next_addr() { return &_next; }
@@ -173,7 +173,7 @@ public:
     return header_size() + payload_size(slot_size, num_slots);
   }
 
-  static Segment* create_segment(uint slot_size, uint num_slots, Segment* next, MEMFLAGS mem_flag);
+  static Segment* create_segment(uint slot_size, uint num_slots, Segment* next, MemTag mem_tag);
   static void delete_segment(Segment* segment);
 
   // Copies the contents of this segment into the destination.
@@ -222,7 +222,7 @@ public:
 class G1MonotonicArena::AllocOptions {
 
 protected:
-  const MEMFLAGS _mem_flag;
+  const MemTag _mem_tag;
 
   const uint _slot_size;
   const uint _initial_num_slots;
   // Defines a limit to the number of slots in the segment
@@ -230,8 +230,8 @@ protected:
   const uint _slot_alignment;
 
 public:
-  AllocOptions(MEMFLAGS mem_flag, uint slot_size, uint initial_num_slots, uint max_num_slots, uint alignment) :
-    _mem_flag(mem_flag),
+  AllocOptions(MemTag mem_tag, uint slot_size, uint initial_num_slots, uint max_num_slots, uint alignment) :
+    _mem_tag(mem_tag),
     _slot_size(align_up(slot_size, alignment)),
     _initial_num_slots(initial_num_slots),
     _max_num_slots(max_num_slots),
@@ -250,7 +250,7 @@ public:
 
   uint slot_alignment() const { return _slot_alignment; }
 
-  MEMFLAGS mem_flag() const {return _mem_flag; }
+  MemTag mem_tag() const {return _mem_tag; }
 };
 
 #endif //SHARE_GC_G1_MONOTONICARENA_HPP
diff --git a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp
index 5f903960cce..4403b4c8dd9 100644
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -40,15 +40,15 @@ G1RegionToSpaceMapper::G1RegionToSpaceMapper(ReservedSpace rs,
                                              size_t page_size,
                                              size_t region_granularity,
                                              size_t commit_factor,
-                                             MEMFLAGS type) :
+                                             MemTag mem_tag) :
   _listener(nullptr),
   _storage(rs, used_size, page_size),
   _region_commit_map(rs.size() * commit_factor / region_granularity, mtGC),
-  _memory_type(type) {
+  _memory_tag(mem_tag) {
   guarantee(is_power_of_2(page_size), "must be");
   guarantee(is_power_of_2(region_granularity), "must be");
 
-  MemTracker::record_virtual_memory_type((address)rs.base(), type);
+  MemTracker::record_virtual_memory_tag((address)rs.base(), mem_tag);
 }
 
 // Used to manually signal a mapper to handle a set of regions as committed.
@@ -72,8 +72,8 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
                                       size_t page_size,
                                       size_t alloc_granularity,
                                       size_t commit_factor,
-                                      MEMFLAGS type) :
-    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
+                                      MemTag mem_tag) :
+    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, mem_tag),
     _pages_per_region(alloc_granularity / (page_size * commit_factor)) {
 
     guarantee(alloc_granularity >= page_size, "allocation granularity smaller than commit granularity");
@@ -97,7 +97,7 @@ class G1RegionsLargerThanCommitSizeMapper : public G1RegionToSpaceMapper {
     const size_t start_page = (size_t)start_idx * _pages_per_region;
     const size_t size_in_pages = num_regions * _pages_per_region;
     bool zero_filled = _storage.commit(start_page, size_in_pages);
-    if (_memory_type == mtJavaHeap) {
+    if (_memory_tag == mtJavaHeap) {
       for (uint region_index = start_idx; region_index < start_idx + num_regions; region_index++ ) {
         void* address = _storage.page_start(region_index * _pages_per_region);
         size_t size_in_bytes = _storage.page_size() * _pages_per_region;
@@ -150,7 +150,7 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
   }
 
   void numa_request_on_node(size_t page_idx) {
-    if (_memory_type == mtJavaHeap) {
+    if (_memory_tag == mtJavaHeap) {
       uint region = (uint)(page_idx * _regions_per_page);
       void* address = _storage.page_start(page_idx);
       size_t size_in_bytes = _storage.page_size();
@@ -164,8 +164,8 @@ class G1RegionsSmallerThanCommitSizeMapper : public G1RegionToSpaceMapper {
                                        size_t page_size,
                                        size_t alloc_granularity,
                                        size_t commit_factor,
-                                       MEMFLAGS type) :
-    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, type),
+                                       MemTag mem_tag) :
+    G1RegionToSpaceMapper(rs, actual_size, page_size, alloc_granularity, commit_factor, mem_tag),
     _regions_per_page((page_size * commit_factor) / alloc_granularity),
     _lock(Mutex::service-3, "G1Mapper_lock") {
 
@@ -263,10 +263,10 @@ G1RegionToSpaceMapper* G1RegionToSpaceMapper::create_mapper(ReservedSpace rs,
                                                             size_t page_size,
                                                             size_t region_granularity,
                                                             size_t commit_factor,
-                                                            MEMFLAGS type) {
+                                                            MemTag mem_tag) {
   if (region_granularity >= (page_size * commit_factor)) {
-    return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
+    return new G1RegionsLargerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, mem_tag);
   } else {
-    return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, type);
+    return new G1RegionsSmallerThanCommitSizeMapper(rs, actual_size, page_size, region_granularity, commit_factor, mem_tag);
   }
 }
diff --git a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp
index 02498b394b3..5ef0f8ec5ab 100644
--- a/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp
+++ b/src/hotspot/share/gc/g1/g1RegionToSpaceMapper.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -52,9 +52,9 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
 
   // Mapping management
   CHeapBitMap _region_commit_map;
 
-  MEMFLAGS _memory_type;
+  MemTag _memory_tag;
 
-  G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MEMFLAGS type);
+  G1RegionToSpaceMapper(ReservedSpace rs, size_t used_size, size_t page_size, size_t region_granularity, size_t commit_factor, MemTag mem_tag);
 
   void fire_on_commit(uint start_idx, size_t num_regions, bool zero_filled);
 public:
@@ -85,7 +85,7 @@ class G1RegionToSpaceMapper : public CHeapObj<mtGC> {
                                               size_t page_size,
                                               size_t region_granularity,
                                               size_t byte_translation_factor,
-                                              MEMFLAGS type);
+                                              MemTag mem_tag);
 };
 
 #endif // SHARE_GC_G1_G1REGIONTOSPACEMAPPER_HPP
diff --git a/src/hotspot/share/gc/parallel/objectStartArray.cpp b/src/hotspot/share/gc/parallel/objectStartArray.cpp
index b1fc956a54a..ef9de7abfd7 100644
--- a/src/hotspot/share/gc/parallel/objectStartArray.cpp
+++ b/src/hotspot/share/gc/parallel/objectStartArray.cpp
@@ -51,7 +51,7 @@ void ObjectStartArray::initialize(MemRegion reserved_region) {
   if (!backing_store.is_reserved()) {
     vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
   }
-  MemTracker::record_virtual_memory_type(backing_store.base(), mtGC);
+  MemTracker::record_virtual_memory_tag(backing_store.base(), mtGC);
 
   // We do not commit any memory initially
   _virtual_space.initialize(backing_store);
diff --git a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
index 658c3ef106f..46a178500e5 100644
--- a/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
+++ b/src/hotspot/share/gc/parallel/parMarkBitMap.cpp
@@ -51,7 +51,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)
   os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes, rs.base(),
                        rs.size(), used_page_sz);
 
-  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+  MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
 
   _virtual_space = new PSVirtualSpace(rs, page_sz);
   if (_virtual_space != nullptr && _virtual_space->expand_by(_reserved_byte_size)) {
diff --git a/src/hotspot/share/gc/parallel/psParallelCompact.cpp b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
index 4bff8f8a7d0..1ab7b2af7ed 100644
--- a/src/hotspot/share/gc/parallel/psParallelCompact.cpp
+++ b/src/hotspot/share/gc/parallel/psParallelCompact.cpp
@@ -235,7 +235,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
   os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
                        rs.size(), page_sz);
 
-  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+  MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
 
   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
   if (vspace != nullptr) {
diff --git a/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp b/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp
index 59b7f130df3..31f18652c63 100644
--- a/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp
+++ b/src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp
@@ -42,7 +42,7 @@ SerialBlockOffsetTable::SerialBlockOffsetTable(MemRegion reserved,
     vm_exit_during_initialization("Could not reserve enough space for heap offset array");
   }
 
-  MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
+  MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
 
   if (!_vs.initialize(rs, 0)) {
vm_exit_during_initialization("Could not reserve enough space for heap offset array"); diff --git a/src/hotspot/share/gc/shared/cardTable.cpp b/src/hotspot/share/gc/shared/cardTable.cpp index 95f7058f4e4..acd4bda6e10 100644 --- a/src/hotspot/share/gc/shared/cardTable.cpp +++ b/src/hotspot/share/gc/shared/cardTable.cpp @@ -84,7 +84,7 @@ void CardTable::initialize(void* region0_start, void* region1_start) { MAX2(_page_size, os::vm_allocation_granularity()); ReservedSpace heap_rs(_byte_map_size, rs_align, _page_size); - MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC); + MemTracker::record_virtual_memory_tag((address)heap_rs.base(), mtGC); os::trace_page_sizes("Card Table", num_bytes, num_bytes, heap_rs.base(), heap_rs.size(), _page_size); diff --git a/src/hotspot/share/gc/shared/oopStorage.cpp b/src/hotspot/share/gc/shared/oopStorage.cpp index 7117b86b264..6c5e57c9479 100644 --- a/src/hotspot/share/gc/shared/oopStorage.cpp +++ b/src/hotspot/share/gc/shared/oopStorage.cpp @@ -127,10 +127,10 @@ OopStorage::ActiveArray::~ActiveArray() { } OopStorage::ActiveArray* OopStorage::ActiveArray::create(size_t size, - MEMFLAGS memflags, + MemTag mem_tag, AllocFailType alloc_fail) { size_t size_in_bytes = blocks_offset() + sizeof(Block*) * size; - void* mem = NEW_C_HEAP_ARRAY3(char, size_in_bytes, memflags, CURRENT_PC, alloc_fail); + void* mem = NEW_C_HEAP_ARRAY3(char, size_in_bytes, mem_tag, CURRENT_PC, alloc_fail); if (mem == nullptr) return nullptr; return new (mem) ActiveArray(size); } @@ -343,7 +343,7 @@ OopStorage::Block* OopStorage::Block::new_block(const OopStorage* owner) { // _data must be first member: aligning block => aligning _data. STATIC_ASSERT(_data_pos == 0); size_t size_needed = allocation_size(); - void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, owner->memflags()); + void* memory = NEW_C_HEAP_ARRAY_RETURN_NULL(char, size_needed, owner->mem_tag()); if (memory == nullptr) { return nullptr; } @@ -575,7 +575,7 @@ bool OopStorage::expand_active_array() { log_debug(oopstorage, blocks)("%s: expand active array " SIZE_FORMAT, name(), new_size); ActiveArray* new_array = ActiveArray::create(new_size, - memflags(), + mem_tag(), AllocFailStrategy::RETURN_NULL); if (new_array == nullptr) return false; new_array->copy_from(old_array); @@ -805,8 +805,8 @@ void OopStorage::release(const oop* const* ptrs, size_t size) { } } -OopStorage* OopStorage::create(const char* name, MEMFLAGS memflags) { - return new (memflags) OopStorage(name, memflags); +OopStorage* OopStorage::create(const char* name, MemTag mem_tag) { + return new (mem_tag) OopStorage(name, mem_tag); } const size_t initial_active_array_size = 8; @@ -819,9 +819,9 @@ static Mutex* make_oopstorage_mutex(const char* storage_name, return new PaddedMutex(rank, name); } -OopStorage::OopStorage(const char* name, MEMFLAGS memflags) : +OopStorage::OopStorage(const char* name, MemTag mem_tag) : _name(os::strdup(name)), - _active_array(ActiveArray::create(initial_active_array_size, memflags)), + _active_array(ActiveArray::create(initial_active_array_size, mem_tag)), _allocation_list(), _deferred_updates(nullptr), _allocation_mutex(make_oopstorage_mutex(name, "alloc", Mutex::oopstorage)), @@ -829,7 +829,7 @@ OopStorage::OopStorage(const char* name, MEMFLAGS memflags) : _num_dead_callback(nullptr), _allocation_count(0), _concurrent_iteration_count(0), - _memflags(memflags), + _mem_tag(mem_tag), _needs_cleanup(false) { _active_array->increment_refcount(); @@ -1030,7 +1030,7 @@ size_t OopStorage::total_memory_usage() 
   return total_size;
 }
 
-MEMFLAGS OopStorage::memflags() const { return _memflags; }
+MemTag OopStorage::mem_tag() const { return _mem_tag; }
 
 // Parallel iteration support
diff --git a/src/hotspot/share/gc/shared/oopStorage.hpp b/src/hotspot/share/gc/shared/oopStorage.hpp
index dfc0f83fc19..f78746fc14a 100644
--- a/src/hotspot/share/gc/shared/oopStorage.hpp
+++ b/src/hotspot/share/gc/shared/oopStorage.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -74,7 +74,7 @@ class outputStream;
 
 class OopStorage : public CHeapObjBase {
 public:
-  static OopStorage* create(const char* name, MEMFLAGS memflags);
+  static OopStorage* create(const char* name, MemTag mem_tag);
   ~OopStorage();
 
   // These count and usage accessors are racy unless at a safepoint.
@@ -89,8 +89,8 @@ public:
   // bookkeeping overhead, including this storage object.
   size_t total_memory_usage() const;
 
-  // The memory type for allocations.
-  MEMFLAGS memflags() const;
+  // The memory tag for allocations.
+  MemTag mem_tag() const;
 
   enum EntryStatus {
     INVALID_ENTRY,
@@ -273,14 +273,14 @@ private:
   // mutable because this gets set even for const iteration.
   mutable int _concurrent_iteration_count;
 
-  // The memory type for allocations.
-  MEMFLAGS _memflags;
+  // The memory tag for allocations.
+  MemTag _mem_tag;
 
   // Flag indicating this storage object is a candidate for empty block deletion.
   volatile bool _needs_cleanup;
 
   // Clients construct via "create" factory function.
-  OopStorage(const char* name, MEMFLAGS memflags);
+  OopStorage(const char* name, MemTag mem_tag);
   NONCOPYABLE(OopStorage);
 
   bool try_add_block();
diff --git a/src/hotspot/share/gc/shared/oopStorage.inline.hpp b/src/hotspot/share/gc/shared/oopStorage.inline.hpp
index e1e815acd09..ce78e507efc 100644
--- a/src/hotspot/share/gc/shared/oopStorage.inline.hpp
+++ b/src/hotspot/share/gc/shared/oopStorage.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@ class OopStorage::ActiveArray {
 
 public:
   static ActiveArray* create(size_t size,
-                             MEMFLAGS memflags = mtGC,
+                             MemTag mem_tag = mtGC,
                              AllocFailType alloc_fail = AllocFailStrategy::EXIT_OOM);
 
   static void destroy(ActiveArray* ba);
diff --git a/src/hotspot/share/gc/shared/oopStorageSet.cpp b/src/hotspot/share/gc/shared/oopStorageSet.cpp
index e119e570759..d061fc77638 100644
--- a/src/hotspot/share/gc/shared/oopStorageSet.cpp
+++ b/src/hotspot/share/gc/shared/oopStorageSet.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,18 +31,18 @@
 
 OopStorage* OopStorageSet::_storages[all_count] = {};
 
-OopStorage* OopStorageSet::create_strong(const char* name, MEMFLAGS memflags) {
+OopStorage* OopStorageSet::create_strong(const char* name, MemTag mem_tag) {
   static uint registered_strong = 0;
   assert(registered_strong < strong_count, "More registered strong storages than slots");
-  OopStorage* storage = OopStorage::create(name, memflags);
+  OopStorage* storage = OopStorage::create(name, mem_tag);
   _storages[strong_start + registered_strong++] = storage;
   return storage;
 }
 
-OopStorage* OopStorageSet::create_weak(const char* name, MEMFLAGS memflags) {
+OopStorage* OopStorageSet::create_weak(const char* name, MemTag mem_tag) {
   static uint registered_weak = 0;
   assert(registered_weak < weak_count, "More registered strong storages than slots");
-  OopStorage* storage = OopStorage::create(name, memflags);
+  OopStorage* storage = OopStorage::create(name, mem_tag);
   _storages[weak_start + registered_weak++] = storage;
   return storage;
 }
diff --git a/src/hotspot/share/gc/shared/oopStorageSet.hpp b/src/hotspot/share/gc/shared/oopStorageSet.hpp
index 26e0e9f5a77..273a769dd59 100644
--- a/src/hotspot/share/gc/shared/oopStorageSet.hpp
+++ b/src/hotspot/share/gc/shared/oopStorageSet.hpp
@@ -25,7 +25,7 @@
 #ifndef SHARE_GC_SHARED_OOPSTORAGESET_HPP
 #define SHARE_GC_SHARED_OOPSTORAGESET_HPP
 
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/enumIterator.hpp"
 #include "utilities/globalDefinitions.hpp"
@@ -79,8 +79,8 @@ public:
   static OopStorage* storage(WeakId id) { return get_storage(id); }
   static OopStorage* storage(Id id) { return get_storage(id); }
 
-  static OopStorage* create_strong(const char* name, MEMFLAGS memflags);
-  static OopStorage* create_weak(const char* name, MEMFLAGS memflags);
+  static OopStorage* create_strong(const char* name, MemTag mem_tag);
+  static OopStorage* create_weak(const char* name, MemTag mem_tag);
 
   // Support iteration over the storage objects.
   template<typename StorageId> class Range;
diff --git a/src/hotspot/share/gc/shared/partialArrayState.cpp b/src/hotspot/share/gc/shared/partialArrayState.cpp
index fd23a320222..48ef974ecfa 100644
--- a/src/hotspot/share/gc/shared/partialArrayState.cpp
+++ b/src/hotspot/share/gc/shared/partialArrayState.cpp
@@ -26,7 +26,7 @@
 #include "gc/shared/partialArrayState.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/arena.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/orderAccess.hpp"
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp
index aab2f5d3123..ab85c293941 100644
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupProcessor.cpp
@@ -35,7 +35,7 @@
 #include "gc/shared/stringdedup/stringDedupTable.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "oops/access.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/cpuTimeCounters.hpp"
diff --git a/src/hotspot/share/gc/shared/taskqueue.hpp b/src/hotspot/share/gc/shared/taskqueue.hpp
index f4a3731583b..efbc1882fbe 100644
--- a/src/hotspot/share/gc/shared/taskqueue.hpp
+++ b/src/hotspot/share/gc/shared/taskqueue.hpp
@@ -116,8 +116,8 @@ void TaskQueueStats::reset() {
 
 // TaskQueueSuper collects functionality common to all GenericTaskQueue instances.
 
-template <unsigned int N, MEMFLAGS F>
-class TaskQueueSuper: public CHeapObj<F> {
+template <unsigned int N, MemTag MT>
+class TaskQueueSuper: public CHeapObj<MT> {
 protected:
   // Internal type for indexing the queue; also used for the tag.
   typedef NOT_LP64(uint16_t) LP64_ONLY(uint32_t) idx_t;
@@ -324,39 +324,39 @@ public:
 // practice of parallel programming (PPoPP 2013), 69-80
 //
 
-template <class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
-class GenericTaskQueue: public TaskQueueSuper<N, F> {
+template <class E, MemTag MT, unsigned int N = TASKQUEUE_SIZE>
+class GenericTaskQueue: public TaskQueueSuper<N, MT> {
 protected:
-  typedef typename TaskQueueSuper<N, F>::Age Age;
-  typedef typename TaskQueueSuper<N, F>::idx_t idx_t;
+  typedef typename TaskQueueSuper<N, MT>::Age Age;
+  typedef typename TaskQueueSuper<N, MT>::idx_t idx_t;
 
-  using TaskQueueSuper<N, F>::MOD_N_MASK;
+  using TaskQueueSuper<N, MT>::MOD_N_MASK;
 
-  using TaskQueueSuper<N, F>::bottom_relaxed;
-  using TaskQueueSuper<N, F>::bottom_acquire;
+  using TaskQueueSuper<N, MT>::bottom_relaxed;
+  using TaskQueueSuper<N, MT>::bottom_acquire;
 
-  using TaskQueueSuper<N, F>::set_bottom_relaxed;
-  using TaskQueueSuper<N, F>::release_set_bottom;
+  using TaskQueueSuper<N, MT>::set_bottom_relaxed;
+  using TaskQueueSuper<N, MT>::release_set_bottom;
 
-  using TaskQueueSuper<N, F>::age_relaxed;
-  using TaskQueueSuper<N, F>::set_age_relaxed;
-  using TaskQueueSuper<N, F>::cmpxchg_age;
-  using TaskQueueSuper<N, F>::age_top_relaxed;
+  using TaskQueueSuper<N, MT>::age_relaxed;
+  using TaskQueueSuper<N, MT>::set_age_relaxed;
+  using TaskQueueSuper<N, MT>::cmpxchg_age;
+  using TaskQueueSuper<N, MT>::age_top_relaxed;
 
-  using TaskQueueSuper<N, F>::increment_index;
-  using TaskQueueSuper<N, F>::decrement_index;
-  using TaskQueueSuper<N, F>::dirty_size;
-  using TaskQueueSuper<N, F>::clean_size;
-  using TaskQueueSuper<N, F>::assert_not_underflow;
+  using TaskQueueSuper<N, MT>::increment_index;
+  using TaskQueueSuper<N, MT>::decrement_index;
+  using TaskQueueSuper<N, MT>::dirty_size;
+  using TaskQueueSuper<N, MT>::clean_size;
+  using TaskQueueSuper<N, MT>::assert_not_underflow;
 
 public:
-  typedef typename TaskQueueSuper<N, F>::PopResult PopResult;
+  typedef typename TaskQueueSuper<N, MT>::PopResult PopResult;
 
-  using TaskQueueSuper<N, F>::max_elems;
-  using TaskQueueSuper<N, F>::size;
+  using TaskQueueSuper<N, MT>::max_elems;
+  using TaskQueueSuper<N, MT>::size;
 #if TASKQUEUE_STATS
-  using TaskQueueSuper<N, F>::stats;
+  using TaskQueueSuper<N, MT>::stats;
 #endif
 
 private:
@@ -428,12 +428,12 @@ public:
 // Note that size() is not hidden--it returns the number of elements in the
 // TaskQueue, and does not include the size of the overflow stack.  This
 // simplifies replacement of GenericTaskQueues with OverflowTaskQueues.
-template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
-class OverflowTaskQueue: public GenericTaskQueue<E, F, N>
+template<class E, MemTag MT, unsigned int N = TASKQUEUE_SIZE>
+class OverflowTaskQueue: public GenericTaskQueue<E, MT, N>
 {
 public:
-  typedef Stack<E, F>               overflow_t;
-  typedef GenericTaskQueue<E, F, N> taskqueue_t;
+  typedef Stack<E, MT>               overflow_t;
+  typedef GenericTaskQueue<E, MT, N> taskqueue_t;
 
   TASKQUEUE_STATS_ONLY(using taskqueue_t::stats;)
 
@@ -467,11 +467,11 @@ public:
   virtual uint tasks() const = 0;
 };
 
-template <MEMFLAGS F> class TaskQueueSetSuperImpl: public CHeapObj<F>, public TaskQueueSetSuper {
+template <MemTag MT> class TaskQueueSetSuperImpl: public CHeapObj<MT>, public TaskQueueSetSuper {
 };
 
-template<class T, MEMFLAGS F>
-class GenericTaskQueueSet: public TaskQueueSetSuperImpl<F> {
+template<class T, MemTag MT>
+class GenericTaskQueueSet: public TaskQueueSetSuperImpl<MT> {
 public:
   typedef typename T::element_type E;
   typedef typename T::PopResult PopResult;
@@ -518,29 +518,29 @@ public:
 #endif // TASKQUEUE_STATS
 };
 
-template<class T, MEMFLAGS F> void
-GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
+template<class T, MemTag MT> void
+GenericTaskQueueSet<T, MT>::register_queue(uint i, T* q) {
   assert(i < _n, "index out of range.");
   _queues[i] = q;
 }
 
-template<class T, MEMFLAGS F> T*
-GenericTaskQueueSet<T, F>::queue(uint i) {
+template<class T, MemTag MT> T*
+GenericTaskQueueSet<T, MT>::queue(uint i) {
   assert(i < _n, "index out of range.");
   return _queues[i];
 }
 
 #ifdef ASSERT
-template<class T, MEMFLAGS F>
-void GenericTaskQueueSet<T, F>::assert_empty() const {
+template<class T, MemTag MT>
+void GenericTaskQueueSet<T, MT>::assert_empty() const {
   for (uint j = 0; j < _n; j++) {
     _queues[j]->assert_empty();
   }
 }
 #endif // ASSERT
 
-template<class T, MEMFLAGS F>
-uint GenericTaskQueueSet<T, F>::tasks() const {
+template<class T, MemTag MT>
+uint GenericTaskQueueSet<T, MT>::tasks() const {
   uint n = 0;
   for (uint j = 0; j < _n; j++) {
     n += _queues[j]->size();
diff --git a/src/hotspot/share/gc/shared/taskqueue.inline.hpp b/src/hotspot/share/gc/shared/taskqueue.inline.hpp
index f937ce8a2e9..8e65cfd704f 100644
--- a/src/hotspot/share/gc/shared/taskqueue.inline.hpp
+++ b/src/hotspot/share/gc/shared/taskqueue.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -38,30 +38,30 @@
 #include "utilities/ostream.hpp"
 #include "utilities/stack.inline.hpp"
 
-template <class T, MEMFLAGS F>
-inline GenericTaskQueueSet<T, F>::GenericTaskQueueSet(uint n) : _n(n) {
+template <class T, MemTag MT>
+inline GenericTaskQueueSet<T, MT>::GenericTaskQueueSet(uint n) : _n(n) {
   typedef T* GenericTaskQueuePtr;
-  _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
+  _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, MT);
   for (uint i = 0; i < n; i++) {
     _queues[i] = nullptr;
   }
 }
 
-template <class T, MEMFLAGS F>
-inline GenericTaskQueueSet<T, F>::~GenericTaskQueueSet() {
+template <class T, MemTag MT>
+inline GenericTaskQueueSet<T, MT>::~GenericTaskQueueSet() {
   FREE_C_HEAP_ARRAY(T*, _queues);
 }
 
 #if TASKQUEUE_STATS
-template <class T, MEMFLAGS F>
-void GenericTaskQueueSet<T, F>::print_taskqueue_stats_hdr(outputStream* const st, const char* label) {
+template <class T, MemTag MT>
+void GenericTaskQueueSet<T, MT>::print_taskqueue_stats_hdr(outputStream* const st, const char* label) {
   st->print_cr("GC Task Stats %s", label);
   st->print("thr "); TaskQueueStats::print_header(1, st); st->cr();
   st->print("--- "); TaskQueueStats::print_header(2, st); st->cr();
 }
 
-template <class T, MEMFLAGS F>
-void GenericTaskQueueSet<T, F>::print_taskqueue_stats(outputStream* const st, const char* label) {
+template <class T, MemTag MT>
+void GenericTaskQueueSet<T, MT>::print_taskqueue_stats(outputStream* const st, const char* label) {
   print_taskqueue_stats_hdr(st, label);
 
   TaskQueueStats totals;
@@ -75,16 +75,16 @@ void GenericTaskQueueSet<T, F>::print_taskqueue_stats(outputStream* const st, co
   DEBUG_ONLY(totals.verify());
 }
 
-template <class T, MEMFLAGS F>
-void GenericTaskQueueSet<T, F>::reset_taskqueue_stats() {
+template <class T, MemTag MT>
+void GenericTaskQueueSet<T, MT>::reset_taskqueue_stats() {
   const uint n = size();
   for (uint i = 0; i < n; ++i) {
     queue(i)->stats.reset();
   }
 }
 
-template <class T, MEMFLAGS F>
-inline void GenericTaskQueueSet<T, F>::print_and_reset_taskqueue_stats(const char* label) {
+template <class T, MemTag MT>
+inline void GenericTaskQueueSet<T, MT>::print_and_reset_taskqueue_stats(const char* label) {
   if (!log_is_enabled(Trace, gc, task, stats)) {
     return;
   }
@@ -97,19 +97,19 @@ inline void GenericTaskQueueSet<T, F>::print_and_reset_taskqueue_stats(const cha
 }
 #endif // TASKQUEUE_STATS
 
-template<class E, MEMFLAGS F, unsigned int N>
-inline GenericTaskQueue<E, F, N>::GenericTaskQueue() :
-  _elems(MallocArrayAllocator<E>::allocate(N, F)),
+template<class E, MemTag MT, unsigned int N>
+inline GenericTaskQueue<E, MT, N>::GenericTaskQueue() :
+  _elems(MallocArrayAllocator<E>::allocate(N, MT)),
   _last_stolen_queue_id(InvalidQueueId),
   _seed(17 /* random number */) {}
 
-template<class E, MEMFLAGS F, unsigned int N>
-inline GenericTaskQueue<E, F, N>::~GenericTaskQueue() {
+template<class E, MemTag MT, unsigned int N>
+inline GenericTaskQueue<E, MT, N>::~GenericTaskQueue() {
   MallocArrayAllocator<E>::free(_elems);
 }
 
-template<class E, MEMFLAGS F, unsigned int N> inline bool
-GenericTaskQueue<E, F, N>::push(E t) {
+template<class E, MemTag MT, unsigned int N> inline bool
+GenericTaskQueue<E, MT, N>::push(E t) {
   uint localBot = bottom_relaxed();
   assert(localBot < N, "_bottom out of range.");
   idx_t top = age_top_relaxed();
@@ -134,8 +134,8 @@ GenericTaskQueue<E, F, N>::push(E t) {
   return false; // Queue is full.
 }
 
-template <class E, MEMFLAGS F, unsigned int N>
-inline bool OverflowTaskQueue<E, F, N>::push(E t) {
+template <class E, MemTag MT, unsigned int N>
+inline bool OverflowTaskQueue<E, MT, N>::push(E t) {
   if (!taskqueue_t::push(t)) {
     overflow_stack()->push(t);
     TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
@@ -143,8 +143,8 @@ inline bool OverflowTaskQueue<E, F, N>::push(E t) {
   return true;
 }
 
-template <class E, MEMFLAGS F, unsigned int N>
-inline bool OverflowTaskQueue<E, F, N>::try_push_to_taskqueue(E t) {
+template <class E, MemTag MT, unsigned int N>
+inline bool OverflowTaskQueue<E, MT, N>::try_push_to_taskqueue(E t) {
   return taskqueue_t::push(t);
 }
 
@@ -154,8 +154,8 @@ inline bool OverflowTaskQueue<E, F, N>::try_push_to_taskqueue(E t) {
 // whenever the queue goes empty which it will do here if this thread
 // gets the last task or in pop_global() if the queue wraps (top == 0
 // and pop_global() succeeds, see pop_global()).
-template<class E, MEMFLAGS F, unsigned int N>
-bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
+template<class E, MemTag MT, unsigned int N>
+bool GenericTaskQueue<E, MT, N>::pop_local_slow(uint localBot, Age oldAge) {
   // This queue was observed to contain exactly one element; either this
   // thread will claim it, or a competing "pop_global".  In either case,
   // the queue will be logically empty afterwards.  Create a new Age value
@@ -187,8 +187,8 @@ bool GenericTaskQueue<E, F, N>::pop_local_slow(uint localBot, Age oldAge) {
   return false;
 }
 
-template<class E, MEMFLAGS F, unsigned int N> inline bool
-GenericTaskQueue<E, F, N>::pop_local(E& t, uint threshold) {
+template<class E, MemTag MT, unsigned int N> inline bool
+GenericTaskQueue<E, MT, N>::pop_local(E& t, uint threshold) {
   uint localBot = bottom_relaxed();
   // This value cannot be N-1.  That can only occur as a result of
   // the assignment to bottom in this method.  If it does, this method
@@ -224,8 +224,8 @@ GenericTaskQueue<E, F, N>::pop_local(E& t, uint threshold) {
   }
 }
 
-template <class E, MEMFLAGS F, unsigned int N>
-bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
+template <class E, MemTag MT, unsigned int N>
+bool OverflowTaskQueue<E, MT, N>::pop_overflow(E& t)
 {
   if (overflow_empty()) return false;
   t = overflow_stack()->pop();
@@ -253,8 +253,8 @@ bool OverflowTaskQueue<E, F, N>::pop_overflow(E& t)
 // (3) Owner starts a push, writing elems[bottom].  At the same time, Thief
 // reads elems[oldAge.top].  The owner's bottom == the thief's oldAge.top.
 // (4) Thief will discard the read value, because its cmpxchg of age will fail.
-template<class E, MEMFLAGS F, unsigned int N>
-typename GenericTaskQueue<E, F, N>::PopResult GenericTaskQueue<E, F, N>::pop_global(E& t) {
+template<class E, MemTag MT, unsigned int N>
+typename GenericTaskQueue<E, MT, N>::PopResult GenericTaskQueue<E, MT, N>::pop_global(E& t) {
   Age oldAge = age_relaxed();
 
   // Architectures with non-multi-copy-atomic memory model require a
@@ -311,13 +311,13 @@ inline int randomParkAndMiller(int *seed0) {
   return seed;
 }
 
-template<class E, MEMFLAGS F, unsigned int N>
-int GenericTaskQueue<E, F, N>::next_random_queue_id() {
+template<class E, MemTag MT, unsigned int N>
+int GenericTaskQueue<E, MT, N>::next_random_queue_id() {
   return randomParkAndMiller(&_seed);
 }
 
-template<class T, MEMFLAGS F>
-typename GenericTaskQueueSet<T, F>::PopResult GenericTaskQueueSet<T, F>::steal_best_of_2(uint queue_num, E& t) {
+template<class T, MemTag MT>
+typename GenericTaskQueueSet<T, MT>::PopResult GenericTaskQueueSet<T, MT>::steal_best_of_2(uint queue_num, E& t) {
   T* const local_queue = queue(queue_num);
   if (_n > 2) {
     uint k1 = queue_num;
@@ -372,8 +372,8 @@ typename GenericTaskQueueSet<T, F>::PopResult GenericTaskQueueSet<T, F>::steal_b
   }
 }
 
-template<class T, MEMFLAGS F>
-bool GenericTaskQueueSet<T, F>::steal(uint queue_num, E& t) {
+template<class T, MemTag MT>
+bool GenericTaskQueueSet<T, MT>::steal(uint queue_num, E& t) {
   uint const num_retries = 2 * _n;
 
   TASKQUEUE_STATS_ONLY(uint contended_in_a_row = 0;)
@@ -394,9 +394,9 @@ bool GenericTaskQueueSet<T, F>::steal(uint queue_num, E& t) {
   return false;
 }
 
-template<class E, MEMFLAGS F, unsigned int N>
+template<class E, MemTag MT, unsigned int N>
 template<class Fn>
-inline void GenericTaskQueue<E, F, N>::iterate(Fn fn) {
+inline void GenericTaskQueue<E, MT, N>::iterate(Fn fn) {
   uint iters = size();
   uint index = bottom_relaxed();
   for (uint i = 0; i < iters; ++i) {
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
index 25062c5317d..70401b42461 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectionSet.cpp
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2016, 2023, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -57,7 +58,7 @@ ShenandoahCollectionSet::ShenandoahCollectionSet(ShenandoahHeap* heap, ReservedS
   // subsystem for mapping not-yet-written-to pages to a single physical backing page,
   // but this is not guaranteed, and would confuse NMT and other memory accounting tools.
-  MemTracker::record_virtual_memory_type(_map_space.base(), mtGC);
+  MemTracker::record_virtual_memory_tag(_map_space.base(), mtGC);
 
   size_t page_size = os::vm_page_size();
 
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
index a587cc417e3..4ab17aabcc5 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -252,7 +252,7 @@ jint ShenandoahHeap::initialize() {
                             bitmap_size_orig, bitmap_page_size,
                             bitmap.base(),
                             bitmap.size(), bitmap.page_size());
-  MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
+  MemTracker::record_virtual_memory_tag(bitmap.base(), mtGC);
   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
   _bitmap_region_special = bitmap.special();
 
@@ -276,7 +276,7 @@ jint ShenandoahHeap::initialize() {
       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
                                 "Cannot commit verification bitmap memory");
     }
-    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
+    MemTracker::record_virtual_memory_tag(verify_bitmap.base(), mtGC);
     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
@@ -290,7 +290,7 @@ jint ShenandoahHeap::initialize() {
                             bitmap_size_orig, aux_bitmap_page_size,
                             aux_bitmap.base(),
                             aux_bitmap.size(), aux_bitmap.page_size());
-  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
+  MemTracker::record_virtual_memory_tag(aux_bitmap.base(), mtGC);
   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
   _aux_bitmap_region_special = aux_bitmap.special();
   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
@@ -308,7 +308,7 @@ jint ShenandoahHeap::initialize() {
                             region_storage_size_orig, region_page_size,
                             region_storage.base(),
                             region_storage.size(), region_storage.page_size());
-  MemTracker::record_virtual_memory_type(region_storage.base(), mtGC);
+  MemTracker::record_virtual_memory_tag(region_storage.base(), mtGC);
   if (!region_storage.special()) {
     os::commit_memory_or_exit(region_storage.base(), region_storage_size, region_page_size, false,
                               "Cannot commit region memory");
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp
index 50f18a8c73f..2b160a29387 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.hpp
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2016, 2024, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -28,7 +29,7 @@
 #include "gc/shared/taskTerminator.hpp"
 #include "gc/shared/taskqueue.hpp"
 #include "gc/shenandoah/shenandoahPadding.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/javaThread.hpp"
 #include "runtime/mutex.hpp"
@@ -36,11 +37,11 @@
 
 class ShenandoahHeap;
 
-template<class E, MEMFLAGS F, unsigned int N = TASKQUEUE_SIZE>
-class BufferedOverflowTaskQueue: public OverflowTaskQueue<E, F, N>
+template<class E, MemTag MT, unsigned int N = TASKQUEUE_SIZE>
+class BufferedOverflowTaskQueue: public OverflowTaskQueue<E, MT, N>
 {
 public:
-  typedef OverflowTaskQueue<E, F, N> taskqueue_t;
+  typedef OverflowTaskQueue<E, MT, N> taskqueue_t;
 
   BufferedOverflowTaskQueue() : _buf_empty(true) {};
 
@@ -301,8 +302,8 @@ public:
 typedef BufferedOverflowTaskQueue<ShenandoahMarkTask, mtGC> ShenandoahBufferedOverflowTaskQueue;
 typedef Padded<ShenandoahBufferedOverflowTaskQueue> ShenandoahObjToScanQueue;
 
-template <class T, MEMFLAGS F>
-class ParallelClaimableQueueSet: public GenericTaskQueueSet<T, F> {
+template <class T, MemTag MT>
+class ParallelClaimableQueueSet: public GenericTaskQueueSet<T, MT> {
 private:
   shenandoah_padding(0);
   volatile jint     _claimed_index;
@@ -311,10 +312,10 @@ private:
   debug_only(uint   _reserved;  )
 
 public:
-  using GenericTaskQueueSet<T, F>::size;
+  using GenericTaskQueueSet<T, MT>::size;
 
 public:
-  ParallelClaimableQueueSet(int n) : GenericTaskQueueSet<T, F>(n), _claimed_index(0) {
+  ParallelClaimableQueueSet(int n) : GenericTaskQueueSet<T, MT>(n), _claimed_index(0) {
     debug_only(_reserved = 0; )
   }
 
@@ -331,9 +332,9 @@ public:
   debug_only(uint get_reserved() const { return (uint)_reserved; })
 };
 
-template <class T, MEMFLAGS F>
-T* ParallelClaimableQueueSet<T, F>::claim_next() {
-  jint size = (jint)GenericTaskQueueSet<T, F>::size();
+template <class T, MemTag MT>
+T* ParallelClaimableQueueSet<T, MT>::claim_next() {
+  jint size = (jint)GenericTaskQueueSet<T, MT>::size();
 
   if (_claimed_index >= size) {
     return nullptr;
@@ -342,7 +343,7 @@ T* ParallelClaimableQueueSet<T, F>::claim_next() {
   jint index = Atomic::add(&_claimed_index, 1, memory_order_relaxed);
 
   if (index <= size) {
-    return GenericTaskQueueSet<T, F>::queue((uint)index - 1);
+    return GenericTaskQueueSet<T, MT>::queue((uint)index - 1);
   } else {
     return nullptr;
   }
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.inline.hpp
index fa770021742..9fa4fabc1c7 100644
--- a/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.inline.hpp
+++ b/src/hotspot/share/gc/shenandoah/shenandoahTaskqueue.inline.hpp
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,8 +31,8 @@
 #include "gc/shared/taskqueue.inline.hpp"
 #include "utilities/stack.inline.hpp"
 
-template <class E, MEMFLAGS F, unsigned int N>
-bool BufferedOverflowTaskQueue<E, F, N>::pop(E &t) {
+template <class E, MemTag MT, unsigned int N>
+bool BufferedOverflowTaskQueue<E, MT, N>::pop(E &t) {
   if (!_buf_empty) {
     t = _elem;
     _buf_empty = true;
@@ -45,8 +46,8 @@ bool BufferedOverflowTaskQueue<E, F, N>::pop(E &t) {
   return taskqueue_t::pop_overflow(t);
 }
 
-template <class E, MEMFLAGS F, unsigned int N>
-inline bool BufferedOverflowTaskQueue<E, F, N>::push(E t) {
+template <class E, MemTag MT, unsigned int N>
+inline bool BufferedOverflowTaskQueue<E, MT, N>::push(E t) {
   if (_buf_empty) {
     _elem = t;
     _buf_empty = false;
@@ -58,8 +59,8 @@ inline bool BufferedOverflowTaskQueue<E, F, N>::push(E t) {
   return true;
 }
 
-template <class E, MEMFLAGS F, unsigned int N>
-void BufferedOverflowTaskQueue<E, F, N>::clear() {
+template <class E, MemTag MT, unsigned int N>
+void BufferedOverflowTaskQueue<E, MT, N>::clear() {
   _buf_empty = true;
   taskqueue_t::set_empty();
   taskqueue_t::overflow_stack()->clear();
diff --git a/src/hotspot/share/gc/x/xVirtualMemory.cpp b/src/hotspot/share/gc/x/xVirtualMemory.cpp
index 1d66cdd069e..63cb789d8de 100644
--- a/src/hotspot/share/gc/x/xVirtualMemory.cpp
+++ b/src/hotspot/share/gc/x/xVirtualMemory.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -182,7 +182,7 @@ bool XVirtualMemoryManager::reserve(size_t max_capacity) {
 
 void XVirtualMemoryManager::nmt_reserve(uintptr_t start, size_t size) {
   MemTracker::record_virtual_memory_reserve((void*)start, size, CALLER_PC);
-  MemTracker::record_virtual_memory_type((void*)start, mtJavaHeap);
+  MemTracker::record_virtual_memory_tag((void*)start, mtJavaHeap);
 }
 
 bool XVirtualMemoryManager::is_initialized() const {
diff --git a/src/hotspot/share/gc/z/zNMT.cpp b/src/hotspot/share/gc/z/zNMT.cpp
index 41d99b102bf..b23452eb156 100644
--- a/src/hotspot/share/gc/z/zNMT.cpp
+++ b/src/hotspot/share/gc/z/zNMT.cpp
@@ -26,7 +26,7 @@
 #include "gc/z/zGlobals.hpp"
 #include "gc/z/zNMT.hpp"
 #include "gc/z/zVirtualMemory.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "nmt/memTracker.hpp"
 #include "nmt/memoryFileTracker.hpp"
 #include "utilities/nativeCallStack.hpp"
diff --git a/src/hotspot/share/jfr/leakprofiler/chains/jfrbitset.hpp b/src/hotspot/share/jfr/leakprofiler/chains/jfrbitset.hpp
index 8f1d2b4d5b1..9a123a91629 100644
--- a/src/hotspot/share/jfr/leakprofiler/chains/jfrbitset.hpp
+++ b/src/hotspot/share/jfr/leakprofiler/chains/jfrbitset.hpp
@@ -25,7 +25,7 @@
 #ifndef SHARE_JFR_LEAKPROFILER_JFRBITSET_HPP
 #define SHARE_JFR_LEAKPROFILER_JFRBITSET_HPP
 
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "utilities/objectBitSet.inline.hpp"
 
 typedef ObjectBitSet<mtTracing> JFRBitSet;
diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml
index 0782f94a903..babb24f28ea 100644
--- a/src/hotspot/share/jfr/metadata/metadata.xml
+++ b/src/hotspot/share/jfr/metadata/metadata.xml
@@ -735,7 +735,7 @@
-         description="Native memory usage for a given memory type in the JVM" period="everyChunk">
+         description="Native memory usage for a given memory tag in the JVM" period="everyChunk">
diff --git a/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.cpp b/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.cpp
index 2b861081215..6c0584f50ff 100644
--- a/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.cpp
+++ b/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -63,10 +63,10 @@ void JfrNativeMemoryEvent::send_total_event(const Ticks& timestamp) {
   event.commit();
 }
 
-void JfrNativeMemoryEvent::send_type_event(const Ticks& starttime, MEMFLAGS flag, size_t reserved, size_t committed) {
+void JfrNativeMemoryEvent::send_type_event(const Ticks& starttime, MemTag mem_tag, size_t reserved, size_t committed) {
   EventNativeMemoryUsage event(UNTIMED);
   event.set_starttime(starttime);
-  event.set_type(NMTUtil::flag_to_index(flag));
+  event.set_type(NMTUtil::tag_to_index(mem_tag));
   event.set_reserved(reserved);
   event.set_committed(committed);
   event.commit();
@@ -79,12 +79,12 @@ void JfrNativeMemoryEvent::send_type_events(const Ticks& timestamp) {
 
   NMTUsage* usage = get_usage(timestamp);
 
-  for (int index = 0; index < mt_number_of_types; index ++) {
-    MEMFLAGS flag = NMTUtil::index_to_flag(index);
-    if (flag == mtNone) {
+  for (int index = 0; index < mt_number_of_tags; index ++) {
+    MemTag mem_tag = NMTUtil::index_to_tag(index);
+    if (mem_tag == mtNone) {
       // Skip mtNone since it is not really used.
       continue;
     }
-    send_type_event(timestamp, flag, usage->reserved(flag), usage->committed(flag));
+    send_type_event(timestamp, mem_tag, usage->reserved(mem_tag), usage->committed(mem_tag));
   }
 }
diff --git a/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.hpp b/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.hpp
index c5a476c78a8..f2628bc1287 100644
--- a/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.hpp
+++ b/src/hotspot/share/jfr/periodic/jfrNativeMemoryEvent.hpp
@@ -25,7 +25,7 @@
 #ifndef SHARE_JFR_PERIODIC_JFRNATIVEMEMORYEVENT_HPP
 #define SHARE_JFR_PERIODIC_JFRNATIVEMEMORYEVENT_HPP
 
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "nmt/nmtUsage.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/ticks.hpp"
@@ -35,7 +35,7 @@
 // so no more synchronization is needed.
 class JfrNativeMemoryEvent : public AllStatic {
  private:
-  static void send_type_event(const Ticks& starttime, MEMFLAGS flag, size_t reserved, size_t committed);
+  static void send_type_event(const Ticks& starttime, MemTag mem_tag, size_t reserved, size_t committed);
 public:
   static void send_total_event(const Ticks& timestamp);
   static void send_type_events(const Ticks& timestamp);
diff --git a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
index 5cd9e6b253b..63c7a027373 100644
--- a/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
+++ b/src/hotspot/share/jfr/recorder/checkpoint/types/jfrType.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -332,10 +332,10 @@ void CompilerTypeConstant::serialize(JfrCheckpointWriter& writer) {
 }
 
 void NMTTypeConstant::serialize(JfrCheckpointWriter& writer) {
-  writer.write_count(mt_number_of_types);
-  for (int i = 0; i < mt_number_of_types; ++i) {
+  writer.write_count(mt_number_of_tags);
+  for (int i = 0; i < mt_number_of_tags; ++i) {
     writer.write_key(i);
-    MEMFLAGS flag = NMTUtil::index_to_flag(i);
-    writer.write(NMTUtil::flag_to_name(flag));
+    MemTag mem_tag = NMTUtil::index_to_tag(i);
+    writer.write(NMTUtil::tag_to_name(mem_tag));
   }
 }
diff --git a/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp b/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp
index 78309f00913..aaae65ed10d 100644
--- a/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp
+++ b/src/hotspot/share/jfr/recorder/storage/jfrVirtualMemory.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -117,7 +117,7 @@ bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes)
                        _rs.base(),
                        _rs.size(),
                        os::vm_page_size());
-  MemTracker::record_virtual_memory_type((address)_rs.base(), mtTracing);
+  MemTracker::record_virtual_memory_tag((address)_rs.base(), mtTracing);
   assert(is_aligned(_rs.base(), os::vm_page_size()), "invariant");
   assert(is_aligned(_rs.size(), os::vm_page_size()), "invariant");
diff --git a/src/hotspot/share/memory/allocation.cpp b/src/hotspot/share/memory/allocation.cpp
index 73bc9d4ad2a..096ee696421 100644
--- a/src/hotspot/share/memory/allocation.cpp
+++ b/src/hotspot/share/memory/allocation.cpp
@@ -36,10 +36,10 @@
 
 // allocate using malloc; will fail if no memory available
 char* AllocateHeap(size_t size,
-                   MEMFLAGS flags,
+                   MemTag mem_tag,
                    const NativeCallStack& stack,
                    AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
-  char* p = (char*) os::malloc(size, flags, stack);
+  char* p = (char*) os::malloc(size, mem_tag, stack);
   if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
     vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "AllocateHeap");
   }
@@ -47,16 +47,16 @@ char* AllocateHeap(size_t size,
 }
 
 char* AllocateHeap(size_t size,
-                   MEMFLAGS flags,
+                   MemTag mem_tag,
                    AllocFailType alloc_failmode /* = AllocFailStrategy::EXIT_OOM*/) {
-  return AllocateHeap(size, flags, CALLER_PC, alloc_failmode);
+  return AllocateHeap(size, mem_tag, CALLER_PC, alloc_failmode);
 }
 
 char* ReallocateHeap(char *old,
                      size_t size,
-                     MEMFLAGS flag,
+                     MemTag mem_tag,
                      AllocFailType alloc_failmode) {
-  char* p = (char*) os::realloc(old, size, flag, CALLER_PC);
+  char* p = (char*) os::realloc(old, size, mem_tag, CALLER_PC);
   if (p == nullptr && alloc_failmode == AllocFailStrategy::EXIT_OOM) {
     vm_exit_out_of_memory(size, OOM_MALLOC_ERROR, "ReallocateHeap");
   }
@@ -119,16 +119,16 @@ void* AnyObj::operator new(size_t size, Arena *arena) {
   return res;
 }
 
-void* AnyObj::operator new(size_t size, MEMFLAGS flags) throw() {
-  address res = (address)AllocateHeap(size, flags, CALLER_PC);
+void* AnyObj::operator new(size_t size, MemTag mem_tag) throw() {
+  address res = (address)AllocateHeap(size, mem_tag, CALLER_PC);
   DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
   return res;
 }
 
 void* AnyObj::operator new(size_t size, const std::nothrow_t&  nothrow_constant,
-                                MEMFLAGS flags) throw() {
+                                MemTag mem_tag) throw() {
   // should only call this with std::nothrow, use other operator new() otherwise
-  address res = (address)AllocateHeap(size, flags, CALLER_PC, AllocFailStrategy::RETURN_NULL);
+  address res = (address)AllocateHeap(size, mem_tag, CALLER_PC, AllocFailStrategy::RETURN_NULL);
   DEBUG_ONLY(if (res != nullptr) set_allocation_type(res, C_HEAP);)
   return res;
 }
diff --git a/src/hotspot/share/memory/allocation.hpp b/src/hotspot/share/memory/allocation.hpp
index da5f7b19830..9841ce3183c 100644
--- a/src/hotspot/share/memory/allocation.hpp
+++ b/src/hotspot/share/memory/allocation.hpp
@@ -26,7 +26,7 @@
 #define SHARE_MEMORY_ALLOCATION_HPP
 
 #include "memory/allStatic.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/macros.hpp"
@@ -94,9 +94,9 @@ typedef AllocFailStrategy::AllocFailEnum AllocFailType;
 //   NEW_C_HEAP_OBJ*
 //   FREE_C_HEAP_OBJ
 //
-// char* AllocateHeap(size_t size, MEMFLAGS flags, const NativeCallStack& stack, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
-// char* AllocateHeap(size_t size, MEMFLAGS flags, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
-// char* ReallocateHeap(char *old, size_t size, MEMFLAGS flag, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
+// char* AllocateHeap(size_t size, MemTag mem_tag, const NativeCallStack& stack, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
+// char* AllocateHeap(size_t size, MemTag mem_tag, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
+// char* ReallocateHeap(char *old, size_t size, MemTag mem_tag, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
 // void FreeHeap(void* p);
 //
@@ -106,16 +106,16 @@ class NativeCallStack;
 
 char* AllocateHeap(size_t size,
-                   MEMFLAGS flags,
+                   MemTag mem_tag,
                    const NativeCallStack& stack,
                    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
 char* AllocateHeap(size_t size,
-                   MEMFLAGS flags,
+                   MemTag mem_tag,
                    AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
 
 char* ReallocateHeap(char *old,
                      size_t size,
-                     MEMFLAGS flag,
+                     MemTag mem_tag,
                      AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM);
 
 // handles null pointers
@@ -123,50 +123,50 @@ void FreeHeap(void* p);
 
 class CHeapObjBase {
  public:
-  ALWAYSINLINE void* operator new(size_t size, MEMFLAGS f) {
-    return AllocateHeap(size, f);
+  ALWAYSINLINE void* operator new(size_t size, MemTag mem_tag) {
+    return AllocateHeap(size, mem_tag);
   }
 
   ALWAYSINLINE void* operator new(size_t size,
-                                  MEMFLAGS f,
+                                  MemTag mem_tag,
                                   const NativeCallStack& stack) {
-    return AllocateHeap(size, f, stack);
+    return AllocateHeap(size, mem_tag, stack);
   }
 
   ALWAYSINLINE void* operator new(size_t size,
-                                  MEMFLAGS f,
+                                  MemTag mem_tag,
                                   const std::nothrow_t&,
                                   const NativeCallStack& stack) throw() {
-    return AllocateHeap(size, f, stack, AllocFailStrategy::RETURN_NULL);
+    return AllocateHeap(size, mem_tag, stack, AllocFailStrategy::RETURN_NULL);
   }
 
   ALWAYSINLINE void* operator new(size_t size,
-                                  MEMFLAGS f,
+                                  MemTag mem_tag,
                                   const std::nothrow_t&) throw() {
-    return AllocateHeap(size, f, AllocFailStrategy::RETURN_NULL);
+    return AllocateHeap(size, mem_tag, AllocFailStrategy::RETURN_NULL);
   }
 
-  ALWAYSINLINE void* operator new[](size_t size, MEMFLAGS f) {
-    return AllocateHeap(size, f);
+  ALWAYSINLINE void* operator new[](size_t size, MemTag mem_tag) {
+    return AllocateHeap(size, mem_tag);
   }
 
   ALWAYSINLINE void* operator new[](size_t size,
-                                    MEMFLAGS f,
+                                    MemTag mem_tag,
                                     const NativeCallStack& stack) {
-    return AllocateHeap(size, f, stack);
+    return AllocateHeap(size, mem_tag, stack);
   }
 
   ALWAYSINLINE void* operator new[](size_t size,
-                                    MEMFLAGS f,
+                                    MemTag mem_tag,
                                     const std::nothrow_t&,
                                     const NativeCallStack& stack) throw() {
-    return AllocateHeap(size, f, stack, AllocFailStrategy::RETURN_NULL);
+    return AllocateHeap(size, mem_tag, stack, AllocFailStrategy::RETURN_NULL);
   }
 
   ALWAYSINLINE void* operator new[](size_t size,
-                                    MEMFLAGS f,
+                                    MemTag mem_tag,
                                     const std::nothrow_t&) throw() {
-    return AllocateHeap(size, f, AllocFailStrategy::RETURN_NULL);
+    return AllocateHeap(size, mem_tag, AllocFailStrategy::RETURN_NULL);
   }
 
   void operator delete(void* p)     { FreeHeap(p); }
@@ -174,43 +174,43 @@ class CHeapObjBase {
 };
 
 // Uses the implicitly static new and delete operators of CHeapObjBase
-template<MEMFLAGS F>
+template<MemTag MT>
 class CHeapObj {
  public:
   ALWAYSINLINE void* operator new(size_t size) {
-    return CHeapObjBase::operator new(size, F);
+    return CHeapObjBase::operator new(size, MT);
   }
 
   ALWAYSINLINE void* operator new(size_t size,
                                   const NativeCallStack& stack) {
-    return CHeapObjBase::operator new(size, F, stack);
+    return CHeapObjBase::operator new(size, MT, stack);
   }
 
   ALWAYSINLINE void* operator new(size_t size, const std::nothrow_t& nt,
                                   const NativeCallStack& stack) throw() {
-    return CHeapObjBase::operator new(size, F, nt, stack);
+    return CHeapObjBase::operator new(size, MT, nt, stack);
   }
 
   ALWAYSINLINE void* operator new(size_t size, const std::nothrow_t& nt) throw() {
-    return CHeapObjBase::operator new(size, F, nt);
+    return CHeapObjBase::operator new(size, MT, nt);
   }
 
   ALWAYSINLINE void* operator new[](size_t size) {
-    return CHeapObjBase::operator new[](size, F);
+    return CHeapObjBase::operator new[](size, MT);
   }
 
   ALWAYSINLINE void* operator new[](size_t size,
                                     const NativeCallStack& stack) {
-    return CHeapObjBase::operator new[](size, F, stack);
+    return CHeapObjBase::operator new[](size, MT, stack);
   }
 
   ALWAYSINLINE void* operator new[](size_t size, const std::nothrow_t& nt,
                                     const NativeCallStack& stack) throw() {
-    return CHeapObjBase::operator new[](size, F, nt, stack);
+    return CHeapObjBase::operator new[](size, MT, nt, stack);
   }
 
   ALWAYSINLINE void* operator new[](size_t size, const std::nothrow_t& nt) throw() {
-    return CHeapObjBase::operator new[](size, F, nt);
+    return CHeapObjBase::operator new[](size, MT, nt);
   }
 
   void operator delete(void* p) {
@@ -439,10 +439,10 @@ protected:
  public:
   // CHeap allocations
-  void* operator new(size_t size, MEMFLAGS flags) throw();
-  void* operator new [](size_t size, MEMFLAGS flags) throw() = delete;
-  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw();
-  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, MEMFLAGS flags) throw() = delete;
+  void* operator new(size_t size, MemTag mem_tag) throw();
+  void* operator new [](size_t size, MemTag mem_tag) throw() = delete;
+  void* operator new(size_t size, const std::nothrow_t& nothrow_constant, MemTag mem_tag) throw();
+  void* operator new [](size_t size, const std::nothrow_t& nothrow_constant, MemTag mem_tag) throw() = delete;
 
   // Arena allocations
   void* operator new(size_t size, Arena *arena);
@@ -510,36 +510,36 @@ protected:
 #define NEW_RESOURCE_OBJ_RETURN_NULL(type)\
   NEW_RESOURCE_ARRAY_RETURN_NULL(type, 1)
 
-#define NEW_C_HEAP_ARRAY3(type, size, memflags, pc, allocfail)\
-  (type*) AllocateHeap((size) * sizeof(type), memflags, pc, allocfail)
+#define NEW_C_HEAP_ARRAY3(type, size, mem_tag, pc, allocfail)\
+
(type*) AllocateHeap((size) * sizeof(type), mem_tag, pc, allocfail) -#define NEW_C_HEAP_ARRAY2(type, size, memflags, pc)\ - (type*) (AllocateHeap((size) * sizeof(type), memflags, pc)) +#define NEW_C_HEAP_ARRAY2(type, size, mem_tag, pc)\ + (type*) (AllocateHeap((size) * sizeof(type), mem_tag, pc)) -#define NEW_C_HEAP_ARRAY(type, size, memflags)\ - (type*) (AllocateHeap((size) * sizeof(type), memflags)) +#define NEW_C_HEAP_ARRAY(type, size, mem_tag)\ + (type*) (AllocateHeap((size) * sizeof(type), mem_tag)) -#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, memflags, pc)\ - NEW_C_HEAP_ARRAY3(type, (size), memflags, pc, AllocFailStrategy::RETURN_NULL) +#define NEW_C_HEAP_ARRAY2_RETURN_NULL(type, size, mem_tag, pc)\ + NEW_C_HEAP_ARRAY3(type, (size), mem_tag, pc, AllocFailStrategy::RETURN_NULL) -#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, memflags)\ - NEW_C_HEAP_ARRAY2(type, (size), memflags, AllocFailStrategy::RETURN_NULL) +#define NEW_C_HEAP_ARRAY_RETURN_NULL(type, size, mem_tag)\ + NEW_C_HEAP_ARRAY2(type, (size), mem_tag, AllocFailStrategy::RETURN_NULL) -#define REALLOC_C_HEAP_ARRAY(type, old, size, memflags)\ - (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags)) +#define REALLOC_C_HEAP_ARRAY(type, old, size, mem_tag)\ + (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), mem_tag)) -#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, memflags)\ - (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), memflags, AllocFailStrategy::RETURN_NULL)) +#define REALLOC_C_HEAP_ARRAY_RETURN_NULL(type, old, size, mem_tag)\ + (type*) (ReallocateHeap((char*)(old), (size) * sizeof(type), mem_tag, AllocFailStrategy::RETURN_NULL)) #define FREE_C_HEAP_ARRAY(type, old) \ FreeHeap((char*)(old)) // allocate type in heap without calling ctor -#define NEW_C_HEAP_OBJ(type, memflags)\ - NEW_C_HEAP_ARRAY(type, 1, memflags) +#define NEW_C_HEAP_OBJ(type, mem_tag)\ + NEW_C_HEAP_ARRAY(type, 1, mem_tag) -#define NEW_C_HEAP_OBJ_RETURN_NULL(type, memflags)\ - NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, memflags) +#define NEW_C_HEAP_OBJ_RETURN_NULL(type, mem_tag)\ + NEW_C_HEAP_ARRAY_RETURN_NULL(type, 1, mem_tag) // deallocate obj of type in heap without calling dtor #define FREE_C_HEAP_OBJ(objname)\ @@ -568,8 +568,8 @@ class MmapArrayAllocator : public AllStatic { static size_t size_for(size_t length); public: - static E* allocate_or_null(size_t length, MEMFLAGS flags); - static E* allocate(size_t length, MEMFLAGS flags); + static E* allocate_or_null(size_t length, MemTag mem_tag); + static E* allocate(size_t length, MemTag mem_tag); static void free(E* addr, size_t length); }; @@ -579,8 +579,8 @@ class MallocArrayAllocator : public AllStatic { public: static size_t size_for(size_t length); - static E* allocate(size_t length, MEMFLAGS flags); - static E* reallocate(E* addr, size_t new_length, MEMFLAGS flags); + static E* allocate(size_t length, MemTag mem_tag); + static E* reallocate(E* addr, size_t new_length, MemTag mem_tag); static void free(E* addr); }; diff --git a/src/hotspot/share/memory/allocation.inline.hpp b/src/hotspot/share/memory/allocation.inline.hpp index 4a1b0b0c597..8d531c6dd23 100644 --- a/src/hotspot/share/memory/allocation.inline.hpp +++ b/src/hotspot/share/memory/allocation.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -55,10 +55,10 @@ size_t MmapArrayAllocator<E>::size_for(size_t length) {
 }
 
 template <class E>
-E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
+E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MemTag mem_tag) {
   size_t size = size_for(length);
 
-  char* addr = os::reserve_memory(size, !ExecMem, flags);
+  char* addr = os::reserve_memory(size, !ExecMem, mem_tag);
   if (addr == nullptr) {
     return nullptr;
   }
@@ -72,10 +72,10 @@ E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MEMFLAGS flags) {
 }
 
 template <class E>
-E* MmapArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
+E* MmapArrayAllocator<E>::allocate(size_t length, MemTag mem_tag) {
   size_t size = size_for(length);
 
-  char* addr = os::reserve_memory(size, !ExecMem, flags);
+  char* addr = os::reserve_memory(size, !ExecMem, mem_tag);
   if (addr == nullptr) {
     vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
   }
@@ -97,13 +97,13 @@ size_t MallocArrayAllocator<E>::size_for(size_t length) {
 }
 
 template <class E>
-E* MallocArrayAllocator<E>::allocate(size_t length, MEMFLAGS flags) {
-  return (E*)AllocateHeap(size_for(length), flags);
+E* MallocArrayAllocator<E>::allocate(size_t length, MemTag mem_tag) {
+  return (E*)AllocateHeap(size_for(length), mem_tag);
 }
 
 template <class E>
-E* MallocArrayAllocator<E>::reallocate(E* addr, size_t new_length, MEMFLAGS flags) {
-  return (E*)ReallocateHeap((char*)addr, size_for(new_length), flags);
+E* MallocArrayAllocator<E>::reallocate(E* addr, size_t new_length, MemTag mem_tag) {
+  return (E*)ReallocateHeap((char*)addr, size_for(new_length), mem_tag);
 }
 
 template <class E>
diff --git a/src/hotspot/share/memory/arena.cpp b/src/hotspot/share/memory/arena.cpp
index d1f3f3de7b2..51d7cda9c61 100644
--- a/src/hotspot/share/memory/arena.cpp
+++ b/src/hotspot/share/memory/arena.cpp
@@ -222,8 +222,8 @@ void Chunk::next_chop(Chunk* k) {
   k->_next = nullptr;
 }
 
-Arena::Arena(MEMFLAGS flag, Tag tag, size_t init_size) :
-  _flags(flag), _tag(tag),
+Arena::Arena(MemTag mem_tag, Tag tag, size_t init_size) :
+  _mem_tag(mem_tag), _tag(tag),
   _size_in_bytes(0),
   _first(nullptr), _chunk(nullptr),
   _hwm(nullptr), _max(nullptr)
@@ -233,13 +233,13 @@ Arena::Arena(MEMFLAGS flag, Tag tag, size_t init_size) :
   _first = _chunk;
   _hwm = _chunk->bottom();      // Save the cached hwm, max
   _max = _chunk->top();
-  MemTracker::record_new_arena(flag);
+  MemTracker::record_new_arena(mem_tag);
   set_size_in_bytes(init_size);
 }
 
 Arena::~Arena() {
   destruct_contents();
-  MemTracker::record_arena_free(_flags);
+  MemTracker::record_arena_free(_mem_tag);
 }
 
 // Destroy this arenas contents and reset to empty
@@ -259,8 +259,8 @@ void Arena::set_size_in_bytes(size_t size) {
   if (_size_in_bytes != size) {
     ssize_t delta = size - size_in_bytes();
     _size_in_bytes = size;
-    MemTracker::record_arena_size_change(delta, _flags);
-    if (CompilationMemoryStatistic::enabled() && _flags == mtCompiler) {
+    MemTracker::record_arena_size_change(delta, _mem_tag);
+    if (CompilationMemoryStatistic::enabled() && _mem_tag == mtCompiler) {
       Thread* const t = Thread::current();
       if (t != nullptr && t->is_Compiler_thread()) {
         CompilationMemoryStatistic::on_arena_change(delta, this);
@@ -286,7 +286,7 @@ void* Arena::grow(size_t x, AllocFailType alloc_failmode) {
   // (Note: all chunk sizes have to be 64-bit aligned)
   size_t len = MAX2(ARENA_ALIGN(x), (size_t) Chunk::size);
 
-  if (MemTracker::check_exceeds_limit(x, _flags)) {
+  if (MemTracker::check_exceeds_limit(x, _mem_tag)) {
     return nullptr;
   }
 
diff --git a/src/hotspot/share/memory/arena.hpp
b/src/hotspot/share/memory/arena.hpp index 1f3c5eb4e8b..5f0def2a655 100644 --- a/src/hotspot/share/memory/arena.hpp +++ b/src/hotspot/share/memory/arena.hpp @@ -107,7 +107,7 @@ public: static const char* tag_desc[static_cast(Arena::Tag::tag_count)]; private: - const MEMFLAGS _flags; // Memory tracking flags + const MemTag _mem_tag; // Native Memory Tracking tag const Tag _tag; size_t _size_in_bytes; // Size of arena (used for native memory tracking) @@ -138,7 +138,7 @@ protected: public: // Start the chunk_pool cleaner task static void start_chunk_pool_cleaner_task(); - Arena(MEMFLAGS memflag, Tag tag = Tag::tag_other, size_t init_size = Chunk::init_size); + Arena(MemTag mem_tag, Tag tag = Tag::tag_other, size_t init_size = Chunk::init_size); ~Arena(); void destruct_contents(); char* hwm() const { return _hwm; } diff --git a/src/hotspot/share/memory/guardedMemory.cpp b/src/hotspot/share/memory/guardedMemory.cpp index 7b676d72588..12ffde3cc1b 100644 --- a/src/hotspot/share/memory/guardedMemory.cpp +++ b/src/hotspot/share/memory/guardedMemory.cpp @@ -23,7 +23,7 @@ */ #include "precompiled.hpp" #include "memory/guardedMemory.hpp" -#include "nmt/memflags.hpp" +#include "nmt/memTag.hpp" #include "runtime/os.hpp" void* GuardedMemory::wrap_copy(const void* ptr, const size_t len, const void* tag) { diff --git a/src/hotspot/share/memory/heap.cpp b/src/hotspot/share/memory/heap.cpp index 98e59f58184..658ec3e8de7 100644 --- a/src/hotspot/share/memory/heap.cpp +++ b/src/hotspot/share/memory/heap.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -232,7 +232,7 @@ bool CodeHeap::reserve(ReservedSpace rs, size_t committed_size, size_t segment_s return false; } - MemTracker::record_virtual_memory_type((address)_segmap.low_boundary(), mtCode); + MemTracker::record_virtual_memory_tag((address)_segmap.low_boundary(), mtCode); assert(_segmap.committed_size() >= (size_t) _number_of_committed_segments, "could not commit enough space for segment map"); assert(_segmap.reserved_size() >= (size_t) _number_of_reserved_segments , "could not reserve enough space for segment map"); diff --git a/src/hotspot/share/memory/memRegion.cpp b/src/hotspot/share/memory/memRegion.cpp index 7dd7e1be4ab..d6565b00324 100644 --- a/src/hotspot/share/memory/memRegion.cpp +++ b/src/hotspot/share/memory/memRegion.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -102,8 +102,8 @@ MemRegion MemRegion::minus(const MemRegion mr2) const { return MemRegion(); } -MemRegion* MemRegion::create_array(size_t length, MEMFLAGS flags) { - MemRegion* result = NEW_C_HEAP_ARRAY(MemRegion, length, flags); +MemRegion* MemRegion::create_array(size_t length, MemTag mem_tag) { + MemRegion* result = NEW_C_HEAP_ARRAY(MemRegion, length, mem_tag); for (size_t i = 0; i < length; i++) { ::new (&result[i]) MemRegion(); } diff --git a/src/hotspot/share/memory/memRegion.hpp b/src/hotspot/share/memory/memRegion.hpp index 5d3d635c650..920efe75288 100644 --- a/src/hotspot/share/memory/memRegion.hpp +++ b/src/hotspot/share/memory/memRegion.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2000, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -90,7 +90,7 @@ public: bool is_empty() const { return word_size() == 0; } // Creates and initializes an array of MemRegions of the given length. - static MemRegion* create_array(size_t length, MEMFLAGS flags); + static MemRegion* create_array(size_t length, MemTag mem_tag); static void destroy_array(MemRegion* array, size_t length); }; diff --git a/src/hotspot/share/memory/metaspace.cpp b/src/hotspot/share/memory/metaspace.cpp index 2674278ec99..f86be4774d5 100644 --- a/src/hotspot/share/memory/metaspace.cpp +++ b/src/hotspot/share/memory/metaspace.cpp @@ -772,7 +772,7 @@ void Metaspace::global_initialize() { } // Mark class space as such - MemTracker::record_virtual_memory_type((address)rs.base(), mtClass); + MemTracker::record_virtual_memory_tag((address)rs.base(), mtClass); // Initialize space Metaspace::initialize_class_space(rs); diff --git a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp index a8afbc130c9..83a591e4cad 100644 --- a/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp +++ b/src/hotspot/share/memory/metaspace/virtualSpaceNode.cpp @@ -259,7 +259,7 @@ VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size, if (!rs.is_reserved()) { vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace"); } - MemTracker::record_virtual_memory_type(rs.base(), mtMetaspace); + MemTracker::record_virtual_memory_tag(rs.base(), mtMetaspace); assert_is_aligned(rs.base(), chunklevel::MAX_CHUNK_BYTE_SIZE); InternalStats::inc_num_vsnodes_births(); return new VirtualSpaceNode(rs, true, limiter, reserve_words_counter, commit_words_counter); diff --git a/src/hotspot/share/memory/padded.hpp b/src/hotspot/share/memory/padded.hpp index bca1d168cb5..7e0e0615208 100644 --- a/src/hotspot/share/memory/padded.hpp +++ b/src/hotspot/share/memory/padded.hpp @@ -25,7 +25,7 @@ #ifndef SHARE_MEMORY_PADDED_HPP #define SHARE_MEMORY_PADDED_HPP -#include "nmt/memflags.hpp" +#include "nmt/memTag.hpp" #include "utilities/align.hpp" #include "utilities/globalDefinitions.hpp" @@ -89,7 +89,7 @@ class PaddedEnd : public PaddedEndImpl { // Helper class to create an array of PaddedEnd objects. All elements will // start at a multiple of alignment and the size will be aligned to alignment. -template +template class PaddedArray { public: // Creates an aligned padded array. 
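For orientation, a sketch of what a PaddedArray call site looks like under the renamed template parameter (illustrative only: the element type, the count, and the choice of mtGC below are hypothetical, and the alignment argument is left at its default):

    // Per-worker counters, each padded out to its own cache line and
    // accounted to NMT under mtGC.
    struct Counter { volatile size_t value; };
    PaddedEnd<Counter>* counters =
        PaddedArray<Counter, mtGC>::create_unfreeable(num_workers);
    counters[0].value++;

As the comment in padded.inline.hpp notes, the raw chunk is never returned, so create_unfreeable() is only suitable for data that lives until VM exit.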
@@ -100,7 +100,7 @@ class PaddedArray {
 
 // Helper class to create an array of references to arrays of primitive types
 // Both the array of references and the data arrays are aligned to the given
 // alignment. The allocated memory is zero-filled.
-template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_PADDING_SIZE>
+template <class T, MemTag MT, size_t alignment = DEFAULT_PADDING_SIZE>
 class Padded2DArray {
  public:
   // Creates an aligned padded 2D array.
@@ -112,7 +112,7 @@ class Padded2DArray {
 
 // Helper class to create an array of T objects. The array as a whole will
 // start at a multiple of alignment and its size will be aligned to alignment.
-template <class T, MEMFLAGS flags, size_t alignment = DEFAULT_PADDING_SIZE>
+template <class T, MemTag MT, size_t alignment = DEFAULT_PADDING_SIZE>
 class PaddedPrimitiveArray {
  public:
   static T* create_unfreeable(size_t length);
diff --git a/src/hotspot/share/memory/padded.inline.hpp b/src/hotspot/share/memory/padded.inline.hpp
index 72001e3aad6..ba477bfe88f 100644
--- a/src/hotspot/share/memory/padded.inline.hpp
+++ b/src/hotspot/share/memory/padded.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,13 +34,13 @@
 
 // Creates an aligned padded array.
 // The memory can't be deleted since the raw memory chunk is not returned.
-template <class T, MEMFLAGS flags, size_t alignment>
-PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
+template <class T, MemTag MT, size_t alignment>
+PaddedEnd<T>* PaddedArray<T, MT, alignment>::create_unfreeable(uint length) {
   // Check that the PaddedEnd class works as intended.
   STATIC_ASSERT(is_aligned(sizeof(PaddedEnd<T>), alignment));
 
   // Allocate a chunk of memory large enough to allow for some alignment.
-  void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T>) + alignment, flags);
+  void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T>) + alignment, MT);
 
   // Make the initial alignment.
   PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_up(chunk, alignment);
@@ -53,8 +53,8 @@ PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
   return aligned_padded_array;
 }
 
-template <class T, MEMFLAGS flags, size_t alignment>
-T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) {
+template <class T, MemTag MT, size_t alignment>
+T** Padded2DArray<T, MT, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) {
   // Calculate and align the size of the first dimension's table.
   size_t table_size = align_up(rows * sizeof(T*), alignment);
   // The size of the separate rows.
@@ -63,7 +63,7 @@ T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint column
   size_t total_size = table_size + rows * row_size + alignment;
 
   // Allocate a chunk of memory large enough to allow alignment of the chunk.
-  void* chunk = MmapArrayAllocator<uint8_t>::allocate(total_size, flags);
+  void* chunk = MmapArrayAllocator<uint8_t>::allocate(total_size, MT);
   // Clear the allocated memory.
   // Align the chunk of memory.
   T** result = (T**)align_up(chunk, alignment);
@@ -81,16 +81,16 @@ T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint column
   return result;
 }
 
-template <class T, MEMFLAGS flags, size_t alignment>
-T* PaddedPrimitiveArray<T, flags, alignment>::create_unfreeable(size_t length) {
+template <class T, MemTag MT, size_t alignment>
+T* PaddedPrimitiveArray<T, MT, alignment>::create_unfreeable(size_t length) {
   void* temp;
   return create(length, &temp);
 }
 
-template <class T, MEMFLAGS flags, size_t alignment>
-T* PaddedPrimitiveArray<T, flags, alignment>::create(size_t length, void** alloc_base) {
+template <class T, MemTag MT, size_t alignment>
+T* PaddedPrimitiveArray<T, MT, alignment>::create(size_t length, void** alloc_base) {
   // Allocate a chunk of memory large enough to allow for some alignment.
- void* chunk = AllocateHeap(length * sizeof(T) + alignment, flags); + void* chunk = AllocateHeap(length * sizeof(T) + alignment, MT); memset(chunk, 0, length * sizeof(T) + alignment); diff --git a/src/hotspot/share/memory/resourceArea.hpp b/src/hotspot/share/memory/resourceArea.hpp index 5fd376068c5..b9a1904b507 100644 --- a/src/hotspot/share/memory/resourceArea.hpp +++ b/src/hotspot/share/memory/resourceArea.hpp @@ -51,11 +51,11 @@ class ResourceArea: public Arena { #endif // ASSERT public: - ResourceArea(MEMFLAGS flags = mtThread) : - Arena(flags, Arena::Tag::tag_ra) DEBUG_ONLY(COMMA _nesting(0)) {} + ResourceArea(MemTag mem_tag = mtThread) : + Arena(mem_tag, Arena::Tag::tag_ra) DEBUG_ONLY(COMMA _nesting(0)) {} - ResourceArea(size_t init_size, MEMFLAGS flags = mtThread) : - Arena(flags, Arena::Tag::tag_ra, init_size) DEBUG_ONLY(COMMA _nesting(0)) { + ResourceArea(size_t init_size, MemTag mem_tag = mtThread) : + Arena(mem_tag, Arena::Tag::tag_ra, init_size) DEBUG_ONLY(COMMA _nesting(0)) { } char* allocate_bytes(size_t size, AllocFailType alloc_failmode = AllocFailStrategy::EXIT_OOM); diff --git a/src/hotspot/share/memory/virtualspace.cpp b/src/hotspot/share/memory/virtualspace.cpp index c27e607353a..0cc81ce85a6 100644 --- a/src/hotspot/share/memory/virtualspace.cpp +++ b/src/hotspot/share/memory/virtualspace.cpp @@ -653,7 +653,7 @@ ReservedHeapSpace::ReservedHeapSpace(size_t size, size_t alignment, size_t page_ "area must be distinguishable from marks for mark-sweep"); if (base() != nullptr) { - MemTracker::record_virtual_memory_type((address)base(), mtJavaHeap); + MemTracker::record_virtual_memory_tag((address)base(), mtJavaHeap); } if (_fd_for_heap != -1) { @@ -671,7 +671,7 @@ ReservedCodeSpace::ReservedCodeSpace(size_t r_size, size_t rs_align, size_t rs_page_size) : ReservedSpace() { initialize(r_size, rs_align, rs_page_size, /*requested address*/ nullptr, /*executable*/ true); - MemTracker::record_virtual_memory_type((address)base(), mtCode); + MemTracker::record_virtual_memory_tag((address)base(), mtCode); } // VirtualSpace diff --git a/src/hotspot/share/nmt/allocationSite.hpp b/src/hotspot/share/nmt/allocationSite.hpp index 022fb6f4390..a7bc2e96250 100644 --- a/src/hotspot/share/nmt/allocationSite.hpp +++ b/src/hotspot/share/nmt/allocationSite.hpp @@ -25,7 +25,7 @@ #ifndef SHARE_NMT_ALLOCATIONSITE_HPP #define SHARE_NMT_ALLOCATIONSITE_HPP -#include "nmt/memflags.hpp" +#include "nmt/memTag.hpp" #include "utilities/nativeCallStack.hpp" // Allocation site represents a code path that makes a memory @@ -33,9 +33,9 @@ class AllocationSite { private: const NativeCallStack _call_stack; - const MEMFLAGS _flag; + const MemTag _mem_tag; public: - AllocationSite(const NativeCallStack& stack, MEMFLAGS flag) : _call_stack(stack), _flag(flag) { } + AllocationSite(const NativeCallStack& stack, MemTag mem_tag) : _call_stack(stack), _mem_tag(mem_tag) { } bool equals(const NativeCallStack& stack) const { return _call_stack.equals(stack); @@ -49,7 +49,7 @@ class AllocationSite { return &_call_stack; } - MEMFLAGS flag() const { return _flag; } + MemTag mem_tag() const { return _mem_tag; } }; #endif // SHARE_NMT_ALLOCATIONSITE_HPP diff --git a/src/hotspot/share/nmt/arrayWithFreeList.hpp b/src/hotspot/share/nmt/arrayWithFreeList.hpp index 13aa1045fe7..2c1812dcc52 100644 --- a/src/hotspot/share/nmt/arrayWithFreeList.hpp +++ b/src/hotspot/share/nmt/arrayWithFreeList.hpp @@ -31,7 +31,7 @@ // A flat array of elements E, backed by C-heap, growing on-demand. 
It allows for // returning arbitrary elements and keeps them in a freelist. Elements can be uniquely // identified via array index. -template +template class ArrayWithFreeList { // An E must be trivially copyable and destructible, but it may be constructed @@ -52,7 +52,7 @@ private: E e; }; - GrowableArrayCHeap _backing_storage; + GrowableArrayCHeap _backing_storage; I _free_start; bool is_in_bounds(I i) { diff --git a/src/hotspot/share/nmt/mallocHeader.cpp b/src/hotspot/share/nmt/mallocHeader.cpp index d5a7b689c2a..defe2fc045d 100644 --- a/src/hotspot/share/nmt/mallocHeader.cpp +++ b/src/hotspot/share/nmt/mallocHeader.cpp @@ -26,16 +26,16 @@ #include "nmt/mallocHeader.inline.hpp" #include "nmt/mallocSiteTable.hpp" -#include "nmt/memflags.hpp" +#include "nmt/memTag.hpp" #include "runtime/os.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/nativeCallStack.hpp" #include "utilities/ostream.hpp" -// The malloc header, as well as the coming VMATree implementation, rely on MEMFLAGS +// The malloc header, as well as the coming VMATree implementation, rely on MemTag // fitting into eight bits. -STATIC_ASSERT(sizeof(MEMFLAGS) == sizeof(uint8_t)); +STATIC_ASSERT(sizeof(MemTag) == sizeof(uint8_t)); void MallocHeader::print_block_on_error(outputStream* st, address bad_address) const { assert(bad_address >= (address)this, "sanity"); diff --git a/src/hotspot/share/nmt/mallocHeader.hpp b/src/hotspot/share/nmt/mallocHeader.hpp index 9f9f7f97ea7..c76e61fb4b5 100644 --- a/src/hotspot/share/nmt/mallocHeader.hpp +++ b/src/hotspot/share/nmt/mallocHeader.hpp @@ -26,7 +26,7 @@ #ifndef SHARE_NMT_MALLOCHEADER_HPP #define SHARE_NMT_MALLOCHEADER_HPP -#include "nmt/memflags.hpp" +#include "nmt/memTag.hpp" #include "utilities/globalDefinitions.hpp" #include "utilities/macros.hpp" #include "utilities/nativeCallStack.hpp" @@ -92,7 +92,7 @@ class MallocHeader { NOT_LP64(uint32_t _alt_canary); const size_t _size; const uint32_t _mst_marker; - const MEMFLAGS _flags; + const MemTag _mem_tag; const uint8_t _unused; uint16_t _canary; @@ -121,19 +121,19 @@ public: // Contains all of the necessary data to to deaccount block with NMT. struct FreeInfo { const size_t size; - const MEMFLAGS flags; + const MemTag mem_tag; const uint32_t mst_marker; }; - inline MallocHeader(size_t size, MEMFLAGS flags, uint32_t mst_marker); + inline MallocHeader(size_t size, MemTag mem_tag, uint32_t mst_marker); - inline size_t size() const { return _size; } - inline MEMFLAGS flags() const { return _flags; } + inline size_t size() const { return _size; } + inline MemTag mem_tag() const { return _mem_tag; } inline uint32_t mst_marker() const { return _mst_marker; } // Return the necessary data to deaccount the block with NMT. FreeInfo free_info() { - return FreeInfo{this->size(), this->flags(), this->mst_marker()}; + return FreeInfo{this->size(), this->mem_tag(), this->mst_marker()}; } inline void mark_block_as_dead(); inline void revive(); diff --git a/src/hotspot/share/nmt/mallocHeader.inline.hpp b/src/hotspot/share/nmt/mallocHeader.inline.hpp index d763241b36d..34ec891d33f 100644 --- a/src/hotspot/share/nmt/mallocHeader.inline.hpp +++ b/src/hotspot/share/nmt/mallocHeader.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2021, 2022 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* @@ -34,8 +34,8 @@ #include "utilities/macros.hpp" #include "utilities/nativeCallStack.hpp" -inline MallocHeader::MallocHeader(size_t size, MEMFLAGS flags, uint32_t mst_marker) - : _size(size), _mst_marker(mst_marker), _flags(flags), +inline MallocHeader::MallocHeader(size_t size, MemTag mem_tag, uint32_t mst_marker) + : _size(size), _mst_marker(mst_marker), _mem_tag(mem_tag), _unused(0), _canary(_header_canary_live_mark) { assert(size < max_reasonable_malloc_size, "Too large allocation size?"); diff --git a/src/hotspot/share/nmt/mallocLimit.cpp b/src/hotspot/share/nmt/mallocLimit.cpp index 746c3b9201b..5e16a406821 100644 --- a/src/hotspot/share/nmt/mallocLimit.cpp +++ b/src/hotspot/share/nmt/mallocLimit.cpp @@ -25,7 +25,7 @@ #include "precompiled.hpp" #include "nmt/mallocLimit.hpp" -#include "nmt/memflags.hpp" +#include "nmt/memTag.hpp" #include "nmt/nmtCommon.hpp" #include "runtime/java.hpp" #include "runtime/globals.hpp" @@ -80,7 +80,7 @@ public: // Check if string at position matches a category name. // Advances position on match. - bool match_category(MEMFLAGS* out) { + bool match_category(MemTag* out) { if (eof()) { return false; } @@ -90,9 +90,9 @@ public: } stringStream ss; ss.print("%.*s", (int)(end - _p), _p); - MEMFLAGS f = NMTUtil::string_to_flag(ss.base()); - if (f != mtNone) { - *out = f; + MemTag mem_tag = NMTUtil::string_to_mem_tag(ss.base()); + if (mem_tag != mtNone) { + *out = mem_tag; _p = end; return true; } @@ -131,16 +131,16 @@ void MallocLimitSet::set_global_limit(size_t s, MallocLimitMode flag) { _glob.sz = s; _glob.mode = flag; } -void MallocLimitSet::set_category_limit(MEMFLAGS f, size_t s, MallocLimitMode flag) { - const int i = NMTUtil::flag_to_index(f); +void MallocLimitSet::set_category_limit(MemTag mem_tag, size_t s, MallocLimitMode flag) { + const int i = NMTUtil::tag_to_index(mem_tag); _cat[i].sz = s; _cat[i].mode = flag; } void MallocLimitSet::reset() { set_global_limit(0, MallocLimitMode::trigger_fatal); _glob.sz = 0; _glob.mode = MallocLimitMode::trigger_fatal; - for (int i = 0; i < mt_number_of_types; i++) { - set_category_limit(NMTUtil::index_to_flag(i), 0, MallocLimitMode::trigger_fatal); + for (int i = 0; i < mt_number_of_tags; i++) { + set_category_limit(NMTUtil::index_to_tag(i), 0, MallocLimitMode::trigger_fatal); } } @@ -150,10 +150,10 @@ void MallocLimitSet::print_on(outputStream* st) const { st->print_cr("MallocLimit: total limit: " PROPERFMT " (%s)", PROPERFMTARGS(_glob.sz), mode_to_name(_glob.mode)); } else { - for (int i = 0; i < mt_number_of_types; i++) { + for (int i = 0; i < mt_number_of_tags; i++) { if (_cat[i].sz > 0) { st->print_cr("MallocLimit: category \"%s\" limit: " PROPERFMT " (%s)", - NMTUtil::flag_to_enum_name(NMTUtil::index_to_flag(i)), + NMTUtil::tag_to_enum_name(NMTUtil::index_to_tag(i)), PROPERFMTARGS(_cat[i].sz), mode_to_name(_cat[i].mode)); } } @@ -187,13 +187,13 @@ bool MallocLimitSet::parse_malloclimit_option(const char* v, const char** err) { // Category-specific form? 
else { while (!sst.eof()) { - MEMFLAGS f; + MemTag mem_tag; // Match category, followed by : - BAIL_UNLESS(sst.match_category(&f), "Expected category name"); + BAIL_UNLESS(sst.match_category(&mem_tag), "Expected category name"); BAIL_UNLESS(sst.match_char(':'), "Expected colon following category"); - malloclimit* const modified_limit = &_cat[NMTUtil::flag_to_index(f)]; + malloclimit* const modified_limit = &_cat[NMTUtil::tag_to_index(mem_tag)]; // Match size BAIL_UNLESS(sst.match_size(&modified_limit->sz), "Expected size"); diff --git a/src/hotspot/share/nmt/mallocLimit.hpp b/src/hotspot/share/nmt/mallocLimit.hpp index 2034e3ce24b..ec6799b41a3 100644 --- a/src/hotspot/share/nmt/mallocLimit.hpp +++ b/src/hotspot/share/nmt/mallocLimit.hpp @@ -27,7 +27,7 @@ #define SHARE_SERVICES_MALLOCLIMIT_HPP #include "memory/allStatic.hpp" -#include "nmt/memflags.hpp" +#include "nmt/memTag.hpp" #include "utilities/debug.hpp" #include "utilities/globalDefinitions.hpp" @@ -46,18 +46,18 @@ class outputStream; class MallocLimitSet { malloclimit _glob; // global limit - malloclimit _cat[mt_number_of_types]; // per-category limit + malloclimit _cat[mt_number_of_tags]; // per-category limit public: MallocLimitSet(); void reset(); bool parse_malloclimit_option(const char* optionstring, const char** err); - void set_global_limit(size_t s, MallocLimitMode flag); - void set_category_limit(MEMFLAGS f, size_t s, MallocLimitMode flag); + void set_global_limit(size_t s, MallocLimitMode type); + void set_category_limit(MemTag mem_tag, size_t s, MallocLimitMode mode); const malloclimit* global_limit() const { return &_glob; } - const malloclimit* category_limit(MEMFLAGS f) const { return &_cat[(int)f]; } + const malloclimit* category_limit(MemTag mem_tag) const { return &_cat[(int)mem_tag]; } void print_on(outputStream* st) const; }; @@ -69,7 +69,7 @@ class MallocLimitHandler : public AllStatic { public: static const malloclimit* global_limit() { return _limits.global_limit(); } - static const malloclimit* category_limit(MEMFLAGS f) { return _limits.category_limit(f); } + static const malloclimit* category_limit(MemTag mem_tag) { return _limits.category_limit(mem_tag); } static void initialize(const char* options); static void print_on(outputStream* st); diff --git a/src/hotspot/share/nmt/mallocSiteTable.cpp b/src/hotspot/share/nmt/mallocSiteTable.cpp index 0fdf9e0f83d..9411f76c491 100644 --- a/src/hotspot/share/nmt/mallocSiteTable.cpp +++ b/src/hotspot/share/nmt/mallocSiteTable.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -111,15 +111,15 @@ bool MallocSiteTable::walk(MallocSiteWalker* walker) { * 2. Overflow hash bucket. * Under any of above circumstances, caller should handle the situation. 
*/ -MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, uint32_t* marker, MEMFLAGS flags) { - assert(flags != mtNone, "Should have a real memory type"); +MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, uint32_t* marker, MemTag mem_tag) { + assert(mem_tag != mtNone, "Should have a real memory tag"); const unsigned int hash = key.calculate_hash(); const unsigned int index = hash_to_index(hash); *marker = 0; // First entry for this hash bucket if (_table[index] == nullptr) { - MallocSiteHashtableEntry* entry = new_entry(key, flags); + MallocSiteHashtableEntry* entry = new_entry(key, mem_tag); // OOM check if (entry == nullptr) return nullptr; @@ -137,14 +137,14 @@ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, uint32_t* while (head != nullptr && pos_idx < MAX_BUCKET_LENGTH) { if (head->hash() == hash) { MallocSite* site = head->data(); - if (site->flag() == flags && site->equals(key)) { + if (site->mem_tag() == mem_tag && site->equals(key)) { *marker = build_marker(index, pos_idx); return head->data(); } } if (head->next() == nullptr && pos_idx < (MAX_BUCKET_LENGTH - 1)) { - MallocSiteHashtableEntry* entry = new_entry(key, flags); + MallocSiteHashtableEntry* entry = new_entry(key, mem_tag); // OOM check if (entry == nullptr) return nullptr; if (head->atomic_insert(entry)) { @@ -177,10 +177,10 @@ MallocSite* MallocSiteTable::malloc_site(uint32_t marker) { // Allocates MallocSiteHashtableEntry object. Special call stack // (pre-installed allocation site) has to be used to avoid infinite // recursion. -MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key, MEMFLAGS flags) { +MallocSiteHashtableEntry* MallocSiteTable::new_entry(const NativeCallStack& key, MemTag mem_tag) { void* p = AllocateHeap(sizeof(MallocSiteHashtableEntry), mtNMT, *hash_entry_allocation_stack(), AllocFailStrategy::RETURN_NULL); - return ::new (p) MallocSiteHashtableEntry(key, flags); + return ::new (p) MallocSiteHashtableEntry(key, mem_tag); } bool MallocSiteTable::walk_malloc_site(MallocSiteWalker* walker) { diff --git a/src/hotspot/share/nmt/mallocSiteTable.hpp b/src/hotspot/share/nmt/mallocSiteTable.hpp index ae9266f5369..472bc397dd4 100644 --- a/src/hotspot/share/nmt/mallocSiteTable.hpp +++ b/src/hotspot/share/nmt/mallocSiteTable.hpp @@ -38,8 +38,8 @@ class MallocSite : public AllocationSite { MemoryCounter _c; public: - MallocSite(const NativeCallStack& stack, MEMFLAGS flags) : - AllocationSite(stack, flags) {} + MallocSite(const NativeCallStack& stack, MemTag mem_tag) : + AllocationSite(stack, mem_tag) {} void allocate(size_t size) { _c.allocate(size); } void deallocate(size_t size) { _c.deallocate(size); } @@ -63,9 +63,9 @@ class MallocSiteHashtableEntry : public CHeapObj { public: - MallocSiteHashtableEntry(NativeCallStack stack, MEMFLAGS flags): - _malloc_site(stack, flags), _hash(stack.calculate_hash()), _next(nullptr) { - assert(flags != mtNone, "Expect a real memory type"); + MallocSiteHashtableEntry(NativeCallStack stack, MemTag mem_tag): + _malloc_site(stack, mem_tag), _hash(stack.calculate_hash()), _next(nullptr) { + assert(mem_tag != mtNone, "Expect a real memory tag"); } inline const MallocSiteHashtableEntry* next() const { @@ -147,8 +147,8 @@ class MallocSiteTable : AllStatic { // 1. out of memory // 2. 
overflow hash bucket static inline bool allocation_at(const NativeCallStack& stack, size_t size, - uint32_t* marker, MEMFLAGS flags) { - MallocSite* site = lookup_or_add(stack, marker, flags); + uint32_t* marker, MemTag mem_tag) { + MallocSite* site = lookup_or_add(stack, marker, mem_tag); if (site != nullptr) site->allocate(size); return site != nullptr; } @@ -170,9 +170,9 @@ class MallocSiteTable : AllStatic { static void print_tuning_statistics(outputStream* st); private: - static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MEMFLAGS flags); + static MallocSiteHashtableEntry* new_entry(const NativeCallStack& key, MemTag mem_tag); - static MallocSite* lookup_or_add(const NativeCallStack& key, uint32_t* marker, MEMFLAGS flags); + static MallocSite* lookup_or_add(const NativeCallStack& key, uint32_t* marker, MemTag mem_tag); static MallocSite* malloc_site(uint32_t marker); static bool walk(MallocSiteWalker* walker); diff --git a/src/hotspot/share/nmt/mallocTracker.cpp b/src/hotspot/share/nmt/mallocTracker.cpp index 021ce5d1332..6829db90b4b 100644 --- a/src/hotspot/share/nmt/mallocTracker.cpp +++ b/src/hotspot/share/nmt/mallocTracker.cpp @@ -69,7 +69,7 @@ void MallocMemorySnapshot::copy_to(MallocMemorySnapshot* s) { s->_all_mallocs = _all_mallocs; size_t total_size = 0; size_t total_count = 0; - for (int index = 0; index < mt_number_of_types; index ++) { + for (int index = 0; index < mt_number_of_tags; index ++) { s->_malloc[index] = _malloc[index]; total_size += s->_malloc[index].malloc_size(); total_count += s->_malloc[index].malloc_count(); @@ -81,7 +81,7 @@ void MallocMemorySnapshot::copy_to(MallocMemorySnapshot* s) { // Total malloc'd memory used by arenas size_t MallocMemorySnapshot::total_arena() const { size_t amount = 0; - for (int index = 0; index < mt_number_of_types; index ++) { + for (int index = 0; index < mt_number_of_tags; index ++) { amount += _malloc[index].arena_size(); } return amount; @@ -91,7 +91,7 @@ size_t MallocMemorySnapshot::total_arena() const { // from total chunks to get total free chunk size void MallocMemorySnapshot::make_adjustment() { size_t arena_size = total_arena(); - int chunk_idx = NMTUtil::flag_to_index(mtChunk); + int chunk_idx = NMTUtil::tag_to_index(mtChunk); _malloc[chunk_idx].record_free(arena_size); _all_mallocs.deallocate(arena_size); } @@ -128,11 +128,11 @@ bool MallocMemorySummary::total_limit_reached(size_t s, size_t so_far, const mal return true; } -bool MallocMemorySummary::category_limit_reached(MEMFLAGS f, size_t s, size_t so_far, const malloclimit* limit) { +bool MallocMemorySummary::category_limit_reached(MemTag mem_tag, size_t s, size_t so_far, const malloclimit* limit) { #define FORMATTED \ "MallocLimit: reached category \"%s\" limit (triggering allocation size: " PROPERFMT ", allocated so far: " PROPERFMT ", limit: " PROPERFMT ") ", \ - NMTUtil::flag_to_enum_name(f), PROPERFMTARGS(s), PROPERFMTARGS(so_far), PROPERFMTARGS(limit->sz) + NMTUtil::tag_to_enum_name(mem_tag), PROPERFMTARGS(s), PROPERFMTARGS(so_far), PROPERFMTARGS(limit->sz) // If we hit the limit during error reporting, we print a short warning but otherwise ignore it. // We don't want to risk recursive assertion or torn hs-err logs. 
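For context, a sketch of the option syntax this parser accepts (reconstructed from match_category/match_size above plus recalled MallocLimit behavior, so treat the exact spellings as assumptions rather than a normative reference):

    -XX:MallocLimit=8g               global cap; the default mode aborts with a fatal error
    -XX:MallocLimit=compiler:2g:oom  per-category cap; an "oom" mode reports a native
                                     out-of-memory condition instead of asserting

Category names are resolved through NMTUtil::string_to_mem_tag (see match_category above), so any spelling that function resolves is valid here, and anything it maps to mtNone is rejected.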
@@ -167,20 +167,20 @@ bool MallocTracker::initialize(NMT_TrackingLevel level) { } // Record a malloc memory allocation -void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flags, +void* MallocTracker::record_malloc(void* malloc_base, size_t size, MemTag mem_tag, const NativeCallStack& stack) { assert(MemTracker::enabled(), "precondition"); assert(malloc_base != nullptr, "precondition"); - MallocMemorySummary::record_malloc(size, flags); + MallocMemorySummary::record_malloc(size, mem_tag); uint32_t mst_marker = 0; if (MemTracker::tracking_level() == NMT_detail) { - MallocSiteTable::allocation_at(stack, size, &mst_marker, flags); + MallocSiteTable::allocation_at(stack, size, &mst_marker, mem_tag); } // Uses placement global new operator to initialize malloc header - MallocHeader* const header = ::new (malloc_base)MallocHeader(size, flags, mst_marker); + MallocHeader* const header = ::new (malloc_base)MallocHeader(size, mem_tag, mst_marker); void* const memblock = (void*)((char*)malloc_base + sizeof(MallocHeader)); // The alignment check: 8 bytes alignment for 32 bit systems. @@ -192,7 +192,7 @@ void* MallocTracker::record_malloc(void* malloc_base, size_t size, MEMFLAGS flag { const MallocHeader* header2 = MallocHeader::resolve_checked(memblock); assert(header2->size() == size, "Wrong size"); - assert(header2->flags() == flags, "Wrong flags"); + assert(header2->mem_tag() == mem_tag, "Wrong memory tag"); } #endif @@ -213,7 +213,7 @@ void* MallocTracker::record_free_block(void* memblock) { } void MallocTracker::deaccount(MallocHeader::FreeInfo free_info) { - MallocMemorySummary::record_free(free_info.size, free_info.flags); + MallocMemorySummary::record_free(free_info.size, free_info.mem_tag); if (MemTracker::tracking_level() == NMT_detail) { MallocSiteTable::deallocation_at(free_info.size, free_info.mst_marker); } @@ -296,7 +296,7 @@ bool MallocTracker::print_pointer_information(const void* p, outputStream* st) { p2i(p), where, (block->is_dead() ? 
"dead" : "live"), p2i(block + 1), // lets print the payload start, not the header - block->size(), NMTUtil::flag_to_enum_name(block->flags())); + block->size(), NMTUtil::tag_to_enum_name(block->mem_tag())); if (MemTracker::tracking_level() == NMT_detail) { NativeCallStack ncs; if (MallocSiteTable::access_stack(ncs, *block)) { diff --git a/src/hotspot/share/nmt/mallocTracker.hpp b/src/hotspot/share/nmt/mallocTracker.hpp index 9c14ea04bf0..39d120433ef 100644 --- a/src/hotspot/share/nmt/mallocTracker.hpp +++ b/src/hotspot/share/nmt/mallocTracker.hpp @@ -27,7 +27,7 @@ #define SHARE_NMT_MALLOCTRACKER_HPP #include "nmt/mallocHeader.hpp" -#include "nmt/memflags.hpp" +#include "nmt/memTag.hpp" #include "nmt/nmtCommon.hpp" #include "runtime/atomic.hpp" #include "runtime/threadCritical.hpp" @@ -150,18 +150,18 @@ class MallocMemorySnapshot { friend class MallocMemorySummary; private: - MallocMemory _malloc[mt_number_of_types]; + MallocMemory _malloc[mt_number_of_tags]; MemoryCounter _all_mallocs; public: - inline MallocMemory* by_type(MEMFLAGS flags) { - int index = NMTUtil::flag_to_index(flags); + inline MallocMemory* by_type(MemTag mem_tag) { + int index = NMTUtil::tag_to_index(mem_tag); return &_malloc[index]; } - inline const MallocMemory* by_type(MEMFLAGS flags) const { - int index = NMTUtil::flag_to_index(flags); + inline const MallocMemory* by_type(MemTag mem_tag) const { + int index = NMTUtil::tag_to_index(mem_tag); return &_malloc[index]; } @@ -214,31 +214,31 @@ class MallocMemorySummary : AllStatic { // Called when a total limit break was detected. // Will return true if the limit was handled, false if it was ignored. - static bool category_limit_reached(MEMFLAGS f, size_t s, size_t so_far, const malloclimit* limit); + static bool category_limit_reached(MemTag mem_tag, size_t s, size_t so_far, const malloclimit* limit); public: static void initialize(); - static inline void record_malloc(size_t size, MEMFLAGS flag) { - as_snapshot()->by_type(flag)->record_malloc(size); + static inline void record_malloc(size_t size, MemTag mem_tag) { + as_snapshot()->by_type(mem_tag)->record_malloc(size); as_snapshot()->_all_mallocs.allocate(size); } - static inline void record_free(size_t size, MEMFLAGS flag) { - as_snapshot()->by_type(flag)->record_free(size); + static inline void record_free(size_t size, MemTag mem_tag) { + as_snapshot()->by_type(mem_tag)->record_free(size); as_snapshot()->_all_mallocs.deallocate(size); } - static inline void record_new_arena(MEMFLAGS flag) { - as_snapshot()->by_type(flag)->record_new_arena(); + static inline void record_new_arena(MemTag mem_tag) { + as_snapshot()->by_type(mem_tag)->record_new_arena(); } - static inline void record_arena_free(MEMFLAGS flag) { - as_snapshot()->by_type(flag)->record_arena_free(); + static inline void record_arena_free(MemTag mem_tag) { + as_snapshot()->by_type(mem_tag)->record_arena_free(); } - static inline void record_arena_size_change(ssize_t size, MEMFLAGS flag) { - as_snapshot()->by_type(flag)->record_arena_size_change(size); + static inline void record_arena_size_change(ssize_t size, MemTag mem_tag) { + as_snapshot()->by_type(mem_tag)->record_arena_size_change(size); } static void snapshot(MallocMemorySnapshot* s) { @@ -257,7 +257,7 @@ class MallocMemorySummary : AllStatic { // MallocLimit: returns true if allocating s bytes on f would trigger // either global or the category limit - static inline bool check_exceeds_limit(size_t s, MEMFLAGS f); + static inline bool check_exceeds_limit(size_t s, MemTag mem_tag); }; @@ -280,7 +280,7 @@ 
class MallocTracker : AllStatic { // // Record malloc on specified memory block - static void* record_malloc(void* malloc_base, size_t size, MEMFLAGS flags, + static void* record_malloc(void* malloc_base, size_t size, MemTag mem_tag, const NativeCallStack& stack); // Given a block returned by os::malloc() or os::realloc(): @@ -289,21 +289,21 @@ class MallocTracker : AllStatic { // Given the free info from a block, de-account block from NMT. static void deaccount(MallocHeader::FreeInfo free_info); - static inline void record_new_arena(MEMFLAGS flags) { - MallocMemorySummary::record_new_arena(flags); + static inline void record_new_arena(MemTag mem_tag) { + MallocMemorySummary::record_new_arena(mem_tag); } - static inline void record_arena_free(MEMFLAGS flags) { - MallocMemorySummary::record_arena_free(flags); + static inline void record_arena_free(MemTag mem_tag) { + MallocMemorySummary::record_arena_free(mem_tag); } - static inline void record_arena_size_change(ssize_t size, MEMFLAGS flags) { - MallocMemorySummary::record_arena_size_change(size, flags); + static inline void record_arena_size_change(ssize_t size, MemTag mem_tag) { + MallocMemorySummary::record_arena_size_change(size, mem_tag); } // MallocLimt: Given an allocation size s, check if mallocing this much - // under category f would hit either the global limit or the limit for category f. - static inline bool check_exceeds_limit(size_t s, MEMFLAGS f); + // for MemTag would hit either the global limit or the limit for MemTag. + static inline bool check_exceeds_limit(size_t s, MemTag mem_tag); // Given a pointer, look for the containing malloc block. // Print the block. Note that since there is very low risk of memory looking diff --git a/src/hotspot/share/nmt/mallocTracker.inline.hpp b/src/hotspot/share/nmt/mallocTracker.inline.hpp index 243f965a382..19d3775ed77 100644 --- a/src/hotspot/share/nmt/mallocTracker.inline.hpp +++ b/src/hotspot/share/nmt/mallocTracker.inline.hpp @@ -32,7 +32,7 @@ #include "utilities/globalDefinitions.hpp" // Returns true if allocating s bytes on f would trigger either global or the category limit -inline bool MallocMemorySummary::check_exceeds_limit(size_t s, MEMFLAGS f) { +inline bool MallocMemorySummary::check_exceeds_limit(size_t s, MemTag mem_tag) { // Note: checks are ordered to have as little impact as possible on the standard code path, // when MallocLimit is unset, resp. it is set but we have reached no limit yet. @@ -50,12 +50,12 @@ inline bool MallocMemorySummary::check_exceeds_limit(size_t s, MEMFLAGS f) { } } else { // Category Limit? 
-  l = MallocLimitHandler::category_limit(f);
+  l = MallocLimitHandler::category_limit(mem_tag);
   if (l->sz > 0) {
-    const MallocMemory* mm = as_snapshot()->by_type(f);
+    const MallocMemory* mm = as_snapshot()->by_type(mem_tag);
     size_t so_far = mm->malloc_size() + mm->arena_size();
     if ((so_far + s) > l->sz) {
-      return category_limit_reached(f, s, so_far, l);
+      return category_limit_reached(mem_tag, s, so_far, l);
     }
   }
 }
@@ -64,8 +64,8 @@ inline bool MallocMemorySummary::check_exceeds_limit(size_t s, MEMFLAGS f) {
   return false;
 }
 
-inline bool MallocTracker::check_exceeds_limit(size_t s, MEMFLAGS f) {
-  return MallocMemorySummary::check_exceeds_limit(s, f);
+inline bool MallocTracker::check_exceeds_limit(size_t s, MemTag mem_tag) {
+  return MallocMemorySummary::check_exceeds_limit(s, mem_tag);
 }
diff --git a/src/hotspot/share/nmt/memBaseline.cpp b/src/hotspot/share/nmt/memBaseline.cpp
index 7c7dd3ec24e..6f82b2de9f1 100644
--- a/src/hotspot/share/nmt/memBaseline.cpp
+++ b/src/hotspot/share/nmt/memBaseline.cpp
@@ -61,11 +61,11 @@ int compare_malloc_site(const MallocSite& s1, const MallocSite& s2) {
   return s1.call_stack()->compare(*s2.call_stack());
 }
 
-// Sort into allocation site addresses and memory type order for baseline comparison
+// Sort into allocation site addresses and memory tag order for baseline comparison
 int compare_malloc_site_and_type(const MallocSite& s1, const MallocSite& s2) {
   int res = compare_malloc_site(s1, s2);
   if (res == 0) {
-    res = (int)(NMTUtil::flag_to_index(s1.flag()) - NMTUtil::flag_to_index(s2.flag()));
+    res = (int)(NMTUtil::tag_to_index(s1.mem_tag()) - NMTUtil::tag_to_index(s2.mem_tag()));
   }
 
   return res;
@@ -207,7 +207,7 @@ bool MemBaseline::aggregate_virtual_memory_allocation_sites() {
   const ReservedMemoryRegion* rgn;
   VirtualMemoryAllocationSite* site;
   while ((rgn = itr.next()) != nullptr) {
-    VirtualMemoryAllocationSite tmp(*rgn->call_stack(), rgn->flag());
+    VirtualMemoryAllocationSite tmp(*rgn->call_stack(), rgn->mem_tag());
     site = allocation_sites.find(tmp);
     if (site == nullptr) {
       LinkedListNode<VirtualMemoryAllocationSite>* node =
diff --git a/src/hotspot/share/nmt/memBaseline.hpp b/src/hotspot/share/nmt/memBaseline.hpp
index 903f5580511..be389e375e3 100644
--- a/src/hotspot/share/nmt/memBaseline.hpp
+++ b/src/hotspot/share/nmt/memBaseline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -53,7 +53,7 @@ class MemBaseline {
     by_address,       // by memory address
     by_size,          // by memory size
     by_site,          // by call site where the memory is allocated from
-    by_site_and_type  // by call site and memory type
+    by_site_and_type  // by call site and memory tag
   };
 
  private:
@@ -144,14 +144,14 @@ class MemBaseline {
     return bl->_malloc_memory_snapshot.malloc_overhead();
   }
 
-  MallocMemory* malloc_memory(MEMFLAGS flag) {
+  MallocMemory* malloc_memory(MemTag mem_tag) {
     assert(baseline_type() != Not_baselined, "Not yet baselined");
-    return _malloc_memory_snapshot.by_type(flag);
+    return _malloc_memory_snapshot.by_type(mem_tag);
   }
 
-  VirtualMemory* virtual_memory(MEMFLAGS flag) {
+  VirtualMemory* virtual_memory(MemTag mem_tag) {
     assert(baseline_type() != Not_baselined, "Not yet baselined");
-    return _virtual_memory_snapshot.by_type(flag);
+    return _virtual_memory_snapshot.by_type(mem_tag);
   }
 
@@ -203,7 +203,7 @@ class MemBaseline {
   void malloc_sites_to_size_order();
   // Sort allocation sites in call site address order
   void malloc_sites_to_allocation_site_order();
-  // Sort allocation sites in call site address and memory type order
+  // Sort allocation sites in call site address and memory tag order
   void malloc_sites_to_allocation_site_and_type_order();
   // Sort allocation sites in reserved size order
diff --git a/src/hotspot/share/nmt/memMapPrinter.cpp b/src/hotspot/share/nmt/memMapPrinter.cpp
index 5f920b102a9..9d84121a452 100644
--- a/src/hotspot/share/nmt/memMapPrinter.cpp
+++ b/src/hotspot/share/nmt/memMapPrinter.cpp
@@ -32,8 +32,8 @@
 #include "memory/allocation.hpp"
 #include "memory/universe.hpp"
 #include "memory/resourceArea.hpp"
-#include "nmt/memflags.hpp"
-#include "nmt/memFlagBitmap.hpp"
+#include "nmt/memTag.hpp"
+#include "nmt/memTagBitmap.hpp"
 #include "nmt/memMapPrinter.hpp"
 #include "nmt/memTracker.hpp"
 #include "nmt/virtualMemoryTracker.hpp"
@@ -50,9 +50,9 @@
 /// NMT mechanics
 
 // Short, clear, descriptive names for all possible markers. Note that we only expect to see
-// those that have been used with mmap. Flags left out are printed with their nmt flag name.
+// those that have been used with mmap. Flags left out are printed with their nmt tag name.
 #define NMT_FLAGS_DO(f) \
-  /* flag, short, description */ \
+  /* mem_tag, short, description */ \
   f(mtGCCardSet, "CARDTBL", "GC Card table") \
   f(mtClassShared, "CDS", "CDS archives") \
   f(mtClass, "CLASS", "Class Space") \
@@ -67,11 +67,11 @@
   f(mtTest, "TEST", "JVM internal test mappings")
 //end
 
-static const char* get_shortname_for_nmt_flag(MEMFLAGS f) {
-#define DO(flag, shortname, text) if (flag == f) return shortname;
+static const char* get_shortname_for_mem_tag(MemTag mem_tag) {
+#define DO(t, shortname, text) if (t == mem_tag) return shortname;
   NMT_FLAGS_DO(DO)
 #undef DO
-  return NMTUtil::flag_to_enum_name(f);
+  return NMTUtil::tag_to_enum_name(mem_tag);
 }
 
 /// NMT virtual memory
 
@@ -80,7 +80,7 @@ static bool range_intersects(const void* from1, const void* to1, const void* fro
   return MAX2(from1, from2) < MIN2(to1, to2);
 }
 
-// A Cache that correlates range with MEMFLAG, optimized to be iterated quickly
+// A Cache that correlates range with MemTag, optimized to be iterated quickly
 // (cache friendly).
 class CachedNMTInformation : public VirtualMemoryWalker {
   struct Range { const void* from; const void* to; };
@@ -88,24 +88,24 @@ class CachedNMTInformation : public VirtualMemoryWalker {
   // structure would have, and it allows for faster iteration of ranges since more
   // of them fit into a cache line.
   Range* _ranges;
-  MEMFLAGS* _flags;
+  MemTag* _mem_tags;
   size_t _count, _capacity;
   mutable size_t _last;
 
 public:
-  CachedNMTInformation() : _ranges(nullptr), _flags(nullptr),
+  CachedNMTInformation() : _ranges(nullptr), _mem_tags(nullptr),
                            _count(0), _capacity(0), _last(0) {}
 
   ~CachedNMTInformation() {
     ALLOW_C_FUNCTION(free, ::free(_ranges);)
-    ALLOW_C_FUNCTION(free, ::free(_flags);)
+    ALLOW_C_FUNCTION(free, ::free(_mem_tags);)
   }
 
-  bool add(const void* from, const void* to, MEMFLAGS f) {
+  bool add(const void* from, const void* to, MemTag mem_tag) {
     // We rely on NMT regions being sorted by base
     assert(_count == 0 || (from >= _ranges[_count - 1].to), "NMT regions unordered?");
-    // we can just fold two regions if they are adjacent and have the same flag.
-    if (_count > 0 && from == _ranges[_count - 1].to && f == _flags[_count - 1]) {
+    // we can just fold two regions if they are adjacent and have the same mem_tag.
+    if (_count > 0 && from == _ranges[_count - 1].to && mem_tag == _mem_tags[_count - 1]) {
       _ranges[_count - 1].to = to;
       return true;
     }
@@ -114,8 +114,8 @@ public:
       const size_t new_capacity = MAX2((size_t)4096, 2 * _capacity);
       // Unfortunately, we need to allocate manually, raw, since we must prevent NMT deadlocks (ThreadCritical).
      ALLOW_C_FUNCTION(realloc, _ranges = (Range*)::realloc(_ranges, new_capacity * sizeof(Range));)
-      ALLOW_C_FUNCTION(realloc, _flags = (MEMFLAGS*)::realloc(_flags, new_capacity * sizeof(MEMFLAGS));)
-      if (_ranges == nullptr || _flags == nullptr) {
+      ALLOW_C_FUNCTION(realloc, _mem_tags = (MemTag*)::realloc(_mem_tags, new_capacity * sizeof(MemTag));)
+      if (_ranges == nullptr || _mem_tags == nullptr) {
        // In case of OOM lets make no fuss. Just return.
        return false;
      }
@@ -123,14 +123,14 @@ public:
    }
     assert(_capacity > _count, "Sanity");
     _ranges[_count] = Range { from, to };
-    _flags[_count] = f;
+    _mem_tags[_count] = mem_tag;
     _count++;
     return true;
   }
 
   // Given a vma [from, to), find all regions that intersect with this vma and
   // return their collective flags.
-  MemFlagBitmap lookup(const void* from, const void* to) const {
+  MemTagBitmap lookup(const void* from, const void* to) const {
     assert(from <= to, "Sanity");
     // We optimize for sequential lookups. Since this class is used when a list
     // of OS mappings is scanned (VirtualQuery, /proc/pid/maps), and these lists
@@ -139,10 +139,10 @@ public:
      // the range is to the right of the given section, we need to re-start the search
      _last = 0;
    }
-    MemFlagBitmap bm;
+    MemTagBitmap bm;
     for(uintx i = _last; i < _count; i++) {
       if (range_intersects(from, to, _ranges[i].from, _ranges[i].to)) {
-        bm.set_flag(_flags[i]);
+        bm.set_tag(_mem_tags[i]);
       } else if (to <= _ranges[i].from) {
         _last = i;
         break;
@@ -153,7 +153,7 @@ public:
 
   bool do_allocation_site(const ReservedMemoryRegion* rgn) override {
     // Cancel iteration if we run out of memory (add returns false);
-    return add(rgn->base(), rgn->end(), rgn->flag());
+    return add(rgn->base(), rgn->end(), rgn->mem_tag());
   }
 
   // Iterate all NMT virtual memory regions and fill this cache.
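The CachedNMTInformation class above keeps ranges and tags in two parallel arrays and folds a newly added region into the previous entry whenever the two are adjacent and carry the same tag. A minimal, self-contained sketch of that layout and fold rule, using illustrative names rather than the JDK types:

#include <cassert>
#include <cstdint>
#include <vector>

enum class Tag : uint8_t { A, B };                  // stand-in for MemTag
struct Range { const void* from; const void* to; }; // half-open [from, to)

// Parallel arrays: tags[i] describes ranges[i]. Splitting them keeps Range
// entries densely packed, so the hot lookup scan touches fewer cache lines.
struct RangeCache {
  std::vector<Range> ranges;
  std::vector<Tag> tags;

  void add(const void* from, const void* to, Tag t) {
    // Regions are expected to arrive sorted by base address.
    assert(ranges.empty() || from >= ranges.back().to);
    // Fold adjacent regions with an identical tag into one entry.
    if (!ranges.empty() && from == ranges.back().to && t == tags.back()) {
      ranges.back().to = to;
      return;
    }
    ranges.push_back(Range{from, to});
    tags.push_back(t);
  }
};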
@@ -247,16 +247,16 @@ bool MappingPrintSession::print_nmt_info_for_region(const void* vma_from, const
   // print NMT information, if available
   if (MemTracker::enabled()) {
     // Correlate vma region (from, to) with NMT region(s) we collected previously.
-    const MemFlagBitmap flags = _nmt_info.lookup(vma_from, vma_to);
+    const MemTagBitmap flags = _nmt_info.lookup(vma_from, vma_to);
     if (flags.has_any()) {
-      for (int i = 0; i < mt_number_of_types; i++) {
-        const MEMFLAGS flag = (MEMFLAGS)i;
-        if (flags.has_flag(flag)) {
+      for (int i = 0; i < mt_number_of_tags; i++) {
+        const MemTag mem_tag = (MemTag)i;
+        if (flags.has_tag(mem_tag)) {
           if (num_printed > 0) {
             _out->put(',');
           }
-          _out->print("%s", get_shortname_for_nmt_flag(flag));
-          if (flag == mtThreadStack) {
+          _out->print("%s", get_shortname_for_mem_tag(mem_tag));
+          if (mem_tag == mtThreadStack) {
             print_thread_details_for_supposed_stack_address(vma_from, vma_to, _out);
           }
           num_printed++;
diff --git a/src/hotspot/share/nmt/memMapPrinter.hpp b/src/hotspot/share/nmt/memMapPrinter.hpp
index aa35a830001..533fa752104 100644
--- a/src/hotspot/share/nmt/memMapPrinter.hpp
+++ b/src/hotspot/share/nmt/memMapPrinter.hpp
@@ -27,7 +27,7 @@
 #define SHARE_SERVICES_MEMMAPPRINTER_HPP
 
 #include "memory/allStatic.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "utilities/globalDefinitions.hpp"
 
 #ifdef LINUX
diff --git a/src/hotspot/share/nmt/memReporter.cpp b/src/hotspot/share/nmt/memReporter.cpp
index d53782dfdaa..6ce6206ebcc 100644
--- a/src/hotspot/share/nmt/memReporter.cpp
+++ b/src/hotspot/share/nmt/memReporter.cpp
@@ -26,7 +26,7 @@
 #include "memory/metaspace.hpp"
 #include "memory/metaspaceUtils.hpp"
 #include "nmt/mallocTracker.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "nmt/memReporter.hpp"
 #include "nmt/memoryFileTracker.hpp"
 #include "nmt/threadStackTracker.hpp"
@@ -70,17 +70,17 @@ void MemReporterBase::print_total(size_t reserved, size_t committed, size_t peak
   }
 }
 
-void MemReporterBase::print_malloc(const MemoryCounter* c, MEMFLAGS flag) const {
+void MemReporterBase::print_malloc(const MemoryCounter* c, MemTag mem_tag) const {
   const char* scale = current_scale();
   outputStream* out = output();
-  const char* alloc_type = (flag == mtThreadStack) ? "" : "malloc=";
+  const char* alloc_type = (mem_tag == mtThreadStack) ? "" : "malloc=";
 
   const size_t amount = c->size();
   const size_t count = c->count();
 
-  if (flag != mtNone) {
+  if (mem_tag != mtNone) {
     out->print("(%s" SIZE_FORMAT "%s type=%s", alloc_type,
-               amount_in_current_scale(amount), scale, NMTUtil::flag_to_name(flag));
+               amount_in_current_scale(amount), scale, NMTUtil::tag_to_name(mem_tag));
   } else {
     out->print("(%s" SIZE_FORMAT "%s", alloc_type,
                amount_in_current_scale(amount), scale);
@@ -176,31 +176,31 @@ void MemSummaryReporter::report() {
   out->cr();
   out->cr();
 
-  // Summary by memory type
-  for (int index = 0; index < mt_number_of_types; index ++) {
-    MEMFLAGS flag = NMTUtil::index_to_flag(index);
+  // Summary by memory tag
+  for (int index = 0; index < mt_number_of_tags; index ++) {
+    MemTag mem_tag = NMTUtil::index_to_tag(index);
     // thread stack is reported as part of thread category
-    if (flag == mtThreadStack) continue;
-    MallocMemory* malloc_memory = _malloc_snapshot->by_type(flag);
-    VirtualMemory* virtual_memory = _vm_snapshot->by_type(flag);
+    if (mem_tag == mtThreadStack) continue;
+    MallocMemory* malloc_memory = _malloc_snapshot->by_type(mem_tag);
+    VirtualMemory* virtual_memory = _vm_snapshot->by_type(mem_tag);
 
-    report_summary_of_type(flag, malloc_memory, virtual_memory);
+    report_summary_of_type(mem_tag, malloc_memory, virtual_memory);
   }
 }
 
-void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
+void MemSummaryReporter::report_summary_of_type(MemTag mem_tag,
   MallocMemory* malloc_memory, VirtualMemory* virtual_memory) {
 
   size_t reserved_amount = reserved_total (malloc_memory, virtual_memory);
   size_t committed_amount = committed_total(malloc_memory, virtual_memory);
 
   // Count thread's native stack in "Thread" category
-  if (flag == mtThread) {
+  if (mem_tag == mtThread) {
     const VirtualMemory* thread_stack_usage =
       (const VirtualMemory*)_vm_snapshot->by_type(mtThreadStack);
     reserved_amount += thread_stack_usage->reserved();
     committed_amount += thread_stack_usage->committed();
-  } else if (flag == mtNMT) {
+  } else if (mem_tag == mtNMT) {
     // Count malloc headers in "NMT" category
     reserved_amount += _malloc_snapshot->malloc_overhead();
     committed_amount += _malloc_snapshot->malloc_overhead();
@@ -219,10 +219,10 @@ void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
   outputStream* out = output();
   const char* scale = current_scale();
   constexpr int indent = 28;
-  out->print("-%*s (", indent - 2, NMTUtil::flag_to_name(flag));
+  out->print("-%*s (", indent - 2, NMTUtil::tag_to_name(mem_tag));
   print_total(reserved_amount, committed_amount);
 #if INCLUDE_CDS
-  if (flag == mtClassShared) {
+  if (mem_tag == mtClassShared) {
     size_t read_only_bytes = FileMapInfo::readonly_total();
     output()->print(", readonly=" SIZE_FORMAT "%s",
                     amount_in_current_scale(read_only_bytes), scale);
@@ -232,12 +232,12 @@
 
   streamIndentor si(out, indent);
 
-  if (flag == mtClass) {
+  if (mem_tag == mtClass) {
     // report class count
     out->print_cr("(classes #" SIZE_FORMAT ")", (_instance_class_count + _array_class_count));
     out->print_cr("(  instance classes #" SIZE_FORMAT ", array classes #" SIZE_FORMAT ")",
                   _instance_class_count, _array_class_count);
-  } else if (flag == mtThread) {
+  } else if (mem_tag == mtThread) {
     const VirtualMemory* thread_stack_usage =
       _vm_snapshot->by_type(mtThreadStack);
     // report thread count
@@ -263,11 +263,11 @@ void MemSummaryReporter::report_summary_of_type(MEMFLAGS flag,
     out->cr();
   }
 
-  if (flag == mtNMT &&
+  if (mem_tag == mtNMT &&
       amount_in_current_scale(_malloc_snapshot->malloc_overhead()) > 0) {
     out->print_cr("(tracking overhead=" SIZE_FORMAT "%s)",
                   amount_in_current_scale(_malloc_snapshot->malloc_overhead()), scale);
-  } else if (flag == mtClass) {
+  } else if (mem_tag == mtClass) {
     // Metadata information
     report_metadata(Metaspace::NonClassType);
     if (Metaspace::using_class_space()) {
@@ -338,12 +338,12 @@ int MemDetailReporter::report_malloc_sites() {
     }
     const NativeCallStack* stack = malloc_site->call_stack();
     _stackprinter.print_stack(stack);
-    MEMFLAGS flag = malloc_site->flag();
-    assert(NMTUtil::flag_is_valid(flag) && flag != mtNone,
-           "Must have a valid memory type");
+    MemTag mem_tag = malloc_site->mem_tag();
+    assert(NMTUtil::tag_is_valid(mem_tag) && mem_tag != mtNone,
+           "Must have a valid memory tag");
     INDENT_BY(29,
       out->print("(");
-      print_malloc(malloc_site->counter(), flag);
+      print_malloc(malloc_site->counter(), mem_tag);
       out->print_cr(")");
     )
     out->cr();
@@ -378,9 +378,9 @@ int MemDetailReporter::report_virtual_memory_allocation_sites() {
     INDENT_BY(29,
       out->print("(");
      print_total(virtual_memory_site->reserved(), virtual_memory_site->committed());
-      const MEMFLAGS flag = virtual_memory_site->flag();
-      if (flag != mtNone) {
-        out->print(" Type=%s", NMTUtil::flag_to_name(flag));
+      const MemTag mem_tag = virtual_memory_site->mem_tag();
+      if (mem_tag != mtNone) {
+        out->print(" Type=%s", NMTUtil::tag_to_name(mem_tag));
       }
       out->print_cr(")");
     )
@@ -423,7 +423,7 @@ void MemDetailReporter::report_virtual_memory_region(const ReservedMemoryRegion*
   const char* region_type = (all_committed ? "reserved and committed" : "reserved");
   out->cr();
   print_virtual_memory_region(region_type, reserved_rgn->base(), reserved_rgn->size());
-  out->print(" for %s", NMTUtil::flag_to_name(reserved_rgn->flag()));
+  out->print(" for %s", NMTUtil::tag_to_name(reserved_rgn->mem_tag()));
   if (stack->is_empty()) {
     out->cr();
   } else {
@@ -519,31 +519,31 @@ void MemSummaryDiffReporter::report_diff() {
   out->cr();
   out->cr();
 
-  // Summary diff by memory type
-  for (int index = 0; index < mt_number_of_types; index ++) {
-    MEMFLAGS flag = NMTUtil::index_to_flag(index);
+  // Summary diff by memory tag
+  for (int index = 0; index < mt_number_of_tags; index ++) {
+    MemTag mem_tag = NMTUtil::index_to_tag(index);
     // thread stack is reported as part of thread category
-    if (flag == mtThreadStack) continue;
-    diff_summary_of_type(flag,
-                         _early_baseline.malloc_memory(flag),
-                         _early_baseline.virtual_memory(flag),
+    if (mem_tag == mtThreadStack) continue;
+    diff_summary_of_type(mem_tag,
+                         _early_baseline.malloc_memory(mem_tag),
+                         _early_baseline.virtual_memory(mem_tag),
                          _early_baseline.metaspace_stats(),
-                         _current_baseline.malloc_memory(flag),
-                         _current_baseline.virtual_memory(flag),
+                         _current_baseline.malloc_memory(mem_tag),
+                         _current_baseline.virtual_memory(mem_tag),
                          _current_baseline.metaspace_stats());
   }
 }
 
 void MemSummaryDiffReporter::print_malloc_diff(size_t current_amount, size_t current_count,
-    size_t early_amount, size_t early_count, MEMFLAGS flags) const {
+    size_t early_amount, size_t early_count, MemTag mem_tag) const {
   const char* scale = current_scale();
   outputStream* out = output();
-  const char* alloc_type = (flags == mtThread) ? "" : "malloc=";
+  const char* alloc_tag = (mem_tag == mtThread) ? "" : "malloc=";
 
-  out->print("%s" SIZE_FORMAT "%s", alloc_type, amount_in_current_scale(current_amount), scale);
+  out->print("%s" SIZE_FORMAT "%s", alloc_tag, amount_in_current_scale(current_amount), scale);
   // Report type only if it is valid and not under "thread" category
-  if (flags != mtNone && flags != mtThread) {
-    out->print(" type=%s", NMTUtil::flag_to_name(flags));
+  if (mem_tag != mtNone && mem_tag != mtThread) {
+    out->print(" type=%s", NMTUtil::tag_to_name(mem_tag));
   }
 
   int64_t amount_diff = diff_in_current_scale(current_amount, early_amount);
@@ -594,7 +594,7 @@ void MemSummaryDiffReporter::print_virtual_memory_diff(size_t current_reserved,
 }
 
-void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
+void MemSummaryDiffReporter::diff_summary_of_type(MemTag mem_tag,
   const MallocMemory* early_malloc, const VirtualMemory* early_vm,
   const MetaspaceCombinedStats& early_ms,
   const MallocMemory* current_malloc, const VirtualMemory* current_vm,
@@ -613,7 +613,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
   size_t early_committed_amount = committed_total(early_malloc, early_vm);
 
   // Adjust virtual memory total
-  if (flag == mtThread) {
+  if (mem_tag == mtThread) {
     const VirtualMemory* early_thread_stack_usage =
       _early_baseline.virtual_memory(mtThreadStack);
     const VirtualMemory* current_thread_stack_usage =
@@ -624,7 +624,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
     current_reserved_amount += current_thread_stack_usage->reserved();
     current_committed_amount += current_thread_stack_usage->committed();
-  } else if (flag == mtNMT) {
+  } else if (mem_tag == mtNMT) {
     early_reserved_amount += _early_baseline.malloc_tracking_overhead();
     early_committed_amount += _early_baseline.malloc_tracking_overhead();
 
@@ -636,7 +636,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
       diff_in_current_scale(current_reserved_amount, early_reserved_amount) != 0) {
 
     // print summary line
-    out->print("-%*s (", indent - 2, NMTUtil::flag_to_name(flag));
+    out->print("-%*s (", indent - 2, NMTUtil::tag_to_name(mem_tag));
     print_virtual_memory_diff(current_reserved_amount, current_committed_amount,
                               early_reserved_amount, early_committed_amount);
     out->print_cr(")");
@@ -644,7 +644,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
     streamIndentor si(out, indent);
 
     // detail lines
-    if (flag == mtClass) {
+    if (mem_tag == mtClass) {
       // report class count
       out->print("(classes #" SIZE_FORMAT, _current_baseline.class_count());
       const ssize_t class_count_diff =
@@ -668,7 +668,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
       }
       out->print_cr(")");
-    } else if (flag == mtThread) {
+    } else if (mem_tag == mtThread) {
       // report thread count
      out->print("(threads #" SIZE_FORMAT, _current_baseline.thread_count());
       const ssize_t thread_count_diff = counter_diff(_current_baseline.thread_count(), _early_baseline.thread_count());
@@ -696,7 +696,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
     if (amount_in_current_scale(current_malloc_amount) > 0 ||
         diff_in_current_scale(current_malloc_amount, early_malloc_amount) != 0) {
       out->print("(");
-      print_malloc_diff(current_malloc_amount, (flag == mtChunk) ? 0 : current_malloc->malloc_count(),
+      print_malloc_diff(current_malloc_amount, (mem_tag == mtChunk) ? 0 : current_malloc->malloc_count(),
                         early_malloc_amount, early_malloc->malloc_count(), mtNone);
       out->print_cr(")");
     }
@@ -720,7 +720,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
   }
 
   // Report native memory tracking overhead
-  if (flag == mtNMT) {
+  if (mem_tag == mtNMT) {
     size_t current_tracking_overhead = amount_in_current_scale(_current_baseline.malloc_tracking_overhead());
     size_t early_tracking_overhead = amount_in_current_scale(_early_baseline.malloc_tracking_overhead());
 
@@ -733,7 +733,7 @@ void MemSummaryDiffReporter::diff_summary_of_type(MEMFLAGS flag,
       out->print(" " INT64_PLUS_FORMAT "%s", overhead_diff, scale);
     }
     out->print_cr(")");
-  } else if (flag == mtClass) {
+  } else if (mem_tag == mtClass) {
     print_metaspace_diff(current_ms, early_ms);
   }
   out->cr();
@@ -847,9 +847,9 @@ void MemDetailDiffReporter::diff_virtual_memory_sites() const {
     } else if (compVal > 0) {
       old_virtual_memory_site(early_site);
       early_site = early_itr.next();
-    } else if (early_site->flag() != current_site->flag()) {
-      // This site was originally allocated with one flag, then released,
-      // then re-allocated at the same site (as far as we can tell) with a different flag.
+    } else if (early_site->mem_tag() != current_site->mem_tag()) {
+      // This site was originally allocated with one memory tag, then released,
+      // then re-allocated at the same site (as far as we can tell) with a different memory tag.
       old_virtual_memory_site(early_site);
       early_site = early_itr.next();
       new_virtual_memory_site(current_site);
@@ -866,29 +866,29 @@ void MemDetailDiffReporter::new_malloc_site(const MallocSite* malloc_site) const
   diff_malloc_site(malloc_site->call_stack(), malloc_site->size(), malloc_site->count(),
-                   0, 0, malloc_site->flag());
+                   0, 0, malloc_site->mem_tag());
 }
 
 void MemDetailDiffReporter::old_malloc_site(const MallocSite* malloc_site) const {
   diff_malloc_site(malloc_site->call_stack(), 0, 0, malloc_site->size(),
-                   malloc_site->count(), malloc_site->flag());
+                   malloc_site->count(), malloc_site->mem_tag());
 }
 
 void MemDetailDiffReporter::diff_malloc_site(const MallocSite* early,
                                              const MallocSite* current) const {
-  if (early->flag() != current->flag()) {
+  if (early->mem_tag() != current->mem_tag()) {
     // If malloc site type changed, treat it as deallocation of old type and
    // allocation of new type.
     old_malloc_site(early);
     new_malloc_site(current);
   } else {
     diff_malloc_site(current->call_stack(), current->size(), current->count(),
-                     early->size(), early->count(), early->flag());
+                     early->size(), early->count(), early->mem_tag());
   }
 }
 
 void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_t current_size,
-  size_t current_count, size_t early_size, size_t early_count, MEMFLAGS flags) const {
+  size_t current_count, size_t early_size, size_t early_count, MemTag mem_tag) const {
   outputStream* out = output();
 
   assert(stack != nullptr, "null stack");
@@ -900,7 +900,7 @@ void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_
   _stackprinter.print_stack(stack);
   INDENT_BY(28,
     out->print("(");
-    print_malloc_diff(current_size, current_count, early_size, early_count, flags);
+    print_malloc_diff(current_size, current_count, early_size, early_count, mem_tag);
     out->print_cr(")");
   )
   out->cr();
@@ -909,21 +909,21 @@ void MemDetailDiffReporter::diff_malloc_site(const NativeCallStack* stack, size_
 
 void MemDetailDiffReporter::new_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
-  diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0, site->flag());
+  diff_virtual_memory_site(site->call_stack(), site->reserved(), site->committed(), 0, 0, site->mem_tag());
 }
 
 void MemDetailDiffReporter::old_virtual_memory_site(const VirtualMemoryAllocationSite* site) const {
-  diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed(), site->flag());
+  diff_virtual_memory_site(site->call_stack(), 0, 0, site->reserved(), site->committed(), site->mem_tag());
 }
 
 void MemDetailDiffReporter::diff_virtual_memory_site(const VirtualMemoryAllocationSite* early,
                                                      const VirtualMemoryAllocationSite* current) const {
   diff_virtual_memory_site(current->call_stack(), current->reserved(), current->committed(),
-                           early->reserved(), early->committed(), current->flag());
+                           early->reserved(), early->committed(), current->mem_tag());
 }
 
 void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
-  size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const {
+  size_t current_committed, size_t early_reserved, size_t early_committed, MemTag mem_tag) const {
   outputStream* out = output();
 
   // no change
@@ -936,8 +936,8 @@ void MemDetailDiffReporter::diff_virtual_memory_site(const NativeCallStac
   INDENT_BY(28,
     out->print("(mmap: ");
     print_virtual_memory_diff(current_reserved, current_committed, early_reserved, early_committed);
-    if (flag != mtNone) {
-      out->print(" Type=%s", NMTUtil::flag_to_name(flag));
+    if (mem_tag != mtNone) {
+      out->print(" Type=%s", NMTUtil::tag_to_name(mem_tag));
    }
     out->print_cr(")");
   )
diff --git a/src/hotspot/share/nmt/memReporter.hpp b/src/hotspot/share/nmt/memReporter.hpp
index 095c0550939..773377b5e20 100644
--- a/src/hotspot/share/nmt/memReporter.hpp
+++ b/src/hotspot/share/nmt/memReporter.hpp
@@ -108,7 +108,7 @@ class MemReporterBase : public StackObj {
 
   // Print summary total, malloc and virtual memory
   void print_total(size_t reserved, size_t committed, size_t peak = 0) const;
-  void print_malloc(const MemoryCounter* c, MEMFLAGS flag = mtNone) const;
+  void print_malloc(const MemoryCounter* c, MemTag mem_tag = mtNone) const;
   void print_virtual_memory(size_t reserved, size_t committed, size_t peak) const;
 
   void print_arena(const MemoryCounter* c) const;
@@ -138,8 +138,8 @@ class MemSummaryReporter : public MemReporterBase {
   // Generate summary report
   virtual void report();
  private:
-  // Report summary for each memory type
-  void report_summary_of_type(MEMFLAGS type, MallocMemory* malloc_memory,
+  // Report summary for each memory tag
+  void report_summary_of_type(MemTag mem_tag, MallocMemory* malloc_memory,
     VirtualMemory* virtual_memory);
 
   void report_metadata(Metaspace::MetadataType type) const;
@@ -203,8 +203,8 @@ class MemSummaryDiffReporter : public MemReporterBase {
   virtual void report_diff();
 
  private:
-  // report the comparison of each memory type
-  void diff_summary_of_type(MEMFLAGS type,
+  // report the comparison of each memory tag
+  void diff_summary_of_type(MemTag mem_tag,
     const MallocMemory* early_malloc, const VirtualMemory* early_vm,
     const MetaspaceCombinedStats& early_ms,
     const MallocMemory* current_malloc, const VirtualMemory* current_vm,
@@ -212,7 +212,7 @@ class MemSummaryDiffReporter : public MemReporterBase {
 
  protected:
   void print_malloc_diff(size_t current_amount, size_t current_count,
-    size_t early_amount, size_t early_count, MEMFLAGS flags) const;
+    size_t early_amount, size_t early_count, MemTag mem_tag) const;
   void print_virtual_memory_diff(size_t current_reserved, size_t current_committed,
     size_t early_reserved, size_t early_committed) const;
   void print_arena_diff(size_t current_amount, size_t current_count,
@@ -262,9 +262,9 @@ class MemDetailDiffReporter : public MemSummaryDiffReporter {
     const VirtualMemoryAllocationSite* current) const;
 
   void diff_malloc_site(const NativeCallStack* stack, size_t current_size,
-    size_t currrent_count, size_t early_size, size_t early_count, MEMFLAGS flags) const;
+    size_t current_count, size_t early_size, size_t early_count, MemTag mem_tag) const;
   void diff_virtual_memory_site(const NativeCallStack* stack, size_t current_reserved,
-    size_t current_committed, size_t early_reserved, size_t early_committed, MEMFLAGS flag) const;
+    size_t current_committed, size_t early_reserved, size_t early_committed, MemTag mem_tag) const;
 };
 
 #endif // SHARE_NMT_MEMREPORTER_HPP
diff --git a/src/hotspot/share/nmt/memflags.hpp b/src/hotspot/share/nmt/memTag.hpp
similarity index 84%
rename from src/hotspot/share/nmt/memflags.hpp
rename to src/hotspot/share/nmt/memTag.hpp
index 530c9ae9d95..9255645638d 100644
--- a/src/hotspot/share/nmt/memflags.hpp
+++ b/src/hotspot/share/nmt/memTag.hpp
@@ -22,13 +22,13 @@
  *
  */
 
-#ifndef SHARE_NMT_MEMFLAGS_HPP
-#define SHARE_NMT_MEMFLAGS_HPP
+#ifndef SHARE_NMT_MEM_TAG_HPP
+#define SHARE_NMT_MEM_TAG_HPP
 
 #include "utilities/globalDefinitions.hpp"
 
-#define MEMORY_TYPES_DO(f) \
-  /* Memory type by sub systems. It occupies lower byte. */ \
+#define MEMORY_TAG_DO(f) \
+  /* Memory tag by sub systems. It occupies lower byte. */ \
   f(mtJavaHeap, "Java Heap") /* Java heap */ \
   f(mtClass, "Class") /* Java classes */ \
   f(mtThread, "Thread") /* thread objects */ \
@@ -61,22 +61,22 @@ f(mtNone, "Unknown") \
 //end
 
-#define MEMORY_TYPE_DECLARE_ENUM(type, human_readable) \
-  type,
+#define MEMORY_TAG_DECLARE_ENUM(mem_tag, human_readable) \
+  mem_tag,
 
-enum class MEMFLAGS : uint8_t {
-  MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_ENUM)
-  mt_number_of_types // number of memory types (mtDontTrack
-                     // is not included as validate type)
+enum class MemTag : uint8_t {
+  MEMORY_TAG_DO(MEMORY_TAG_DECLARE_ENUM)
+  mt_number_of_tags // number of memory tags (mtDontTrack
+                    // is not included as a valid tag)
 };
 
-#define MEMORY_TYPE_SHORTNAME(type, human_readable) \
-  constexpr MEMFLAGS type = MEMFLAGS::type;
+#define MEMORY_TAG_SHORTNAME(mem_tag, human_readable) \
+  constexpr MemTag mem_tag = MemTag::mem_tag;
 
-// Generate short aliases for the enum values. E.g. mtGC instead of MEMFLAGS::mtGC.
-MEMORY_TYPES_DO(MEMORY_TYPE_SHORTNAME)
+// Generate short aliases for the enum values. E.g. mtGC instead of MemTag::mtGC.
+MEMORY_TAG_DO(MEMORY_TAG_SHORTNAME)
 
 // Make an int version of the sentinel end value.
-constexpr int mt_number_of_types = static_cast<int>(MEMFLAGS::mt_number_of_types);
+constexpr int mt_number_of_tags = static_cast<int>(MemTag::mt_number_of_tags);
 
-#endif // SHARE_NMT_MEMFLAGS_HPP
+#endif // SHARE_NMT_MEM_TAG_HPP
diff --git a/src/hotspot/share/nmt/memFlagBitmap.hpp b/src/hotspot/share/nmt/memTagBitmap.hpp
similarity index 75%
rename from src/hotspot/share/nmt/memFlagBitmap.hpp
rename to src/hotspot/share/nmt/memTagBitmap.hpp
index 0464179948b..f65dce60fa6 100644
--- a/src/hotspot/share/nmt/memFlagBitmap.hpp
+++ b/src/hotspot/share/nmt/memTagBitmap.hpp
@@ -23,34 +23,34 @@
  *
  */
 
-#ifndef SHARE_NMT_MEMFLAGBITMAP_HPP
-#define SHARE_NMT_MEMFLAGBITMAP_HPP
+#ifndef SHARE_NMT_MEMTAGBITMAP_HPP
+#define SHARE_NMT_MEMTAGBITMAP_HPP
 
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
 
-class MemFlagBitmap {
+class MemTagBitmap {
   uint32_t _v;
-  STATIC_ASSERT(sizeof(_v) * BitsPerByte >= mt_number_of_types);
+  STATIC_ASSERT(sizeof(_v) * BitsPerByte >= mt_number_of_tags);
 
 public:
-  MemFlagBitmap(uint32_t v = 0) : _v(v) {}
-  MemFlagBitmap(const MemFlagBitmap& o) : _v(o._v) {}
+  MemTagBitmap(uint32_t v = 0) : _v(v) {}
+  MemTagBitmap(const MemTagBitmap& o) : _v(o._v) {}
 
   uint32_t raw_value() const { return _v; }
 
-  void set_flag(MEMFLAGS f) {
-    const int bitno = (int)f;
+  void set_tag(MemTag mem_tag) {
+    const int bitno = (int)mem_tag;
     _v |= nth_bit(bitno);
   }
-  bool has_flag(MEMFLAGS f) const {
-    const int bitno = (int)f;
+  bool has_tag(MemTag mem_tag) const {
+    const int bitno = (int)mem_tag;
     return _v & nth_bit(bitno);
   }
   bool has_any() const { return _v > 0; }
 };
 
-#endif // SHARE_NMT_NMTUSAGE_HPP
+#endif // SHARE_NMT_MEMTAGBITMAP_HPP
diff --git a/src/hotspot/share/nmt/memTracker.cpp b/src/hotspot/share/nmt/memTracker.cpp
index f40f9428443..fb9c9a50db1 100644
--- a/src/hotspot/share/nmt/memTracker.cpp
+++ b/src/hotspot/share/nmt/memTracker.cpp
@@ -63,7 +63,7 @@ void MemTracker::initialize() {
   // Memory type is encoded into tracking header as a byte field,
   // make sure that we don't overflow it.
-  STATIC_ASSERT(mt_number_of_types <= max_jubyte);
+  STATIC_ASSERT(mt_number_of_tags <= max_jubyte);
 
   if (level > NMT_off) {
     if (!MallocTracker::initialize(level) ||
diff --git a/src/hotspot/share/nmt/memTracker.hpp b/src/hotspot/share/nmt/memTracker.hpp
index 74aa9f803b1..31b1e66b8a6 100644
--- a/src/hotspot/share/nmt/memTracker.hpp
+++ b/src/hotspot/share/nmt/memTracker.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,11 +75,11 @@ class MemTracker : AllStatic {
     return enabled() ? MallocTracker::overhead_per_malloc : 0;
   }
 
-  static inline void* record_malloc(void* mem_base, size_t size, MEMFLAGS flag,
+  static inline void* record_malloc(void* mem_base, size_t size, MemTag mem_tag,
                                     const NativeCallStack& stack) {
     assert(mem_base != nullptr, "caller should handle null");
     if (enabled()) {
-      return MallocTracker::record_malloc(mem_base, size, flag, stack);
+      return MallocTracker::record_malloc(mem_base, size, mem_tag, stack);
     }
     return mem_base;
   }
@@ -99,34 +99,34 @@ class MemTracker : AllStatic {
   }
 
   // Record creation of an arena
-  static inline void record_new_arena(MEMFLAGS flag) {
+  static inline void record_new_arena(MemTag mem_tag) {
     if (!enabled()) return;
-    MallocTracker::record_new_arena(flag);
+    MallocTracker::record_new_arena(mem_tag);
   }
 
   // Record destruction of an arena
-  static inline void record_arena_free(MEMFLAGS flag) {
+  static inline void record_arena_free(MemTag mem_tag) {
     if (!enabled()) return;
-    MallocTracker::record_arena_free(flag);
+    MallocTracker::record_arena_free(mem_tag);
   }
 
   // Record arena size change. Arena size is the size of all arena
   // chunks that are backing up the arena.
-  static inline void record_arena_size_change(ssize_t diff, MEMFLAGS flag) {
+  static inline void record_arena_size_change(ssize_t diff, MemTag mem_tag) {
     if (!enabled()) return;
-    MallocTracker::record_arena_size_change(diff, flag);
+    MallocTracker::record_arena_size_change(diff, mem_tag);
   }
 
   // Note: virtual memory operations should only ever be called after NMT initialization
   // (we do not do any reservations before that).
   static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
-                                                   MEMFLAGS flag = mtNone) {
+                                                   MemTag mem_tag = mtNone) {
     assert_post_init();
     if (!enabled()) return;
     if (addr != nullptr) {
       ThreadCritical tc;
-      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
+      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, mem_tag);
     }
   }
 
@@ -147,12 +147,12 @@ class MemTracker : AllStatic {
   }
 
   static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
-    const NativeCallStack& stack, MEMFLAGS flag = mtNone) {
+    const NativeCallStack& stack, MemTag mem_tag = mtNone) {
     assert_post_init();
     if (!enabled()) return;
     if (addr != nullptr) {
       ThreadCritical tc;
-      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, flag);
+      VirtualMemoryTracker::add_reserved_region((address)addr, size, stack, mem_tag);
       VirtualMemoryTracker::add_committed_region((address)addr, size, stack);
     }
   }
@@ -183,12 +183,12 @@ class MemTracker : AllStatic {
   }
 
   static inline void allocate_memory_in(MemoryFileTracker::MemoryFile* file, size_t offset, size_t size,
-                                        const NativeCallStack& stack, MEMFLAGS flag) {
+                                        const NativeCallStack& stack, MemTag mem_tag) {
     assert_post_init();
     if (!enabled()) return;
     assert(file != nullptr, "must be");
     MemoryFileTracker::Instance::Locker lock;
-    MemoryFileTracker::Instance::allocate_memory(file, offset, size, stack, flag);
+    MemoryFileTracker::Instance::allocate_memory(file, offset, size, stack, mem_tag);
   }
 
   static inline void free_memory_in(MemoryFileTracker::MemoryFile* file,
@@ -206,21 +206,21 @@ class MemTracker : AllStatic {
   //
   // The two new memory regions will be both registered under stack and
   // memory flags of the original region.
-  static inline void record_virtual_memory_split_reserved(void* addr, size_t size, size_t split, MEMFLAGS flag, MEMFLAGS split_flag) {
+  static inline void record_virtual_memory_split_reserved(void* addr, size_t size, size_t split, MemTag mem_tag, MemTag split_tag) {
     assert_post_init();
     if (!enabled()) return;
     if (addr != nullptr) {
       ThreadCritical tc;
-      VirtualMemoryTracker::split_reserved_region((address)addr, size, split, flag, split_flag);
+      VirtualMemoryTracker::split_reserved_region((address)addr, size, split, mem_tag, split_tag);
     }
   }
 
-  static inline void record_virtual_memory_type(void* addr, MEMFLAGS flag) {
+  static inline void record_virtual_memory_tag(void* addr, MemTag mem_tag) {
     assert_post_init();
     if (!enabled()) return;
     if (addr != nullptr) {
       ThreadCritical tc;
-      VirtualMemoryTracker::set_reserved_region_type((address)addr, flag);
+      VirtualMemoryTracker::set_reserved_region_type((address)addr, mem_tag);
     }
   }
 
@@ -262,8 +262,8 @@ class MemTracker : AllStatic {
   static void tuning_statistics(outputStream* out);
 
   // MallocLimit: Given an allocation size s, check if mallocing this much
-  // under category f would hit either the global limit or the limit for category f.
-  static inline bool check_exceeds_limit(size_t s, MEMFLAGS f);
+  // under mem_tag would hit either the global limit or the limit for mem_tag.
+  static inline bool check_exceeds_limit(size_t s, MemTag mem_tag);
 
   // Given an unknown pointer, check if it points into a known region; print region if found
   // and return true; false if not found.
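The check_exceeds_limit() overloads renamed above all funnel into the same comparison: take the running malloc total recorded for the tag, add the requested size, and test it against the configured cap, exactly as the so_far/l->sz check at the top of this section does. A rough stand-alone sketch of that logic under assumed names (not the JDK implementation):

#include <cstddef>
#include <cstdint>

enum class MemTag : uint8_t { mtJavaHeap, mtClass, mtNMT, mtNone, mt_number_of_tags };
constexpr int mt_number_of_tags = static_cast<int>(MemTag::mt_number_of_tags);

static size_t g_malloced[mt_number_of_tags]; // running malloc totals, per tag
static size_t g_limit[mt_number_of_tags];    // per-tag caps; 0 means no limit set

// Returns true if allocating s more bytes under mem_tag would go over its cap.
inline bool check_exceeds_limit(size_t s, MemTag mem_tag) {
  const int i = static_cast<int>(mem_tag);
  return g_limit[i] > 0 && (g_malloced[i] + s) > g_limit[i];
}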
diff --git a/src/hotspot/share/nmt/memTracker.inline.hpp b/src/hotspot/share/nmt/memTracker.inline.hpp
index 500f2a75d8c..a850c6b07fd 100644
--- a/src/hotspot/share/nmt/memTracker.inline.hpp
+++ b/src/hotspot/share/nmt/memTracker.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2023 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -30,11 +30,11 @@
 
 #include "nmt/mallocTracker.inline.hpp"
 
-inline bool MemTracker::check_exceeds_limit(size_t s, MEMFLAGS f) {
+inline bool MemTracker::check_exceeds_limit(size_t s, MemTag mem_tag) {
   if (!enabled()) {
     return false;
   }
-  return MallocTracker::check_exceeds_limit(s, f);
+  return MallocTracker::check_exceeds_limit(s, mem_tag);
 }
 
 #endif // SHARE_NMT_MEMTRACKER_INLINE_HPP
diff --git a/src/hotspot/share/nmt/memoryFileTracker.cpp b/src/hotspot/share/nmt/memoryFileTracker.cpp
index 25f2667e5c3..ede483ed337 100644
--- a/src/hotspot/share/nmt/memoryFileTracker.cpp
+++ b/src/hotspot/share/nmt/memoryFileTracker.cpp
@@ -42,23 +42,23 @@ MemoryFileTracker::MemoryFileTracker(bool is_detailed_mode)
 
 void MemoryFileTracker::allocate_memory(MemoryFile* file, size_t offset,
                                         size_t size, const NativeCallStack& stack,
-                                        MEMFLAGS flag) {
+                                        MemTag mem_tag) {
   NativeCallStackStorage::StackIndex sidx = _stack_storage.push(stack);
-  VMATree::RegionData regiondata(sidx, flag);
+  VMATree::RegionData regiondata(sidx, mem_tag);
   VMATree::SummaryDiff diff = file->_tree.commit_mapping(offset, size, regiondata);
-  for (int i = 0; i < mt_number_of_types; i++) {
-    VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_flag(i));
-    summary->reserve_memory(diff.flag[i].commit);
-    summary->commit_memory(diff.flag[i].commit);
+  for (int i = 0; i < mt_number_of_tags; i++) {
+    VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_tag(i));
+    summary->reserve_memory(diff.tag[i].commit);
+    summary->commit_memory(diff.tag[i].commit);
   }
 }
 
 void MemoryFileTracker::free_memory(MemoryFile* file, size_t offset, size_t size) {
   VMATree::SummaryDiff diff = file->_tree.release_mapping(offset, size);
-  for (int i = 0; i < mt_number_of_types; i++) {
-    VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_flag(i));
-    summary->reserve_memory(diff.flag[i].commit);
-    summary->commit_memory(diff.flag[i].commit);
+  for (int i = 0; i < mt_number_of_tags; i++) {
+    VirtualMemory* summary = file->_summary.by_type(NMTUtil::index_to_tag(i));
+    summary->reserve_memory(diff.tag[i].commit);
+    summary->commit_memory(diff.tag[i].commit);
   }
 }
 
@@ -79,7 +79,7 @@ void MemoryFileTracker::print_report_on(const MemoryFile* file, outputStream* st
     return;
   }
 #ifdef ASSERT
-  if (broken_start != nullptr && prev->val().out.type() != current->val().in.type()) {
+  if (broken_start != nullptr && prev->val().out.mem_tag() != current->val().in.mem_tag()) {
     broken_start = prev;
     broken_end = current;
   }
@@ -91,7 +91,7 @@ void MemoryFileTracker::print_report_on(const MemoryFile* file, outputStream* st
                  start_addr, end_addr,
                  NMTUtil::amount_in_scale(end_addr - start_addr, scale),
                  NMTUtil::scale_name(scale),
-                 NMTUtil::flag_to_name(prev->val().out.flag()));
+                 NMTUtil::tag_to_name(prev->val().out.mem_tag()));
    {
      streamIndentor si(stream, 4);
      _stack_storage.get(prev->val().out.stack()).print_on(stream);
@@ -138,8 +138,8 @@ bool MemoryFileTracker::Instance::initialize(NMT_TrackingLevel tracking_level) {
 
 void MemoryFileTracker::Instance::allocate_memory(MemoryFile* file, size_t offset,
                                                   size_t size, const NativeCallStack& stack,
-                                                  MEMFLAGS flag) {
-  _tracker->allocate_memory(file, offset, size, stack, flag);
+                                                  MemTag mem_tag) {
+  _tracker->allocate_memory(file, offset, size, stack, mem_tag);
 }
 
 void MemoryFileTracker::Instance::free_memory(MemoryFile* file, size_t offset, size_t size) {
@@ -181,9 +181,9 @@ const GrowableArrayCHeap<MemoryFileTracker::MemoryFile*, mtNMT>& MemoryFileTrack
 
 void MemoryFileTracker::summary_snapshot(VirtualMemorySnapshot* snapshot) const {
   for (int d = 0; d < _files.length(); d++) {
     const MemoryFile* file = _files.at(d);
-    for (int i = 0; i < mt_number_of_types; i++) {
-      VirtualMemory* snap = snapshot->by_type(NMTUtil::index_to_flag(i));
-      const VirtualMemory* current = file->_summary.by_type(NMTUtil::index_to_flag(i));
+    for (int i = 0; i < mt_number_of_tags; i++) {
+      VirtualMemory* snap = snapshot->by_type(NMTUtil::index_to_tag(i));
+      const VirtualMemory* current = file->_summary.by_type(NMTUtil::index_to_tag(i));
       // Only account the committed memory.
       snap->commit_memory(current->committed());
     }
diff --git a/src/hotspot/share/nmt/memoryFileTracker.hpp b/src/hotspot/share/nmt/memoryFileTracker.hpp
index 432b6f9d99e..42902701a16 100644
--- a/src/hotspot/share/nmt/memoryFileTracker.hpp
+++ b/src/hotspot/share/nmt/memoryFileTracker.hpp
@@ -66,7 +66,7 @@ public:
   MemoryFileTracker(bool is_detailed_mode);
 
   void allocate_memory(MemoryFile* file, size_t offset, size_t size, const NativeCallStack& stack,
-                       MEMFLAGS flag);
+                       MemTag mem_tag);
   void free_memory(MemoryFile* file, size_t offset, size_t size);
 
   MemoryFile* make_file(const char* descriptive_name);
@@ -96,7 +96,7 @@ public:
     static void free_file(MemoryFile* device);
 
     static void allocate_memory(MemoryFile* device, size_t offset, size_t size,
-                                const NativeCallStack& stack, MEMFLAGS flag);
+                                const NativeCallStack& stack, MemTag mem_tag);
     static void free_memory(MemoryFile* device, size_t offset, size_t size);
 
     static void summary_snapshot(VirtualMemorySnapshot* snapshot);
diff --git a/src/hotspot/share/nmt/nativeCallStackPrinter.hpp b/src/hotspot/share/nmt/nativeCallStackPrinter.hpp
index deebb338626..78fd541fc98 100644
--- a/src/hotspot/share/nmt/nativeCallStackPrinter.hpp
+++ b/src/hotspot/share/nmt/nativeCallStackPrinter.hpp
@@ -27,7 +27,7 @@
 #define SHARE_NMT_NATIVECALLSTACKPRINTER_HPP
 
 #include "memory/arena.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/resourceHash.hpp"
 
diff --git a/src/hotspot/share/nmt/nmtCommon.cpp b/src/hotspot/share/nmt/nmtCommon.cpp
index dadb830f291..24a4cb1105a 100644
--- a/src/hotspot/share/nmt/nmtCommon.cpp
+++ b/src/hotspot/share/nmt/nmtCommon.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -29,11 +29,11 @@
 STATIC_ASSERT(NMT_off > NMT_unknown);
 STATIC_ASSERT(NMT_summary > NMT_off);
 STATIC_ASSERT(NMT_detail > NMT_summary);
 
-#define MEMORY_TYPE_DECLARE_NAME(type, human_readable) \
+#define MEMORY_TAG_DECLARE_NAME(type, human_readable) \
   { #type, human_readable },
 
 NMTUtil::S NMTUtil::_strings[] = {
-  MEMORY_TYPES_DO(MEMORY_TYPE_DECLARE_NAME)
+  MEMORY_TAG_DO(MEMORY_TAG_DECLARE_NAME)
 };
 
 const char* NMTUtil::scale_name(size_t scale) {
@@ -87,14 +87,14 @@ NMT_TrackingLevel NMTUtil::parse_tracking_level(const char* s) {
   return NMT_unknown;
 }
 
-MEMFLAGS NMTUtil::string_to_flag(const char* s) {
-  for (int i = 0; i < mt_number_of_types; i ++) {
+MemTag NMTUtil::string_to_mem_tag(const char* s) {
+  for (int i = 0; i < mt_number_of_tags; i ++) {
     assert(::strlen(_strings[i].enum_s) > 2, "Sanity"); // should always start with "mt"
     if (::strcasecmp(_strings[i].human_readable, s) == 0 ||
        ::strcasecmp(_strings[i].enum_s, s) == 0 ||
       ::strcasecmp(_strings[i].enum_s + 2, s) == 0) // "mtXXX" -> match also "XXX" or "xxx"
    {
-      return (MEMFLAGS)i;
+      return (MemTag)i;
     }
   }
   return mtNone;
diff --git a/src/hotspot/share/nmt/nmtCommon.hpp b/src/hotspot/share/nmt/nmtCommon.hpp
index 8ca0965b3d3..3f72960f21f 100644
--- a/src/hotspot/share/nmt/nmtCommon.hpp
+++ b/src/hotspot/share/nmt/nmtCommon.hpp
@@ -28,7 +28,7 @@
 #define SHARE_NMT_NMTCOMMON_HPP
 
 #include "memory/allStatic.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "utilities/align.hpp"
 #include "utilities/globalDefinitions.hpp"
 
@@ -75,37 +75,37 @@ const int NMT_TrackingStackDepth = 4;
 // A few common utilities for native memory tracking
 class NMTUtil : AllStatic {
  public:
-  // Check if index is a valid MEMFLAGS enum value (including mtNone)
-  static inline bool flag_index_is_valid(int index) {
-    return index >= 0 && index < mt_number_of_types;
+  // Check if index is a valid MemTag enum value (including mtNone)
+  static inline bool tag_index_is_valid(int index) {
+    return index >= 0 && index < mt_number_of_tags;
   }
 
-  // Check if flag value is a valid MEMFLAGS enum value (including mtNone)
-  static inline bool flag_is_valid(MEMFLAGS flag) {
-    const int index = static_cast<int>(flag);
-    return flag_index_is_valid(index);
+  // Check if tag value is a valid MemTag enum value (including mtNone)
+  static inline bool tag_is_valid(MemTag mem_tag) {
+    const int index = static_cast<int>(mem_tag);
+    return tag_index_is_valid(index);
   }
 
-  // Map memory type to index
-  static inline int flag_to_index(MEMFLAGS flag) {
-    assert(flag_is_valid(flag), "Invalid flag (%u)", (unsigned)flag);
-    return static_cast<int>(flag);
+  // Map memory tag to index
+  static inline int tag_to_index(MemTag mem_tag) {
+    assert(tag_is_valid(mem_tag), "Invalid tag (%u)", (unsigned)mem_tag);
+    return static_cast<int>(mem_tag);
   }
 
-  // Map memory type to human readable name
-  static const char* flag_to_name(MEMFLAGS flag) {
-    return _strings[flag_to_index(flag)].human_readable;
+  // Map memory tag to human readable name
+  static const char* tag_to_name(MemTag mem_tag) {
+    return _strings[tag_to_index(mem_tag)].human_readable;
   }
 
-  // Map memory type to literalized enum name (e.g. "mtTest")
-  static const char* flag_to_enum_name(MEMFLAGS flag) {
-    return _strings[flag_to_index(flag)].enum_s;
+  // Map memory tag to literalized enum name (e.g. "mtTest")
+  static const char* tag_to_enum_name(MemTag mem_tag) {
+    return _strings[tag_to_index(mem_tag)].enum_s;
   }
 
-  // Map an index to memory type
-  static MEMFLAGS index_to_flag(int index) {
-    assert(flag_index_is_valid(index), "Invalid flag index (%d)", index);
-    return static_cast<MEMFLAGS>(index);
+  // Map an index to memory tag
+  static MemTag index_to_tag(int index) {
+    assert(tag_index_is_valid(index), "Invalid tag index (%d)", index);
+    return static_cast<MemTag>(index);
   }
 
   // Memory size scale
@@ -121,10 +121,10 @@ class NMTUtil : AllStatic {
   // string is not a valid level.
   static NMT_TrackingLevel parse_tracking_level(const char* s);
 
-  // Given a string, return associated flag. mtNone if name is invalid.
+  // Given a string, return associated mem_tag. mtNone if name is invalid.
   // String can be either the human readable name or the
   // stringified enum (with or without leading "mt". In all cases, case is ignored.
-  static MEMFLAGS string_to_flag(const char* name);
+  static MemTag string_to_mem_tag(const char* name);
 
   // Returns textual representation of a tracking level.
   static const char* tracking_level_to_string(NMT_TrackingLevel level);
@@ -134,7 +134,7 @@ class NMTUtil : AllStatic {
     const char* enum_s; // e.g. "mtNMT"
     const char* human_readable; // e.g. "Native Memory Tracking"
   };
-  static S _strings[mt_number_of_types];
+  static S _strings[mt_number_of_tags];
 };
 
diff --git a/src/hotspot/share/nmt/nmtPreInit.cpp b/src/hotspot/share/nmt/nmtPreInit.cpp
index a8ff18f3b62..0aa74566f42 100644
--- a/src/hotspot/share/nmt/nmtPreInit.cpp
+++ b/src/hotspot/share/nmt/nmtPreInit.cpp
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2022, 2023 SAP SE. All rights reserved.
- * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -198,8 +198,8 @@ void NMTPreInit::create_table() {
 }
 
 // Allocate with os::malloc (hidden to prevent having to include os.hpp)
-void* NMTPreInit::do_os_malloc(size_t size, MEMFLAGS memflags) {
-  return os::malloc(size, memflags);
+void* NMTPreInit::do_os_malloc(size_t size, MemTag mem_tag) {
+  return os::malloc(size, mem_tag);
 }
 
 // Switches from NMT pre-init state to NMT post-init state;
diff --git a/src/hotspot/share/nmt/nmtPreInit.hpp b/src/hotspot/share/nmt/nmtPreInit.hpp
index 38a34616b3f..1524c2bd7dc 100644
--- a/src/hotspot/share/nmt/nmtPreInit.hpp
+++ b/src/hotspot/share/nmt/nmtPreInit.hpp
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2022, 2023 SAP SE. All rights reserved.
- * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -255,7 +255,7 @@ class NMTPreInit : public AllStatic {
   }
 
   // Just a wrapper for os::malloc to avoid including os.hpp here.
-  static void* do_os_malloc(size_t size, MEMFLAGS memflags);
+  static void* do_os_malloc(size_t size, MemTag mem_tag);
 
 public:
 
@@ -283,7 +283,7 @@ public:
   // Called from os::realloc.
   // Returns true if reallocation was handled here; in that case,
   // *rc contains the return address.
-  static bool handle_realloc(void** rc, void* old_p, size_t new_size, MEMFLAGS memflags) {
+  static bool handle_realloc(void** rc, void* old_p, size_t new_size, MemTag mem_tag) {
     if (old_p == nullptr) { // realloc(null, n)
       return handle_malloc(rc, new_size);
     }
@@ -322,7 +322,7 @@ public:
     // and confusing us.
     const NMTPreInitAllocation* a = find_in_map(old_p);
     if (a != nullptr) { // this was originally a pre-init allocation
-      void* p_new = do_os_malloc(new_size, memflags);
+      void* p_new = do_os_malloc(new_size, mem_tag);
       ::memcpy(p_new, a->payload, MIN2(a->size, new_size));
       (*rc) = p_new;
       return true;
diff --git a/src/hotspot/share/nmt/nmtUsage.cpp b/src/hotspot/share/nmt/nmtUsage.cpp
index 62c41225441..a854f001593 100644
--- a/src/hotspot/share/nmt/nmtUsage.cpp
+++ b/src/hotspot/share/nmt/nmtUsage.cpp
@@ -57,9 +57,9 @@ void NMTUsage::update_malloc_usage() {
   const MallocMemorySnapshot* ms = MallocMemorySummary::as_snapshot();
 
   size_t total_arena_size = 0;
-  for (int i = 0; i < mt_number_of_types; i++) {
-    MEMFLAGS flag = NMTUtil::index_to_flag(i);
-    const MallocMemory* mm = ms->by_type(flag);
+  for (int i = 0; i < mt_number_of_tags; i++) {
+    MemTag mem_tag = NMTUtil::index_to_tag(i);
+    const MallocMemory* mm = ms->by_type(mem_tag);
     _malloc_by_type[i] = mm->malloc_size() + mm->arena_size();
     total_arena_size += mm->arena_size();
   }
@@ -68,11 +68,11 @@ void NMTUsage::update_malloc_usage() {
   _malloc_total = ms->total();
 
   // Adjustment due to mtChunk double counting.
-  _malloc_by_type[NMTUtil::flag_to_index(mtChunk)] -= total_arena_size;
+  _malloc_by_type[NMTUtil::tag_to_index(mtChunk)] -= total_arena_size;
   _malloc_total -= total_arena_size;
 
   // Adjust mtNMT to include malloc overhead.
-  _malloc_by_type[NMTUtil::flag_to_index(mtNMT)] += ms->malloc_overhead();
+  _malloc_by_type[NMTUtil::tag_to_index(mtNMT)] += ms->malloc_overhead();
 }
 
 void NMTUsage::update_vm_usage() {
@@ -81,9 +81,9 @@ void NMTUsage::update_vm_usage() {
   // Reset total to allow recalculation.
   _vm_total.committed = 0;
   _vm_total.reserved = 0;
-  for (int i = 0; i < mt_number_of_types; i++) {
-    MEMFLAGS flag = NMTUtil::index_to_flag(i);
-    const VirtualMemory* vm = vms->by_type(flag);
+  for (int i = 0; i < mt_number_of_tags; i++) {
+    MemTag mem_tag = NMTUtil::index_to_tag(i);
+    const VirtualMemory* vm = vms->by_type(mem_tag);
 
     _vm_by_type[i].reserved = vm->reserved();
     _vm_by_type[i].committed = vm->committed();
@@ -116,12 +116,12 @@ size_t NMTUsage::total_committed() const {
   return _malloc_total + _vm_total.committed;
 }
 
-size_t NMTUsage::reserved(MEMFLAGS flag) const {
-  int index = NMTUtil::flag_to_index(flag);
+size_t NMTUsage::reserved(MemTag mem_tag) const {
+  int index = NMTUtil::tag_to_index(mem_tag);
   return _malloc_by_type[index] + _vm_by_type[index].reserved;
 }
 
-size_t NMTUsage::committed(MEMFLAGS flag) const {
-  int index = NMTUtil::flag_to_index(flag);
+size_t NMTUsage::committed(MemTag mem_tag) const {
+  int index = NMTUtil::tag_to_index(mem_tag);
   return _malloc_by_type[index] + _vm_by_type[index].committed;
 }
diff --git a/src/hotspot/share/nmt/nmtUsage.hpp b/src/hotspot/share/nmt/nmtUsage.hpp
index cfff59db9af..390d207250c 100644
--- a/src/hotspot/share/nmt/nmtUsage.hpp
+++ b/src/hotspot/share/nmt/nmtUsage.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -41,9 +41,9 @@ struct NMTUsageOptions {
 
 class NMTUsage : public CHeapObj<mtNMT> {
  private:
-  size_t _malloc_by_type[mt_number_of_types];
+  size_t _malloc_by_type[mt_number_of_tags];
   size_t _malloc_total;
-  NMTUsagePair _vm_by_type[mt_number_of_types];
+  NMTUsagePair _vm_by_type[mt_number_of_tags];
   NMTUsagePair _vm_total;
 
   NMTUsageOptions _usage_options;
@@ -61,8 +61,8 @@ public:
   size_t total_reserved() const;
   size_t total_committed() const;
 
-  size_t reserved(MEMFLAGS flag) const;
-  size_t committed(MEMFLAGS flag) const;
+  size_t reserved(MemTag mem_tag) const;
+  size_t committed(MemTag mem_tag) const;
 };
 
 #endif // SHARE_NMT_NMTUSAGE_HPP
diff --git a/src/hotspot/share/nmt/virtualMemoryTracker.cpp b/src/hotspot/share/nmt/virtualMemoryTracker.cpp
index d25f3689a42..d298381f103 100644
--- a/src/hotspot/share/nmt/virtualMemoryTracker.cpp
+++ b/src/hotspot/share/nmt/virtualMemoryTracker.cpp
@@ -142,7 +142,7 @@ bool ReservedMemoryRegion::add_committed_region(address addr, size_t size, const
 
   // At this point the previous overlapping regions have been
   // cleared, and the full region is guaranteed to be inserted.
-  VirtualMemorySummary::record_committed_memory(size, flag());
+  VirtualMemorySummary::record_committed_memory(size, mem_tag());
 
   // Try to merge with prev and possibly next.
   if (try_merge_with(prev, addr, size, stack)) {
@@ -212,14 +212,14 @@ bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
       crgn = head->data();
 
       if (crgn->same_region(addr, sz)) {
-        VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
+        VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag());
         _committed_regions.remove_after(prev);
         return true;
       }
 
       // del_rgn contains crgn
       if (del_rgn.contain_region(crgn->base(), crgn->size())) {
-        VirtualMemorySummary::record_uncommitted_memory(crgn->size(), flag());
+        VirtualMemorySummary::record_uncommitted_memory(crgn->size(), mem_tag());
         head = head->next();
         _committed_regions.remove_after(prev);
         continue;  // don't update head or prev
@@ -230,20 +230,20 @@ bool ReservedMemoryRegion::remove_uncommitted_region(address addr, size_t sz) {
 
        // (1) Found addr+size in current crgn as well. (del_rgn is contained in crgn)
        if (crgn->contain_address(end - 1)) {
-          VirtualMemorySummary::record_uncommitted_memory(sz, flag());
+          VirtualMemorySummary::record_uncommitted_memory(sz, mem_tag());
          return remove_uncommitted_region(head, addr, sz); // done!
        } else {
          // (2) Did not find del_rgn's end in crgn.
          size_t size = crgn->end() - del_rgn.base();
          crgn->exclude_region(addr, size);
-          VirtualMemorySummary::record_uncommitted_memory(size, flag());
+          VirtualMemorySummary::record_uncommitted_memory(size, mem_tag());
        }
      } else if (crgn->contain_address(end - 1)) {
        // Found del_rgn's end, but not its base addr.
        size_t size = del_rgn.end() - crgn->base();
        crgn->exclude_region(crgn->base(), size);
-        VirtualMemorySummary::record_uncommitted_memory(size, flag());
+        VirtualMemorySummary::record_uncommitted_memory(size, mem_tag());
        return true;  // should be done if the list is sorted properly!
      }
@@ -292,19 +292,19 @@ size_t ReservedMemoryRegion::committed_size() const {
   return committed;
 }
 
-void ReservedMemoryRegion::set_flag(MEMFLAGS f) {
-  assert((flag() == mtNone || flag() == f),
-         "Overwrite memory type for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
-         p2i(base()), p2i(end()), (unsigned)flag(), (unsigned)f);
-  if (flag() != f) {
-    VirtualMemorySummary::move_reserved_memory(flag(), f, size());
-    VirtualMemorySummary::move_committed_memory(flag(), f, committed_size());
-    _flag = f;
+void ReservedMemoryRegion::set_mem_tag(MemTag new_mem_tag) {
+  assert((mem_tag() == mtNone || mem_tag() == new_mem_tag),
+         "Overwrite memory tag for region [" INTPTR_FORMAT "-" INTPTR_FORMAT "), %u->%u.",
+         p2i(base()), p2i(end()), (unsigned)mem_tag(), (unsigned)new_mem_tag);
+  if (mem_tag() != new_mem_tag) {
+    VirtualMemorySummary::move_reserved_memory(mem_tag(), new_mem_tag, size());
+    VirtualMemorySummary::move_committed_memory(mem_tag(), new_mem_tag, committed_size());
+    _mem_tag = new_mem_tag;
   }
 }
 
 address ReservedMemoryRegion::thread_stack_uncommitted_bottom() const {
-  assert(flag() == mtThreadStack, "Only for thread stack");
+  assert(mem_tag() == mtThreadStack, "Only for thread stack");
   LinkedListNode<CommittedMemoryRegion>* head = _committed_regions.head();
   address bottom = base();
   address top = base() + size();
@@ -334,26 +334,26 @@ bool VirtualMemoryTracker::initialize(NMT_TrackingLevel level) {
 }
 
 bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
-    const NativeCallStack& stack, MEMFLAGS flag) {
+    const NativeCallStack& stack, MemTag mem_tag) {
   assert(base_addr != nullptr, "Invalid address");
   assert(size > 0, "Invalid size");
   assert(_reserved_regions != nullptr, "Sanity check");
 
-  ReservedMemoryRegion rgn(base_addr, size, stack, flag);
+  ReservedMemoryRegion rgn(base_addr, size, stack, mem_tag);
   ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
 
   log_debug(nmt)("Add reserved region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
-                 rgn.flag_name(), p2i(rgn.base()), rgn.size());
+                 rgn.mem_tag_name(), p2i(rgn.base()), rgn.size());
   if (reserved_rgn == nullptr) {
-    VirtualMemorySummary::record_reserved_memory(size, flag);
+    VirtualMemorySummary::record_reserved_memory(size, mem_tag);
     return _reserved_regions->add(rgn) != nullptr;
   } else {
    // Deal with recursive reservation
   // os::reserve_memory() -> pd_reserve_memory() -> os::reserve_memory()
   // See JDK-8198226.
    if (reserved_rgn->same_region(base_addr, size) &&
-        (reserved_rgn->flag() == flag || reserved_rgn->flag() == mtNone)) {
+        (reserved_rgn->mem_tag() == mem_tag || reserved_rgn->mem_tag() == mtNone)) {
      reserved_rgn->set_call_stack(stack);
-      reserved_rgn->set_flag(flag);
+      reserved_rgn->set_mem_tag(mem_tag);
      return true;
    } else {
      assert(reserved_rgn->overlap_region(base_addr, size), "Must be");
@@ -362,16 +362,16 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
      // It can happen when the regions are thread stacks, as JNI
      // thread does not detach from VM before exits, and leads to
      // leak JavaThread object
-      if (reserved_rgn->flag() == mtThreadStack) {
+      if (reserved_rgn->mem_tag() == mtThreadStack) {
        guarantee(!CheckJNICalls, "Attached JNI thread exited without being detached");
        // Overwrite with new region
 
        // Release old region
-        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->flag());
-        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->flag());
+        VirtualMemorySummary::record_uncommitted_memory(reserved_rgn->committed_size(), reserved_rgn->mem_tag());
+        VirtualMemorySummary::record_released_memory(reserved_rgn->size(), reserved_rgn->mem_tag());
 
        // Add new region
-        VirtualMemorySummary::record_reserved_memory(rgn.size(), flag);
+        VirtualMemorySummary::record_reserved_memory(rgn.size(), mem_tag);
 
        *reserved_rgn = rgn;
        return true;
@@ -380,27 +380,27 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
      // CDS mapping region.
      // CDS reserves the whole region for mapping CDS archive, then maps each section into the region.
      // NMT reports CDS as a whole.
-      if (reserved_rgn->flag() == mtClassShared) {
+      if (reserved_rgn->mem_tag() == mtClassShared) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
-                       reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
+                       reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved CDS region should contain this mapping region");
        return true;
      }
 
      // Mapped CDS string region.
      // The string region(s) is part of the java heap.
-      if (reserved_rgn->flag() == mtJavaHeap) {
+      if (reserved_rgn->mem_tag() == mtJavaHeap) {
        log_debug(nmt)("CDS reserved region \'%s\' as a whole (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
-                       reserved_rgn->flag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
+                       reserved_rgn->mem_tag_name(), p2i(reserved_rgn->base()), reserved_rgn->size());
        assert(reserved_rgn->contain_region(base_addr, size), "Reserved heap region should contain this mapping region");
        return true;
      }
 
      // Print some more details. Don't use UL here to avoid circularities.
-      tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.\n"
-                    "       new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), flag %u.",
-                    p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->flag(),
-                    p2i(base_addr), p2i(base_addr + size), (unsigned)flag);
+      tty->print_cr("Error: existing region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %u.\n"
+                    "       new region: [" INTPTR_FORMAT "-" INTPTR_FORMAT "), memory tag %u.",
+                    p2i(reserved_rgn->base()), p2i(reserved_rgn->end()), (unsigned)reserved_rgn->mem_tag(),
+                    p2i(base_addr), p2i(base_addr + size), (unsigned)mem_tag);
      if (MemTracker::tracking_level() == NMT_detail) {
        tty->print_cr("Existing region allocated from:");
        reserved_rgn->call_stack()->print_on(tty);
@@ -413,7 +413,7 @@ bool VirtualMemoryTracker::add_reserved_region(address base_addr, size_t size,
   }
 }
 
-void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag) {
+void VirtualMemoryTracker::set_reserved_region_type(address addr, MemTag mem_tag) {
   assert(addr != nullptr, "Invalid address");
   assert(_reserved_regions != nullptr, "Sanity check");
 
@@ -421,10 +421,10 @@ void VirtualMemoryTracker::set_reserved_region_type(address addr, MEMFLAGS flag)
   ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
   if (reserved_rgn != nullptr) {
     assert(reserved_rgn->contain_address(addr), "Containment");
-    if (reserved_rgn->flag() != flag) {
-      assert(reserved_rgn->flag() == mtNone, "Overwrite memory type (should be mtNone, is: \"%s\")",
-             NMTUtil::flag_to_name(reserved_rgn->flag()));
-      reserved_rgn->set_flag(flag);
+    if (reserved_rgn->mem_tag() != mem_tag) {
+      assert(reserved_rgn->mem_tag() == mtNone, "Overwrite memory tag (should be mtNone, is: \"%s\")",
+             NMTUtil::tag_to_name(reserved_rgn->mem_tag()));
+      reserved_rgn->set_mem_tag(mem_tag);
     }
   }
 }
@@ -440,13 +440,13 @@ bool VirtualMemoryTracker::add_committed_region(address addr, size_t size,
 
   if (reserved_rgn == nullptr) {
     log_debug(nmt)("Add committed region \'%s\', No reserved region found for (" INTPTR_FORMAT ", " SIZE_FORMAT ")",
-                   rgn.flag_name(), p2i(rgn.base()), rgn.size());
+                   rgn.mem_tag_name(), p2i(rgn.base()), rgn.size());
   }
   assert(reserved_rgn != nullptr, "Add committed region, No reserved region found");
   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
   bool result = reserved_rgn->add_committed_region(addr, size, stack);
   log_debug(nmt)("Add committed region \'%s\'(" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
-                 reserved_rgn->flag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed"));
+                 reserved_rgn->mem_tag_name(), p2i(rgn.base()), rgn.size(), (result ? "Succeeded" : "Failed"));
   return result;
 }
 
@@ -459,10 +459,10 @@ bool VirtualMemoryTracker::remove_uncommitted_region(address addr, size_t size)
   ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn);
   assert(reserved_rgn != nullptr, "No reserved region (" INTPTR_FORMAT ", " SIZE_FORMAT ")", p2i(addr), size);
   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
-  const char* flag_name = reserved_rgn->flag_name();  // after remove, info is not complete
+  const char* type_name = reserved_rgn->mem_tag_name();  // after remove, info is not complete
   bool result = reserved_rgn->remove_uncommitted_region(addr, size);
   log_debug(nmt)("Removed uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
-                 flag_name, p2i(addr), size, (result ? " Succeeded" : "Failed"));
+                 type_name, p2i(addr), size, (result ? " Succeeded" : "Failed"));
   return result;
 }
 
@@ -474,15 +474,15 @@ bool VirtualMemoryTracker::remove_released_region(ReservedMemoryRegion* rgn) {
   ReservedMemoryRegion backup(*rgn);
   bool result = rgn->remove_uncommitted_region(rgn->base(), rgn->size());
   log_debug(nmt)("Remove uncommitted region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") %s",
-                 backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
+                 backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
   if (!result) {
     return false;
   }
 
-  VirtualMemorySummary::record_released_memory(rgn->size(), rgn->flag());
+  VirtualMemorySummary::record_released_memory(rgn->size(), rgn->mem_tag());
   result = _reserved_regions->remove(*rgn);
   log_debug(nmt)("Removed region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") from _reserved_regions %s" ,
-                 backup.flag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
+                 backup.mem_tag_name(), p2i(backup.base()), backup.size(), (result ? "Succeeded" : "Failed"));
   return result;
 }
 
@@ -508,7 +508,7 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
     return false;
   }
 
-  if (reserved_rgn->flag() == mtClassShared) {
+  if (reserved_rgn->mem_tag() == mtClassShared) {
     if (reserved_rgn->contain_region(addr, size)) {
       // This is an unmapped CDS region, which is part of the reserved shared
      // memory region.
@@ -523,14 +523,14 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
                                      (size - reserved_rgn->size()));
      ReservedMemoryRegion* cls_rgn = _reserved_regions->find(class_rgn);
      assert(cls_rgn != nullptr, "Class space region not recorded?");
-      assert(cls_rgn->flag() == mtClass, "Must be class type");
+      assert(cls_rgn->mem_tag() == mtClass, "Must be class mem tag");
      remove_released_region(reserved_rgn);
      remove_released_region(cls_rgn);
      return true;
    }
   }
 
-  VirtualMemorySummary::record_released_memory(size, reserved_rgn->flag());
+  VirtualMemorySummary::record_released_memory(size, reserved_rgn->mem_tag());
 
   assert(reserved_rgn->contain_region(addr, size), "Not completely contained");
   if (reserved_rgn->base() == addr ||
@@ -541,7 +541,7 @@ bool VirtualMemoryTracker::remove_released_region(address addr, size_t size) {
     address top = reserved_rgn->end();
     address high_base = addr + size;
     ReservedMemoryRegion high_rgn(high_base, top - high_base,
-                                  *reserved_rgn->call_stack(), reserved_rgn->flag());
+                                  *reserved_rgn->call_stack(), reserved_rgn->mem_tag());
 
     // use original region for lower region
     reserved_rgn->exclude_region(addr, top - addr);
@@ -557,8 +557,8 @@
 
 // Given an existing memory mapping registered with NMT, split the mapping in
 // two. The newly created two mappings will be registered under the call
-// stack and the memory flags of the original section.
-bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MEMFLAGS flag, MEMFLAGS split_flag) {
+// stack and the memory tags of the original section.
+bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_tag) { ReservedMemoryRegion rgn(addr, size); ReservedMemoryRegion* reserved_rgn = _reserved_regions->find(rgn); @@ -567,15 +567,15 @@ bool VirtualMemoryTracker::split_reserved_region(address addr, size_t size, size assert(reserved_rgn->committed_size() == 0, "Splitting committed region?"); NativeCallStack original_stack = *reserved_rgn->call_stack(); - MEMFLAGS original_flags = reserved_rgn->flag(); + MemTag original_tag = reserved_rgn->mem_tag(); - const char* name = reserved_rgn->flag_name(); + const char* name = reserved_rgn->mem_tag_name(); remove_released_region(reserved_rgn); log_debug(nmt)("Split region \'%s\' (" INTPTR_FORMAT ", " SIZE_FORMAT ") with size " SIZE_FORMAT, name, p2i(rgn.base()), rgn.size(), split); // Now, create two new regions. - add_reserved_region(addr, split, original_stack, flag); - add_reserved_region(addr + split, size - split, original_stack, split_flag); + add_reserved_region(addr, split, original_stack, mem_tag); + add_reserved_region(addr + split, size - split, original_stack, split_tag); return true; } @@ -621,7 +621,7 @@ public: SnapshotThreadStackWalker() {} bool do_allocation_site(const ReservedMemoryRegion* rgn) { - if (rgn->flag() == mtThreadStack) { + if (rgn->mem_tag() == mtThreadStack) { address stack_bottom = rgn->thread_stack_uncommitted_bottom(); address committed_start; size_t committed_size; @@ -688,7 +688,7 @@ public: bool do_allocation_site(const ReservedMemoryRegion* rgn) { if (rgn->contain_address(_p)) { _st->print_cr(PTR_FORMAT " in mmap'd memory region [" PTR_FORMAT " - " PTR_FORMAT "], tag %s", - p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::flag_to_enum_name(rgn->flag())); + p2i(_p), p2i(rgn->base()), p2i(rgn->base() + rgn->size()), NMTUtil::tag_to_enum_name(rgn->mem_tag())); if (MemTracker::tracking_level() == NMT_detail) { _stackprinter.print_stack(rgn->call_stack()); _st->cr(); diff --git a/src/hotspot/share/nmt/virtualMemoryTracker.hpp b/src/hotspot/share/nmt/virtualMemoryTracker.hpp index e84245ce1f8..6e36d3a858a 100644 --- a/src/hotspot/share/nmt/virtualMemoryTracker.hpp +++ b/src/hotspot/share/nmt/virtualMemoryTracker.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -77,8 +77,8 @@ class VirtualMemory { class VirtualMemoryAllocationSite : public AllocationSite { VirtualMemory _c; public: - VirtualMemoryAllocationSite(const NativeCallStack& stack, MEMFLAGS flag) : - AllocationSite(stack, flag) { } + VirtualMemoryAllocationSite(const NativeCallStack& stack, MemTag mem_tag) : + AllocationSite(stack, mem_tag) { } inline void reserve_memory(size_t sz) { _c.reserve_memory(sz); } inline void commit_memory (size_t sz) { _c.commit_memory(sz); } @@ -95,22 +95,22 @@ class VirtualMemorySnapshot : public ResourceObj { friend class VirtualMemorySummary; private: - VirtualMemory _virtual_memory[mt_number_of_types]; + VirtualMemory _virtual_memory[mt_number_of_tags]; public: - inline VirtualMemory* by_type(MEMFLAGS flag) { - int index = NMTUtil::flag_to_index(flag); + inline VirtualMemory* by_type(MemTag mem_tag) { + int index = NMTUtil::tag_to_index(mem_tag); return &_virtual_memory[index]; } - inline const VirtualMemory* by_type(MEMFLAGS flag) const { - int index = NMTUtil::flag_to_index(flag); + inline const VirtualMemory* by_type(MemTag mem_tag) const { + int index = NMTUtil::tag_to_index(mem_tag); return &_virtual_memory[index]; } inline size_t total_reserved() const { size_t amount = 0; - for (int index = 0; index < mt_number_of_types; index ++) { + for (int index = 0; index < mt_number_of_tags; index ++) { amount += _virtual_memory[index].reserved(); } return amount; @@ -118,14 +118,14 @@ class VirtualMemorySnapshot : public ResourceObj { inline size_t total_committed() const { size_t amount = 0; - for (int index = 0; index < mt_number_of_types; index ++) { + for (int index = 0; index < mt_number_of_tags; index ++) { amount += _virtual_memory[index].committed(); } return amount; } void copy_to(VirtualMemorySnapshot* s) { - for (int index = 0; index < mt_number_of_types; index ++) { + for (int index = 0; index < mt_number_of_tags; index ++) { s->_virtual_memory[index] = _virtual_memory[index]; } } @@ -134,32 +134,32 @@ class VirtualMemorySnapshot : public ResourceObj { class VirtualMemorySummary : AllStatic { public: - static inline void record_reserved_memory(size_t size, MEMFLAGS flag) { - as_snapshot()->by_type(flag)->reserve_memory(size); + static inline void record_reserved_memory(size_t size, MemTag mem_tag) { + as_snapshot()->by_type(mem_tag)->reserve_memory(size); } - static inline void record_committed_memory(size_t size, MEMFLAGS flag) { - as_snapshot()->by_type(flag)->commit_memory(size); + static inline void record_committed_memory(size_t size, MemTag mem_tag) { + as_snapshot()->by_type(mem_tag)->commit_memory(size); } - static inline void record_uncommitted_memory(size_t size, MEMFLAGS flag) { - as_snapshot()->by_type(flag)->uncommit_memory(size); + static inline void record_uncommitted_memory(size_t size, MemTag mem_tag) { + as_snapshot()->by_type(mem_tag)->uncommit_memory(size); } - static inline void record_released_memory(size_t size, MEMFLAGS flag) { - as_snapshot()->by_type(flag)->release_memory(size); + static inline void record_released_memory(size_t size, MemTag mem_tag) { + as_snapshot()->by_type(mem_tag)->release_memory(size); } - // Move virtual memory from one memory type to another. - // Virtual memory can be reserved before it is associated with a memory type, and tagged + // Move virtual memory from one memory tag to another. + // Virtual memory can be reserved before it is associated with a memory tag, and tagged // as 'unknown'. 
Once the memory is tagged, the virtual memory will be moved from 'unknown' - // type to specified memory type. - static inline void move_reserved_memory(MEMFLAGS from, MEMFLAGS to, size_t size) { + // type to specified memory tag. + static inline void move_reserved_memory(MemTag from, MemTag to, size_t size) { as_snapshot()->by_type(from)->release_memory(size); as_snapshot()->by_type(to)->reserve_memory(size); } - static inline void move_committed_memory(MEMFLAGS from, MEMFLAGS to, size_t size) { + static inline void move_committed_memory(MemTag from, MemTag to, size_t size) { as_snapshot()->by_type(from)->uncommit_memory(size); as_snapshot()->by_type(to)->commit_memory(size); } @@ -293,16 +293,16 @@ class ReservedMemoryRegion : public VirtualMemoryRegion { _committed_regions; NativeCallStack _stack; - MEMFLAGS _flag; + MemTag _mem_tag; public: ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack, - MEMFLAGS flag = mtNone) : - VirtualMemoryRegion(base, size), _stack(stack), _flag(flag) { } + MemTag mem_tag = mtNone) : + VirtualMemoryRegion(base, size), _stack(stack), _mem_tag(mem_tag) { } ReservedMemoryRegion(address base, size_t size) : - VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _flag(mtNone) { } + VirtualMemoryRegion(base, size), _stack(NativeCallStack::empty_stack()), _mem_tag(mtNone) { } // Copy constructor ReservedMemoryRegion(const ReservedMemoryRegion& rr) : @@ -313,8 +313,8 @@ class ReservedMemoryRegion : public VirtualMemoryRegion { inline void set_call_stack(const NativeCallStack& stack) { _stack = stack; } inline const NativeCallStack* call_stack() const { return &_stack; } - void set_flag(MEMFLAGS flag); - inline MEMFLAGS flag() const { return _flag; } + void set_mem_tag(MemTag mem_tag); + inline MemTag mem_tag() const { return _mem_tag; } // uncommitted thread stack bottom, above guard pages if there is any. address thread_stack_uncommitted_bottom() const; @@ -336,8 +336,8 @@ class ReservedMemoryRegion : public VirtualMemoryRegion { set_base(other.base()); set_size(other.size()); - _stack = *other.call_stack(); - _flag = other.flag(); + _stack = *other.call_stack(); + _mem_tag = other.mem_tag(); _committed_regions.clear(); CommittedRegionIterator itr = other.iterate_committed_regions(); @@ -350,7 +350,7 @@ class ReservedMemoryRegion : public VirtualMemoryRegion { return *this; } - const char* flag_name() const { return NMTUtil::flag_to_name(_flag); } + const char* mem_tag_name() const { return NMTUtil::tag_to_name(_mem_tag); } private: // The committed region contains the uncommitted region, subtract the uncommitted @@ -380,18 +380,18 @@ class VirtualMemoryTracker : AllStatic { public: static bool initialize(NMT_TrackingLevel level); - static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MEMFLAGS flag = mtNone); + static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MemTag mem_tag = mtNone); static bool add_committed_region (address base_addr, size_t size, const NativeCallStack& stack); static bool remove_uncommitted_region (address base_addr, size_t size); static bool remove_released_region (address base_addr, size_t size); static bool remove_released_region (ReservedMemoryRegion* rgn); - static void set_reserved_region_type (address addr, MEMFLAGS flag); + static void set_reserved_region_type (address addr, MemTag mem_tag); // Given an existing memory mapping registered with NMT, split the mapping in // two. 
The newly created two mappings will be registered under the call
-  // stack and the memory flags of the original section.
-  static bool split_reserved_region(address addr, size_t size, size_t split, MEMFLAGS flag, MEMFLAGS split_flag);
+  // stack and the memory tags of the original section.
+  static bool split_reserved_region(address addr, size_t size, size_t split, MemTag mem_tag, MemTag split_tag);

   // Walk virtual memory data structure for creating baseline, etc.
   static bool walk_virtual_memory(VirtualMemoryWalker* walker);
diff --git a/src/hotspot/share/nmt/vmatree.cpp b/src/hotspot/share/nmt/vmatree.cpp
index cef2d48e816..3795376d476 100644
--- a/src/hotspot/share/nmt/vmatree.cpp
+++ b/src/hotspot/share/nmt/vmatree.cpp
@@ -83,8 +83,8 @@ VMATree::SummaryDiff VMATree::register_mapping(position A, position B, StateType
   } else {
     // If the state is not matching then we have different operations, such as:
     // reserve [x1, A); ... commit [A, x2); or
-    // reserve [x1, A), flag1; ... reserve [A, x2), flag2; or
-    // reserve [A, x1), flag1; ... reserve [A, x2), flag2;
+    // reserve [x1, A), mem_tag1; ... reserve [A, x2), mem_tag2; or
+    // reserve [A, x1), mem_tag1; ... reserve [A, x2), mem_tag2;
     // then we re-use the existing out node, overwriting its old metadata.
     leqA_n->val() = stA;
   }
@@ -147,7 +147,7 @@ VMATree::SummaryDiff VMATree::register_mapping(position A, position B, StateType
   if (to_be_deleted_inbetween_a_b.length() == 0 && LEQ_A_found) {
     // We must have smashed a hole in an existing region (or replaced it entirely).
     // LEQ_A < A < B <= C
-    SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(LEQ_A.out().flag())];
+    SingleDiff& rescom = diff.tag[NMTUtil::tag_to_index(LEQ_A.out().mem_tag())];
     if (LEQ_A.out().type() == StateType::Reserved) {
       rescom.reserve -= B - A;
     } else if (LEQ_A.out().type() == StateType::Committed) {
@@ -163,7 +163,7 @@ VMATree::SummaryDiff VMATree::register_mapping(position A, position B, StateType
     _tree.remove(delete_me.address);

     // Perform summary accounting
-    SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(delete_me.in().flag())];
+    SingleDiff& rescom = diff.tag[NMTUtil::tag_to_index(delete_me.in().mem_tag())];
     if (delete_me.in().type() == StateType::Reserved) {
       rescom.reserve -= delete_me.address - prev.address;
     } else if (delete_me.in().type() == StateType::Committed) {
@@ -178,17 +178,17 @@ VMATree::SummaryDiff VMATree::register_mapping(position A, position B, StateType
     // A - prev - B - (some node >= B)
     // It might be that prev.address == B == (some node >= B), this is fine.
     if (prev.out().type() == StateType::Reserved) {
-      SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(prev.out().flag())];
+      SingleDiff& rescom = diff.tag[NMTUtil::tag_to_index(prev.out().mem_tag())];
       rescom.reserve -= B - prev.address;
     } else if (prev.out().type() == StateType::Committed) {
-      SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(prev.out().flag())];
+      SingleDiff& rescom = diff.tag[NMTUtil::tag_to_index(prev.out().mem_tag())];
       rescom.commit -= B - prev.address;
       rescom.reserve -= B - prev.address;
     }
   }

   // Finally, we can register the new region [A, B)'s summary data.
-  SingleDiff& rescom = diff.flag[NMTUtil::flag_to_index(metadata.flag)];
+  SingleDiff& rescom = diff.tag[NMTUtil::tag_to_index(metadata.mem_tag)];
   if (state == StateType::Reserved) {
     rescom.reserve += B - A;
   } else if (state == StateType::Committed) {
diff --git a/src/hotspot/share/nmt/vmatree.hpp b/src/hotspot/share/nmt/vmatree.hpp
index 3316219a1d3..55399e51b9d 100644
--- a/src/hotspot/share/nmt/vmatree.hpp
+++ b/src/hotspot/share/nmt/vmatree.hpp
@@ -35,7 +35,7 @@
 // A VMATree stores a sequence of points on the natural number line.
 // Each of these points stores information about a state change.
 // For example, the state may go from released memory to committed memory,
-// or from committed memory of a certain MEMFLAGS to committed memory of a different MEMFLAGS.
+// or from committed memory of a certain MemTag to committed memory of a different MemTag.
 // The set of points is stored in a balanced binary tree for efficient querying and updating.
 class VMATree {
   friend class NMTVMATreeTest;
@@ -69,15 +69,15 @@ public:
   // Each point has some stack and a flag associated with it.
   struct RegionData {
     const NativeCallStackStorage::StackIndex stack_idx;
-    const MEMFLAGS flag;
+    const MemTag mem_tag;

-    RegionData() : stack_idx(), flag(mtNone) {}
+    RegionData() : stack_idx(), mem_tag(mtNone) {}

-    RegionData(NativeCallStackStorage::StackIndex stack_idx, MEMFLAGS flag)
-      : stack_idx(stack_idx), flag(flag) {}
+    RegionData(NativeCallStackStorage::StackIndex stack_idx, MemTag mem_tag)
+      : stack_idx(stack_idx), mem_tag(mem_tag) {}

     static bool equals(const RegionData& a, const RegionData& b) {
-      return a.flag == b.flag &&
+      return a.mem_tag == b.mem_tag &&
              NativeCallStackStorage::equals(a.stack_idx, b.stack_idx);
     }
   };
@@ -87,16 +87,16 @@ public:
 private:
   struct IntervalState {
   private:
-    // Store the type and flag as two bytes
+    // Store the type and mem_tag as two bytes
     uint8_t type_flag[2];
     NativeCallStackStorage::StackIndex sidx;

   public:
     IntervalState() : type_flag{0,0}, sidx() {}
     IntervalState(const StateType type, const RegionData data) {
-      assert(!(type == StateType::Released) || data.flag == mtNone, "Released type must have flag mtNone");
+      assert(!(type == StateType::Released) || data.mem_tag == mtNone, "Released type must have memory tag mtNone");
       type_flag[0] = static_cast<uint8_t>(type);
-      type_flag[1] = static_cast<uint8_t>(data.flag);
+      type_flag[1] = static_cast<uint8_t>(data.mem_tag);
       sidx = data.stack_idx;
     }
@@ -104,12 +104,12 @@ private:
       return static_cast<StateType>(type_flag[0]);
     }

-    MEMFLAGS flag() const {
-      return static_cast<MEMFLAGS>(type_flag[1]);
+    MemTag mem_tag() const {
+      return static_cast<MemTag>(type_flag[1]);
     }

     RegionData regiondata() const {
-      return RegionData{sidx, flag()};
+      return RegionData{sidx, mem_tag()};
     }

     NativeCallStackStorage::StackIndex stack() const {
@@ -159,10 +159,10 @@ public:
     delta commit;
   };
   struct SummaryDiff {
-    SingleDiff flag[mt_number_of_types];
+    SingleDiff tag[mt_number_of_tags];
     SummaryDiff() {
-      for (int i = 0; i < mt_number_of_types; i++) {
-        flag[i] = SingleDiff{0, 0};
+      for (int i = 0; i < mt_number_of_tags; i++) {
+        tag[i] = SingleDiff{0, 0};
       }
     }
   };
diff --git a/src/hotspot/share/prims/jni.cpp b/src/hotspot/share/prims/jni.cpp
index fbb2c6e3e08..1f115c783e6 100644
--- a/src/hotspot/share/prims/jni.cpp
+++ b/src/hotspot/share/prims/jni.cpp
@@ -2409,7 +2409,7 @@ static char* get_bad_address() {
     if (bad_address != nullptr) {
       os::protect_memory(bad_address, size, os::MEM_PROT_READ, /*is_committed*/false);
-      MemTracker::record_virtual_memory_type((void*)bad_address, mtInternal);
+
MemTracker::record_virtual_memory_tag((void*)bad_address, mtInternal); } } return bad_address; diff --git a/src/hotspot/share/prims/jvmtiAgentList.hpp b/src/hotspot/share/prims/jvmtiAgentList.hpp index 671def02681..d53f5e63d9b 100644 --- a/src/hotspot/share/prims/jvmtiAgentList.hpp +++ b/src/hotspot/share/prims/jvmtiAgentList.hpp @@ -25,7 +25,7 @@ #ifndef SHARE_PRIMS_JVMTIAGENTLIST_HPP #define SHARE_PRIMS_JVMTIAGENTLIST_HPP -#include "nmt/memflags.hpp" +#include "nmt/memTag.hpp" #include "prims/jvmtiAgent.hpp" #include "utilities/growableArray.hpp" diff --git a/src/hotspot/share/prims/whitebox.cpp b/src/hotspot/share/prims/whitebox.cpp index 87f4a751e80..ca440b69913 100644 --- a/src/hotspot/share/prims/whitebox.cpp +++ b/src/hotspot/share/prims/whitebox.cpp @@ -676,7 +676,7 @@ WB_END #endif // INCLUDE_G1GC -// Alloc memory using the test memory type so that we can use that to see if +// Alloc memory using the test memory tag so that we can use that to see if // NMT picks it up correctly WB_ENTRY(jlong, WB_NMTMalloc(JNIEnv* env, jobject o, jlong size)) jlong addr = 0; @@ -692,11 +692,11 @@ WB_ENTRY(jlong, WB_NMTMallocWithPseudoStack(JNIEnv* env, jobject o, jlong size, return (jlong)(uintptr_t)os::malloc(size, mtTest, stack); WB_END -// Alloc memory with pseudo call stack and specific memory type. -WB_ENTRY(jlong, WB_NMTMallocWithPseudoStackAndType(JNIEnv* env, jobject o, jlong size, jint pseudo_stack, jint type)) +// Alloc memory with pseudo call stack and specific memory tag. +WB_ENTRY(jlong, WB_NMTMallocWithPseudoStackAndType(JNIEnv* env, jobject o, jlong size, jint pseudo_stack, jint mem_tag)) address pc = (address)(size_t)pseudo_stack; NativeCallStack stack(&pc, 1); - return (jlong)(uintptr_t)os::malloc(size, (MEMFLAGS)type, stack); + return (jlong)(uintptr_t)os::malloc(size, (MemTag)mem_tag, stack); WB_END // Free the memory allocated by NMTAllocTest @@ -708,21 +708,21 @@ WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size)) jlong addr = 0; addr = (jlong)(uintptr_t)os::reserve_memory(size); - MemTracker::record_virtual_memory_type((address)addr, mtTest); + MemTracker::record_virtual_memory_tag((address)addr, mtTest); return addr; WB_END WB_ENTRY(jlong, WB_NMTAttemptReserveMemoryAt(JNIEnv* env, jobject o, jlong addr, jlong size)) addr = (jlong)(uintptr_t)os::attempt_reserve_memory_at((char*)(uintptr_t)addr, (size_t)size); - MemTracker::record_virtual_memory_type((address)addr, mtTest); + MemTracker::record_virtual_memory_tag((address)addr, mtTest); return addr; WB_END WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size)) os::commit_memory((char *)(uintptr_t)addr, size, !ExecMem); - MemTracker::record_virtual_memory_type((address)(uintptr_t)addr, mtTest); + MemTracker::record_virtual_memory_tag((address)(uintptr_t)addr, mtTest); WB_END WB_ENTRY(void, WB_NMTUncommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size)) diff --git a/src/hotspot/share/runtime/handles.hpp b/src/hotspot/share/runtime/handles.hpp index 39e59cc1ef0..8ed16d33a2f 100644 --- a/src/hotspot/share/runtime/handles.hpp +++ b/src/hotspot/share/runtime/handles.hpp @@ -187,7 +187,7 @@ class HandleArea: public Arena { HandleArea* _prev; // link to outer (older) area public: // Constructor - HandleArea(MEMFLAGS flags, HandleArea* prev) : Arena(flags, Tag::tag_ha, Chunk::tiny_size) { + HandleArea(MemTag mem_tag, HandleArea* prev) : Arena(mem_tag, Tag::tag_ha, Chunk::tiny_size) { debug_only(_handle_mark_nesting = 0); debug_only(_no_handle_mark_nesting = 0); _prev = prev; diff 
--git a/src/hotspot/share/runtime/javaThread.cpp b/src/hotspot/share/runtime/javaThread.cpp index 416e79d5844..3528fc5b1bc 100644 --- a/src/hotspot/share/runtime/javaThread.cpp +++ b/src/hotspot/share/runtime/javaThread.cpp @@ -409,8 +409,8 @@ void JavaThread::check_for_valid_safepoint_state() { // A JavaThread is a normal Java thread -JavaThread::JavaThread(MEMFLAGS flags) : - Thread(flags), +JavaThread::JavaThread(MemTag mem_tag) : + Thread(mem_tag), // Initialize fields _on_thread_list(false), DEBUG_ONLY(_java_call_counter(0) COMMA) @@ -634,7 +634,7 @@ void JavaThread::block_if_vm_exited() { } } -JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz, MEMFLAGS flags) : JavaThread(flags) { +JavaThread::JavaThread(ThreadFunction entry_point, size_t stack_sz, MemTag mem_tag) : JavaThread(mem_tag) { set_entry_point(entry_point); // Create the native thread itself. // %note runtime_23 diff --git a/src/hotspot/share/runtime/javaThread.hpp b/src/hotspot/share/runtime/javaThread.hpp index 54a11dad9b8..e36b7dfe888 100644 --- a/src/hotspot/share/runtime/javaThread.hpp +++ b/src/hotspot/share/runtime/javaThread.hpp @@ -479,8 +479,8 @@ private: public: // Constructor - JavaThread(MEMFLAGS flags = mtThread); // delegating constructor - JavaThread(ThreadFunction entry_point, size_t stack_size = 0, MEMFLAGS flags = mtThread); + JavaThread(MemTag mem_tag = mtThread); // delegating constructor + JavaThread(ThreadFunction entry_point, size_t stack_size = 0, MemTag mem_tag = mtThread); ~JavaThread(); // Factory method to create a new JavaThread whose attach state is "is attaching" diff --git a/src/hotspot/share/runtime/lightweightSynchronizer.cpp b/src/hotspot/share/runtime/lightweightSynchronizer.cpp index 0e360dba97b..5037bb9607b 100644 --- a/src/hotspot/share/runtime/lightweightSynchronizer.cpp +++ b/src/hotspot/share/runtime/lightweightSynchronizer.cpp @@ -29,7 +29,7 @@ #include "logging/log.hpp" #include "memory/allStatic.hpp" #include "memory/resourceArea.hpp" -#include "nmt/memflags.hpp" +#include "nmt/memTag.hpp" #include "oops/oop.inline.hpp" #include "runtime/atomic.hpp" #include "runtime/basicLock.inline.hpp" @@ -60,14 +60,14 @@ class ObjectMonitorTable : AllStatic { } static void* allocate_node(void* context, size_t size, Value const& value) { ObjectMonitorTable::inc_items_count(); - return AllocateHeap(size, MEMFLAGS::mtObjectMonitor); + return AllocateHeap(size, mtObjectMonitor); }; static void free_node(void* context, void* memory, Value const& value) { ObjectMonitorTable::dec_items_count(); FreeHeap(memory); } }; - using ConcurrentTable = ConcurrentHashTable; + using ConcurrentTable = ConcurrentHashTable; static ConcurrentTable* _table; static volatile size_t _items_count; diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp index a8f3bc5c1d5..9c9dfbc6230 100644 --- a/src/hotspot/share/runtime/os.cpp +++ b/src/hotspot/share/runtime/os.cpp @@ -600,16 +600,16 @@ bool os::find_builtin_agent(JvmtiAgent* agent, const char *syms[], // --------------------- heap allocation utilities --------------------- -char *os::strdup(const char *str, MEMFLAGS flags) { +char *os::strdup(const char *str, MemTag mem_tag) { size_t size = strlen(str); - char *dup_str = (char *)malloc(size + 1, flags); + char *dup_str = (char *)malloc(size + 1, mem_tag); if (dup_str == nullptr) return nullptr; strcpy(dup_str, str); return dup_str; } -char* os::strdup_check_oom(const char* str, MEMFLAGS flags) { - char* p = os::strdup(str, flags); +char* os::strdup_check_oom(const char* str, 
MemTag mem_tag) { + char* p = os::strdup(str, mem_tag); if (p == nullptr) { vm_exit_out_of_memory(strlen(str) + 1, OOM_MALLOC_ERROR, "os::strdup_check_oom"); } @@ -629,11 +629,11 @@ static void break_if_ptr_caught(void* ptr) { } #endif // ASSERT -void* os::malloc(size_t size, MEMFLAGS flags) { - return os::malloc(size, flags, CALLER_PC); +void* os::malloc(size_t size, MemTag mem_tag) { + return os::malloc(size, mem_tag, CALLER_PC); } -void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { +void* os::malloc(size_t size, MemTag mem_tag, const NativeCallStack& stack) { // Special handling for NMT preinit phase before arguments are parsed void* rc = nullptr; @@ -651,7 +651,7 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { size = MAX2((size_t)1, size); // Observe MallocLimit - if (MemTracker::check_exceeds_limit(size, memflags)) { + if (MemTracker::check_exceeds_limit(size, mem_tag)) { return nullptr; } @@ -667,7 +667,7 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { return nullptr; } - void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, memflags, stack); + void* const inner_ptr = MemTracker::record_malloc((address)outer_ptr, size, mem_tag, stack); if (CDSConfig::is_dumping_static_archive()) { // Need to deterministically fill all the alignment gaps in C++ structures. @@ -679,20 +679,20 @@ void* os::malloc(size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { return inner_ptr; } -void* os::realloc(void *memblock, size_t size, MEMFLAGS flags) { - return os::realloc(memblock, size, flags, CALLER_PC); +void* os::realloc(void *memblock, size_t size, MemTag mem_tag) { + return os::realloc(memblock, size, mem_tag, CALLER_PC); } -void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCallStack& stack) { +void* os::realloc(void *memblock, size_t size, MemTag mem_tag, const NativeCallStack& stack) { // Special handling for NMT preinit phase before arguments are parsed void* rc = nullptr; - if (NMTPreInit::handle_realloc(&rc, memblock, size, memflags)) { + if (NMTPreInit::handle_realloc(&rc, memblock, size, mem_tag)) { return rc; } if (memblock == nullptr) { - return os::malloc(size, memflags, stack); + return os::malloc(size, mem_tag, stack); } DEBUG_ONLY(check_crash_protection()); @@ -715,15 +715,15 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa const size_t old_size = MallocTracker::malloc_header(memblock)->size(); // Observe MallocLimit - if ((size > old_size) && MemTracker::check_exceeds_limit(size - old_size, memflags)) { + if ((size > old_size) && MemTracker::check_exceeds_limit(size - old_size, mem_tag)) { return nullptr; } // Perform integrity checks on and mark the old block as dead *before* calling the real realloc(3) since it // may invalidate the old block, including its header. 
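The header dance around realloc(3) is easier to follow in a toy model: NMT prepends a header to every malloc'd block, hands callers the pointer just past it, and on realloc must recover the header, preserve the tag, and re-account the block under its new size. A sketch with an assumed simplified layout, not the real MallocHeader:

```cpp
// Toy model (not HotSpot code) of NMT's inner/outer pointer scheme.
#include <cstdlib>
#include <cstdint>

struct Header { size_t size; uint8_t tag; };

static void* record_malloc(void* outer, size_t size, uint8_t tag) {
  Header* h = static_cast<Header*>(outer);
  h->size = size; h->tag = tag;
  return h + 1;                       // "inner" pointer, what callers see
}

static void* toy_malloc(size_t size, uint8_t tag) {
  void* outer = malloc(sizeof(Header) + size);
  return outer ? record_malloc(outer, size, tag) : nullptr;
}

static void* toy_realloc(void* inner, size_t size) {
  Header* h = static_cast<Header*>(inner) - 1;  // recover header from inner ptr
  uint8_t tag = h->tag;                         // the tag survives the resize
  void* outer = realloc(h, sizeof(Header) + size);
  return outer ? record_malloc(outer, size, tag) : nullptr;  // re-account
}
```

MallocHeader::resolve_checked() below is the hardened form of the `- 1` lookup in this sketch, and the free_info snapshot taken next preserves the old accounting data across the window in which realloc(3) may invalidate the old header.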
MallocHeader* header = MallocHeader::resolve_checked(memblock); - assert(memflags == header->flags(), "weird NMT flags mismatch (new:\"%s\" != old:\"%s\")\n", - NMTUtil::flag_to_name(memflags), NMTUtil::flag_to_name(header->flags())); + assert(mem_tag == header->mem_tag(), "weird NMT type mismatch (new:\"%s\" != old:\"%s\")\n", + NMTUtil::tag_to_name(mem_tag), NMTUtil::tag_to_name(header->mem_tag())); const MallocHeader::FreeInfo free_info = header->free_info(); header->mark_block_as_dead(); @@ -742,7 +742,7 @@ void* os::realloc(void *memblock, size_t size, MEMFLAGS memflags, const NativeCa // After a successful realloc(3), we account the resized block with its new size // to NMT. - void* const new_inner_ptr = MemTracker::record_malloc(new_outer_ptr, size, memflags, stack); + void* const new_inner_ptr = MemTracker::record_malloc(new_outer_ptr, size, mem_tag, stack); #ifdef ASSERT assert(old_size == free_info.size, "Sanity"); @@ -1871,10 +1871,10 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) { return os::pd_create_stack_guard_pages(addr, bytes); } -char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) { +char* os::reserve_memory(size_t bytes, bool executable, MemTag mem_tag) { char* result = pd_reserve_memory(bytes, executable); if (result != nullptr) { - MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC, flags); + MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC, mem_tag); log_debug(os, map)("Reserved " RANGEFMT, RANGEFMTARGS(result, bytes)); } else { log_info(os, map)("Reserve failed (%zu bytes)", bytes); @@ -1882,10 +1882,10 @@ char* os::reserve_memory(size_t bytes, bool executable, MEMFLAGS flags) { return result; } -char* os::attempt_reserve_memory_at(char* addr, size_t bytes, bool executable, MEMFLAGS flag) { +char* os::attempt_reserve_memory_at(char* addr, size_t bytes, bool executable, MemTag mem_tag) { char* result = SimulateFullAddressSpace ? nullptr : pd_attempt_reserve_memory_at(addr, bytes, executable); if (result != nullptr) { - MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC, flag); + MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC, mem_tag); log_debug(os, map)("Reserved " RANGEFMT, RANGEFMTARGS(result, bytes)); } else { log_info(os, map)("Attempt to reserve " RANGEFMT " failed", @@ -2235,31 +2235,31 @@ void os::pretouch_memory(void* start, void* end, size_t page_size) { } } -char* os::map_memory_to_file(size_t bytes, int file_desc, MEMFLAGS flag) { +char* os::map_memory_to_file(size_t bytes, int file_desc, MemTag mem_tag) { // Could have called pd_reserve_memory() followed by replace_existing_mapping_with_file_mapping(), // but AIX may use SHM in which case its more trouble to detach the segment and remap memory to the file. // On all current implementations null is interpreted as any available address. 
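Grounded in the signatures this patch touches, a caller has two equivalent ways to end up with tagged virtual memory, mirroring the whitebox tests earlier in the patch; a hedged HotSpot-internal sketch (compiles only inside the HotSpot source tree):

```cpp
// HotSpot-internal sketch; the signatures used are the ones in this patch.
#include "nmt/memTracker.hpp"
#include "runtime/os.hpp"

static void tag_two_ways(size_t bytes) {
  // 1) Tag at reservation time: the reserve is recorded under mtTest at once.
  char* p = os::reserve_memory(bytes, /*executable=*/false, mtTest);

  // 2) Reserve untagged (mem_tag defaults to mtNone), then re-tag; this is
  //    the pattern WB_NMTReserveMemory above uses.
  char* q = os::reserve_memory(bytes);
  if (q != nullptr) {
    MemTracker::record_virtual_memory_tag((address)q, mtTest);
  }

  if (p != nullptr) { os::release_memory(p, bytes); }
  if (q != nullptr) { os::release_memory(q, bytes); }
}
```

The body of map_memory_to_file() continues below; it differs only in recording reserve and commit in a single step.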
char* result = os::map_memory_to_file(nullptr /* addr */, bytes, file_desc); if (result != nullptr) { - MemTracker::record_virtual_memory_reserve_and_commit(result, bytes, CALLER_PC, flag); + MemTracker::record_virtual_memory_reserve_and_commit(result, bytes, CALLER_PC, mem_tag); } return result; } -char* os::attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc, MEMFLAGS flag) { +char* os::attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc, MemTag mem_tag) { char* result = pd_attempt_map_memory_to_file_at(addr, bytes, file_desc); if (result != nullptr) { - MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flag); + MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, mem_tag); } return result; } char* os::map_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only, - bool allow_exec, MEMFLAGS flags) { + bool allow_exec, MemTag mem_tag) { char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec); if (result != nullptr) { - MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, flags); + MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, mem_tag); } return result; } diff --git a/src/hotspot/share/runtime/os.hpp b/src/hotspot/share/runtime/os.hpp index 1d81612c033..35a5be77b60 100644 --- a/src/hotspot/share/runtime/os.hpp +++ b/src/hotspot/share/runtime/os.hpp @@ -450,14 +450,14 @@ class os: AllStatic { inline static size_t cds_core_region_alignment(); // Reserves virtual memory. - static char* reserve_memory(size_t bytes, bool executable = false, MEMFLAGS flags = mtNone); + static char* reserve_memory(size_t bytes, bool executable = false, MemTag mem_tag = mtNone); // Reserves virtual memory that starts at an address that is aligned to 'alignment'. static char* reserve_memory_aligned(size_t size, size_t alignment, bool executable = false); // Attempts to reserve the virtual memory at [addr, addr + bytes). // Does not overwrite existing mappings. - static char* attempt_reserve_memory_at(char* addr, size_t bytes, bool executable = false, MEMFLAGS flag = mtNone); + static char* attempt_reserve_memory_at(char* addr, size_t bytes, bool executable = false, MemTag mem_tag = mtNone); // Given an address range [min, max), attempts to reserve memory within this area, with the given alignment. // If randomize is true, the location will be randomized. @@ -509,16 +509,16 @@ class os: AllStatic { static int create_file_for_heap(const char* dir); // Map memory to the file referred by fd. 
This function is slightly different from map_memory() // and is added to be used for implementation of -XX:AllocateHeapAt - static char* map_memory_to_file(size_t size, int fd, MEMFLAGS flag = mtNone); - static char* map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MEMFLAGS flag = mtNone); + static char* map_memory_to_file(size_t size, int fd, MemTag mem_tag = mtNone); + static char* map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MemTag mem_tag = mtNone); static char* map_memory_to_file(char* base, size_t size, int fd); - static char* attempt_map_memory_to_file_at(char* base, size_t size, int fd, MEMFLAGS flag = mtNone); + static char* attempt_map_memory_to_file_at(char* base, size_t size, int fd, MemTag mem_tag = mtNone); // Replace existing reserved memory with file mapping static char* replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd); static char* map_memory(int fd, const char* file_name, size_t file_offset, char *addr, size_t bytes, bool read_only = false, - bool allow_exec = false, MEMFLAGS flags = mtNone); + bool allow_exec = false, MemTag mem_tag = mtNone); static bool unmap_memory(char *addr, size_t bytes); static void disclaim_memory(char *addr, size_t bytes); static void realign_memory(char *addr, size_t bytes, size_t alignment_hint); @@ -900,16 +900,16 @@ class os: AllStatic { static int get_native_stack(address* stack, int size, int toSkip = 0); // General allocation (must be MT-safe) - static void* malloc (size_t size, MEMFLAGS flags, const NativeCallStack& stack); - static void* malloc (size_t size, MEMFLAGS flags); - static void* realloc (void *memblock, size_t size, MEMFLAGS flag, const NativeCallStack& stack); - static void* realloc (void *memblock, size_t size, MEMFLAGS flag); + static void* malloc (size_t size, MemTag mem_tag, const NativeCallStack& stack); + static void* malloc (size_t size, MemTag mem_tag); + static void* realloc (void *memblock, size_t size, MemTag mem_tag, const NativeCallStack& stack); + static void* realloc (void *memblock, size_t size, MemTag mem_tag); // handles null pointers static void free (void *memblock); - static char* strdup(const char *, MEMFLAGS flags = mtInternal); // Like strdup + static char* strdup(const char *, MemTag mem_tag = mtInternal); // Like strdup // Like strdup, but exit VM when strdup() returns null - static char* strdup_check_oom(const char*, MEMFLAGS flags = mtInternal); + static char* strdup_check_oom(const char*, MemTag mem_tag = mtInternal); // SocketInterface (ex HPI SocketInterface ) static int socket_close(int fd); diff --git a/src/hotspot/share/runtime/safepointMechanism.cpp b/src/hotspot/share/runtime/safepointMechanism.cpp index 624583db3d1..a6aadf5ebc4 100644 --- a/src/hotspot/share/runtime/safepointMechanism.cpp +++ b/src/hotspot/share/runtime/safepointMechanism.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -60,7 +60,7 @@ void SafepointMechanism::default_initialize() {
     const size_t allocation_size = 2 * page_size;
     char* polling_page = os::reserve_memory(allocation_size);
     os::commit_memory_or_exit(polling_page, allocation_size, false, "Unable to commit Safepoint polling page");
-    MemTracker::record_virtual_memory_type((address)polling_page, mtSafepoint);
+    MemTracker::record_virtual_memory_tag((address)polling_page, mtSafepoint);

     char* bad_page  = polling_page;
     char* good_page = polling_page + page_size;
diff --git a/src/hotspot/share/runtime/thread.cpp b/src/hotspot/share/runtime/thread.cpp
index e72077adabf..7c01c8d4e16 100644
--- a/src/hotspot/share/runtime/thread.cpp
+++ b/src/hotspot/share/runtime/thread.cpp
@@ -64,7 +64,7 @@ THREAD_LOCAL Thread* Thread::_thr_current = nullptr;

 DEBUG_ONLY(Thread* Thread::_starting_thread = nullptr;)

-Thread::Thread(MEMFLAGS flags) {
+Thread::Thread(MemTag mem_tag) {

   DEBUG_ONLY(_run_state = PRE_CALL_RUN;)

@@ -78,9 +78,9 @@ Thread::Thread(MEMFLAGS flags) {
   // allocated data structures
   set_osthread(nullptr);
-  set_resource_area(new (flags) ResourceArea(flags));
+  set_resource_area(new (mem_tag) ResourceArea(mem_tag));
   DEBUG_ONLY(_current_resource_mark = nullptr;)
-  set_handle_area(new (flags) HandleArea(flags, nullptr));
+  set_handle_area(new (mem_tag) HandleArea(mem_tag, nullptr));
   set_metadata_handles(new (mtClass) GrowableArray<Metadata*>(30, mtClass));
   set_last_handle_mark(nullptr);
   DEBUG_ONLY(_missed_ic_stub_refill_verifier = nullptr);
diff --git a/src/hotspot/share/runtime/thread.hpp b/src/hotspot/share/runtime/thread.hpp
index e9fee4d113a..567a76d0ead 100644
--- a/src/hotspot/share/runtime/thread.hpp
+++ b/src/hotspot/share/runtime/thread.hpp
@@ -277,7 +277,7 @@ class Thread: public ThreadShadow {
                                            // is waiting to lock
 public:
   // Constructor
-  Thread(MEMFLAGS flag = mtThread);
+  Thread(MemTag mem_tag = mtThread);
   virtual ~Thread() = 0;        // Thread is abstract.
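The constructor chain above threads a single tag through every arena a thread owns; reduced to its shape, with illustrative stand-ins rather than the real Arena/ResourceArea types:

```cpp
// Minimal sketch (not HotSpot code) of the tagging pattern in Thread::Thread:
// the tag is passed down the constructor chain so each arena the thread owns
// is accounted under the same NMT bucket.
#include <cstdint>
#include <cstdio>

enum class Tag : uint8_t { Thread, Internal };

struct Arena {
  Tag tag;
  explicit Arena(Tag t) : tag(t) {}   // all allocations would charge 'tag'
};

struct ToyThread {
  Arena resource_area;
  Arena handle_area;
  explicit ToyThread(Tag t = Tag::Thread)       // mirrors Thread(MemTag = mtThread)
      : resource_area(t), handle_area(t) {}
};

int main() {
  ToyThread t;   // both arenas accounted as Tag::Thread
  printf("%d\n", static_cast<int>(t.resource_area.tag));
}
```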
 // Manage Thread::current()
diff --git a/src/hotspot/share/services/threadService.cpp b/src/hotspot/share/services/threadService.cpp
index 09cb7ffb25a..a1ad0a910f6 100644
--- a/src/hotspot/share/services/threadService.cpp
+++ b/src/hotspot/share/services/threadService.cpp
@@ -32,7 +32,7 @@
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/objArrayKlass.hpp"
diff --git a/src/hotspot/share/utilities/bitMap.cpp b/src/hotspot/share/utilities/bitMap.cpp
index 6ea473dda48..423bc22cd9d 100644
--- a/src/hotspot/share/utilities/bitMap.cpp
+++ b/src/hotspot/share/utilities/bitMap.cpp
@@ -173,8 +173,8 @@ bm_word_t* ResourceBitMap::reallocate(bm_word_t* old_map, size_t old_size_in_wor
   return pseudo_reallocate(*this, old_map, old_size_in_words, new_size_in_words);
 }

-CHeapBitMap::CHeapBitMap(idx_t size_in_bits, MEMFLAGS flags, bool clear)
-  : GrowableBitMap(), _flags(flags) {
+CHeapBitMap::CHeapBitMap(idx_t size_in_bits, MemTag mem_tag, bool clear)
+  : GrowableBitMap(), _mem_tag(mem_tag) {
   initialize(size_in_bits, clear);
 }

@@ -183,7 +183,7 @@ CHeapBitMap::~CHeapBitMap() {
 }

 bm_word_t* CHeapBitMap::allocate(idx_t size_in_words) const {
-  return MallocArrayAllocator<bm_word_t>::allocate(size_in_words, _flags);
+  return MallocArrayAllocator<bm_word_t>::allocate(size_in_words, _mem_tag);
 }

 // GrowableBitMap::resize uses free(ptr, size) for T as CHeapBitMap, ArenaBitMap and ResourceBitMap allocators.
@@ -193,7 +193,7 @@ void CHeapBitMap::free(bm_word_t* map, idx_t size_in_words) const {
 }

 bm_word_t* CHeapBitMap::reallocate(bm_word_t* map, size_t old_size_in_words, size_t new_size_in_words) const {
-  return MallocArrayAllocator<bm_word_t>::reallocate(map, new_size_in_words, _flags);
+  return MallocArrayAllocator<bm_word_t>::reallocate(map, new_size_in_words, _mem_tag);
 }

 #ifdef ASSERT
diff --git a/src/hotspot/share/utilities/bitMap.hpp b/src/hotspot/share/utilities/bitMap.hpp
index 33636536485..1c69a0d43f8 100644
--- a/src/hotspot/share/utilities/bitMap.hpp
+++ b/src/hotspot/share/utilities/bitMap.hpp
@@ -25,7 +25,7 @@
 #ifndef SHARE_UTILITIES_BITMAP_HPP
 #define SHARE_UTILITIES_BITMAP_HPP

-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "runtime/atomic.hpp"
 #include "utilities/globalDefinitions.hpp"

@@ -638,16 +638,16 @@ class ResourceBitMap : public GrowableBitMap<ResourceBitMap> {

 // A BitMap with storage in the CHeap.
 class CHeapBitMap : public GrowableBitMap<CHeapBitMap> {
-  // NMT memory type
-  const MEMFLAGS _flags;
+  // NMT memory tag
+  const MemTag _mem_tag;

   // Don't allow copy or assignment, to prevent the
   // allocated memory from leaking out to other instances.
   NONCOPYABLE(CHeapBitMap);

 public:
-  explicit CHeapBitMap(MEMFLAGS flags) : GrowableBitMap(), _flags(flags) {}
-  CHeapBitMap(idx_t size_in_bits, MEMFLAGS flags, bool clear = true);
+  explicit CHeapBitMap(MemTag mem_tag) : GrowableBitMap(), _mem_tag(mem_tag) {}
+  CHeapBitMap(idx_t size_in_bits, MemTag mem_tag, bool clear = true);
   ~CHeapBitMap();

   bm_word_t* allocate(idx_t size_in_words) const;
diff --git a/src/hotspot/share/utilities/chunkedList.hpp b/src/hotspot/share/utilities/chunkedList.hpp
index 9a600e4ce1b..3269c305e78 100644
--- a/src/hotspot/share/utilities/chunkedList.hpp
+++ b/src/hotspot/share/utilities/chunkedList.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -28,7 +28,7 @@ #include "memory/allocation.hpp" #include "utilities/debug.hpp" -template class ChunkedList : public CHeapObj { +template class ChunkedList : public CHeapObj { template friend class TestChunkedList; static const size_t BufferSize = 64; @@ -36,8 +36,8 @@ template class ChunkedList : public CHeapObj { T _values[BufferSize]; T* _top; - ChunkedList* _next_used; - ChunkedList* _next_free; + ChunkedList* _next_used; + ChunkedList* _next_free; T const * end() const { return &_values[BufferSize]; @@ -62,11 +62,11 @@ template class ChunkedList : public CHeapObj { _top++; } - void set_next_used(ChunkedList* buffer) { _next_used = buffer; } - void set_next_free(ChunkedList* buffer) { _next_free = buffer; } + void set_next_used(ChunkedList* buffer) { _next_used = buffer; } + void set_next_free(ChunkedList* buffer) { _next_free = buffer; } - ChunkedList* next_used() const { return _next_used; } - ChunkedList* next_free() const { return _next_free; } + ChunkedList* next_used() const { return _next_used; } + ChunkedList* next_free() const { return _next_free; } size_t size() const { return pointer_delta(_top, _values, sizeof(T)); diff --git a/src/hotspot/share/utilities/concurrentHashTable.hpp b/src/hotspot/share/utilities/concurrentHashTable.hpp index 4e506d5fe84..402a767c0b8 100644 --- a/src/hotspot/share/utilities/concurrentHashTable.hpp +++ b/src/hotspot/share/utilities/concurrentHashTable.hpp @@ -40,8 +40,8 @@ class Thread; class Mutex; -template -class ConcurrentHashTable : public CHeapObj { +template +class ConcurrentHashTable : public CHeapObj { typedef typename CONFIG::Value VALUE; private: // _stats_rate is null if statistics are not enabled. @@ -61,7 +61,7 @@ class ConcurrentHashTable : public CHeapObj { TableStatistics statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f); // This is the internal node structure. - // Only constructed with placement new from memory allocated with MEMFLAGS of + // Only constructed with placement new from memory allocated with MemTag of // the InternalTable or user-defined memory. class Node { private: @@ -105,7 +105,7 @@ class ConcurrentHashTable : public CHeapObj { } }; - // Only constructed with placement new from an array allocated with MEMFLAGS + // Only constructed with placement new from an array allocated with MemTag // of InternalTable. class Bucket { private: @@ -202,7 +202,7 @@ class ConcurrentHashTable : public CHeapObj { // - Re-size can only change the size into half or double // (any pow 2 would also be possible). // - Use masking of hash for bucket index. - class InternalTable : public CHeapObj { + class InternalTable : public CHeapObj { private: Bucket* _buckets; // Bucket array. public: @@ -277,10 +277,10 @@ class ConcurrentHashTable : public CHeapObj { class ScopedCS: public StackObj { protected: Thread* _thread; - ConcurrentHashTable* _cht; + ConcurrentHashTable* _cht; GlobalCounter::CSContext _cs_context; public: - ScopedCS(Thread* thread, ConcurrentHashTable* cht); + ScopedCS(Thread* thread, ConcurrentHashTable* cht); ~ScopedCS(); }; @@ -372,7 +372,7 @@ class ConcurrentHashTable : public CHeapObj { // Check for dead items in a bucket. template size_t delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f, - size_t num_del, Node** ndel, GrowableArrayCHeap& ndel_heap); + size_t num_del, Node** ndel, GrowableArrayCHeap& ndel_heap); // Check for dead items in this table. 
During shrink/grow we cannot guarantee // that we only visit nodes once. To keep it simple caller will have locked @@ -539,12 +539,12 @@ class ConcurrentHashTable : public CHeapObj { // Moves all nodes from this table to to_cht with new hash code. // Must be done at a safepoint. - void rehash_nodes_to(Thread* thread, ConcurrentHashTable* to_cht); + void rehash_nodes_to(Thread* thread, ConcurrentHashTable* to_cht); // Scoped multi getter. class MultiGetHandle : private ScopedCS { public: - MultiGetHandle(Thread* thread, ConcurrentHashTable* cht) + MultiGetHandle(Thread* thread, ConcurrentHashTable* cht) : ScopedCS(thread, cht) {} // In the MultiGetHandle scope you can lookup items matching LOOKUP_FUNC. // The VALUEs are safe as long as you never save the VALUEs outside the diff --git a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp index f035aeae448..5656655275a 100644 --- a/src/hotspot/share/utilities/concurrentHashTable.inline.hpp +++ b/src/hotspot/share/utilities/concurrentHashTable.inline.hpp @@ -58,28 +58,28 @@ static void* const POISON_PTR = (void*)0xffbadbac; #endif // Node -template -inline typename ConcurrentHashTable::Node* -ConcurrentHashTable:: +template +inline typename ConcurrentHashTable::Node* +ConcurrentHashTable:: Node::next() const { return Atomic::load_acquire(&_next); } // Bucket -template -inline typename ConcurrentHashTable::Node* -ConcurrentHashTable:: +template +inline typename ConcurrentHashTable::Node* +ConcurrentHashTable:: Bucket::first_raw() const { return Atomic::load_acquire(&_first); } -template -inline void ConcurrentHashTable:: +template +inline void ConcurrentHashTable:: Bucket::release_assign_node_ptr( - typename ConcurrentHashTable::Node* const volatile * dst, - typename ConcurrentHashTable::Node* node) const + typename ConcurrentHashTable::Node* const volatile * dst, + typename ConcurrentHashTable::Node* node) const { // Due to this assert this methods is not static. assert(is_locked(), "Must be locked."); @@ -87,31 +87,31 @@ inline void ConcurrentHashTable:: Atomic::release_store(tmp, clear_set_state(node, *dst)); } -template -inline typename ConcurrentHashTable::Node* -ConcurrentHashTable:: +template +inline typename ConcurrentHashTable::Node* +ConcurrentHashTable:: Bucket::first() const { // We strip the states bit before returning the ptr. 
return clear_state(Atomic::load_acquire(&_first)); } -template -inline bool ConcurrentHashTable:: +template +inline bool ConcurrentHashTable:: Bucket::have_redirect() const { return is_state(first_raw(), STATE_REDIRECT_BIT); } -template -inline bool ConcurrentHashTable:: +template +inline bool ConcurrentHashTable:: Bucket::is_locked() const { return is_state(first_raw(), STATE_LOCK_BIT); } -template -inline void ConcurrentHashTable:: +template +inline void ConcurrentHashTable:: Bucket::lock() { int i = 0; @@ -128,10 +128,10 @@ inline void ConcurrentHashTable:: } } -template -inline void ConcurrentHashTable:: +template +inline void ConcurrentHashTable:: Bucket::release_assign_last_node_next( - typename ConcurrentHashTable::Node* node) + typename ConcurrentHashTable::Node* node) { assert(is_locked(), "Must be locked."); Node* const volatile * ret = first_ptr(); @@ -141,10 +141,10 @@ inline void ConcurrentHashTable:: release_assign_node_ptr(ret, node); } -template -inline bool ConcurrentHashTable:: - Bucket::cas_first(typename ConcurrentHashTable::Node* node, - typename ConcurrentHashTable::Node* expect +template +inline bool ConcurrentHashTable:: + Bucket::cas_first(typename ConcurrentHashTable::Node* node, + typename ConcurrentHashTable::Node* expect ) { if (is_locked()) { @@ -156,8 +156,8 @@ inline bool ConcurrentHashTable:: return false; } -template -inline bool ConcurrentHashTable:: +template +inline bool ConcurrentHashTable:: Bucket::trylock() { if (is_locked()) { @@ -171,8 +171,8 @@ inline bool ConcurrentHashTable:: return false; } -template -inline void ConcurrentHashTable:: +template +inline void ConcurrentHashTable:: Bucket::unlock() { assert(is_locked(), "Must be locked."); @@ -181,8 +181,8 @@ inline void ConcurrentHashTable:: Atomic::release_store(&_first, clear_state(first())); } -template -inline void ConcurrentHashTable:: +template +inline void ConcurrentHashTable:: Bucket::redirect() { assert(is_locked(), "Must be locked."); @@ -190,15 +190,15 @@ inline void ConcurrentHashTable:: } // InternalTable -template -inline ConcurrentHashTable:: +template +inline ConcurrentHashTable:: InternalTable::InternalTable(size_t log2_size) : _log2_size(log2_size), _size(((size_t)1ul) << _log2_size), _hash_mask(~(~((size_t)0) << _log2_size)) { assert(_log2_size >= SIZE_SMALL_LOG2 && _log2_size <= SIZE_BIG_LOG2, "Bad size"); - _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, F); + _buckets = NEW_C_HEAP_ARRAY(Bucket, _size, MT); // Use placement new for each element instead of new[] which could use more // memory than allocated. 
for (size_t i = 0; i < _size; ++i) { @@ -206,17 +206,17 @@ inline ConcurrentHashTable:: } } -template -inline ConcurrentHashTable:: +template +inline ConcurrentHashTable:: InternalTable::~InternalTable() { FREE_C_HEAP_ARRAY(Bucket, _buckets); } // ScopedCS -template -inline ConcurrentHashTable:: - ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable* cht) +template +inline ConcurrentHashTable:: + ScopedCS::ScopedCS(Thread* thread, ConcurrentHashTable* cht) : _thread(thread), _cht(cht), _cs_context(GlobalCounter::critical_section_begin(_thread)) @@ -227,25 +227,25 @@ inline ConcurrentHashTable:: } } -template -inline ConcurrentHashTable:: +template +inline ConcurrentHashTable:: ScopedCS::~ScopedCS() { GlobalCounter::critical_section_end(_thread, _cs_context); } -template +template template -inline typename CONFIG::Value* ConcurrentHashTable:: +inline typename CONFIG::Value* ConcurrentHashTable:: MultiGetHandle::get(LOOKUP_FUNC& lookup_f, bool* grow_hint) { return ScopedCS::_cht->internal_get(ScopedCS::_thread, lookup_f, grow_hint); } // HaveDeletables -template +template template -inline bool ConcurrentHashTable:: +inline bool ConcurrentHashTable:: HaveDeletables::have_deletable(Bucket* bucket, EVALUATE_FUNC& eval_f, Bucket* prefetch_bucket) @@ -271,9 +271,9 @@ inline bool ConcurrentHashTable:: return false; } -template +template template -inline bool ConcurrentHashTable:: +inline bool ConcurrentHashTable:: HaveDeletables::have_deletable(Bucket* bucket, EVALUATE_FUNC& eval_f, Bucket* preb) @@ -287,8 +287,8 @@ inline bool ConcurrentHashTable:: } // ConcurrentHashTable -template -inline void ConcurrentHashTable:: +template +inline void ConcurrentHashTable:: write_synchonize_on_visible_epoch(Thread* thread) { assert(_resize_lock_owner == thread, "Re-size lock not held"); @@ -304,8 +304,8 @@ inline void ConcurrentHashTable:: GlobalCounter::write_synchronize(); } -template -inline bool ConcurrentHashTable:: +template +inline bool ConcurrentHashTable:: try_resize_lock(Thread* locker) { if (_resize_lock->try_lock()) { @@ -323,8 +323,8 @@ inline bool ConcurrentHashTable:: return true; } -template -inline void ConcurrentHashTable:: +template +inline void ConcurrentHashTable:: lock_resize_lock(Thread* locker) { size_t i = 0; @@ -348,8 +348,8 @@ inline void ConcurrentHashTable:: _invisible_epoch = nullptr; } -template -inline void ConcurrentHashTable:: +template +inline void ConcurrentHashTable:: unlock_resize_lock(Thread* locker) { _invisible_epoch = nullptr; @@ -358,8 +358,8 @@ inline void ConcurrentHashTable:: _resize_lock->unlock(); } -template -inline void ConcurrentHashTable:: +template +inline void ConcurrentHashTable:: free_nodes() { // We assume we are not MT during freeing. 
@@ -374,25 +374,25 @@ inline void ConcurrentHashTable<CONFIG, F>::
   }
 }
-template <typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
-ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline typename ConcurrentHashTable<CONFIG, MT>::InternalTable*
+ConcurrentHashTable<CONFIG, MT>::
   get_table() const
 {
   return Atomic::load_acquire(&_table);
 }
-template <typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
-ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline typename ConcurrentHashTable<CONFIG, MT>::InternalTable*
+ConcurrentHashTable<CONFIG, MT>::
   get_new_table() const
 {
   return Atomic::load_acquire(&_new_table);
 }
-template <typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<CONFIG, F>::InternalTable*
-ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline typename ConcurrentHashTable<CONFIG, MT>::InternalTable*
+ConcurrentHashTable<CONFIG, MT>::
   set_table_from_new()
 {
   InternalTable* old_table = _table;
@@ -406,8 +406,8 @@ ConcurrentHashTable<CONFIG, F>::
   return old_table;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline void ConcurrentHashTable<CONFIG, MT>::
   internal_grow_range(Thread* thread, size_t start, size_t stop)
 {
   assert(stop <= _table->_size, "Outside backing array");
@@ -446,9 +446,9 @@ inline void ConcurrentHashTable<CONFIG, F>::
   }
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename LOOKUP_FUNC, typename DELETE_FUNC>
-inline bool ConcurrentHashTable<CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, MT>::
   internal_remove(Thread* thread, LOOKUP_FUNC& lookup_f, DELETE_FUNC& delete_f)
 {
   Bucket* bucket = get_bucket_locked(thread, lookup_f.get_hash());
@@ -478,9 +478,9 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return true;
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
-inline void ConcurrentHashTable<CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, MT>::
   do_bulk_delete_locked_for(Thread* thread, size_t start_idx, size_t stop_idx,
                             EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f, bool is_mt)
 {
@@ -513,7 +513,7 @@ inline void ConcurrentHashTable<CONFIG, F>::
     // We left critical section but the bucket cannot be removed while we hold
     // the _resize_lock.
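get_table() and get_new_table() above pair Atomic::load_acquire with the release store performed when a resized table is published. A minimal standalone sketch of that publication protocol, using std::atomic in place of HotSpot's Atomic class (illustrative only):

#include <atomic>
#include <cstddef>

struct InternalTable { std::size_t log2_size; };

std::atomic<InternalTable*> g_table{nullptr};

// Writer: finish constructing the table, then publish with release semantics.
void publish(InternalTable* t) {
  g_table.store(t, std::memory_order_release);
}

// Readers: the acquire load pairs with the release store, so a non-null
// result is always a fully initialized table.
InternalTable* current_table() {
  return g_table.load(std::memory_order_acquire);
}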
     bucket->lock();
-    GrowableArrayCHeap<Node*, F> extra(0); // use this buffer if StackBufferSize is not enough
+    GrowableArrayCHeap<Node*, MT> extra(0); // use this buffer if StackBufferSize is not enough
     size_t nd = delete_check_nodes(bucket, eval_f, StackBufferSize, ndel_stack, extra);
     bucket->unlock();
     if (is_mt) {
@@ -533,9 +533,9 @@ inline void ConcurrentHashTable<CONFIG, F>::
   GlobalCounter::critical_section_end(thread, cs_context);
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename LOOKUP_FUNC>
-inline void ConcurrentHashTable<CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, MT>::
   delete_in_bucket(Thread* thread, Bucket* bucket, LOOKUP_FUNC& lookup_f)
 {
   assert(bucket->is_locked(), "Must be locked.");
@@ -568,9 +568,9 @@ inline void ConcurrentHashTable<CONFIG, F>::
   }
 }
-template <typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
-ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline typename ConcurrentHashTable<CONFIG, MT>::Bucket*
+ConcurrentHashTable<CONFIG, MT>::
   get_bucket(uintx hash) const
 {
   InternalTable* table = get_table();
@@ -582,9 +582,9 @@ ConcurrentHashTable<CONFIG, F>::
   return bucket;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline typename ConcurrentHashTable<CONFIG, F>::Bucket*
-ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline typename ConcurrentHashTable<CONFIG, MT>::Bucket*
+ConcurrentHashTable<CONFIG, MT>::
   get_bucket_locked(Thread* thread, const uintx hash)
 {
   Bucket* bucket;
@@ -613,10 +613,10 @@ ConcurrentHashTable<CONFIG, F>::
 }

 // Always called within critical section
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename LOOKUP_FUNC>
-typename ConcurrentHashTable<CONFIG, F>::Node*
-ConcurrentHashTable<CONFIG, F>::
+typename ConcurrentHashTable<CONFIG, MT>::Node*
+ConcurrentHashTable<CONFIG, MT>::
   get_node(const Bucket* const bucket, LOOKUP_FUNC& lookup_f,
            bool* have_dead, size_t* loops) const
 {
@@ -638,8 +638,8 @@ ConcurrentHashTable<CONFIG, F>::
   return node;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline bool ConcurrentHashTable<CONFIG, MT>::
   unzip_bucket(Thread* thread, InternalTable* old_table,
                InternalTable* new_table, size_t even_index, size_t odd_index)
 {
@@ -698,8 +698,8 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return true;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline bool ConcurrentHashTable<CONFIG, MT>::
   internal_shrink_prolog(Thread* thread, size_t log2_size)
 {
   if (!try_resize_lock(thread)) {
@@ -715,8 +715,8 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return true;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline void ConcurrentHashTable<CONFIG, MT>::
   internal_shrink_epilog(Thread* thread)
 {
   assert(_resize_lock_owner == thread, "Re-size lock not held");
@@ -734,8 +734,8 @@ inline void ConcurrentHashTable<CONFIG, F>::
   delete old_table;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline void ConcurrentHashTable<CONFIG, MT>::
   internal_shrink_range(Thread* thread, size_t start, size_t stop)
 {
   // The state is also copied here.
@@ -771,8 +771,8 @@ inline void ConcurrentHashTable<CONFIG, F>::
   }
 }
-template <typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline bool ConcurrentHashTable<CONFIG, MT>::
   internal_shrink(Thread* thread, size_t log2_size)
 {
   if (!internal_shrink_prolog(thread, log2_size)) {
@@ -786,8 +786,8 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return true;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline void ConcurrentHashTable<CONFIG, MT>::
   internal_reset(size_t log2_size)
 {
   assert(_table != nullptr, "table failed");
@@ -800,8 +800,8 @@ inline void ConcurrentHashTable<CONFIG, F>::
   Atomic::release_store(&_table, table);
 }
-template <typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline bool ConcurrentHashTable<CONFIG, MT>::
   internal_grow_prolog(Thread* thread, size_t log2_size)
 {
   // This double checking of _size_limit_reached/is_max_size_reached()
@@ -826,8 +826,8 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return true;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline void ConcurrentHashTable<CONFIG, MT>::
   internal_grow_epilog(Thread* thread)
 {
   assert(_resize_lock_owner == thread, "Should be locked");
@@ -844,8 +844,8 @@ inline void ConcurrentHashTable<CONFIG, F>::
   delete old_table;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline bool ConcurrentHashTable<CONFIG, MT>::
   internal_grow(Thread* thread, size_t log2_size)
 {
   if (!internal_grow_prolog(thread, log2_size)) {
@@ -860,9 +860,9 @@ inline bool ConcurrentHashTable<CONFIG, F>::
 }

 // Always called within critical section
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename LOOKUP_FUNC>
-inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
+inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, MT>::
   internal_get(Thread* thread, LOOKUP_FUNC& lookup_f, bool* grow_hint)
 {
   bool clean = false;
@@ -881,9 +881,9 @@ inline typename CONFIG::Value* ConcurrentHashTable<CONFIG, F>::
   return ret;
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename LOOKUP_FUNC, typename FOUND_FUNC>
-inline bool ConcurrentHashTable<CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, MT>::
   internal_insert_get(Thread* thread, LOOKUP_FUNC& lookup_f, const VALUE& value,
                       FOUND_FUNC& foundf, bool* grow_hint, bool* clean_hint)
 {
@@ -949,9 +949,9 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return ret;
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename FUNC>
-inline bool ConcurrentHashTable<CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, MT>::
   visit_nodes(Bucket* bucket, FUNC& visitor_f)
 {
   Node* current_node = bucket->first();
@@ -965,9 +965,9 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return true;
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename FUNC>
-inline void ConcurrentHashTable<CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, MT>::
   do_scan_locked(Thread* thread, FUNC& scan_f)
 {
   assert(_resize_lock_owner == thread, "Re-size lock not held");
@@ -982,11 +982,11 @@ inline void ConcurrentHashTable<CONFIG, F>::
   } /* ends critical section */
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename EVALUATE_FUNC>
-inline size_t ConcurrentHashTable<CONFIG, F>::
+inline size_t ConcurrentHashTable<CONFIG, MT>::
   delete_check_nodes(Bucket* bucket, EVALUATE_FUNC& eval_f,
-                     size_t num_del, Node** ndel, GrowableArrayCHeap<Node*, F>& extra)
+                     size_t num_del, Node** ndel, GrowableArrayCHeap<Node*, MT>& extra)
 {
   size_t dels = 0;
   Node* const volatile * rem_n_prev = bucket->first_ptr();
@@ -1013,8 +1013,8 @@ inline size_t ConcurrentHashTable<CONFIG, F>::
 }

 // Constructor
-template <typename CONFIG, MEMFLAGS F>
-inline ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline ConcurrentHashTable<CONFIG, MT>::
 ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, bool enable_statistics, Mutex::Rank rank, void* context)
     : _context(context), _new_table(nullptr), _log2_size_limit(log2size_limit),
       _log2_start_size(log2size), _grow_hint(grow_hint),
@@ -1032,8 +1032,8 @@ ConcurrentHashTable(size_t log2size, size_t log2size_limit, size_t grow_hint, bo
   _size_limit_reached = _table->_log2_size == _log2_size_limit;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline ConcurrentHashTable<CONFIG, MT>::
   ~ConcurrentHashTable()
 {
   delete _resize_lock;
@@ -1042,24 +1042,24 @@ inline ConcurrentHashTable<CONFIG, F>::
   delete _stats_rate;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline size_t ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline size_t ConcurrentHashTable<CONFIG, MT>::
   get_mem_size(Thread* thread)
 {
   ScopedCS cs(thread, this);
   return sizeof(*this) + _table->get_mem_size();
 }
-template <typename CONFIG, MEMFLAGS F>
-inline size_t ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline size_t ConcurrentHashTable<CONFIG, MT>::
   get_size_log2(Thread* thread)
 {
   ScopedCS cs(thread, this);
   return _table->_log2_size;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline size_t ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline size_t ConcurrentHashTable<CONFIG, MT>::
   get_dynamic_node_size(size_t value_size)
 {
   assert(Node::is_dynamic_sized_value_compatible(), "VALUE must be compatible");
@@ -1067,8 +1067,8 @@ inline size_t ConcurrentHashTable<CONFIG, F>::
   return sizeof(Node) - sizeof(VALUE) + value_size;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline bool ConcurrentHashTable<CONFIG, MT>::
   shrink(Thread* thread, size_t size_limit_log2)
 {
   size_t tmp = size_limit_log2 == 0 ? _log2_start_size : size_limit_log2;
@@ -1076,25 +1076,25 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return ret;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline void ConcurrentHashTable<CONFIG, MT>::
   unsafe_reset(size_t size_log2)
 {
   size_t tmp = size_log2 == 0 ? _log2_start_size : size_log2;
   internal_reset(tmp);
 }
-template <typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline bool ConcurrentHashTable<CONFIG, MT>::
   grow(Thread* thread, size_t size_limit_log2)
 {
   size_t tmp = size_limit_log2 == 0 ? _log2_size_limit : size_limit_log2;
   return internal_grow(thread, tmp);
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename LOOKUP_FUNC, typename FOUND_FUNC>
-inline bool ConcurrentHashTable<CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, MT>::
   get(Thread* thread, LOOKUP_FUNC& lookup_f, FOUND_FUNC& found_f, bool* grow_hint)
 {
   bool ret = false;
@@ -1107,8 +1107,8 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return ret;
 }
-template <typename CONFIG, MEMFLAGS F>
-inline bool ConcurrentHashTable<CONFIG, F>::
+template <typename CONFIG, MemTag MT>
+inline bool ConcurrentHashTable<CONFIG, MT>::
   unsafe_insert(const VALUE& value) {
   bool dead_hash = false;
   size_t hash = CONFIG::get_hash(value, &dead_hash);
@@ -1128,9 +1128,9 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return true;
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename SCAN_FUNC>
-inline bool ConcurrentHashTable<CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, MT>::
   try_scan(Thread* thread, SCAN_FUNC& scan_f)
 {
   if (!try_resize_lock(thread)) {
@@ -1141,9 +1141,9 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return true;
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename SCAN_FUNC>
-inline void ConcurrentHashTable<CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, MT>::
   do_scan(Thread* thread, SCAN_FUNC& scan_f)
 {
   assert(!SafepointSynchronize::is_at_safepoint(),
@@ -1155,9 +1155,9 @@ inline void ConcurrentHashTable<CONFIG, F>::
   assert(_resize_lock_owner != thread, "Re-size lock held");
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename SCAN_FUNC>
-inline void ConcurrentHashTable<CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, MT>::
   do_safepoint_scan(SCAN_FUNC& scan_f)
 {
   // We only allow this method to be used during a safepoint.
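get_dynamic_node_size() above sizes a node whose trailing VALUE occupies value_size bytes rather than sizeof(VALUE). A standalone sketch of that arithmetic, with stand-in types (it assumes, as the table does, that the value is the node's last member):

#include <cstddef>

struct Value { char payload[1]; };           // stand-in for CONFIG's VALUE
struct Node  { Node* next; Value value; };   // value must be the last member

// Bytes needed for a Node whose trailing Value really occupies value_size
// bytes; mirrors "sizeof(Node) - sizeof(VALUE) + value_size" above.
std::size_t dynamic_node_size(std::size_t value_size) {
  return sizeof(Node) - sizeof(Value) + value_size;
}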
@@ -1179,9 +1179,9 @@ inline void ConcurrentHashTable<CONFIG, F>::
   do_scan_for_range(scan_f, 0, table->_size, table);
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename FUNC>
-inline bool ConcurrentHashTable<CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, MT>::
   do_scan_for_range(FUNC& scan_f, size_t start_idx, size_t stop_idx, InternalTable* table)
 {
   assert(start_idx < stop_idx, "Must be");
@@ -1204,9 +1204,9 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return true;
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
-inline bool ConcurrentHashTable<CONFIG, F>::
+inline bool ConcurrentHashTable<CONFIG, MT>::
   try_bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
 {
   if (!try_resize_lock(thread)) {
@@ -1218,9 +1218,9 @@ inline bool ConcurrentHashTable<CONFIG, F>::
   return true;
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename EVALUATE_FUNC, typename DELETE_FUNC>
-inline void ConcurrentHashTable<CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, MT>::
   bulk_delete(Thread* thread, EVALUATE_FUNC& eval_f, DELETE_FUNC& del_f)
 {
   assert(!SafepointSynchronize::is_at_safepoint(),
@@ -1230,9 +1230,9 @@ inline void ConcurrentHashTable<CONFIG, F>::
   unlock_resize_lock(thread);
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename VALUE_SIZE_FUNC>
-inline TableStatistics ConcurrentHashTable<CONFIG, F>::
+inline TableStatistics ConcurrentHashTable<CONFIG, MT>::
   statistics_calculate(Thread* thread, VALUE_SIZE_FUNC& vs_f)
 {
   constexpr size_t batch_size = 128;
@@ -1268,9 +1268,9 @@ inline TableStatistics ConcurrentHashTable<CONFIG, F>::
   }
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename VALUE_SIZE_FUNC>
-inline TableStatistics ConcurrentHashTable<CONFIG, F>::
+inline TableStatistics ConcurrentHashTable<CONFIG, MT>::
   statistics_get(Thread* thread, VALUE_SIZE_FUNC& vs_f, TableStatistics old)
 {
   if (!try_resize_lock(thread)) {
@@ -1283,9 +1283,9 @@ inline TableStatistics ConcurrentHashTable<CONFIG, F>::
   return ts;
 }
-template <typename CONFIG, MEMFLAGS F>
+template <typename CONFIG, MemTag MT>
 template <typename VALUE_SIZE_FUNC>
-inline void ConcurrentHashTable<CONFIG, F>::
+inline void ConcurrentHashTable<CONFIG, MT>::
   statistics_to(Thread* thread, VALUE_SIZE_FUNC& vs_f, outputStream* st,
                 const char* table_name)
 {
@@ -1300,9 +1300,9 @@ inline void ConcurrentHashTable<CONFIG, F>::
   ts.print(st, table_name);
 }
-template <typename CONFIG, MEMFLAGS F>
-inline void ConcurrentHashTable<CONFIG, F>::
-  rehash_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, F>* to_cht)
+template <typename CONFIG, MemTag MT>
+inline void ConcurrentHashTable<CONFIG, MT>::
+  rehash_nodes_to(Thread* thread, ConcurrentHashTable<CONFIG, MT>* to_cht)
 {
   assert(is_safepoint_safe(), "rehashing is at a safepoint - cannot be resizing");
   assert(_new_table == nullptr || _new_table == POISON_PTR, "Must be null");
diff --git a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp
index db9dcae987c..44b2f91c3f2 100644
--- a/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp
+++ b/src/hotspot/share/utilities/concurrentHashTableTasks.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,10 +35,10 @@
 // operations, which they are serialized with each other.

 // Base class for pause and/or parallel bulk operations.
-template <typename CONFIG, MEMFLAGS F>
-class ConcurrentHashTable<CONFIG, F>::BucketsOperation {
+template <typename CONFIG, MemTag MT>
+class ConcurrentHashTable<CONFIG, MT>::BucketsOperation {
 protected:
-  ConcurrentHashTable<CONFIG, F>* _cht;
+  ConcurrentHashTable<CONFIG, MT>* _cht;

   class InternalTableClaimer {
     volatile size_t _next;
@@ -88,7 +88,7 @@ public:
   InternalTableClaimer _table_claimer;
   bool _is_mt;

-  BucketsOperation(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false)
+  BucketsOperation(ConcurrentHashTable<CONFIG, MT>* cht, bool is_mt = false)
     : _cht(cht), _table_claimer(DEFAULT_TASK_SIZE_LOG2, _cht->_table), _is_mt(is_mt) {}

   // Returns true if you succeeded to claim the range start -> (stop-1).
@@ -146,12 +146,12 @@ public:
 };

 // For doing pausable/parallel bulk delete.
-template <typename CONFIG, MEMFLAGS F>
-class ConcurrentHashTable<CONFIG, F>::BulkDeleteTask :
+template <typename CONFIG, MemTag MT>
+class ConcurrentHashTable<CONFIG, MT>::BulkDeleteTask :
   public BucketsOperation
 {
  public:
-  BulkDeleteTask(ConcurrentHashTable<CONFIG, F>* cht, bool is_mt = false)
+  BulkDeleteTask(ConcurrentHashTable<CONFIG, MT>* cht, bool is_mt = false)
     : BucketsOperation(cht, is_mt) {
   }

   // Before start prepare must be called.
@@ -190,12 +190,12 @@ class ConcurrentHashTable<CONFIG, F>::BulkDeleteTask :
   }
 };

-template <typename CONFIG, MEMFLAGS F>
-class ConcurrentHashTable<CONFIG, F>::GrowTask :
+template <typename CONFIG, MemTag MT>
+class ConcurrentHashTable<CONFIG, MT>::GrowTask :
   public BucketsOperation
 {
  public:
-  GrowTask(ConcurrentHashTable<CONFIG, F>* cht) : BucketsOperation(cht) {
+  GrowTask(ConcurrentHashTable<CONFIG, MT>* cht) : BucketsOperation(cht) {
   }
   // Before start prepare must be called.
   bool prepare(Thread* thread) {
@@ -229,8 +229,8 @@ class ConcurrentHashTable<CONFIG, F>::GrowTask :
   }
 };

-template <typename CONFIG, MEMFLAGS F>
-class ConcurrentHashTable<CONFIG, F>::ScanTask :
+template <typename CONFIG, MemTag MT>
+class ConcurrentHashTable<CONFIG, MT>::ScanTask :
   public BucketsOperation
 {
   // If there is a paused resize, we need to scan items already
@@ -255,11 +255,11 @@ class ConcurrentHashTable<CONFIG, F>::ScanTask :
   }

  public:
-  ScanTask(ConcurrentHashTable<CONFIG, F>* cht, size_t claim_size) : BucketsOperation(cht), _new_table_claimer() {
+  ScanTask(ConcurrentHashTable<CONFIG, MT>* cht, size_t claim_size) : BucketsOperation(cht), _new_table_claimer() {
     set(cht, claim_size);
   }

-  void set(ConcurrentHashTable<CONFIG, F>* cht, size_t claim_size) {
+  void set(ConcurrentHashTable<CONFIG, MT>* cht, size_t claim_size) {
     this->_table_claimer.set(claim_size, cht->get_table());
     InternalTable* new_table = cht->get_new_table();
diff --git a/src/hotspot/share/utilities/debug.cpp b/src/hotspot/share/utilities/debug.cpp
index 9f839fc1a13..119d1cf17fd 100644
--- a/src/hotspot/share/utilities/debug.cpp
+++ b/src/hotspot/share/utilities/debug.cpp
@@ -711,7 +711,7 @@ static ucontext_t g_stored_assertion_context;
 void initialize_assert_poison() {
   char* page = os::reserve_memory(os::vm_page_size());
   if (page) {
-    MemTracker::record_virtual_memory_type(page, mtInternal);
+    MemTracker::record_virtual_memory_tag(page, mtInternal);
     if (os::commit_memory(page, os::vm_page_size(), false) &&
         os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_NONE)) {
       g_assert_poison = page;
diff --git a/src/hotspot/share/utilities/growableArray.cpp b/src/hotspot/share/utilities/growableArray.cpp
index 8e1057dd9f8..60ed0a477e8 100644
--- a/src/hotspot/share/utilities/growableArray.cpp
+++ b/src/hotspot/share/utilities/growableArray.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -42,13 +42,13 @@ void* GrowableArrayArenaAllocator::allocate(int max, int element_size, Arena* ar
   return arena->Amalloc(byte_size);
 }

-void* GrowableArrayCHeapAllocator::allocate(int max, int element_size, MEMFLAGS memflags) {
+void* GrowableArrayCHeapAllocator::allocate(int max, int element_size, MemTag mem_tag) {
   assert(max >= 0, "integer overflow");
   size_t byte_size = element_size * (size_t) max;

-  // memory type has to be specified for C heap allocation
-  assert(memflags != mtNone, "memory type not specified for C heap object");
-  return (void*)AllocateHeap(byte_size, memflags);
+  // memory tag has to be specified for C heap allocation
+  assert(mem_tag != mtNone, "memory tag not specified for C heap object");
+  return (void*)AllocateHeap(byte_size, mem_tag);
 }

 void GrowableArrayCHeapAllocator::deallocate(void* elements) {
diff --git a/src/hotspot/share/utilities/growableArray.hpp b/src/hotspot/share/utilities/growableArray.hpp
index 79d03f58a9e..2eb8e6fd09e 100644
--- a/src/hotspot/share/utilities/growableArray.hpp
+++ b/src/hotspot/share/utilities/growableArray.hpp
@@ -595,7 +595,7 @@ public:
 // CHeap allocator
 class GrowableArrayCHeapAllocator {
 public:
-  static void* allocate(int max, int element_size, MEMFLAGS memflags);
+  static void* allocate(int max, int element_size, MemTag mem_tag);
   static void deallocate(void* mem);
 };

@@ -628,9 +628,9 @@ class GrowableArrayMetadata {
   }

   // CHeap allocation
-  static uintptr_t bits(MEMFLAGS memflags) {
-    assert(memflags != mtNone, "Must provide a proper MEMFLAGS");
-    return (uintptr_t(memflags) << 1) | 1;
+  static uintptr_t bits(MemTag mem_tag) {
+    assert(mem_tag != mtNone, "Must provide a proper MemTag");
+    return (uintptr_t(mem_tag) << 1) | 1;
   }

   // Arena allocation
@@ -653,8 +653,8 @@ public:
   }

   // CHeap allocation
-  GrowableArrayMetadata(MEMFLAGS memflags) :
-    _bits(bits(memflags))
+  GrowableArrayMetadata(MemTag mem_tag) :
+    _bits(bits(mem_tag))
     debug_only(COMMA _nesting_check(false)) {
   }

@@ -683,14 +683,14 @@ public:
   bool on_arena() const { return (_bits & 1) == 0 && _bits != 0; }

   Arena* arena() const { return (Arena*)_bits; }
-  MEMFLAGS memflags() const { return MEMFLAGS(_bits >> 1); }
+  MemTag mem_tag() const { return MemTag(_bits >> 1); }
 };

 // THE GrowableArray.
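GrowableArrayMetadata above packs its three allocation modes into one uintptr_t: zero means resource-area allocation, an (aligned) Arena* keeps the low bit clear, and a C-heap tag is stored shifted up with the low bit set, which is why bits(MemTag) asserts a real tag. A standalone sketch of that tagged-word encoding, with stand-in names:

#include <cassert>
#include <cstdint>

enum class MemTag : std::uint8_t { mtNone = 0, mtInternal = 1 };
struct Arena {};

struct Metadata {
  std::uintptr_t _bits;

  // C-heap: tag shifted up, low bit set, so it can never look like a pointer.
  static std::uintptr_t bits(MemTag tag) {
    assert(tag != MemTag::mtNone);
    return (static_cast<std::uintptr_t>(tag) << 1) | 1;
  }
  // Arena: the pointer itself; pointer alignment keeps the low bit clear.
  static std::uintptr_t bits(Arena* arena) {
    return reinterpret_cast<std::uintptr_t>(arena);
  }

  bool on_resource_area() const { return _bits == 0; }
  bool on_c_heap()        const { return (_bits & 1) == 1; }
  bool on_arena()         const { return (_bits & 1) == 0 && _bits != 0; }
  MemTag tag()            const { return static_cast<MemTag>(_bits >> 1); }
};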
 //
 // Supports multiple allocation strategies:
 //  - Resource stack allocation: if no extra argument is provided
-//  - CHeap allocation: if memflags is provided
+//  - CHeap allocation: if mem_tag is provided
 //  - Arena allocation: if an arena is provided
 //
 // There are some drawbacks of using GrowableArray, that are removed in some
@@ -712,8 +712,8 @@ class GrowableArray : public GrowableArrayWithAllocator<E, GrowableArray<E>> {
     return (E*)GrowableArrayResourceAllocator::allocate(max, sizeof(E));
   }

-  static E* allocate(int max, MEMFLAGS memflags) {
-    return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), memflags);
+  static E* allocate(int max, MemTag mem_tag) {
+    return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag);
   }

   static E* allocate(int max, Arena* arena) {
@@ -736,7 +736,7 @@ class GrowableArray : public GrowableArrayWithAllocator<E, GrowableArray<E>> {
     }

     if (on_C_heap()) {
-      return allocate(this->_capacity, _metadata.memflags());
+      return allocate(this->_capacity, _metadata.mem_tag());
     }

     assert(on_arena(), "Sanity");
@@ -760,11 +760,11 @@ public:
     init_checks();
   }

-  GrowableArray(int initial_capacity, MEMFLAGS memflags) :
+  GrowableArray(int initial_capacity, MemTag mem_tag) :
     GrowableArrayWithAllocator<E, GrowableArray<E>>(
-        allocate(initial_capacity, memflags),
+        allocate(initial_capacity, mem_tag),
         initial_capacity),
-    _metadata(memflags) {
+    _metadata(mem_tag) {
     init_checks();
   }

@@ -776,11 +776,11 @@ public:
     init_checks();
   }

-  GrowableArray(int initial_capacity, int initial_len, const E& filler, MEMFLAGS memflags) :
+  GrowableArray(int initial_capacity, int initial_len, const E& filler, MemTag mem_tag) :
     GrowableArrayWithAllocator<E, GrowableArray<E>>(
-        allocate(initial_capacity, memflags),
+        allocate(initial_capacity, mem_tag),
         initial_capacity, initial_len, filler),
-    _metadata(memflags) {
+    _metadata(mem_tag) {
     init_checks();
   }

@@ -799,25 +799,25 @@ public:
   }
 };

-// Leaner GrowableArray for CHeap backed data arrays, with compile-time decided MEMFLAGS.
-template <typename E, MEMFLAGS F>
-class GrowableArrayCHeap : public GrowableArrayWithAllocator<E, GrowableArrayCHeap<E, F> > {
-  friend class GrowableArrayWithAllocator<E, GrowableArrayCHeap<E, F> >;
+// Leaner GrowableArray for CHeap backed data arrays, with compile-time decided MemTag.
+template <typename E, MemTag MT>
+class GrowableArrayCHeap : public GrowableArrayWithAllocator<E, GrowableArrayCHeap<E, MT> > {
+  friend class GrowableArrayWithAllocator<E, GrowableArrayCHeap<E, MT> >;

-  STATIC_ASSERT(F != mtNone);
+  STATIC_ASSERT(MT != mtNone);

-  static E* allocate(int max, MEMFLAGS flags) {
+  static E* allocate(int max, MemTag mem_tag) {
     if (max == 0) {
       return nullptr;
     }

-    return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), flags);
+    return (E*)GrowableArrayCHeapAllocator::allocate(max, sizeof(E), mem_tag);
   }

   NONCOPYABLE(GrowableArrayCHeap);

   E* allocate() {
-    return allocate(this->_capacity, F);
+    return allocate(this->_capacity, MT);
   }

   void deallocate(E* mem) {
@@ -826,13 +826,13 @@ class GrowableArrayCHeap : public GrowableArrayWithAllocator<E, GrowableArrayCH
 public:
   GrowableArrayCHeap(int initial_capacity) :
-    GrowableArrayWithAllocator<E, GrowableArrayCHeap<E, F> >(
-        allocate(initial_capacity, F),
+    GrowableArrayWithAllocator<E, GrowableArrayCHeap<E, MT> >(
+        allocate(initial_capacity, MT),
         initial_capacity) {}

   GrowableArrayCHeap(int initial_capacity, int initial_len, const E& filler) :
-    GrowableArrayWithAllocator<E, GrowableArrayCHeap<E, F> >(
-        allocate(initial_capacity, F),
+    GrowableArrayWithAllocator<E, GrowableArrayCHeap<E, MT> >(
+        allocate(initial_capacity, MT),
         initial_capacity, initial_len, filler) {}

   ~GrowableArrayCHeap() {
@@ -840,11 +840,11 @@ public:
   }

   void* operator new(size_t size) {
-    return AnyObj::operator new(size, F);
+    return AnyObj::operator new(size, MT);
   }

   void* operator new(size_t size, const std::nothrow_t& nothrow_constant) throw() {
-    return AnyObj::operator new(size, nothrow_constant, F);
+    return AnyObj::operator new(size, nothrow_constant, MT);
   }
   void operator delete(void *p) {
     AnyObj::operator delete(p);
diff --git a/src/hotspot/share/utilities/linkedlist.hpp b/src/hotspot/share/utilities/linkedlist.hpp
index eec7ea1e48d..f0802b754ff 100644
--- a/src/hotspot/share/utilities/linkedlist.hpp
+++ b/src/hotspot/share/utilities/linkedlist.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -138,7 +138,7 @@ template <class E> class LinkedList : public AnyObj {

 // A linked list implementation.
 // The linked list can be allocated in various type of memory: C heap, arena and resource area, etc.
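GrowableArrayCHeap above carries its memory tag as a compile-time template parameter (MT) and statically rejects mtNone, so every instance is attributed to an NMT category without any per-object storage. A standalone sketch of that idea, with assumed names rather than the HotSpot classes:

#include <cstddef>
#include <cstdint>
#include <cstdlib>

enum class MemTag : std::uint8_t { mtNone, mtInternal, mtGC };

// The tag is part of the type, so attribution costs no per-object storage,
// mirroring GrowableArrayCHeap<E, MT> above.
template <typename E, MemTag MT>
class TaggedCHeapArray {
  static_assert(MT != MemTag::mtNone, "C-heap data needs a real tag");
  E* _data;
public:
  explicit TaggedCHeapArray(std::size_t n)
      : _data(static_cast<E*>(std::malloc(sizeof(E) * n))) {
    // A real implementation would record (MT, n * sizeof(E)) with a tracker.
  }
  ~TaggedCHeapArray() { std::free(_data); }
};

int main() {
  TaggedCHeapArray<int, MemTag::mtGC> a(16); // tag decided at compile time
  (void)a;
  return 0;
}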
 template <class E, AnyObj::allocation_type T = AnyObj::C_HEAP,
-          MEMFLAGS F = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
+          MemTag MT = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
 class LinkedListImpl : public LinkedList<E> {
 protected:
   Arena* _arena;
@@ -342,9 +342,9 @@ template <class E, AnyObj::allocation_type T = AnyObj::C_HEAP,
       if (alloc_failmode == AllocFailStrategy::RETURN_NULL) {
-        return new(std::nothrow, F) LinkedListNode<E>(e);
+        return new(std::nothrow, MT) LinkedListNode<E>(e);
       } else {
-        return new(F) LinkedListNode<E>(e);
+        return new(MT) LinkedListNode<E>(e);
       }
     }
     default:
@@ -365,14 +365,14 @@ template <class E, int (*FUNC)(const E&, const E&),
-          MEMFLAGS F = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
-  class SortedLinkedList : public LinkedListImpl<E, T, F, alloc_failmode> {
+          MemTag MT = mtNMT, AllocFailType alloc_failmode = AllocFailStrategy::RETURN_NULL>
+  class SortedLinkedList : public LinkedListImpl<E, T, MT, alloc_failmode> {
   public:
     SortedLinkedList() { }
-    SortedLinkedList(Arena* a) : LinkedListImpl<E, T, F, alloc_failmode>(a) { }
+    SortedLinkedList(Arena* a) : LinkedListImpl<E, T, MT, alloc_failmode>(a) { }

     virtual LinkedListNode<E>* add(const E& e) {
-      return LinkedListImpl<E, T, F, alloc_failmode>::add(e);
+      return LinkedListImpl<E, T, MT, alloc_failmode>::add(e);
     }

     virtual void move(LinkedList<E>* list) {
@@ -409,7 +409,7 @@ template <class E, int (*FUNC)(const E&, const E&),
     virtual bool add(LinkedList<E>* list) {
-      return LinkedListImpl<E, T, F, alloc_failmode>::add(list);
+      return LinkedListImpl<E, T, MT, alloc_failmode>::add(list);
     }

     virtual LinkedListNode<E>* find_node(const E& e) {
diff --git a/src/hotspot/share/utilities/objectBitSet.hpp b/src/hotspot/share/utilities/objectBitSet.hpp
index 002e107972c..124188cd321 100644
--- a/src/hotspot/share/utilities/objectBitSet.hpp
+++ b/src/hotspot/share/utilities/objectBitSet.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2014, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -39,8 +39,8 @@ class MemRegion;
  * allocated on-demand only, in fragments covering 64M heap ranges. Fragments are never deleted
  * during the lifetime of the ObjectBitSet. The underlying memory is allocated from C-Heap.
  */
-template<MEMFLAGS F>
-class ObjectBitSet : public CHeapObj<F> {
+template<MemTag MT>
+class ObjectBitSet : public CHeapObj<MT> {
   const static size_t _bitmap_granularity_shift = 26; // 64M
   const static size_t _bitmap_granularity_size = (size_t)1 << _bitmap_granularity_shift;
   const static size_t _bitmap_granularity_mask = _bitmap_granularity_size - 1;
@@ -52,7 +52,7 @@ class ObjectBitSet : public CHeapObj<MT> {
     return hash ^ (hash >> 3);
   }

-  typedef ResizeableResourceHashtable<uintptr_t, CHeapBitMap*, AnyObj::C_HEAP, F,
+  typedef ResizeableResourceHashtable<uintptr_t, CHeapBitMap*, AnyObj::C_HEAP, MT,
       ObjectBitSet::_bitmap_hash> BitMapFragmentTable;

   CHeapBitMap* get_fragment_bits(uintptr_t addr);
@@ -81,8 +81,8 @@ class ObjectBitSet : public CHeapObj<MT> {
   }
 };

-template<MEMFLAGS F>
-class ObjectBitSet<F>::BitMapFragment : public CHeapObj<F> {
+template<MemTag MT>
+class ObjectBitSet<MT>::BitMapFragment : public CHeapObj<MT> {
   CHeapBitMap _bits;
   BitMapFragment* _next;
diff --git a/src/hotspot/share/utilities/objectBitSet.inline.hpp b/src/hotspot/share/utilities/objectBitSet.inline.hpp
index 144f886cbd9..482a97bc2d1 100644
--- a/src/hotspot/share/utilities/objectBitSet.inline.hpp
+++ b/src/hotspot/share/utilities/objectBitSet.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2019, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2019, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -30,22 +30,22 @@
 #include "memory/memRegion.hpp"
 #include "utilities/bitMap.inline.hpp"

-template<MEMFLAGS F>
-ObjectBitSet<F>::BitMapFragment::BitMapFragment(uintptr_t granule, BitMapFragment* next) :
-        _bits(_bitmap_granularity_size >> LogMinObjAlignmentInBytes, F, true /* clear */),
+template<MemTag MT>
+ObjectBitSet<MT>::BitMapFragment::BitMapFragment(uintptr_t granule, BitMapFragment* next) :
+        _bits(_bitmap_granularity_size >> LogMinObjAlignmentInBytes, MT, true /* clear */),
         _next(next) {
 }

-template<MEMFLAGS F>
-ObjectBitSet<F>::ObjectBitSet() :
+template<MemTag MT>
+ObjectBitSet<MT>::ObjectBitSet() :
     _bitmap_fragments(32, 8*K),
     _fragment_list(nullptr),
     _last_fragment_bits(nullptr),
     _last_fragment_granule(UINTPTR_MAX) {
 }

-template<MEMFLAGS F>
-ObjectBitSet<F>::~ObjectBitSet() {
+template<MemTag MT>
+ObjectBitSet<MT>::~ObjectBitSet() {
   BitMapFragment* current = _fragment_list;
   while (current != nullptr) {
     BitMapFragment* next = current->next();
@@ -56,13 +56,13 @@ ObjectBitSet<MT>::~ObjectBitSet() {
   // ResizeableResourceHashtableStorage deletes the table.
 }

-template<MEMFLAGS F>
-inline BitMap::idx_t ObjectBitSet<F>::addr_to_bit(uintptr_t addr) const {
+template<MemTag MT>
+inline BitMap::idx_t ObjectBitSet<MT>::addr_to_bit(uintptr_t addr) const {
   return (addr & _bitmap_granularity_mask) >> LogMinObjAlignmentInBytes;
 }

-template<MEMFLAGS F>
-inline CHeapBitMap* ObjectBitSet<F>::get_fragment_bits(uintptr_t addr) {
+template<MemTag MT>
+inline CHeapBitMap* ObjectBitSet<MT>::get_fragment_bits(uintptr_t addr) {
   uintptr_t granule = addr >> _bitmap_granularity_shift;
   if (granule == _last_fragment_granule) {
     return _last_fragment_bits;
@@ -86,15 +86,15 @@ inline CHeapBitMap* ObjectBitSet<MT>::get_fragment_bits(uintptr_t addr) {
   return bits;
 }

-template<MEMFLAGS F>
-inline void ObjectBitSet<F>::mark_obj(uintptr_t addr) {
+template<MemTag MT>
+inline void ObjectBitSet<MT>::mark_obj(uintptr_t addr) {
   CHeapBitMap* bits = get_fragment_bits(addr);
   const BitMap::idx_t bit = addr_to_bit(addr);
   bits->set_bit(bit);
 }

-template<MEMFLAGS F>
-inline bool ObjectBitSet<F>::is_marked(uintptr_t addr) {
+template<MemTag MT>
+inline bool ObjectBitSet<MT>::is_marked(uintptr_t addr) {
   CHeapBitMap* bits = get_fragment_bits(addr);
   const BitMap::idx_t bit = addr_to_bit(addr);
   return bits->at(bit);
diff --git a/src/hotspot/share/utilities/resizeableResourceHash.hpp b/src/hotspot/share/utilities/resizeableResourceHash.hpp
index 03ae8cec4c1..b0c992bf1ef 100644
--- a/src/hotspot/share/utilities/resizeableResourceHash.hpp
+++ b/src/hotspot/share/utilities/resizeableResourceHash.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -30,7 +30,7 @@
 template<
     typename K, typename V,
     AnyObj::allocation_type ALLOC_TYPE,
-    MEMFLAGS MEM_TYPE>
+    MemTag MEM_TAG>
 class ResizeableResourceHashtableStorage : public AnyObj {
   using Node = ResourceHashtableNode<K, V>;

@@ -52,7 +52,7 @@ protected:
   Node** alloc_table(unsigned table_size) {
     Node** table;
     if (ALLOC_TYPE == C_HEAP) {
-      table = NEW_C_HEAP_ARRAY(Node*, table_size, MEM_TYPE);
+      table = NEW_C_HEAP_ARRAY(Node*, table_size, MEM_TAG);
     } else {
       table = NEW_RESOURCE_ARRAY(Node*, table_size);
     }
@@ -72,17 +72,17 @@ protected:
 template<
     typename K, typename V,
     AnyObj::allocation_type ALLOC_TYPE = AnyObj::RESOURCE_AREA,
-    MEMFLAGS MEM_TYPE = mtInternal,
+    MemTag MEM_TAG = mtInternal,
     unsigned (*HASH)  (K const&)           = primitive_hash<K>,
     bool    (*EQUALS)(K const&, K const&) = primitive_equals<K>
     >
 class ResizeableResourceHashtable : public ResourceHashtableBase<
-    ResizeableResourceHashtableStorage<K, V, ALLOC_TYPE, MEM_TYPE>,
-    K, V, ALLOC_TYPE, MEM_TYPE, HASH, EQUALS> {
+    ResizeableResourceHashtableStorage<K, V, ALLOC_TYPE, MEM_TAG>,
+    K, V, ALLOC_TYPE, MEM_TAG, HASH, EQUALS> {
   unsigned _max_size;

-  using BASE = ResourceHashtableBase<ResizeableResourceHashtableStorage<K, V, ALLOC_TYPE, MEM_TYPE>,
-                                     K, V, ALLOC_TYPE, MEM_TYPE, HASH, EQUALS>;
+  using BASE = ResourceHashtableBase<ResizeableResourceHashtableStorage<K, V, ALLOC_TYPE, MEM_TAG>,
+                                     K, V, ALLOC_TYPE, MEM_TAG, HASH, EQUALS>;
   using Node = ResourceHashtableNode<K, V>;
   NONCOPYABLE(ResizeableResourceHashtable);
diff --git a/src/hotspot/share/utilities/resourceHash.hpp b/src/hotspot/share/utilities/resourceHash.hpp
index b449dc6ea6c..a99239b21a0 100644
--- a/src/hotspot/share/utilities/resourceHash.hpp
+++ b/src/hotspot/share/utilities/resourceHash.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -54,7 +54,7 @@ template<
     class STORAGE,
     typename K, typename V,
     AnyObj::allocation_type ALLOC_TYPE,
-    MEMFLAGS MEM_TYPE,
+    MemTag MEM_TAG,
     unsigned (*HASH)  (K const&),
     bool    (*EQUALS)(K const&, K const&)
     >
@@ -153,7 +153,7 @@ class ResourceHashtableBase : public STORAGE {
     assert(*lookup_node(hv, key) == nullptr, "use put_if_absent");
     Node** ptr = bucket_at(index);
     if (ALLOC_TYPE == AnyObj::C_HEAP) {
-      *ptr = new (MEM_TYPE) Node(hv, key, value, *ptr);
+      *ptr = new (MEM_TAG) Node(hv, key, value, *ptr);
     } else {
       *ptr = new Node(hv, key, value, *ptr);
     }
@@ -174,7 +174,7 @@ class ResourceHashtableBase : public STORAGE {
       return false;
     } else {
       if (ALLOC_TYPE == AnyObj::C_HEAP) {
-        *ptr = new (MEM_TYPE) Node(hv, key, value);
+        *ptr = new (MEM_TAG) Node(hv, key, value);
       } else {
         *ptr = new Node(hv, key, value);
       }
@@ -193,7 +193,7 @@ class ResourceHashtableBase : public STORAGE {
     Node** ptr = lookup_node(hv, key);
     if (*ptr == nullptr) {
       if (ALLOC_TYPE == AnyObj::C_HEAP) {
-        *ptr = new (MEM_TYPE) Node(hv, key);
+        *ptr = new (MEM_TAG) Node(hv, key);
       } else {
         *ptr = new Node(hv, key);
       }
@@ -215,7 +215,7 @@ class ResourceHashtableBase : public STORAGE {
     Node** ptr = lookup_node(hv, key);
     if (*ptr == nullptr) {
       if (ALLOC_TYPE == AnyObj::C_HEAP) {
-        *ptr = new (MEM_TYPE) Node(hv, key, value);
+        *ptr = new (MEM_TAG) Node(hv, key, value);
       } else {
         *ptr = new Node(hv, key, value);
       }
@@ -364,17 +364,17 @@ template<
     typename K, typename V,
     unsigned SIZE = 256,
     AnyObj::allocation_type ALLOC_TYPE = AnyObj::RESOURCE_AREA,
-    MEMFLAGS MEM_TYPE = mtInternal,
+    MemTag MEM_TAG = mtInternal,
     unsigned (*HASH)  (K const&)           = primitive_hash<K>,
     bool    (*EQUALS)(K const&, K const&) = primitive_equals<K>
     >
 class ResourceHashtable : public ResourceHashtableBase<
   FixedResourceHashtableStorage<K, V, SIZE>,
-    K, V, ALLOC_TYPE, MEM_TYPE, HASH, EQUALS> {
+    K, V, ALLOC_TYPE, MEM_TAG, HASH, EQUALS> {
   NONCOPYABLE(ResourceHashtable);
 public:
   ResourceHashtable() : ResourceHashtableBase<FixedResourceHashtableStorage<K, V, SIZE>,
-                                              K, V, ALLOC_TYPE, MEM_TYPE, HASH, EQUALS>() {}
+                                              K, V, ALLOC_TYPE, MEM_TAG, HASH, EQUALS>() {}
 };

 #endif // SHARE_UTILITIES_RESOURCEHASH_HPP
diff --git a/src/hotspot/share/utilities/stack.hpp b/src/hotspot/share/utilities/stack.hpp
index 8b2ce3802e0..1a9df130dff 100644
--- a/src/hotspot/share/utilities/stack.hpp
+++ b/src/hotspot/share/utilities/stack.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -51,11 +51,11 @@
 // implementation in class Stack assumes that alloc() will terminate the process
 // if the allocation fails.

-template <class E, MEMFLAGS F> class StackIterator;
+template <class E, MemTag MT> class StackIterator;

 // StackBase holds common data/methods that don't depend on the element type,
 // factored out to reduce template code duplication.
-template <MEMFLAGS F> class StackBase
+template <MemTag MT> class StackBase
 {
 public:
   size_t segment_size()   const { return _seg_size; } // Elements per segment.
@@ -85,11 +85,11 @@ protected:
   size_t _cache_size;     // Number of segments in the cache.
 };

-template <class E, MEMFLAGS F>
-class Stack: public StackBase<F>
+template <class E, MemTag MT>
+class Stack: public StackBase<MT>
 {
 public:
-  friend class StackIterator<E, F>;
+  friend class StackIterator<E, MT>;

   // Number of elements that fit in 4K bytes minus the size of two pointers
   // (link field and malloc header).
@@ -160,13 +160,13 @@ private:
   E* _cache;      // Segment cache to avoid ping-ponging.
 };

-template <class E, MEMFLAGS F>
+template <class E, MemTag MT>
 class StackIterator: public StackObj
 {
 public:
-  StackIterator(Stack<E, F>& stack): _stack(stack) { sync(); }
+  StackIterator(Stack<E, MT>& stack): _stack(stack) { sync(); }

-  Stack<E, F>& stack() const { return _stack; }
+  Stack<E, MT>& stack() const { return _stack; }

   bool is_empty() const { return _cur_seg == nullptr; }

@@ -176,7 +176,7 @@ public:
   void sync(); // Sync the iterator's state to the stack's current state.

 private:
-  Stack<E, F>& _stack;
+  Stack<E, MT>& _stack;
   size_t _cur_seg_size;
   E*     _cur_seg;
   size_t _full_seg_size;
diff --git a/src/hotspot/share/utilities/stack.inline.hpp b/src/hotspot/share/utilities/stack.inline.hpp
index bed33bd6652..49ccf416629 100644
--- a/src/hotspot/share/utilities/stack.inline.hpp
+++ b/src/hotspot/share/utilities/stack.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
 #include "utilities/align.hpp"
 #include "utilities/copy.hpp"

-template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_cache_size,
+template <MemTag MT> StackBase<MT>::StackBase(size_t segment_size, size_t max_cache_size,
                                               size_t max_size):
   _seg_size(segment_size),
   _max_size(adjust_max_size(max_size, segment_size)),
@@ -40,7 +40,7 @@ template <MEMFLAGS F> StackBase<F>::StackBase(size_t segment_size, size_t max_ca
   assert(_max_size % _seg_size == 0, "not a multiple");
 }

-template <MEMFLAGS F> size_t StackBase<F>::adjust_max_size(size_t max_size, size_t seg_size)
+template <MemTag MT> size_t StackBase<MT>::adjust_max_size(size_t max_size, size_t seg_size)
 {
   assert(seg_size > 0, "cannot be 0");
   assert(max_size >= seg_size || max_size == 0, "max_size too small");
@@ -51,15 +51,15 @@ template <MEMFLAGS F> size_t StackBase<F>::adjust_max_size(size_t max_size, size
   return (max_size + seg_size - 1) / seg_size * seg_size;
 }

-template <class E, MEMFLAGS F>
-Stack<E, F>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
-  StackBase<F>(adjust_segment_size(segment_size), max_cache_size, max_size)
+template <class E, MemTag MT>
+Stack<E, MT>::Stack(size_t segment_size, size_t max_cache_size, size_t max_size):
+  StackBase<MT>(adjust_segment_size(segment_size), max_cache_size, max_size)
 {
   reset(true);
 }

-template <class E, MEMFLAGS F>
-void Stack<E, F>::push(E item)
+template <class E, MemTag MT>
+void Stack<E, MT>::push(E item)
 {
   assert(!is_full(), "pushing onto a full stack");
   size_t index = this->_cur_seg_size;
@@ -71,8 +71,8 @@ void Stack<E, F>::push(E item)
   this->_cur_seg_size = index + 1;
 }

-template <class E, MEMFLAGS F>
-E Stack<E, F>::pop()
+template <class E, MemTag MT>
+E Stack<E, MT>::pop()
 {
   assert(!is_empty(), "popping from an empty stack");
   // _cur_seg_size is never 0 if not empty.  pop that empties a
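Stack stores each segment's link pointer after the element slots; link_offset() and segment_bytes() in the next hunk compute its position, padding the element area up to pointer alignment. A standalone sketch of that layout arithmetic (assumes, as the pointer case guarantees, a power-of-two alignment):

#include <cstddef>

static std::size_t align_up(std::size_t v, std::size_t a) {
  return (v + a - 1) & ~(a - 1); // a must be a power of two
}

template <class E>
std::size_t link_offset(std::size_t seg_size) {
  // Bytes of element storage, padded so the link pointer is aligned.
  return align_up(seg_size * sizeof(E), sizeof(E*));
}

template <class E>
std::size_t segment_bytes(std::size_t seg_size) {
  // One segment = element slots plus the trailing link pointer.
  return link_offset<E>(seg_size) + sizeof(E*);
}

template <class E>
E** link_addr(E* seg, std::size_t seg_size) {
  // Where the link to the previous segment lives inside a segment.
  return reinterpret_cast<E**>(reinterpret_cast<char*>(seg) + link_offset<E>(seg_size));
}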
@@ -85,16 +85,16 @@ E Stack<E, F>::pop()
   return result;
 }

-template <class E, MEMFLAGS F>
-void Stack<E, F>::clear(bool clear_cache)
+template <class E, MemTag MT>
+void Stack<E, MT>::clear(bool clear_cache)
 {
   free_segments(_cur_seg);
   if (clear_cache) free_segments(_cache);
   reset(clear_cache);
 }

-template <class E, MEMFLAGS F>
-size_t Stack<E, F>::adjust_segment_size(size_t seg_size)
+template <class E, MemTag MT>
+size_t Stack<E, MT>::adjust_segment_size(size_t seg_size)
 {
   const size_t elem_sz = sizeof(E);
   const size_t ptr_sz = sizeof(E*);
@@ -105,45 +105,45 @@ size_t Stack<E, F>::adjust_segment_size(size_t seg_size)
   return seg_size;
 }

-template <class E, MEMFLAGS F>
-size_t Stack<E, F>::link_offset() const
+template <class E, MemTag MT>
+size_t Stack<E, MT>::link_offset() const
 {
   return align_up(this->_seg_size * sizeof(E), sizeof(E*));
 }

-template <class E, MEMFLAGS F>
-size_t Stack<E, F>::segment_bytes() const
+template <class E, MemTag MT>
+size_t Stack<E, MT>::segment_bytes() const
 {
   return link_offset() + sizeof(E*);
 }

-template <class E, MEMFLAGS F>
-E** Stack<E, F>::link_addr(E* seg) const
+template <class E, MemTag MT>
+E** Stack<E, MT>::link_addr(E* seg) const
 {
   return (E**) ((char*)seg + link_offset());
 }

-template <class E, MEMFLAGS F>
-E* Stack<E, F>::get_link(E* seg) const
+template <class E, MemTag MT>
+E* Stack<E, MT>::get_link(E* seg) const
 {
   return *link_addr(seg);
 }

-template <class E, MEMFLAGS F>
-E* Stack<E, F>::set_link(E* new_seg, E* old_seg)
+template <class E, MemTag MT>
+E* Stack<E, MT>::set_link(E* new_seg, E* old_seg)
 {
   *link_addr(new_seg) = old_seg;
   return new_seg;
 }

-template <class E, MEMFLAGS F>
-E* Stack<E, F>::alloc(size_t bytes)
+template <class E, MemTag MT>
+E* Stack<E, MT>::alloc(size_t bytes)
 {
-  return (E*) NEW_C_HEAP_ARRAY(char, bytes, F);
+  return (E*) NEW_C_HEAP_ARRAY(char, bytes, MT);
 }

-template <class E, MEMFLAGS F>
-void Stack<E, F>::free(E* addr, size_t bytes)
+template <class E, MemTag MT>
+void Stack<E, MT>::free(E* addr, size_t bytes)
 {
   FREE_C_HEAP_ARRAY(char, (char*) addr);
 }
@@ -152,8 +152,8 @@ void Stack<E, F>::free(E* addr, size_t bytes)
 // code gets inlined. This is generally good, but when too much code has
 // been inlined, further inlining in the caller might be inhibited. So
 // prevent infrequent slow path segment manipulation from being inlined.
-template <class E, MEMFLAGS F>
-NOINLINE void Stack<E, F>::push_segment()
+template <class E, MemTag MT>
+NOINLINE void Stack<E, MT>::push_segment()
 {
   assert(this->_cur_seg_size == this->_seg_size, "current segment is not full");
   E* next;
@@ -173,8 +173,8 @@ NOINLINE void Stack<E, F>::push_segment()
   DEBUG_ONLY(verify(at_empty_transition);)
 }

-template <class E, MEMFLAGS F>
-NOINLINE void Stack<E, F>::pop_segment()
+template <class E, MemTag MT>
+NOINLINE void Stack<E, MT>::pop_segment()
 {
   assert(this->_cur_seg_size == 0, "current segment is not empty");
   E* const prev = get_link(_cur_seg);
@@ -194,8 +194,8 @@ NOINLINE void Stack<E, F>::pop_segment()
   DEBUG_ONLY(verify(at_empty_transition);)
 }

-template <class E, MEMFLAGS F>
-void Stack<E, F>::free_segments(E* seg)
+template <class E, MemTag MT>
+void Stack<E, MT>::free_segments(E* seg)
 {
   const size_t bytes = segment_bytes();
   while (seg != nullptr) {
@@ -205,8 +205,8 @@ void Stack<E, F>::free_segments(E* seg)
   }
 }

-template <class E, MEMFLAGS F>
-void Stack<E, F>::reset(bool reset_cache)
+template <class E, MemTag MT>
+void Stack<E, MT>::reset(bool reset_cache)
 {
   this->_cur_seg_size = this->_seg_size; // So push() will alloc a new segment.
   this->_full_seg_size = 0;
@@ -218,8 +218,8 @@ void Stack<E, F>::reset(bool reset_cache)
 }

 #ifdef ASSERT
-template <class E, MEMFLAGS F>
-void Stack<E, F>::verify(bool at_empty_transition) const
+template <class E, MemTag MT>
+void Stack<E, MT>::verify(bool at_empty_transition) const
 {
   assert(size() <= this->max_size(), "stack exceeded bounds");
   assert(this->cache_size() <= this->max_cache_size(), "cache exceeded bounds");
@@ -234,8 +234,8 @@ void Stack<E, F>::verify(bool at_empty_transition) const
   }
 }

-template <class E, MEMFLAGS F>
-void Stack<E, F>::zap_segment(E* seg, bool zap_link_field) const
+template <class E, MemTag MT>
+void Stack<E, MT>::zap_segment(E* seg, bool zap_link_field) const
 {
   if (!ZapStackSegments) return;
   const size_t zap_bytes = segment_bytes() - (zap_link_field ? 0 : sizeof(E*));
@@ -243,16 +243,16 @@ void Stack<E, F>::zap_segment(E* seg, bool zap_link_field) const
 }
 #endif

-template <class E, MEMFLAGS F>
-void StackIterator<E, F>::sync()
+template <class E, MemTag MT>
+void StackIterator<E, MT>::sync()
 {
   _full_seg_size = _stack._full_seg_size;
   _cur_seg_size = _stack._cur_seg_size;
   _cur_seg = _stack._cur_seg;
 }

-template <class E, MEMFLAGS F>
-E* StackIterator<E, F>::next_addr()
+template <class E, MemTag MT>
+E* StackIterator<E, MT>::next_addr()
 {
   assert(!is_empty(), "no items left");
   if (_cur_seg_size == 1) {
diff --git a/test/hotspot/gtest/nmt/test_arrayWithFreeList.cpp b/test/hotspot/gtest/nmt/test_arrayWithFreeList.cpp
index a2110e9e22e..e1b00850582 100644
--- a/test/hotspot/gtest/nmt/test_arrayWithFreeList.cpp
+++ b/test/hotspot/gtest/nmt/test_arrayWithFreeList.cpp
@@ -74,7 +74,7 @@ struct LL {
 // That's a very fancy word that means that a templated type like Foo can be passed around like only Foo at first
 // and then be 'applied' to some E. Think of it like passing around a lambda or function pointer, but on a template level,
 // where Foo is a function that can be called on some type with the return type being Foo.
-template<MEMFLAGS MT, template<typename, MEMFLAGS> class Allocator>
+template<MemTag MT, template<typename, MemTag> class Allocator>
 struct LL2 {
   struct Node;
   using NodeAllocator = Allocator<Node, MT>;
diff --git a/test/hotspot/gtest/nmt/test_nmt_cornercases.cpp b/test/hotspot/gtest/nmt/test_nmt_cornercases.cpp
index f735022ea2b..84ed2858952 100644
--- a/test/hotspot/gtest/nmt/test_nmt_cornercases.cpp
+++ b/test/hotspot/gtest/nmt/test_nmt_cornercases.cpp
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2022, 2023 SAP SE. All rights reserved.
- * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -33,10 +33,10 @@
 #include "unittest.hpp"

 // Check NMT header for integrity, as well as expected type and size.
-static void check_expected_malloc_header(const void* payload, MEMFLAGS type, size_t size) {
+static void check_expected_malloc_header(const void* payload, MemTag mem_tag, size_t size) {
   const MallocHeader* hdr = MallocHeader::resolve_checked(payload);
   EXPECT_EQ(hdr->size(), size);
-  EXPECT_EQ(hdr->flags(), type);
+  EXPECT_EQ(hdr->mem_tag(), mem_tag);
 }

 // ASAN complains about allocating very large sizes
diff --git a/test/hotspot/gtest/nmt/test_nmt_malloclimit.cpp b/test/hotspot/gtest/nmt/test_nmt_malloclimit.cpp
index 7f6000b1212..c054ed0e676 100644
--- a/test/hotspot/gtest/nmt/test_nmt_malloclimit.cpp
+++ b/test/hotspot/gtest/nmt/test_nmt_malloclimit.cpp
@@ -42,9 +42,9 @@ static bool compare_limits(const malloclimit* a, const malloclimit* b) {
 static bool compare_sets(const MallocLimitSet* a, const MallocLimitSet* b) {
   if (compare_limits(a->global_limit(), b->global_limit())) {
-    for (int i = 0; i < mt_number_of_types; i++) {
-      if (!compare_limits(a->category_limit(NMTUtil::index_to_flag(i)),
-                          b->category_limit(NMTUtil::index_to_flag(i)))) {
+    for (int i = 0; i < mt_number_of_tags; i++) {
+      if (!compare_limits(a->category_limit(NMTUtil::index_to_tag(i)),
+                          b->category_limit(NMTUtil::index_to_tag(i)))) {
         return false;
       }
     }
@@ -96,11 +96,11 @@ TEST(NMT, MallocLimitPerCategory) {
 TEST(NMT, MallocLimitCategoryEnumNames) {
   MallocLimitSet expected;
   stringStream option;
-  for (int i = 0; i < mt_number_of_types; i++) {
-    MEMFLAGS f = NMTUtil::index_to_flag(i);
-    if (f != MEMFLAGS::mtNone) {
-      expected.set_category_limit(f, (i + 1) * M, MallocLimitMode::trigger_fatal);
-      option.print("%s%s:%dM", (i > 0 ? "," : ""), NMTUtil::flag_to_enum_name(f), i + 1);
+  for (int i = 0; i < mt_number_of_tags; i++) {
+    MemTag mem_tag = NMTUtil::index_to_tag(i);
+    if (mem_tag != MemTag::mtNone) {
+      expected.set_category_limit(mem_tag, (i + 1) * M, MallocLimitMode::trigger_fatal);
+      option.print("%s%s:%dM", (i > 0 ? "," : ""), NMTUtil::tag_to_enum_name(mem_tag), i + 1);
     }
   }
   test(option.base(), expected);
@@ -109,11 +109,11 @@ TEST(NMT, MallocLimitCategoryEnumNames) {
 TEST(NMT, MallocLimitAllCategoriesHaveHumanReadableNames) {
   MallocLimitSet expected;
   stringStream option;
-  for (int i = 0; i < mt_number_of_types; i++) {
-    MEMFLAGS f = NMTUtil::index_to_flag(i);
-    if (f != MEMFLAGS::mtNone) {
-      expected.set_category_limit(f, (i + 1) * M, MallocLimitMode::trigger_fatal);
-      option.print("%s%s:%dM", (i > 0 ? "," : ""), NMTUtil::flag_to_name(f), i + 1);
+  for (int i = 0; i < mt_number_of_tags; i++) {
+    MemTag mem_tag = NMTUtil::index_to_tag(i);
+    if (mem_tag != MemTag::mtNone) {
+      expected.set_category_limit(mem_tag, (i + 1) * M, MallocLimitMode::trigger_fatal);
+      option.print("%s%s:%dM", (i > 0 ? "," : ""), NMTUtil::tag_to_name(mem_tag), i + 1);
     }
   }
   test(option.base(), expected);
diff --git a/test/hotspot/gtest/nmt/test_nmt_reserved_region.cpp b/test/hotspot/gtest/nmt/test_nmt_reserved_region.cpp
index 0708ce5f300..b5f88990346 100644
--- a/test/hotspot/gtest/nmt/test_nmt_reserved_region.cpp
+++ b/test/hotspot/gtest/nmt/test_nmt_reserved_region.cpp
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2023 SAP SE. All rights reserved.
- * Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
  */

 #include "precompiled.hpp"
+#include "nmt/nmtCommon.hpp"
 #include "nmt/memTracker.hpp"
 #include "nmt/virtualMemoryTracker.hpp"
 #include "runtime/os.hpp"
@@ -33,12 +34,12 @@ TEST_VM(NMT, ReservedRegionCopy) {
   address dummy1 = (address)0x10000000;
   NativeCallStack stack1(&dummy1, 1);
   ReservedMemoryRegion region1(dummy1, os::vm_page_size(), stack1, mtThreadStack);
-  VirtualMemorySummary::record_reserved_memory(os::vm_page_size(), region1.flag());
+  VirtualMemorySummary::record_reserved_memory(os::vm_page_size(), region1.mem_tag());
   region1.add_committed_region(dummy1, os::vm_page_size(), stack1);

   address dummy2 = (address)0x20000000;
   NativeCallStack stack2(&dummy2, 1);
   ReservedMemoryRegion region2(dummy2, os::vm_page_size(), stack2, mtCode);
-  VirtualMemorySummary::record_reserved_memory(os::vm_page_size(), region2.flag());
+  VirtualMemorySummary::record_reserved_memory(os::vm_page_size(), region2.mem_tag());
   region2.add_committed_region(dummy2, os::vm_page_size(), stack2);

   region2 = region1;
@@ -46,7 +47,7 @@ TEST_VM(NMT, ReservedRegionCopy) {
   CommittedRegionIterator itr = region2.iterate_committed_regions();
   const CommittedMemoryRegion* rgn = itr.next();
   ASSERT_EQ(rgn->base(), dummy1); // Now we should see dummy1
-  ASSERT_EQ(region2.flag(), mtThreadStack); // Should be correct flag
+  ASSERT_EQ(region2.mem_tag(), mtThreadStack); // Should be correct memory tag
   ASSERT_EQ(region2.call_stack()->get_frame(0), dummy1); // Check the stack
   rgn = itr.next();
   ASSERT_EQ(rgn, (const CommittedMemoryRegion*)nullptr); // and nothing else
diff --git a/test/hotspot/gtest/nmt/test_nmt_totals.cpp b/test/hotspot/gtest/nmt/test_nmt_totals.cpp
index bf2c1397e7d..01ffc7c0ce3 100644
--- a/test/hotspot/gtest/nmt/test_nmt_totals.cpp
+++ b/test/hotspot/gtest/nmt/test_nmt_totals.cpp
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2022 SAP SE. All rights reserved.
- * Copyright (c) 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -89,8 +89,8 @@ TEST_VM(NMTNumbers, totals) {
   void* p[NUM_ALLOCS];
   for (int i = 0; i < NUM_ALLOCS; i ++) {
     // spread over categories
-    int category = i % (mt_number_of_types - 1);
-    p[i] = NEW_C_HEAP_ARRAY(char, ALLOC_SIZE, (MEMFLAGS)category);
+    int category = i % (mt_number_of_tags - 1);
+    p[i] = NEW_C_HEAP_ARRAY(char, ALLOC_SIZE, (MemTag)category);
   }
   const totals_t t2 = get_totals();
diff --git a/test/hotspot/gtest/nmt/test_vmatree.cpp b/test/hotspot/gtest/nmt/test_vmatree.cpp
index 1c1bc31b5b4..08b4340ae4f 100644
--- a/test/hotspot/gtest/nmt/test_vmatree.cpp
+++ b/test/hotspot/gtest/nmt/test_vmatree.cpp
@@ -24,7 +24,7 @@
 #include "precompiled.hpp"
 #include "memory/allocation.hpp"
-#include "nmt/memflags.hpp"
+#include "nmt/memTag.hpp"
 #include "nmt/nmtNativeCallStackStorage.hpp"
 #include "nmt/vmatree.hpp"
 #include "runtime/os.hpp"
@@ -214,7 +214,7 @@ TEST_VM_F(NMTVMATreeTest, LowLevel) {
     treap(tree).visit_in_order([&](Node* x) {
       EXPECT_TRUE(x->key() == 0 || x->key() == 100);
       if (x->key() == 0) {
-        EXPECT_EQ(x->val().out.regiondata().flag, mtTest);
+        EXPECT_EQ(x->val().out.regiondata().mem_tag, mtTest);
       }
     });

@@ -240,6 +240,7 @@ TEST_VM_F(NMTVMATreeTest, LowLevel) {
     EXPECT_EQ(nullptr, treap_root(tree));
   }

+
   { // A committed region inside of/replacing a reserved region
     // should replace the reserved region's metadata.
     Tree::RegionData rd{si[0], mtNMT};
@@ -249,10 +250,10 @@ TEST_VM_F(NMTVMATreeTest, LowLevel) {
     tree.commit_mapping(0, 100, rd2);
     treap(tree).visit_range_in_order(0, 99999, [&](Node* x) {
       if (x->key() == 0) {
-        EXPECT_EQ(mtTest, x->val().out.regiondata().flag);
+        EXPECT_EQ(mtTest, x->val().out.regiondata().mem_tag);
       }
       if (x->key() == 100) {
-        EXPECT_EQ(mtTest, x->val().in.regiondata().flag);
+        EXPECT_EQ(mtTest, x->val().in.regiondata().mem_tag);
       }
     });
   }
@@ -274,11 +275,11 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
     Tree::RegionData rd2(NCS::StackIndex(), mtNMT);
     Tree tree;
     VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd);
-    VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
+    VMATree::SingleDiff diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
     EXPECT_EQ(100, diff.reserve);
     all_diff = tree.reserve_mapping(50, 25, rd2);
-    diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
-    VMATree::SingleDiff diff2 = all_diff.flag[NMTUtil::flag_to_index(mtNMT)];
+    diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
+    VMATree::SingleDiff diff2 = all_diff.tag[NMTUtil::tag_to_index(mtNMT)];
     EXPECT_EQ(-25, diff.reserve);
     EXPECT_EQ(25, diff2.reserve);
   }
@@ -286,31 +287,31 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
     Tree::RegionData rd(NCS::StackIndex(), mtTest);
     Tree tree;
     VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd);
-    VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
+    VMATree::SingleDiff diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
     EXPECT_EQ(100, diff.reserve);
     all_diff = tree.release_mapping(0, 100);
-    diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
+    diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
     EXPECT_EQ(-100, diff.reserve);
   }
   { // Convert some of a released mapping to a committed one
     Tree::RegionData rd(NCS::StackIndex(), mtTest);
     Tree tree;
     VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd);
-    VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
+    VMATree::SingleDiff diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
     EXPECT_EQ(diff.reserve, 100);
     all_diff = tree.commit_mapping(0, 100, rd);
-    diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
+    diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
     EXPECT_EQ(0, diff.reserve);
     EXPECT_EQ(100, diff.commit);
   }
-  { // Adjacent reserved mappings with same flag
+  { // Adjacent reserved mappings with same type
     Tree::RegionData rd(NCS::StackIndex(), mtTest);
     Tree tree;
     VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd);
-    VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
+    VMATree::SingleDiff diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
     EXPECT_EQ(diff.reserve, 100);
     all_diff = tree.reserve_mapping(100, 100, rd);
-    diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
+    diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
     EXPECT_EQ(100, diff.reserve);
   }
   { // Adjacent reserved mappings with different flags
     Tree::RegionData rd(NCS::StackIndex(), mtTest);
     Tree::RegionData rd2(NCS::StackIndex(), mtNMT);
     Tree tree;
     VMATree::SummaryDiff all_diff = tree.reserve_mapping(0, 100, rd);
-    VMATree::SingleDiff diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
+    VMATree::SingleDiff diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
     EXPECT_EQ(diff.reserve, 100);
     all_diff = tree.reserve_mapping(100, 100, rd2);
-    diff = all_diff.flag[NMTUtil::flag_to_index(mtTest)];
+    diff = all_diff.tag[NMTUtil::tag_to_index(mtTest)];
     EXPECT_EQ(0, diff.reserve);
-    diff = all_diff.flag[NMTUtil::flag_to_index(mtNMT)];
+    diff = all_diff.tag[NMTUtil::tag_to_index(mtNMT)];
     EXPECT_EQ(100, diff.reserve);
   }

@@ -334,27 +335,27 @@ TEST_VM_F(NMTVMATreeTest, SummaryAccounting) {
     tree.commit_mapping(128, 128, rd);
     tree.commit_mapping(512, 128, rd);
     VMATree::SummaryDiff diff = tree.commit_mapping(0, 1024, rd);
-    EXPECT_EQ(768, diff.flag[NMTUtil::flag_to_index(mtTest)].commit);
-    EXPECT_EQ(768, diff.flag[NMTUtil::flag_to_index(mtTest)].reserve);
+    EXPECT_EQ(768, diff.tag[NMTUtil::tag_to_index(mtTest)].commit);
+    EXPECT_EQ(768, diff.tag[NMTUtil::tag_to_index(mtTest)].reserve);
   }
 }

 // Exceedingly simple tracker for page-granular allocations
 // Use it for testing consistency with VMATree.
-struct SimpleVMATracker : public CHeapObj<mtTest> {
+ struct SimpleVMATracker : public CHeapObj<mtTest> {
   const size_t page_size = 4096;
-  enum Type { Reserved, Committed, Free };
+  enum Kind { Reserved, Committed, Free };
   struct Info {
-    Type type;
-    MEMFLAGS flag;
+    Kind kind;
+    MemTag mem_tag;
     NativeCallStack stack;
-    Info() : type(Free), flag(mtNone), stack() {}
+    Info() : kind(Free), mem_tag(mtNone), stack() {}

-    Info(Type type, NativeCallStack stack, MEMFLAGS flag)
-    : type(type), flag(flag), stack(stack) {}
+    Info(Kind kind, NativeCallStack stack, MemTag mem_tag)
+    : kind(kind), mem_tag(mem_tag), stack(stack) {}

     bool eq(Info other) {
-      return flag == other.flag && stack.equals(other.stack);
+      return kind == other.kind && stack.equals(other.stack);
     }
   };
   // Page (4KiB) granular array
@@ -368,7 +369,7 @@ struct SimpleVMATracker : public CHeapObj<mtTest> {
     }
   }

-  VMATree::SummaryDiff do_it(Type type, size_t start, size_t size, NativeCallStack stack, MEMFLAGS flag) {
+  VMATree::SummaryDiff do_it(Kind kind, size_t start, size_t size, NativeCallStack stack, MemTag mem_tag) {
     assert(is_aligned(size, page_size) && is_aligned(start, page_size), "page alignment");

     VMATree::SummaryDiff diff;
@@ -377,23 +378,23 @@ struct SimpleVMATracker : public CHeapObj<mtTest> {
     const size_t end_idx = start_idx + page_count;
     assert(end_idx < SimpleVMATracker::num_pages, "");

-    Info new_info(type, stack, flag);
+    Info new_info(kind, stack, mem_tag);
     for (size_t i = start_idx; i < end_idx; i++) {
       Info& old_info = pages[i];

       // Register diff
-      if (old_info.type == Reserved) {
-        diff.flag[(int)old_info.flag].reserve -= page_size;
-      } else if (old_info.type == Committed) {
-        diff.flag[(int)old_info.flag].reserve -= page_size;
-        diff.flag[(int)old_info.flag].commit -= page_size;
+      if (old_info.kind == Reserved) {
+        diff.tag[(int)old_info.mem_tag].reserve -= page_size;
+      } else if (old_info.kind == Committed) {
+        diff.tag[(int)old_info.mem_tag].reserve -= page_size;
+        diff.tag[(int)old_info.mem_tag].commit -= page_size;
       }

-      if (type == Reserved) {
-        diff.flag[(int)new_info.flag].reserve += page_size;
-      } else if(type == Committed) {
-        diff.flag[(int)new_info.flag].reserve += page_size;
-        diff.flag[(int)new_info.flag].commit += page_size;
+      if (kind == Reserved) {
+        diff.tag[(int)new_info.mem_tag].reserve += page_size;
+      } else if (kind == Committed) {
+        diff.tag[(int)new_info.mem_tag].reserve += page_size;
+        diff.tag[(int)new_info.mem_tag].commit += page_size;
       }
       // Overwrite old one with new
       pages[i] = new_info;
@@ -401,12 +402,12 @@ struct SimpleVMATracker : public CHeapObj<mtTest> {
     return diff;
   }

-  VMATree::SummaryDiff reserve(size_t start, size_t size, NativeCallStack stack, MEMFLAGS flag) {
-    return do_it(Reserved, start, size, stack, flag);
+  VMATree::SummaryDiff reserve(size_t start, size_t size, NativeCallStack stack, MemTag mem_tag) {
+    return do_it(Reserved, start, size, stack, mem_tag);
   }

-  VMATree::SummaryDiff commit(size_t start, size_t size, NativeCallStack stack, MEMFLAGS flag) {
-    return do_it(Committed, start, size, stack, flag);
+  VMATree::SummaryDiff commit(size_t start, size_t size, NativeCallStack stack, MemTag mem_tag) {
+    return do_it(Committed, start, size, stack, mem_tag);
   }

   VMATree::SummaryDiff release(size_t start, size_t size) {
@@ -423,7 +424,7 @@ TEST_VM_F(NMTVMATreeTest, TestConsistencyWithSimpleTracker) {
   const size_t page_size = tr->page_size;
   VMATree tree;
   NCS ncss(true);
-  constexpr const int candidates_len_flags = 4;
+  constexpr const int candidates_len_tags = 4;
   constexpr const int candidates_len_stacks = 2;

   NativeCallStack candidate_stacks[candidates_len_stacks] = {
@@ -431,7 +432,7 @@ TEST_VM_F(NMTVMATreeTest, TestConsistencyWithSimpleTracker) {
     make_stack(0xB),
   };

-  const MEMFLAGS candidate_flags[candidates_len_flags] = {
+  const MemTag candidate_tags[candidates_len_tags] = {
     mtNMT,
     mtTest,
   };
@@ -455,30 +456,30 @@ TEST_VM_F(NMTVMATreeTest, TestConsistencyWithSimpleTracker) {
     const size_t start = page_start * page_size;
     const size_t size = num_pages * page_size;

-    const MEMFLAGS flag = candidate_flags[os::random() % candidates_len_flags];
+    const MemTag mem_tag = candidate_tags[os::random() % candidates_len_tags];
     const NativeCallStack stack = candidate_stacks[os::random() % candidates_len_stacks];

     const NCS::StackIndex si = ncss.push(stack);
-    VMATree::RegionData data(si, flag);
+    VMATree::RegionData data(si, mem_tag);

-    const SimpleVMATracker::Type type = (SimpleVMATracker::Type)(os::random() % 3);
+    const SimpleVMATracker::Kind kind = (SimpleVMATracker::Kind)(os::random() % 3);

     VMATree::SummaryDiff tree_diff;
     VMATree::SummaryDiff simple_diff;
-    if (type == SimpleVMATracker::Reserved) {
-      simple_diff = tr->reserve(start, size, stack, flag);
+    if (kind == SimpleVMATracker::Reserved) {
+      simple_diff = tr->reserve(start, size, stack, mem_tag);
       tree_diff = tree.reserve_mapping(start, size, data);
-    } else if (type == SimpleVMATracker::Committed) {
-      simple_diff = tr->commit(start, size, stack, flag);
+    } else if (kind == SimpleVMATracker::Committed) {
+      simple_diff = tr->commit(start, size, stack, mem_tag);
       tree_diff = tree.commit_mapping(start, size, data);
     } else {
       simple_diff = tr->release(start, size);
       tree_diff = tree.release_mapping(start, size);
     }

-    for (int j = 0; j < mt_number_of_types; j++) {
-      VMATree::SingleDiff td = tree_diff.flag[j];
-      VMATree::SingleDiff sd = simple_diff.flag[j];
+    for (int j = 0; j < mt_number_of_tags; j++) {
+      VMATree::SingleDiff td = tree_diff.tag[j];
+      VMATree::SingleDiff sd = simple_diff.tag[j];
       ASSERT_EQ(td.reserve, sd.reserve);
       ASSERT_EQ(td.commit, sd.commit);
     }
@@ -489,7 +490,7 @@ TEST_VM_F(NMTVMATreeTest, TestConsistencyWithSimpleTracker) {
     size_t j = 0;
     while (j < SimpleVMATracker::num_pages) {
       while (j < SimpleVMATracker::num_pages &&
-             tr->pages[j].type == SimpleVMATracker::Free) {
+             tr->pages[j].kind == SimpleVMATracker::Free) {
         j++;
       }

@@ -520,8 +521,8 @@ TEST_VM_F(NMTVMATreeTest, TestConsistencyWithSimpleTracker) {
       ASSERT_TRUE(starti.stack.equals(start_stack));
       ASSERT_TRUE(endi.stack.equals(end_stack));

-      ASSERT_EQ(starti.flag, startn->val().out.flag());
-      ASSERT_EQ(endi.flag, endn->val().in.flag());
+      ASSERT_EQ(starti.mem_tag, startn->val().out.mem_tag());
+      ASSERT_EQ(endi.mem_tag, endn->val().in.mem_tag());
     }
   }
 }
diff --git a/test/hotspot/gtest/utilities/test_growableArray.cpp b/test/hotspot/gtest/utilities/test_growableArray.cpp
index 74eb354cb2e..cc8d49cd6b5 100644
74eb354cb2e..cc8d49cd6b5 100644 --- a/test/hotspot/gtest/utilities/test_growableArray.cpp +++ b/test/hotspot/gtest/utilities/test_growableArray.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -35,8 +35,8 @@ struct WithEmbeddedArray { // Arena allocated data array WithEmbeddedArray(Arena* arena, int initial_max) : _a(arena, initial_max, 0, 0) {} // CHeap allocated data array - WithEmbeddedArray(int initial_max, MEMFLAGS memflags) : _a(initial_max, memflags) { - assert(memflags != mtNone, "test requirement"); + WithEmbeddedArray(int initial_max, MemTag mem_tag) : _a(initial_max, mem_tag) { + assert(mem_tag != mtNone, "test requirement"); } WithEmbeddedArray(const GrowableArray& other) : _a(other) {} }; diff --git a/test/hotspot/gtest/utilities/test_resourceHash.cpp b/test/hotspot/gtest/utilities/test_resourceHash.cpp index 9124f4b977c..e9834ef6e2e 100644 --- a/test/hotspot/gtest/utilities/test_resourceHash.cpp +++ b/test/hotspot/gtest/utilities/test_resourceHash.cpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2015, 2024, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -23,6 +23,7 @@ #include "precompiled.hpp" #include "classfile/symbolTable.hpp" +#include "nmt/nmtCommon.hpp" #include "memory/allocation.hpp" #include "memory/resourceArea.hpp" #include "oops/symbolHandle.hpp" @@ -35,7 +36,7 @@ class CommonResourceHashtableTest : public ::testing::Test { protected: typedef void* K; typedef uintx V; - const static MEMFLAGS MEM_TYPE = mtInternal; + const static MemTag MEM_TAG = mtInternal; static unsigned identity_hash(const K& k) { return (unsigned) (uintptr_t) k; @@ -93,7 +94,7 @@ class SmallResourceHashtableTest : public CommonResourceHashtableTest { static void test(V step) { EqualityTestIter et; - ResourceHashtable rh; + ResourceHashtable rh; ASSERT_FALSE(rh.contains(as_K(step))); @@ -225,7 +226,7 @@ class GenericResourceHashtableTest : public CommonResourceHashtableTest { static void test(unsigned num_elements = SIZE) { EqualityTestIter et; - ResourceHashtable rh; + ResourceHashtable rh; for (uintptr_t i = 0; i < num_elements; ++i) { ASSERT_TRUE(rh.put(as_K(i), i)); diff --git a/test/hotspot/gtest/utilities/test_utf8.cpp b/test/hotspot/gtest/utilities/test_utf8.cpp index 80f6671207b..1636862f767 100644 --- a/test/hotspot/gtest/utilities/test_utf8.cpp +++ b/test/hotspot/gtest/utilities/test_utf8.cpp @@ -22,7 +22,7 @@ */ #include "precompiled.hpp" -#include "nmt/memflags.hpp" +#include "nmt/memTag.hpp" #include "runtime/os.hpp" #include "utilities/utf8.hpp" #include "unittest.hpp"
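
Not part of the patch: a minimal usage sketch of the renamed API, built only from
calls that appear in the test hunks above (VMATree, RegionData, SummaryDiff,
NMTUtil::tag_to_index). It assumes the HotSpot gtest environment and, as in the
tests, a default-constructed NativeCallStackStorage::StackIndex; it is not a
standalone program.

    // After the MEMFLAGS -> MemTag rename, a region carries a MemTag and the
    // summary diff is indexed per tag rather than per "flag".
    VMATree tree;
    VMATree::RegionData rd(NativeCallStackStorage::StackIndex(), mtTest);

    // Reserve [0, 100); inspect the per-tag accounting for mtTest.
    VMATree::SummaryDiff all = tree.reserve_mapping(0, 100, rd);
    VMATree::SingleDiff d = all.tag[NMTUtil::tag_to_index(mtTest)];
    // Per the SummaryAccounting test above: d.reserve == 100, d.commit == 0.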