8354535: [BACKOUT] Force clients to explicitly pass mem_tag value, even if it is mtNone
Reviewed-by: stefank, jsjolen
commit 0da480a91d
parent de0e648844
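This backout restores the default MemTag arguments on the os:: reservation entry points that the backed-out change had removed; callers no longer have to spell out mtNone, and the executable flag again precedes the tag. A minimal before/after sketch of the central declarations, copied from the os.hpp hunk further down (context lines omitted):

    // Backed-out form: the tag is a mandatory argument placed before the executable flag.
    static char* reserve_memory(size_t bytes, MemTag mem_tag, bool executable = false);
    static char* attempt_reserve_memory_at(char* addr, size_t bytes, MemTag mem_tag, bool executable = false);

    // Restored form: the tag is optional again and defaults to mtNone.
    static char* reserve_memory(size_t bytes, bool executable = false, MemTag mem_tag = mtNone);
    static char* attempt_reserve_memory_at(char* addr, size_t bytes, bool executable = false, MemTag mem_tag = mtNone);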
@@ -70,7 +70,7 @@ static char* reserve_at_eor_compatible_address(size_t size, bool aslr) {
     const uint64_t immediate = ((uint64_t)immediates[index]) << 32;
     assert(immediate > 0 && Assembler::operand_valid_for_logical_immediate(/*is32*/false, immediate),
            "Invalid immediate %d " UINT64_FORMAT, index, immediate);
-    result = os::attempt_reserve_memory_at((char*)immediate, size, mtNone);
+    result = os::attempt_reserve_memory_at((char*)immediate, size, false);
     if (result == nullptr) {
       log_trace(metaspace, map)("Failed to attach at " UINT64_FORMAT_X, immediate);
     }
@@ -114,7 +114,7 @@ char* CompressedKlassPointers::reserve_address_space_for_compressed_classes(size
   if (result == nullptr) {
     constexpr size_t alignment = nth_bit(32);
     log_debug(metaspace, map)("Trying to reserve at a 32-bit-aligned address");
-    result = os::reserve_memory_aligned(size, alignment, mtNone);
+    result = os::reserve_memory_aligned(size, alignment, false);
   }

   return result;
@@ -77,7 +77,7 @@ ZPhysicalMemoryBacking::ZPhysicalMemoryBacking(size_t max_capacity)
     _initialized(false) {

   // Reserve address space for backing memory
-  _base = (uintptr_t)os::reserve_memory(max_capacity, mtJavaHeap);
+  _base = (uintptr_t)os::reserve_memory(max_capacity, false, mtJavaHeap);
   if (_base == 0) {
     // Failed
     ZInitialize::error("Failed to reserve address space for backing memory");
@ -4577,7 +4577,7 @@ static void workaround_expand_exec_shield_cs_limit() {
|
||||
*/
|
||||
char* hint = (char*)(os::Linux::initial_thread_stack_bottom() -
|
||||
(StackOverflow::stack_guard_zone_size() + page_size));
|
||||
char* codebuf = os::attempt_reserve_memory_at(hint, page_size, mtThread);
|
||||
char* codebuf = os::attempt_reserve_memory_at(hint, page_size, false, mtThread);
|
||||
|
||||
if (codebuf == nullptr) {
|
||||
// JDK-8197429: There may be a stack gap of one megabyte between
|
||||
@ -4585,7 +4585,7 @@ static void workaround_expand_exec_shield_cs_limit() {
|
||||
// Linux kernel workaround for CVE-2017-1000364. If we failed to
|
||||
// map our codebuf, try again at an address one megabyte lower.
|
||||
hint -= 1 * M;
|
||||
codebuf = os::attempt_reserve_memory_at(hint, page_size, mtThread);
|
||||
codebuf = os::attempt_reserve_memory_at(hint, page_size, false, mtThread);
|
||||
}
|
||||
|
||||
if ((codebuf == nullptr) || (!os::commit_memory(codebuf, page_size, true))) {
|
||||
|
||||
@ -491,9 +491,9 @@ static char* chop_extra_memory(size_t size, size_t alignment, char* extra_base,
|
||||
// Multiple threads can race in this code, and can remap over each other with MAP_FIXED,
|
||||
// so on posix, unmap the section at the start and at the end of the chunk that we mapped
|
||||
// rather than unmapping and remapping the whole chunk to get requested alignment.
|
||||
char* os::reserve_memory_aligned(size_t size, size_t alignment, MemTag mem_tag, bool exec) {
|
||||
char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
|
||||
size_t extra_size = calculate_aligned_extra_size(size, alignment);
|
||||
char* extra_base = os::reserve_memory(extra_size, mem_tag, exec);
|
||||
char* extra_base = os::reserve_memory(extra_size, exec);
|
||||
if (extra_base == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
@ -64,7 +64,7 @@ static char* backing_store_file_name = nullptr; // name of the backing store
|
||||
static char* create_standard_memory(size_t size) {
|
||||
|
||||
// allocate an aligned chuck of memory
|
||||
char* mapAddress = os::reserve_memory(size, mtInternal);
|
||||
char* mapAddress = os::reserve_memory(size);
|
||||
|
||||
if (mapAddress == nullptr) {
|
||||
return nullptr;
|
||||
|
||||
@ -3019,7 +3019,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
|
||||
PAGE_READWRITE);
|
||||
// If reservation failed, return null
|
||||
if (p_buf == nullptr) return nullptr;
|
||||
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC, mtNone);
|
||||
MemTracker::record_virtual_memory_reserve((address)p_buf, size_of_reserve, CALLER_PC);
|
||||
os::release_memory(p_buf, bytes + chunk_size);
|
||||
|
||||
// we still need to round up to a page boundary (in case we are using large pages)
|
||||
@ -3080,7 +3080,7 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
|
||||
// need to create a dummy 'reserve' record to match
|
||||
// the release.
|
||||
MemTracker::record_virtual_memory_reserve((address)p_buf,
|
||||
bytes_to_release, CALLER_PC, mtNone);
|
||||
bytes_to_release, CALLER_PC);
|
||||
os::release_memory(p_buf, bytes_to_release);
|
||||
}
|
||||
#ifdef ASSERT
|
||||
@ -3098,9 +3098,9 @@ static char* allocate_pages_individually(size_t bytes, char* addr, DWORD flags,
|
||||
// Although the memory is allocated individually, it is returned as one.
|
||||
// NMT records it as one block.
|
||||
if ((flags & MEM_COMMIT) != 0) {
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC, mtNone);
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)p_buf, bytes, CALLER_PC);
|
||||
} else {
|
||||
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC, mtNone);
|
||||
MemTracker::record_virtual_memory_reserve((address)p_buf, bytes, CALLER_PC);
|
||||
}
|
||||
|
||||
// made it this far, success
|
||||
@ -3240,7 +3240,7 @@ char* os::replace_existing_mapping_with_file_mapping(char* base, size_t size, in
|
||||
// Multiple threads can race in this code but it's not possible to unmap small sections of
|
||||
// virtual space to get requested alignment, like posix-like os's.
|
||||
// Windows prevents multiple thread from remapping over each other so this loop is thread-safe.
|
||||
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc, MemTag mem_tag) {
|
||||
static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int file_desc, MemTag mem_tag = mtNone) {
|
||||
assert(is_aligned(alignment, os::vm_allocation_granularity()),
|
||||
"Alignment must be a multiple of allocation granularity (page size)");
|
||||
assert(is_aligned(size, os::vm_allocation_granularity()),
|
||||
@ -3254,7 +3254,7 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
|
||||
|
||||
for (int attempt = 0; attempt < max_attempts && aligned_base == nullptr; attempt ++) {
|
||||
char* extra_base = file_desc != -1 ? os::map_memory_to_file(extra_size, file_desc, mem_tag) :
|
||||
os::reserve_memory(extra_size, mem_tag);
|
||||
os::reserve_memory(extra_size, false, mem_tag);
|
||||
if (extra_base == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
@ -3271,7 +3271,7 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
|
||||
// Attempt to map, into the just vacated space, the slightly smaller aligned area.
|
||||
// Which may fail, hence the loop.
|
||||
aligned_base = file_desc != -1 ? os::attempt_map_memory_to_file_at(aligned_base, size, file_desc, mem_tag) :
|
||||
os::attempt_reserve_memory_at(aligned_base, size, mem_tag);
|
||||
os::attempt_reserve_memory_at(aligned_base, size, false, mem_tag);
|
||||
}
|
||||
|
||||
assert(aligned_base != nullptr,
|
||||
@ -3280,9 +3280,9 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
|
||||
return aligned_base;
|
||||
}
|
||||
|
||||
char* os::reserve_memory_aligned(size_t size, size_t alignment, MemTag mem_tag, bool exec) {
|
||||
char* os::reserve_memory_aligned(size_t size, size_t alignment, bool exec) {
|
||||
// exec can be ignored
|
||||
return map_or_reserve_memory_aligned(size, alignment, -1/* file_desc */, mem_tag);
|
||||
return map_or_reserve_memory_aligned(size, alignment, -1 /* file_desc */);
|
||||
}
|
||||
|
||||
char* os::map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MemTag mem_tag) {
|
||||
@ -5187,7 +5187,7 @@ char* os::pd_map_memory(int fd, const char* file_name, size_t file_offset,
|
||||
}
|
||||
|
||||
// Record virtual memory allocation
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC, mtNone);
|
||||
MemTracker::record_virtual_memory_reserve_and_commit((address)addr, bytes, CALLER_PC);
|
||||
|
||||
DWORD bytes_read;
|
||||
OVERLAPPED overlapped;
|
||||
|
||||
@ -54,7 +54,7 @@ typedef BOOL (WINAPI *SetSecurityDescriptorControlFnPtr)(
|
||||
static char* create_standard_memory(size_t size) {
|
||||
|
||||
// allocate an aligned chuck of memory
|
||||
char* mapAddress = os::reserve_memory(size, mtInternal);
|
||||
char* mapAddress = os::reserve_memory(size);
|
||||
|
||||
if (mapAddress == nullptr) {
|
||||
return nullptr;
|
||||
|
||||
@ -309,8 +309,7 @@ address ArchiveBuilder::reserve_buffer() {
|
||||
size_t buffer_size = LP64_ONLY(CompressedClassSpaceSize) NOT_LP64(256 * M);
|
||||
ReservedSpace rs = MemoryReserver::reserve(buffer_size,
|
||||
MetaspaceShared::core_region_alignment(),
|
||||
os::vm_page_size(),
|
||||
mtClassShared);
|
||||
os::vm_page_size());
|
||||
if (!rs.is_reserved()) {
|
||||
log_error(cds)("Failed to reserve %zu bytes of output buffer.", buffer_size);
|
||||
MetaspaceShared::unrecoverable_writing_error();
|
||||
|
||||
@ -1066,10 +1066,10 @@ void FileMapInfo::close() {
|
||||
*/
|
||||
static char* map_memory(int fd, const char* file_name, size_t file_offset,
|
||||
char *addr, size_t bytes, bool read_only,
|
||||
bool allow_exec, MemTag mem_tag) {
|
||||
bool allow_exec, MemTag mem_tag = mtNone) {
|
||||
char* mem = os::map_memory(fd, file_name, file_offset, addr, bytes,
|
||||
mem_tag, AlwaysPreTouch ? false : read_only,
|
||||
allow_exec);
|
||||
AlwaysPreTouch ? false : read_only,
|
||||
allow_exec, mem_tag);
|
||||
if (mem != nullptr && AlwaysPreTouch) {
|
||||
os::pretouch_memory(mem, mem + bytes);
|
||||
}
|
||||
@ -1094,7 +1094,7 @@ bool FileMapInfo::remap_shared_readonly_as_readwrite() {
|
||||
assert(WINDOWS_ONLY(false) NOT_WINDOWS(true), "Don't call on Windows");
|
||||
// Replace old mapping with new one that is writable.
|
||||
char *base = os::map_memory(_fd, _full_path, r->file_offset(),
|
||||
addr, size, mtNone, false /* !read_only */,
|
||||
addr, size, false /* !read_only */,
|
||||
r->allow_exec());
|
||||
close();
|
||||
// These have to be errors because the shared region is now unmapped.
|
||||
@ -1620,7 +1620,7 @@ bool FileMapInfo::map_heap_region_impl() {
|
||||
} else {
|
||||
base = map_memory(_fd, _full_path, r->file_offset(),
|
||||
addr, _mapped_heap_memregion.byte_size(), r->read_only(),
|
||||
r->allow_exec(), mtJavaHeap);
|
||||
r->allow_exec());
|
||||
if (base == nullptr || base != addr) {
|
||||
dealloc_heap_region();
|
||||
log_info(cds)("UseSharedSpaces: Unable to map at required address in java heap. "
|
||||
|
||||
@ -1537,8 +1537,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
|
||||
archive_space_rs = MemoryReserver::reserve((char*)base_address,
|
||||
archive_space_size,
|
||||
archive_space_alignment,
|
||||
os::vm_page_size(),
|
||||
mtNone);
|
||||
os::vm_page_size());
|
||||
if (archive_space_rs.is_reserved()) {
|
||||
assert(base_address == nullptr ||
|
||||
(address)archive_space_rs.base() == base_address, "Sanity");
|
||||
@ -1606,13 +1605,11 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
|
||||
archive_space_rs = MemoryReserver::reserve((char*)base_address,
|
||||
archive_space_size,
|
||||
archive_space_alignment,
|
||||
os::vm_page_size(),
|
||||
mtNone);
|
||||
os::vm_page_size());
|
||||
class_space_rs = MemoryReserver::reserve((char*)ccs_base,
|
||||
class_space_size,
|
||||
class_space_alignment,
|
||||
os::vm_page_size(),
|
||||
mtNone);
|
||||
os::vm_page_size());
|
||||
}
|
||||
if (!archive_space_rs.is_reserved() || !class_space_rs.is_reserved()) {
|
||||
release_reserved_spaces(total_space_rs, archive_space_rs, class_space_rs);
|
||||
@ -1625,8 +1622,7 @@ char* MetaspaceShared::reserve_address_space_for_archives(FileMapInfo* static_ma
|
||||
total_space_rs = MemoryReserver::reserve((char*) base_address,
|
||||
total_range_size,
|
||||
base_address_alignment,
|
||||
os::vm_page_size(),
|
||||
mtNone);
|
||||
os::vm_page_size());
|
||||
} else {
|
||||
// We did not manage to reserve at the preferred address, or were instructed to relocate. In that
|
||||
// case we reserve wherever possible, but the start address needs to be encodable as narrow Klass
|
||||
|
||||
@ -226,7 +226,7 @@ HashtableTextDump::HashtableTextDump(const char* filename) : _fd(-1) {
|
||||
if (_fd < 0) {
|
||||
quit("Unable to open hashtable dump file", filename);
|
||||
}
|
||||
_base = os::map_memory(_fd, filename, 0, nullptr, _size, mtNone, true, false);
|
||||
_base = os::map_memory(_fd, filename, 0, nullptr, _size, true, false);
|
||||
if (_base == nullptr) {
|
||||
quit("Unable to map hashtable dump file", filename);
|
||||
}
|
||||
|
||||
@ -1230,8 +1230,7 @@ G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* des
|
||||
// Allocate a new reserved space, preferring to use large pages.
|
||||
ReservedSpace rs = MemoryReserver::reserve(size,
|
||||
alignment,
|
||||
preferred_page_size,
|
||||
mtGC);
|
||||
preferred_page_size);
|
||||
|
||||
size_t page_size = rs.page_size();
|
||||
G1RegionToSpaceMapper* result =
|
||||
|
||||
@ -48,8 +48,7 @@ ParMarkBitMap::initialize(MemRegion covered_region)
|
||||
|
||||
ReservedSpace rs = MemoryReserver::reserve(_reserved_byte_size,
|
||||
rs_align,
|
||||
page_sz,
|
||||
mtGC);
|
||||
page_sz);
|
||||
|
||||
if (!rs.is_reserved()) {
|
||||
// Failed to reserve memory for the bitmap,
|
||||
|
||||
@ -246,8 +246,7 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
|
||||
|
||||
ReservedSpace rs = MemoryReserver::reserve(_reserved_byte_size,
|
||||
rs_align,
|
||||
page_sz,
|
||||
mtGC);
|
||||
page_sz);
|
||||
|
||||
if (!rs.is_reserved()) {
|
||||
// Failed to reserve memory.
|
||||
|
||||
@ -80,7 +80,7 @@ void CardTable::initialize(void* region0_start, void* region1_start) {
|
||||
HeapWord* high_bound = _whole_heap.end();
|
||||
|
||||
const size_t rs_align = MAX2(_page_size, os::vm_allocation_granularity());
|
||||
ReservedSpace rs = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size, mtGC);
|
||||
ReservedSpace rs = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size);
|
||||
|
||||
if (!rs.is_reserved()) {
|
||||
vm_exit_during_initialization("Could not reserve enough space for the "
|
||||
|
||||
@ -45,7 +45,7 @@ void ShenandoahCardTable::initialize() {
|
||||
// ReservedSpace constructor would assert rs_align >= os::vm_page_size().
|
||||
const size_t rs_align = MAX2(_page_size, granularity);
|
||||
|
||||
ReservedSpace write_space = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size, mtGC);
|
||||
ReservedSpace write_space = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size);
|
||||
initialize(write_space);
|
||||
|
||||
// The assembler store_check code will do an unsigned shift of the oop,
|
||||
@ -60,7 +60,7 @@ void ShenandoahCardTable::initialize() {
|
||||
_write_byte_map = _byte_map;
|
||||
_write_byte_map_base = _byte_map_base;
|
||||
|
||||
ReservedSpace read_space = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size, mtGC);
|
||||
ReservedSpace read_space = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size);
|
||||
initialize(read_space);
|
||||
|
||||
_read_byte_map = (CardValue*) read_space.base();
|
||||
|
||||
@ -166,7 +166,7 @@ static ReservedSpace reserve(size_t size, size_t preferred_page_size) {
|
||||
size = align_up(size, alignment);
|
||||
}
|
||||
|
||||
const ReservedSpace reserved = MemoryReserver::reserve(size, alignment, preferred_page_size, mtGC);
|
||||
const ReservedSpace reserved = MemoryReserver::reserve(size, alignment, preferred_page_size);
|
||||
if (!reserved.is_reserved()) {
|
||||
vm_exit_during_initialization("Could not reserve space");
|
||||
}
|
||||
@ -380,7 +380,7 @@ jint ShenandoahHeap::initialize() {
|
||||
for (uintptr_t addr = min; addr <= max; addr <<= 1u) {
|
||||
char* req_addr = (char*)addr;
|
||||
assert(is_aligned(req_addr, cset_align), "Should be aligned");
|
||||
cset_rs = MemoryReserver::reserve(req_addr, cset_size, cset_align, cset_page_size, mtGC);
|
||||
cset_rs = MemoryReserver::reserve(req_addr, cset_size, cset_align, cset_page_size);
|
||||
if (cset_rs.is_reserved()) {
|
||||
assert(cset_rs.base() == req_addr, "Allocated where requested: " PTR_FORMAT ", " PTR_FORMAT, p2i(cset_rs.base()), addr);
|
||||
_collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
|
||||
@ -389,7 +389,7 @@ jint ShenandoahHeap::initialize() {
|
||||
}
|
||||
|
||||
if (_collection_set == nullptr) {
|
||||
cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size(), mtGC);
|
||||
cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size());
|
||||
if (!cset_rs.is_reserved()) {
|
||||
vm_exit_during_initialization("Cannot reserve memory for collection set");
|
||||
}
|
||||
|
||||
@ -106,8 +106,7 @@ bool JfrVirtualMemorySegment::initialize(size_t reservation_size_request_bytes)
|
||||
assert(is_aligned(reservation_size_request_bytes, os::vm_allocation_granularity()), "invariant");
|
||||
_rs = MemoryReserver::reserve(reservation_size_request_bytes,
|
||||
os::vm_allocation_granularity(),
|
||||
os::vm_page_size(),
|
||||
mtTracing);
|
||||
os::vm_page_size());
|
||||
if (!_rs.is_reserved()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -58,7 +58,7 @@ template <class E>
|
||||
E* MmapArrayAllocator<E>::allocate_or_null(size_t length, MemTag mem_tag) {
|
||||
size_t size = size_for(length);
|
||||
|
||||
char* addr = os::reserve_memory(size, mem_tag);
|
||||
char* addr = os::reserve_memory(size, !ExecMem, mem_tag);
|
||||
if (addr == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
@ -75,7 +75,7 @@ template <class E>
|
||||
E* MmapArrayAllocator<E>::allocate(size_t length, MemTag mem_tag) {
|
||||
size_t size = size_for(length);
|
||||
|
||||
char* addr = os::reserve_memory(size, mem_tag);
|
||||
char* addr = os::reserve_memory(size, !ExecMem, mem_tag);
|
||||
if (addr == nullptr) {
|
||||
vm_exit_out_of_memory(size, OOM_MMAP_ERROR, "Allocator (reserve)");
|
||||
}
|
||||
|
||||
@ -90,13 +90,13 @@ static char* reserve_memory_inner(char* requested_address,
|
||||
assert(is_aligned(requested_address, alignment),
|
||||
"Requested address " PTR_FORMAT " must be aligned to %zu",
|
||||
p2i(requested_address), alignment);
|
||||
return os::attempt_reserve_memory_at(requested_address, size, mem_tag, exec);
|
||||
return os::attempt_reserve_memory_at(requested_address, size, exec, mem_tag);
|
||||
}
|
||||
|
||||
// Optimistically assume that the OS returns an aligned base pointer.
|
||||
// When reserving a large address range, most OSes seem to align to at
|
||||
// least 64K.
|
||||
char* base = os::reserve_memory(size, mem_tag, exec);
|
||||
char* base = os::reserve_memory(size, exec, mem_tag);
|
||||
if (is_aligned(base, alignment)) {
|
||||
return base;
|
||||
}
|
||||
@ -107,7 +107,7 @@ static char* reserve_memory_inner(char* requested_address,
|
||||
}
|
||||
|
||||
// Map using the requested alignment.
|
||||
return os::reserve_memory_aligned(size, alignment, mem_tag, exec);
|
||||
return os::reserve_memory_aligned(size, alignment, exec);
|
||||
}
|
||||
|
||||
ReservedSpace MemoryReserver::reserve_memory(char* requested_address,
|
||||
@ -261,7 +261,7 @@ static char* map_memory_to_file(char* requested_address,
|
||||
// Optimistically assume that the OS returns an aligned base pointer.
|
||||
// When reserving a large address range, most OSes seem to align to at
|
||||
// least 64K.
|
||||
char* base = os::map_memory_to_file(size, fd, mem_tag);
|
||||
char* base = os::map_memory_to_file(size, fd);
|
||||
if (is_aligned(base, alignment)) {
|
||||
return base;
|
||||
}
|
||||
|
||||
@@ -58,12 +58,12 @@ public:
                                size_t size,
                                size_t alignment,
                                size_t page_size,
-                               MemTag mem_tag);
+                               MemTag mem_tag = mtNone);

   static ReservedSpace reserve(size_t size,
                                size_t alignment,
                                size_t page_size,
-                               MemTag mem_tag);
+                               MemTag mem_tag = mtNone);

   static ReservedSpace reserve(size_t size,
                                MemTag mem_tag);

@ -594,7 +594,7 @@ ReservedSpace Metaspace::reserve_address_space_for_compressed_classes(size_t siz
|
||||
if (result == nullptr) {
|
||||
// Fallback: reserve anywhere
|
||||
log_debug(metaspace, map)("Trying anywhere...");
|
||||
result = os::reserve_memory_aligned(size, Metaspace::reserve_alignment(), mtClass);
|
||||
result = os::reserve_memory_aligned(size, Metaspace::reserve_alignment(), false);
|
||||
}
|
||||
|
||||
// Wrap resulting range in ReservedSpace
|
||||
@ -767,8 +767,7 @@ void Metaspace::global_initialize() {
|
||||
rs = MemoryReserver::reserve((char*)base,
|
||||
size,
|
||||
Metaspace::reserve_alignment(),
|
||||
os::vm_page_size(),
|
||||
mtClass);
|
||||
os::vm_page_size());
|
||||
|
||||
if (rs.is_reserved()) {
|
||||
log_info(metaspace)("Successfully forced class space address to " PTR_FORMAT, p2i(base));
|
||||
|
||||
@ -82,7 +82,7 @@ MetaspaceTestContext::MetaspaceTestContext(const char* name, size_t commit_limit
|
||||
reserve_limit, Metaspace::reserve_alignment_words());
|
||||
if (reserve_limit > 0) {
|
||||
// have reserve limit -> non-expandable context
|
||||
_rs = MemoryReserver::reserve(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), os::vm_page_size(), mtTest);
|
||||
_rs = MemoryReserver::reserve(reserve_limit * BytesPerWord, Metaspace::reserve_alignment(), os::vm_page_size());
|
||||
_context = MetaspaceContext::create_nonexpandable_context(name, _rs, &_commit_limiter);
|
||||
} else {
|
||||
// no reserve limit -> expandable vslist
|
||||
@ -142,3 +142,4 @@ size_t MetaspaceTestContext::reserved_words() const {
|
||||
}
|
||||
|
||||
} // namespace metaspace
|
||||
|
||||
|
||||
@ -256,7 +256,7 @@ VirtualSpaceNode* VirtualSpaceNode::create_node(size_t word_size,
|
||||
|
||||
ReservedSpace rs = MemoryReserver::reserve(word_size * BytesPerWord,
|
||||
Settings::virtual_space_node_reserve_alignment_words() * BytesPerWord,
|
||||
os::vm_page_size(), mtMetaspace);
|
||||
os::vm_page_size());
|
||||
if (!rs.is_reserved()) {
|
||||
vm_exit_out_of_memory(word_size * BytesPerWord, OOM_MMAP_ERROR, "Failed to reserve memory for metaspace");
|
||||
}
|
||||
|
||||
@ -249,7 +249,7 @@ void MemSummaryReporter::report_summary_of_tag(MemTag mem_tag,
|
||||
|
||||
// report malloc'd memory
|
||||
if (amount_in_current_scale(MAX2(malloc_memory->malloc_size(), pk_malloc)) > 0) {
|
||||
print_malloc(malloc_memory->malloc_counter(), mem_tag);
|
||||
print_malloc(malloc_memory->malloc_counter());
|
||||
out->cr();
|
||||
}
|
||||
|
||||
|
||||
@ -108,7 +108,7 @@ class MemReporterBase : public StackObj {
|
||||
|
||||
// Print summary total, malloc and virtual memory
|
||||
void print_total(size_t reserved, size_t committed, size_t peak = 0) const;
|
||||
void print_malloc(const MemoryCounter* c, MemTag mem_tag) const;
|
||||
void print_malloc(const MemoryCounter* c, MemTag mem_tag = mtNone) const;
|
||||
void print_virtual_memory(size_t reserved, size_t committed, size_t peak) const;
|
||||
void print_arena(const MemoryCounter* c) const;
|
||||
|
||||
|
||||
@@ -127,7 +127,7 @@ class MemTracker : AllStatic {
   // (we do not do any reservations before that).

   static inline void record_virtual_memory_reserve(void* addr, size_t size, const NativeCallStack& stack,
-      MemTag mem_tag) {
+      MemTag mem_tag = mtNone) {
     assert_post_init();
     if (!enabled()) return;
     if (addr != nullptr) {
@@ -153,7 +153,7 @@ class MemTracker : AllStatic {
   }

   static inline void record_virtual_memory_reserve_and_commit(void* addr, size_t size,
-      const NativeCallStack& stack, MemTag mem_tag) {
+      const NativeCallStack& stack, MemTag mem_tag = mtNone) {
     assert_post_init();
     if (!enabled()) return;
     if (addr != nullptr) {

@@ -297,7 +297,7 @@ class ReservedMemoryRegion : public VirtualMemoryRegion {

  public:
   ReservedMemoryRegion(address base, size_t size, const NativeCallStack& stack,
-    MemTag mem_tag) :
+    MemTag mem_tag = mtNone) :
     VirtualMemoryRegion(base, size), _stack(stack), _mem_tag(mem_tag) { }


@@ -380,7 +380,7 @@ class VirtualMemoryTracker : AllStatic {
  public:
   static bool initialize(NMT_TrackingLevel level);

-  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MemTag mem_tag);
+  static bool add_reserved_region (address base_addr, size_t size, const NativeCallStack& stack, MemTag mem_tag = mtNone);

   static bool add_committed_region (address base_addr, size_t size, const NativeCallStack& stack);
   static bool remove_uncommitted_region (address base_addr, size_t size);

@ -2400,7 +2400,7 @@ static char* get_bad_address() {
|
||||
static char* bad_address = nullptr;
|
||||
if (bad_address == nullptr) {
|
||||
size_t size = os::vm_allocation_granularity();
|
||||
bad_address = os::reserve_memory(size, mtInternal);
|
||||
bad_address = os::reserve_memory(size, false, mtInternal);
|
||||
if (bad_address != nullptr) {
|
||||
os::protect_memory(bad_address, size, os::MEM_PROT_READ,
|
||||
/*is_committed*/false);
|
||||
|
||||
@ -728,11 +728,11 @@ WB_ENTRY(void, WB_NMTFree(JNIEnv* env, jobject o, jlong mem))
|
||||
WB_END
|
||||
|
||||
WB_ENTRY(jlong, WB_NMTReserveMemory(JNIEnv* env, jobject o, jlong size))
|
||||
return (jlong)(uintptr_t)os::reserve_memory(size, mtTest);
|
||||
return (jlong)(uintptr_t)os::reserve_memory(size, false, mtTest);
|
||||
WB_END
|
||||
|
||||
WB_ENTRY(jlong, WB_NMTAttemptReserveMemoryAt(JNIEnv* env, jobject o, jlong addr, jlong size))
|
||||
return (jlong)(uintptr_t)os::attempt_reserve_memory_at((char*)(uintptr_t)addr, (size_t)size, mtTest);
|
||||
return (jlong)(uintptr_t)os::attempt_reserve_memory_at((char*)(uintptr_t)addr, (size_t)size, false, mtTest);
|
||||
WB_END
|
||||
|
||||
WB_ENTRY(void, WB_NMTCommitMemory(JNIEnv* env, jobject o, jlong addr, jlong size))
|
||||
@ -1524,7 +1524,7 @@ WB_ENTRY(void, WB_ReadReservedMemory(JNIEnv* env, jobject o))
|
||||
static char c;
|
||||
static volatile char* p;
|
||||
|
||||
p = os::reserve_memory(os::vm_allocation_granularity(), mtTest);
|
||||
p = os::reserve_memory(os::vm_allocation_granularity());
|
||||
if (p == nullptr) {
|
||||
THROW_MSG(vmSymbols::java_lang_OutOfMemoryError(), "Failed to reserve memory");
|
||||
}
|
||||
|
||||
@@ -1916,7 +1916,7 @@ bool os::create_stack_guard_pages(char* addr, size_t bytes) {
   return os::pd_create_stack_guard_pages(addr, bytes);
 }

-char* os::reserve_memory(size_t bytes, MemTag mem_tag, bool executable) {
+char* os::reserve_memory(size_t bytes, bool executable, MemTag mem_tag) {
   char* result = pd_reserve_memory(bytes, executable);
   if (result != nullptr) {
     MemTracker::record_virtual_memory_reserve(result, bytes, CALLER_PC, mem_tag);
@@ -1927,7 +1927,7 @@ char* os::reserve_memory(size_t bytes, MemTag mem_tag, bool executable) {
   return result;
 }

-char* os::attempt_reserve_memory_at(char* addr, size_t bytes, MemTag mem_tag, bool executable) {
+char* os::attempt_reserve_memory_at(char* addr, size_t bytes, bool executable, MemTag mem_tag) {
   char* result = SimulateFullAddressSpace ? nullptr : pd_attempt_reserve_memory_at(addr, bytes, executable);
   if (result != nullptr) {
     MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC, mem_tag);
@@ -2133,7 +2133,7 @@ char* os::attempt_reserve_memory_between(char* min, char* max, size_t bytes, siz
     assert(is_aligned(result, alignment), "alignment invalid (" ERRFMT ")", ERRFMTARGS);
     log_trace(os, map)(ERRFMT, ERRFMTARGS);
     log_debug(os, map)("successfully attached at " PTR_FORMAT, p2i(result));
-    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC, mtNone);
+    MemTracker::record_virtual_memory_reserve((address)result, bytes, CALLER_PC);
   } else {
     log_debug(os, map)("failed to attach anywhere in [" PTR_FORMAT "-" PTR_FORMAT ")", p2i(min), p2i(max));
   }
@@ -2300,8 +2300,8 @@ char* os::attempt_map_memory_to_file_at(char* addr, size_t bytes, int file_desc,
 }

 char* os::map_memory(int fd, const char* file_name, size_t file_offset,
-                     char *addr, size_t bytes, MemTag mem_tag,
-                     bool read_only, bool allow_exec) {
+                     char *addr, size_t bytes, bool read_only,
+                     bool allow_exec, MemTag mem_tag) {
   char* result = pd_map_memory(fd, file_name, file_offset, addr, bytes, read_only, allow_exec);
   if (result != nullptr) {
     MemTracker::record_virtual_memory_reserve_and_commit((address)result, bytes, CALLER_PC, mem_tag);
@@ -2339,7 +2339,7 @@ char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size
   char* result = pd_reserve_memory_special(size, alignment, page_size, addr, executable);
   if (result != nullptr) {
     // The memory is committed
-    MemTracker::record_virtual_memory_reserve_and_commit((address)result, size, CALLER_PC, mtNone);
+    MemTracker::record_virtual_memory_reserve_and_commit((address)result, size, CALLER_PC);
     log_debug(os, map)("Reserved and committed " RANGEFMT, RANGEFMTARGS(result, size));
   } else {
     log_info(os, map)("Reserve and commit failed (%zu bytes)", size);
@@ -457,14 +457,14 @@ class os: AllStatic {
   inline static size_t cds_core_region_alignment();

   // Reserves virtual memory.
-  static char* reserve_memory(size_t bytes, MemTag mem_tag, bool executable = false);
+  static char* reserve_memory(size_t bytes, bool executable = false, MemTag mem_tag = mtNone);

   // Reserves virtual memory that starts at an address that is aligned to 'alignment'.
-  static char* reserve_memory_aligned(size_t size, size_t alignment, MemTag mem_tag, bool executable = false);
+  static char* reserve_memory_aligned(size_t size, size_t alignment, bool executable = false);

   // Attempts to reserve the virtual memory at [addr, addr + bytes).
   // Does not overwrite existing mappings.
-  static char* attempt_reserve_memory_at(char* addr, size_t bytes, MemTag mem_tag, bool executable = false);
+  static char* attempt_reserve_memory_at(char* addr, size_t bytes, bool executable = false, MemTag mem_tag = mtNone);

   // Given an address range [min, max), attempts to reserve memory within this area, with the given alignment.
   // If randomize is true, the location will be randomized.
@@ -516,16 +516,16 @@ class os: AllStatic {
   static int create_file_for_heap(const char* dir);
   // Map memory to the file referred by fd. This function is slightly different from map_memory()
   // and is added to be used for implementation of -XX:AllocateHeapAt
-  static char* map_memory_to_file(size_t size, int fd, MemTag mem_tag);
-  static char* map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MemTag mem_tag);
+  static char* map_memory_to_file(size_t size, int fd, MemTag mem_tag = mtNone);
+  static char* map_memory_to_file_aligned(size_t size, size_t alignment, int fd, MemTag mem_tag = mtNone);
   static char* map_memory_to_file(char* base, size_t size, int fd);
-  static char* attempt_map_memory_to_file_at(char* base, size_t size, int fd, MemTag mem_tag);
+  static char* attempt_map_memory_to_file_at(char* base, size_t size, int fd, MemTag mem_tag = mtNone);
   // Replace existing reserved memory with file mapping
   static char* replace_existing_mapping_with_file_mapping(char* base, size_t size, int fd);

   static char* map_memory(int fd, const char* file_name, size_t file_offset,
-                          char *addr, size_t bytes, MemTag mem_tag, bool read_only = false,
-                          bool allow_exec = false);
+                          char *addr, size_t bytes, bool read_only = false,
+                          bool allow_exec = false, MemTag mem_tag = mtNone);
   static bool unmap_memory(char *addr, size_t bytes);
   static void disclaim_memory(char *addr, size_t bytes);
   static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
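For illustration only, a hedged sketch of how a call site reads against the restored declarations above; the sizes and the mtCode tag are invented for the example and are not part of this patch:

    // No tag given: NMT records the reservation as mtNone.
    char* scratch = os::reserve_memory(64 * K);

    // A tag can still be passed explicitly, after the executable flag.
    char* code = os::reserve_memory(64 * K, /*executable*/ true, mtCode);

    if (scratch != nullptr) {
      os::release_memory(scratch, 64 * K);
    }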
@ -57,7 +57,7 @@ void SafepointMechanism::default_initialize() {
|
||||
// Polling page
|
||||
const size_t page_size = os::vm_page_size();
|
||||
const size_t allocation_size = 2 * page_size;
|
||||
char* polling_page = os::reserve_memory(allocation_size, mtSafepoint);
|
||||
char* polling_page = os::reserve_memory(allocation_size, !ExecMem, mtSafepoint);
|
||||
os::commit_memory_or_exit(polling_page, allocation_size, !ExecMem, "Unable to commit Safepoint polling page");
|
||||
|
||||
char* bad_page = polling_page;
|
||||
|
||||
@ -712,7 +712,7 @@ struct TestMultipleStaticAssertFormsInClassScope {
|
||||
// Support for showing register content on asserts/guarantees.
|
||||
#ifdef CAN_SHOW_REGISTERS_ON_ASSERT
|
||||
void initialize_assert_poison() {
|
||||
char* page = os::reserve_memory(os::vm_page_size(), mtInternal);
|
||||
char* page = os::reserve_memory(os::vm_page_size(), !ExecMem, mtInternal);
|
||||
if (page) {
|
||||
if (os::commit_memory(page, os::vm_page_size(), !ExecMem) &&
|
||||
os::protect_memory(page, os::vm_page_size(), os::MEM_PROT_NONE)) {
|
||||
|
||||
@ -82,8 +82,7 @@ TEST_VM(G1RegionToSpaceMapper, smallStressAdjacent) {
|
||||
|
||||
ReservedSpace rs = MemoryReserver::reserve(size,
|
||||
os::vm_allocation_granularity(),
|
||||
os::vm_page_size(),
|
||||
mtTest);
|
||||
os::vm_page_size());
|
||||
|
||||
G1RegionToSpaceMapper* small_mapper =
|
||||
G1RegionToSpaceMapper::create_mapper(rs,
|
||||
@ -91,7 +90,7 @@ TEST_VM(G1RegionToSpaceMapper, smallStressAdjacent) {
|
||||
page_size,
|
||||
region_size,
|
||||
G1BlockOffsetTable::heap_map_factor(),
|
||||
mtTest);
|
||||
mtGC);
|
||||
|
||||
|
||||
|
||||
@ -109,15 +108,14 @@ TEST_VM(G1RegionToSpaceMapper, largeStressAdjacent) {
|
||||
|
||||
ReservedSpace rs = MemoryReserver::reserve(size,
|
||||
os::vm_allocation_granularity(),
|
||||
os::vm_page_size(),
|
||||
mtTest);
|
||||
os::vm_page_size());
|
||||
G1RegionToSpaceMapper* large_mapper =
|
||||
G1RegionToSpaceMapper::create_mapper(rs,
|
||||
size,
|
||||
page_size,
|
||||
region_size,
|
||||
G1BlockOffsetTable::heap_map_factor(),
|
||||
mtTest);
|
||||
mtGC);
|
||||
|
||||
G1TestCommitUncommit task(large_mapper);
|
||||
G1MapperWorkers::run_task(&task);
|
||||
|
||||
@ -56,7 +56,7 @@ public:
|
||||
const size_t increment = MAX2(align_up(unused / 100, ZGranuleSize), ZGranuleSize);
|
||||
|
||||
for (uintptr_t start = 0; start + ZGranuleSize <= ZAddressOffsetMax; start += increment) {
|
||||
char* const reserved = os::attempt_reserve_memory_at((char*)ZAddressHeapBase + start, ZGranuleSize, mtTest);
|
||||
char* const reserved = os::attempt_reserve_memory_at((char*)ZAddressHeapBase + start, ZGranuleSize, false /* executable */);
|
||||
if (reserved != nullptr) {
|
||||
// Success
|
||||
return reserved;
|
||||
|
||||
@ -73,7 +73,7 @@ namespace {
|
||||
static void test_reserved_size_alignment(size_t size, size_t alignment) {
|
||||
ASSERT_PRED2(is_size_aligned, size, alignment) << "Incorrect input parameters";
|
||||
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
|
||||
ReservedSpace rs = MemoryReserver::reserve(size, alignment, page_size, mtTest);
|
||||
ReservedSpace rs = MemoryReserver::reserve(size, alignment, page_size);
|
||||
|
||||
ASSERT_TRUE(rs.base() != nullptr) << "rs.special = " << rs.special();
|
||||
ASSERT_EQ(size, rs.size()) << "rs.special = " << rs.special();
|
||||
@ -101,7 +101,7 @@ namespace {
|
||||
bool large = maybe_large && UseLargePages && size >= os::large_page_size();
|
||||
size_t page_size = large ? os::large_page_size() : os::vm_page_size();
|
||||
|
||||
ReservedSpace rs = MemoryReserver::reserve(size, alignment, page_size, mtTest);
|
||||
ReservedSpace rs = MemoryReserver::reserve(size, alignment, page_size);
|
||||
MemoryReleaser releaser(&rs);
|
||||
|
||||
EXPECT_TRUE(rs.base() != nullptr) << "rs.special: " << rs.special();
|
||||
@ -217,8 +217,7 @@ namespace {
|
||||
case Commit:
|
||||
return MemoryReserver::reserve(reserve_size_aligned,
|
||||
os::vm_allocation_granularity(),
|
||||
os::vm_page_size(),
|
||||
mtTest);
|
||||
os::vm_page_size());
|
||||
}
|
||||
}
|
||||
|
||||
@ -297,7 +296,7 @@ TEST_VM(VirtualSpace, actual_committed_space_one_large_page) {
|
||||
|
||||
size_t large_page_size = os::large_page_size();
|
||||
|
||||
ReservedSpace reserved = MemoryReserver::reserve(large_page_size, large_page_size, large_page_size, mtTest);
|
||||
ReservedSpace reserved = MemoryReserver::reserve(large_page_size, large_page_size, large_page_size);
|
||||
ReservedSpaceReleaser releaser(&reserved);
|
||||
ASSERT_TRUE(reserved.is_reserved());
|
||||
|
||||
@ -366,8 +365,7 @@ class TestReservedSpace : AllStatic {
|
||||
size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
|
||||
ReservedSpace rs = MemoryReserver::reserve(size,
|
||||
alignment,
|
||||
page_size,
|
||||
mtTest);
|
||||
page_size);
|
||||
|
||||
EXPECT_TRUE(rs.base() != nullptr);
|
||||
EXPECT_EQ(rs.size(), size) << "rs.size: " << rs.size();
|
||||
@ -412,8 +410,7 @@ class TestReservedSpace : AllStatic {
|
||||
|
||||
ReservedSpace rs = MemoryReserver::reserve(size,
|
||||
alignment,
|
||||
page_size,
|
||||
mtTest);
|
||||
page_size);
|
||||
|
||||
EXPECT_TRUE(rs.base() != nullptr);
|
||||
EXPECT_EQ(rs.size(), size) << "rs.size: " << rs.size();
|
||||
@ -517,14 +514,12 @@ class TestVirtualSpace : AllStatic {
|
||||
default:
|
||||
case Default:
|
||||
case Reserve:
|
||||
return MemoryReserver::reserve(reserve_size_aligned,
|
||||
mtTest);
|
||||
return MemoryReserver::reserve(reserve_size_aligned, mtTest);
|
||||
case Disable:
|
||||
case Commit:
|
||||
return MemoryReserver::reserve(reserve_size_aligned,
|
||||
os::vm_allocation_granularity(),
|
||||
os::vm_page_size(),
|
||||
mtTest);
|
||||
os::vm_page_size());
|
||||
}
|
||||
}
|
||||
|
||||
@ -581,8 +576,7 @@ class TestVirtualSpace : AllStatic {
|
||||
|
||||
ReservedSpace reserved = MemoryReserver::reserve(large_page_size,
|
||||
large_page_size,
|
||||
large_page_size,
|
||||
mtTest);
|
||||
large_page_size);
|
||||
|
||||
ASSERT_TRUE(reserved.is_reserved());
|
||||
|
||||
|
||||
@ -113,7 +113,7 @@ TEST_VM(NMT, DISABLED_location_printing_cheap_dead_7) { test_for_dead_c_heap_blo
|
||||
#endif
|
||||
|
||||
static void test_for_mmap(size_t sz, ssize_t offset) {
|
||||
char* addr = os::reserve_memory(sz, mtTest);
|
||||
char* addr = os::reserve_memory(sz, false, mtTest);
|
||||
if (MemTracker::enabled()) {
|
||||
test_pointer(addr + offset, true, "in mmap'd memory region");
|
||||
} else {
|
||||
|
||||
@ -91,7 +91,7 @@ public:
|
||||
static void test_committed_region_impl(size_t num_pages, size_t touch_pages, int* page_num) {
|
||||
const size_t page_sz = os::vm_page_size();
|
||||
const size_t size = num_pages * page_sz;
|
||||
char* base = os::reserve_memory(size, mtThreadStack);
|
||||
char* base = os::reserve_memory(size, !ExecMem, mtThreadStack);
|
||||
bool result = os::commit_memory(base, size, !ExecMem);
|
||||
size_t index;
|
||||
ASSERT_NE(base, (char*)nullptr);
|
||||
@ -159,7 +159,7 @@ public:
|
||||
const size_t page_sz = os::vm_page_size();
|
||||
const size_t num_pages = 4;
|
||||
const size_t size = num_pages * page_sz;
|
||||
char* base = os::reserve_memory(size, mtTest);
|
||||
char* base = os::reserve_memory(size, !ExecMem, mtTest);
|
||||
ASSERT_NE(base, (char*)nullptr);
|
||||
result = os::commit_memory(base, size, !ExecMem);
|
||||
|
||||
@ -205,7 +205,7 @@ public:
|
||||
const size_t page_sz = os::vm_page_size();
|
||||
const size_t size = num_pages * page_sz;
|
||||
|
||||
char* base = os::reserve_memory(size, mtTest);
|
||||
char* base = os::reserve_memory(size, !ExecMem, mtTest);
|
||||
ASSERT_NE(base, (char*)nullptr);
|
||||
|
||||
result = os::commit_memory(base, size, !ExecMem);
|
||||
|
||||
@ -258,7 +258,7 @@ TEST_VM(os, test_print_hex_dump) {
|
||||
|
||||
// two pages, first one protected.
|
||||
const size_t ps = os::vm_page_size();
|
||||
char* two_pages = os::reserve_memory(ps * 2, mtTest);
|
||||
char* two_pages = os::reserve_memory(ps * 2, false, mtTest);
|
||||
os::commit_memory(two_pages, ps * 2, false);
|
||||
os::protect_memory(two_pages, ps, os::MEM_PROT_NONE, true);
|
||||
|
||||
@ -492,7 +492,7 @@ TEST_VM(os, realpath) {
|
||||
static inline bool can_reserve_executable_memory(void) {
|
||||
bool executable = true;
|
||||
size_t len = 128;
|
||||
char* p = os::reserve_memory(len, mtTest, executable);
|
||||
char* p = os::reserve_memory(len, executable);
|
||||
bool exec_supported = (p != nullptr);
|
||||
if (exec_supported) {
|
||||
os::release_memory(p, len);
|
||||
@ -530,7 +530,7 @@ static address reserve_multiple(int num_stripes, size_t stripe_len) {
|
||||
for (int tries = 0; tries < 256 && p == nullptr; tries ++) {
|
||||
size_t total_range_len = num_stripes * stripe_len;
|
||||
// Reserve a large contiguous area to get the address space...
|
||||
p = (address)os::reserve_memory(total_range_len, mtTest);
|
||||
p = (address)os::reserve_memory(total_range_len);
|
||||
EXPECT_NE(p, (address)nullptr);
|
||||
// .. release it...
|
||||
EXPECT_TRUE(os::release_memory((char*)p, total_range_len));
|
||||
@ -544,7 +544,7 @@ static address reserve_multiple(int num_stripes, size_t stripe_len) {
|
||||
#else
|
||||
const bool executable = stripe % 2 == 0;
|
||||
#endif
|
||||
q = (address)os::attempt_reserve_memory_at((char*)q, stripe_len, mtTest, executable);
|
||||
q = (address)os::attempt_reserve_memory_at((char*)q, stripe_len, executable);
|
||||
if (q == nullptr) {
|
||||
// Someone grabbed that area concurrently. Cleanup, then retry.
|
||||
tty->print_cr("reserve_multiple: retry (%d)...", stripe);
|
||||
@ -564,7 +564,7 @@ static address reserve_multiple(int num_stripes, size_t stripe_len) {
|
||||
static address reserve_one_commit_multiple(int num_stripes, size_t stripe_len) {
|
||||
assert(is_aligned(stripe_len, os::vm_allocation_granularity()), "Sanity");
|
||||
size_t total_range_len = num_stripes * stripe_len;
|
||||
address p = (address)os::reserve_memory(total_range_len, mtTest);
|
||||
address p = (address)os::reserve_memory(total_range_len);
|
||||
EXPECT_NE(p, (address)nullptr);
|
||||
for (int stripe = 0; stripe < num_stripes; stripe++) {
|
||||
address q = p + (stripe * stripe_len);
|
||||
@ -631,7 +631,7 @@ TEST_VM(os, release_multi_mappings) {
|
||||
PRINT_MAPPINGS("B");
|
||||
|
||||
// ...re-reserve the middle stripes. This should work unless release silently failed.
|
||||
address p2 = (address)os::attempt_reserve_memory_at((char*)p_middle_stripes, middle_stripe_len, mtTest);
|
||||
address p2 = (address)os::attempt_reserve_memory_at((char*)p_middle_stripes, middle_stripe_len);
|
||||
|
||||
ASSERT_EQ(p2, p_middle_stripes);
|
||||
|
||||
@ -654,7 +654,7 @@ TEST_VM_ASSERT_MSG(os, release_bad_ranges, ".*bad release") {
|
||||
#else
|
||||
TEST_VM(os, release_bad_ranges) {
|
||||
#endif
|
||||
char* p = os::reserve_memory(4 * M, mtTest);
|
||||
char* p = os::reserve_memory(4 * M);
|
||||
ASSERT_NE(p, (char*)nullptr);
|
||||
// Release part of range
|
||||
ASSERT_FALSE(os::release_memory(p, M));
|
||||
@ -689,7 +689,7 @@ TEST_VM(os, release_one_mapping_multi_commits) {
|
||||
|
||||
// // make things even more difficult by trying to reserve at the border of the region
|
||||
address border = p + num_stripes * stripe_len;
|
||||
address p2 = (address)os::attempt_reserve_memory_at((char*)border, stripe_len, mtTest);
|
||||
address p2 = (address)os::attempt_reserve_memory_at((char*)border, stripe_len);
|
||||
PRINT_MAPPINGS("B");
|
||||
|
||||
ASSERT_TRUE(p2 == nullptr || p2 == border);
|
||||
@ -730,7 +730,7 @@ TEST_VM(os, show_mappings_small_range) {
|
||||
TEST_VM(os, show_mappings_full_range) {
|
||||
// Reserve a small range and fill it with a marker string, should show up
|
||||
// on implementations displaying range snippets
|
||||
char* p = os::reserve_memory(1 * M, mtTest);
|
||||
char* p = os::reserve_memory(1 * M, false, mtInternal);
|
||||
if (p != nullptr) {
|
||||
if (os::commit_memory(p, 1 * M, false)) {
|
||||
strcpy(p, "ABCDEFGHIJKLMNOPQRSTUVWXYZ");
|
||||
@ -754,7 +754,7 @@ TEST_VM(os, find_mapping_simple) {
|
||||
|
||||
// A simple allocation
|
||||
{
|
||||
address p = (address)os::reserve_memory(total_range_len, mtTest);
|
||||
address p = (address)os::reserve_memory(total_range_len);
|
||||
ASSERT_NE(p, (address)nullptr);
|
||||
PRINT_MAPPINGS("A");
|
||||
for (size_t offset = 0; offset < total_range_len; offset += 4711) {
|
||||
@ -1059,9 +1059,9 @@ TEST_VM(os, open_O_CLOEXEC) {
|
||||
}
|
||||
|
||||
TEST_VM(os, reserve_at_wish_address_shall_not_replace_mappings_smallpages) {
|
||||
char* p1 = os::reserve_memory(M, mtTest);
|
||||
char* p1 = os::reserve_memory(M, false, mtTest);
|
||||
ASSERT_NE(p1, nullptr);
|
||||
char* p2 = os::attempt_reserve_memory_at(p1, M, mtTest);
|
||||
char* p2 = os::attempt_reserve_memory_at(p1, M);
|
||||
ASSERT_EQ(p2, nullptr); // should have failed
|
||||
os::release_memory(p1, M);
|
||||
}
|
||||
@ -1069,7 +1069,7 @@ TEST_VM(os, reserve_at_wish_address_shall_not_replace_mappings_smallpages) {
|
||||
TEST_VM(os, reserve_at_wish_address_shall_not_replace_mappings_largepages) {
|
||||
if (UseLargePages && !os::can_commit_large_page_memory()) { // aka special
|
||||
const size_t lpsz = os::large_page_size();
|
||||
char* p1 = os::reserve_memory_aligned(lpsz, lpsz, mtTest);
|
||||
char* p1 = os::reserve_memory_aligned(lpsz, lpsz, false);
|
||||
ASSERT_NE(p1, nullptr);
|
||||
char* p2 = os::reserve_memory_special(lpsz, lpsz, lpsz, p1, false);
|
||||
ASSERT_EQ(p2, nullptr); // should have failed
|
||||
@ -1095,7 +1095,7 @@ TEST_VM(os, free_without_uncommit) {
|
||||
const size_t pages = 64;
|
||||
const size_t size = pages * page_sz;
|
||||
|
||||
char* base = os::reserve_memory(size, mtTest);
|
||||
char* base = os::reserve_memory(size, false, mtTest);
|
||||
ASSERT_NE(base, (char*) nullptr);
|
||||
ASSERT_TRUE(os::commit_memory(base, size, false));
|
||||
|
||||
|
||||
@ -34,9 +34,9 @@
|
||||
TEST_VM(os_aix, aix_reserve_at_non_shmlba_aligned_address) {
|
||||
if (os::vm_page_size() != 4*K && !os::Aix::supports_64K_mmap_pages()) {
|
||||
// With this condition true shmget() is used inside
|
||||
char* p = os::attempt_reserve_memory_at((char*)0x1f00000, M, mtTest);
|
||||
char* p = os::attempt_reserve_memory_at((char*)0x1f00000, M);
|
||||
ASSERT_EQ(p, nullptr); // should have failed
|
||||
p = os::attempt_reserve_memory_at((char*)((64 * G) + M), M, mtTest);
|
||||
p = os::attempt_reserve_memory_at((char*)((64 * G) + M), M);
|
||||
ASSERT_EQ(p, nullptr); // should have failed
|
||||
}
|
||||
}
|
||||
|
||||
@ -354,7 +354,7 @@ TEST_VM(os_linux, pretouch_thp_and_use_concurrent) {
|
||||
const size_t size = 1 * G;
|
||||
const bool useThp = UseTransparentHugePages;
|
||||
UseTransparentHugePages = true;
|
||||
char* const heap = os::reserve_memory(size, mtTest);
|
||||
char* const heap = os::reserve_memory(size, false, mtInternal);
|
||||
EXPECT_NE(heap, nullptr);
|
||||
EXPECT_TRUE(os::commit_memory(heap, size, false));
|
||||
|
||||
|
||||
@ -157,7 +157,7 @@ public:
|
||||
// the hole.
|
||||
const uintptr_t candidate = nth_bit(i);
|
||||
if ((candidate + _len) <= ARMB_constants::absolute_max) {
|
||||
_base = os::attempt_reserve_memory_at((char*)candidate, _len, mtTest);
|
||||
_base = os::attempt_reserve_memory_at((char*)candidate, _len);
|
||||
}
|
||||
}
|
||||
if (_base == nullptr) {
|
||||
@ -165,8 +165,8 @@ public:
|
||||
}
|
||||
// Release total mapping, remap the individual non-holy parts
|
||||
os::release_memory(_base, _len);
|
||||
_p1 = os::attempt_reserve_memory_at(_base + _p1_offset, _p1_size, mtTest);
|
||||
_p2 = os::attempt_reserve_memory_at(_base + _p2_offset, _p2_size, mtTest);
|
||||
_p1 = os::attempt_reserve_memory_at(_base + _p1_offset, _p1_size);
|
||||
_p2 = os::attempt_reserve_memory_at(_base + _p2_offset, _p2_size);
|
||||
if (_p1 == nullptr || _p2 == nullptr) {
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -57,7 +57,7 @@ public class MallocRoundingReportTest {
|
||||
// NMT does not track memory allocations less than 1KB, and rounds to the nearest KB
|
||||
NMTTestUtils.runJcmdSummaryReportAndCheckOutput(
|
||||
"Test (reserved=" + numKB + "KB, committed=" + numKB + "KB)",
|
||||
"(malloc=" + numKB + "KB tag=Test #1) (at peak)" // (malloc=1KB tag=Test #1) (at peak)
|
||||
"(malloc=" + numKB + "KB #1) (at peak)"
|
||||
);
|
||||
|
||||
wb.NMTFree(mallocd_total);
|
||||
@ -65,7 +65,7 @@ public class MallocRoundingReportTest {
|
||||
// Run 'jcmd <pid> VM.native_memory summary', check for expected output
|
||||
NMTTestUtils.runJcmdSummaryReportAndCheckOutput(
|
||||
"Test (reserved=0KB, committed=0KB)",
|
||||
"(malloc=0KB tag=Test) (peak=" + numKB + "KB #1)"
|
||||
"(malloc=0KB) (peak=" + numKB + "KB #1)"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@ -46,7 +46,7 @@ public class MallocTestType {
|
||||
|
||||
NMTTestUtils.runJcmdSummaryReportAndCheckOutput(
|
||||
new String[]{"Test (reserved=384KB, committed=384KB)",
|
||||
"(malloc=384KB tag=Test #2) (at peak)"});
|
||||
"(malloc=384KB #2) (at peak)"});
|
||||
|
||||
wb.NMTFree(memAlloc3); // current +256K #1 peak +384K #2
|
||||
long memAlloc1 = wb.NMTMalloc(512 * 1024); // current +768K #2 peak +768K #2
|
||||
@ -54,13 +54,13 @@ public class MallocTestType {
|
||||
|
||||
NMTTestUtils.runJcmdSummaryReportAndCheckOutput(
|
||||
new String[]{"Test (reserved=512KB, committed=512KB)",
|
||||
"(malloc=512KB tag=Test #1) (peak=768KB #2)"});
|
||||
"(malloc=512KB #1) (peak=768KB #2)"});
|
||||
|
||||
// Free the memory allocated by NMTAllocTest
|
||||
wb.NMTFree(memAlloc1); // current 0K #0 peak +768K #2
|
||||
|
||||
NMTTestUtils.runJcmdSummaryReportAndCheckOutput(
|
||||
new String[]{"Test (reserved=0KB, committed=0KB)",
|
||||
"(malloc=0KB tag=Test) (peak=768KB #2)"});
|
||||
"(malloc=0KB) (peak=768KB #2)"});
|
||||
}
|
||||
}
|
||||
|
||||
@ -72,7 +72,7 @@ public class MallocTrackingVerify {
|
||||
|
||||
NMTTestUtils.runJcmdSummaryReportAndCheckOutput(
|
||||
"Test (reserved=4KB, committed=4KB)",
|
||||
"(malloc=4KB tag=Test #" + mallocd_memory.size() + ") (at peak)"
|
||||
"(malloc=4KB #" + mallocd_memory.size() + ") (at peak)"
|
||||
);
|
||||
|
||||
// Free
|
||||
@ -83,7 +83,7 @@ public class MallocTrackingVerify {
|
||||
// Run 'jcmd <pid> VM.native_memory summary', check for expected output
|
||||
NMTTestUtils.runJcmdSummaryReportAndCheckOutput(
|
||||
"Test (reserved=0KB, committed=0KB)",
|
||||
"(malloc=0KB tag=Test) (peak=4KB #" + + mallocd_memory.size() + ")"
|
||||
"(malloc=0KB) (peak=4KB #" + + mallocd_memory.size() + ")"
|
||||
);
|
||||
|
||||
}
|
||||
|
||||
@ -61,7 +61,7 @@ public class ThreadedMallocTestType {
|
||||
// Run 'jcmd <pid> VM.native_memory summary'
|
||||
NMTTestUtils.runJcmdSummaryReportAndCheckOutput(
|
||||
"Test (reserved=896KB, committed=896KB)",
|
||||
"(malloc=896KB tag=Test #3) (at peak)"
|
||||
"(malloc=896KB #3) (at peak)"
|
||||
);
|
||||
|
||||
Thread freeThread = new Thread() {
|
||||
@ -78,7 +78,7 @@ public class ThreadedMallocTestType {
|
||||
|
||||
NMTTestUtils.runJcmdSummaryReportAndCheckOutput(
|
||||
"Test (reserved=0KB, committed=0KB)",
|
||||
"(malloc=0KB tag=Test) (peak=896KB #3)"
|
||||
"(malloc=0KB) (peak=896KB #3)"
|
||||
);
|
||||
}
|
||||
}
|