make memory functions void and remove error strings

openjdk/jdk (mirror of https://github.com/openjdk/jdk.git)
commit 23bd4decab, parent 9666743a76
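The shape of the change, in one sketch: os::uncommit_memory, os::release_memory, os::unmap_memory, and os::release_memory_special now return void and report failure via fatal() themselves, so call sites lose their bool checks and the optional err_msg parameters disappear from the signatures. Below is a minimal standalone illustration of the convention; release_memory_or_die is a hypothetical stand-in, not a HotSpot function, and HotSpot calls fatal() with RANGEFMT where this sketch calls abort().

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <sys/mman.h>

// Hypothetical stand-in for the new convention: return void and die on
// failure, instead of returning bool and threading an error string
// through every caller.
static void release_memory_or_die(char* addr, size_t bytes) {
  if (::munmap(addr, bytes) != 0) {
    ::fprintf(stderr, "Failed to release [%p, %p)\n",
              static_cast<void*>(addr), static_cast<void*>(addr + bytes));
    ::abort();  // HotSpot calls fatal("Failed to release " RANGEFMT, ...) here
  }
}

int main() {
  const size_t len = 1u << 20;
  void* raw = ::mmap(nullptr, len, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED) return 1;
  // Old style:  ASSERT_TRUE(os::release_memory(p, len));
  // New style:  a plain statement; failure is fatal inside the callee.
  release_memory_or_die(static_cast<char*>(raw), len);
  return 0;
}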
@@ -1733,7 +1733,7 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
   return true;
 }
 
-bool os::remove_stack_guard_pages(char* addr, size_t size, const char* err_msg) {
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
   // Do not call this; no need to commit stack pages on AIX.
   ShouldNotReachHere();
   return true;
@@ -1763,8 +1763,9 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
 
 // If this is a growable mapping, remove the guard pages entirely by
 // munmap()ping them. If not, just call uncommit_memory().
-bool os::remove_stack_guard_pages(char* addr, size_t size, const char* err_msg) {
-  return os::uncommit_memory(addr, size, false, err_msg);
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
+  os::uncommit_memory(addr, size, false);
+  return true;
 }
 
 // 'requested_addr' is only treated as a hint, the return value may or
@@ -3563,13 +3563,14 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
 // It's safe to always unmap guard pages for primordial thread because we
 // always place it right after end of the mapped region.
 
-bool os::remove_stack_guard_pages(char* addr, size_t size, const char* err_msg) {
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
   uintptr_t stack_extent, stack_base;
 
   if (os::is_primordial_thread()) {
     return ::munmap(addr, size) == 0;
   }
-  return os::uncommit_memory(addr, size, false, err_msg);
+  os::uncommit_memory(addr, size, false);
+  return true;
 }
 
 // 'requested_addr' is only treated as a hint, the return value may or
@@ -3257,11 +3257,10 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
   // Do manual alignment
   aligned_base = align_up(extra_base, alignment);
 
-  bool rc = (file_desc != -1) ? os::unmap_memory(extra_base, extra_size) :
-                                os::release_memory(extra_base, extra_size);
-  assert(rc, "release failed");
-  if (!rc) {
-    return nullptr;
+  if ((file_desc != -1)) {
+    os::unmap_memory(extra_base, extra_size);
+  } else {
+    os::release_memory(extra_base, extra_size);
   }
 
   // Attempt to map, into the just vacated space, the slightly smaller aligned area.
@@ -3657,8 +3656,9 @@ bool os::pd_create_stack_guard_pages(char* addr, size_t size) {
   return os::commit_memory(addr, size, !ExecMem);
 }
 
-bool os::remove_stack_guard_pages(char* addr, size_t size, const char* err_msg) {
-  return os::uncommit_memory(addr, size, false, err_msg);
+bool os::remove_stack_guard_pages(char* addr, size_t size) {
+  os::uncommit_memory(addr, size, false);
+  return true;
 }
 
 static bool protect_pages_individually(char* addr, size_t bytes, unsigned int p, DWORD *old_status) {
@@ -1325,9 +1325,7 @@ char* FileMapInfo::map_auxiliary_region(int region_index, bool read_only) {
 
   if (VerifySharedSpaces && !r->check_region_crc(mapped_base)) {
     aot_log_error(aot)("region %d CRC error", region_index);
-    char err_msg[] = "os::unmap_memory of region 2147483647 failed";
-    os::snprintf_checked(err_msg, sizeof(err_msg), "os::unmap_memory of region %d failed", region_index);
-    os::unmap_memory(mapped_base, r->used_aligned(), err_msg);
+    os::unmap_memory(mapped_base, r->used_aligned());
     return nullptr;
   }
 
@@ -78,12 +78,13 @@ bool PSVirtualSpace::shrink_by(size_t bytes) {
   }
 
   char* const base_addr = committed_high_addr() - bytes;
-  bool result = special() || os::uncommit_memory(base_addr, bytes);
-  if (result) {
-    _committed_high_addr -= bytes;
+  if (!special()) {
+    os::uncommit_memory(base_addr, bytes);
   }
+  _committed_high_addr -= bytes;
 
-  return result;
+  return true;
 }
 
 #ifndef PRODUCT
@@ -1768,7 +1768,7 @@ void ShenandoahHeap::scan_roots_for_iteration(ShenandoahScanObjectStack* oop_sta
 
 void ShenandoahHeap::reclaim_aux_bitmap_for_iteration() {
   if (!_aux_bitmap_region_special) {
-    os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false, "Auxiliary marking bitmap uncommit failed");
+    os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false);
   }
 }
 
@@ -2618,7 +2618,7 @@ void ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
   size_t len = _bitmap_bytes_per_slice;
 
   char* addr = (char*) _bitmap_region.start() + off;
-  os::uncommit_memory(addr, len, false, "Bitmap slice uncommit failed");
+  os::uncommit_memory(addr, len, false);
 }
 
 void ShenandoahHeap::forbid_uncommit() {
@@ -811,7 +811,7 @@ void ShenandoahHeapRegion::do_commit() {
 void ShenandoahHeapRegion::do_uncommit() {
   ShenandoahHeap* heap = ShenandoahHeap::heap();
   if (!heap->is_heap_region_special()) {
-    os::uncommit_memory((char *) bottom(), RegionSizeBytes, false, "Region uncommit failed");
+    os::uncommit_memory((char *) bottom(), RegionSizeBytes, false);
   }
   if (!heap->is_bitmap_region_special()) {
     heap->uncommit_bitmap_slice(this);
@@ -229,13 +229,13 @@ ReservedSpace MemoryReserver::reserve(size_t size,
                                       mem_tag);
 }
 
-bool MemoryReserver::release(const ReservedSpace& reserved) {
+void MemoryReserver::release(const ReservedSpace& reserved) {
   assert(reserved.is_reserved(), "Precondition");
 
   if (reserved.special()) {
-    return os::release_memory_special(reserved.base(), reserved.size());
+    os::release_memory_special(reserved.base(), reserved.size());
   } else {
-    return os::release_memory(reserved.base(), reserved.size());
+    os::release_memory(reserved.base(), reserved.size());
   }
 }
 
@@ -70,7 +70,7 @@ public:
                                MemTag mem_tag);
 
   // Release reserved memory
-  static bool release(const ReservedSpace& reserved);
+  static void release(const ReservedSpace& reserved);
 };
 
 class CodeMemoryReserver : AllStatic {
@@ -190,7 +190,7 @@ void VirtualSpaceNode::uncommit_range(MetaWord* p, size_t word_size) {
   }
 
   // Uncommit...
-  os::uncommit_memory((char*)p, word_size * BytesPerWord, false, "Failed to uncommit metaspace.");
+  os::uncommit_memory((char*)p, word_size * BytesPerWord, false);
 
   ASAN_POISON_MEMORY_REGION((char*)p, word_size * BytesPerWord);
@@ -2257,22 +2257,13 @@ void os::commit_memory_or_exit(char* addr, size_t size, size_t alignment_hint,
   os::pd_commit_memory_or_exit(addr, size, alignment_hint, executable, mesg);
   MemTracker::record_virtual_memory_commit((address)addr, size, CALLER_PC);
 }
-// Helper for os::uncommit_memory and os::unmap_memory
-static void print_err_fatal(char* addr, size_t bytes, const char* err_msg1, const char* err_msg2) {
-  const char* s = ": ";
-  if (err_msg1 == nullptr) {
-    err_msg1 = "";
-    s = "";
-  }
-  fatal("%s%s%s" RANGEFMT, err_msg1, s, err_msg2, RANGEFMTARGS(addr, bytes));
-}
 
 // The scope of NmtVirtualMemoryLocker covers both pd_uncommit_memory and record_virtual_memory_uncommit because
 // these operations must happen atomically to avoid races causing NMT to fall out of sync with the OS reality.
 // We do not have the same lock protection for pd_commit_memory and record_virtual_memory_commit.
 // We assume that there is some external synchronization that prevents a region from being uncommitted
 // before it is finished being committed.
-bool os::uncommit_memory(char* addr, size_t bytes, bool executable, const char* err_msg) {
+void os::uncommit_memory(char* addr, size_t bytes, bool executable) {
   assert_nonempty_range(addr, bytes);
   bool res;
   if (MemTracker::enabled()) {
@@ -2286,11 +2277,9 @@ bool os::uncommit_memory(char* addr, size_t bytes, bool executable, const char*
   }
 
   if (!res) {
-    print_err_fatal(addr, bytes, err_msg, "Failed to uncommit ");
+    fatal("Failed to uncommit " RANGEFMT, RANGEFMTARGS(addr, bytes));
   }
   log_debug(os, map)("Uncommitted " RANGEFMT, RANGEFMTARGS(addr, bytes));
-
-  return true;
 }
 
 // The scope of NmtVirtualMemoryLocker covers both pd_release_memory and record_virtual_memory_release because
@@ -2298,7 +2287,7 @@ bool os::uncommit_memory(char* addr, size_t bytes, bool executable, const char*
 // We do not have the same lock protection for pd_reserve_memory and record_virtual_memory_reserve.
 // We assume that there is some external synchronization that prevents a region from being released
 // before it is finished being reserved.
-bool os::release_memory(char* addr, size_t bytes) {
+void os::release_memory(char* addr, size_t bytes) {
   assert_nonempty_range(addr, bytes);
   bool res;
   if (MemTracker::enabled()) {
@@ -2314,8 +2303,6 @@ bool os::release_memory(char* addr, size_t bytes) {
     fatal("Failed to release " RANGEFMT, RANGEFMTARGS(addr, bytes));
   }
   log_debug(os, map)("Released " RANGEFMT, RANGEFMTARGS(addr, bytes));
-
-  return true;
 }
 
 // Prints all mappings
@@ -2384,7 +2371,7 @@ char* os::map_memory(int fd, const char* file_name, size_t file_offset,
   return result;
 }
 
-bool os::unmap_memory(char *addr, size_t bytes, const char* err_msg) {
+void os::unmap_memory(char *addr, size_t bytes) {
   bool result;
   if (MemTracker::enabled()) {
     MemTracker::NmtVirtualMemoryLocker nvml;
@@ -2396,9 +2383,8 @@ bool os::unmap_memory(char *addr, size_t bytes, const char* err_msg) {
     result = pd_unmap_memory(addr, bytes);
   }
   if (!result) {
-    print_err_fatal(addr, bytes, err_msg, "Failed to unmap memory ");
+    fatal("Failed to unmap memory " RANGEFMT, RANGEFMTARGS(addr, bytes));
   }
-  return true;
 }
 
 void os::disclaim_memory(char *addr, size_t bytes) {
@@ -2426,7 +2412,7 @@ char* os::reserve_memory_special(size_t size, size_t alignment, size_t page_size
   return result;
 }
 
-bool os::release_memory_special(char* addr, size_t bytes) {
+void os::release_memory_special(char* addr, size_t bytes) {
   bool res;
   if (MemTracker::enabled()) {
     MemTracker::NmtVirtualMemoryLocker nvml;
@@ -2440,7 +2426,6 @@ bool os::release_memory_special(char* addr, size_t bytes) {
   if (!res) {
     fatal("Failed to release memory special " RANGEFMT, RANGEFMTARGS(addr, bytes));
   }
-  return true;
 }
 
 // Convenience wrapper around naked_short_sleep to allow for longer sleep
@@ -485,8 +485,8 @@ class os: AllStatic {
   static void commit_memory_or_exit(char* addr, size_t size,
                                     size_t alignment_hint,
                                     bool executable, const char* mesg);
-  static bool uncommit_memory(char* addr, size_t bytes, bool executable = false, const char* err_msg = nullptr);
-  static bool release_memory(char* addr, size_t bytes);
+  static void uncommit_memory(char* addr, size_t bytes, bool executable = false);
+  static void release_memory(char* addr, size_t bytes);
 
   // Does the platform support trimming the native heap?
   static bool can_trim_native_heap();
@@ -515,7 +515,7 @@ class os: AllStatic {
   static bool unguard_memory(char* addr, size_t bytes);
   static bool create_stack_guard_pages(char* addr, size_t bytes);
   static bool pd_create_stack_guard_pages(char* addr, size_t bytes);
-  static bool remove_stack_guard_pages(char* addr, size_t bytes, const char* err_msg = nullptr);
+  static bool remove_stack_guard_pages(char* addr, size_t bytes);
   // Helper function to create a new file with template jvmheap.XXXXXX.
   // Returns a valid fd on success or else returns -1
   static int create_file_for_heap(const char* dir);
@@ -531,7 +531,7 @@ class os: AllStatic {
   static char* map_memory(int fd, const char* file_name, size_t file_offset,
                           char *addr, size_t bytes, MemTag mem_tag, bool read_only = false,
                           bool allow_exec = false);
-  static bool unmap_memory(char *addr, size_t bytes, const char* err_msg = nullptr);
+  static void unmap_memory(char *addr, size_t bytes);
   static void disclaim_memory(char *addr, size_t bytes);
   static void realign_memory(char *addr, size_t bytes, size_t alignment_hint);
 
@@ -554,7 +554,7 @@ class os: AllStatic {
   // reserve, commit and pin the entire memory region
   static char* reserve_memory_special(size_t size, size_t alignment, size_t page_size,
                                       char* addr, bool executable);
-  static bool release_memory_special(char* addr, size_t bytes);
+  static void release_memory_special(char* addr, size_t bytes);
   static void large_page_init();
   static size_t large_page_size();
   static bool can_commit_large_page_memory();
@@ -116,7 +116,7 @@ void StackOverflow::remove_stack_guard_pages() {
   size_t len = stack_guard_zone_size();
 
   if (os::must_commit_stack_guard_pages()) {
-    if (os::remove_stack_guard_pages((char *) low_addr, len, "Attempt to deallocate stack guard pages failed")) {
+    if (os::remove_stack_guard_pages((char *) low_addr, len)) {
       _stack_guard_state = stack_guard_unused;
     } else {
       log_warning(os, thread)("Attempt to deallocate stack guard pages failed ("
@@ -168,8 +168,7 @@ public:
     ASSERT_EQ(vmem, ZVirtualMemory(base_offset + 2 * ZGranuleSize, ZGranuleSize));
     _reserver->unreserve(vmem);
 
-    const bool released = os::release_memory((char*)untype(blocked), ZGranuleSize);
-    ASSERT_TRUE(released);
+    os::release_memory((char*)untype(blocked), ZGranuleSize);
   }
 
   void test_remove_from_low() {
@@ -34,7 +34,7 @@ namespace {
 public:
   MemoryReleaser(ReservedSpace* rs) : _rs(rs) { }
   ~MemoryReleaser() {
-    EXPECT_TRUE(MemoryReserver::release(*_rs));
+    MemoryReserver::release(*_rs);
   }
 };
 
@@ -355,9 +355,9 @@ class TestReservedSpace : AllStatic {
 
   static void release_memory_for_test(ReservedSpace rs) {
     if (rs.special()) {
-      EXPECT_TRUE(os::release_memory_special(rs.base(), rs.size()));
+      os::release_memory_special(rs.base(), rs.size());
     } else {
-      EXPECT_TRUE(os::release_memory(rs.base(), rs.size()));
+      os::release_memory(rs.base(), rs.size());
     }
   }
 
@@ -511,7 +511,7 @@ static inline bool can_reserve_executable_memory(void) {
 static void carefully_release_multiple(address start, int num_stripes, size_t stripe_len) {
   for (int stripe = 0; stripe < num_stripes; stripe++) {
     address q = start + (stripe * stripe_len);
-    EXPECT_TRUE(os::release_memory((char*)q, stripe_len));
+    os::release_memory((char*)q, stripe_len);
   }
 }
@@ -534,7 +534,7 @@ static address reserve_multiple(int num_stripes, size_t stripe_len) {
   p = (address)os::reserve_memory(total_range_len, mtTest);
   EXPECT_NE(p, (address)nullptr);
   // .. release it...
-  EXPECT_TRUE(os::release_memory((char*)p, total_range_len));
+  os::release_memory((char*)p, total_range_len);
   // ... re-reserve in the same spot multiple areas...
   for (int stripe = 0; stripe < num_stripes; stripe++) {
     address q = p + (stripe * stripe_len);
@@ -627,7 +627,7 @@ TEST_VM(os, release_multi_mappings) {
     // On Windows, temporarily switch on UseNUMAInterleaving to allow release_memory to release
     // multiple mappings in one go (otherwise we assert, which we test too, see death test below).
    WINDOWS_ONLY(NUMASwitcher b(true);)
-    ASSERT_TRUE(os::release_memory((char*)p_middle_stripes, middle_stripe_len));
+    os::release_memory((char*)p_middle_stripes, middle_stripe_len);
   }
   PRINT_MAPPINGS("B");
 
@@ -641,7 +641,7 @@ TEST_VM(os, release_multi_mappings) {
   // Clean up. Release all mappings.
   {
     WINDOWS_ONLY(NUMASwitcher b(true);) // allow release_memory to release multiple regions
-    ASSERT_TRUE(os::release_memory((char*)p, total_range_len));
+    os::release_memory((char*)p, total_range_len);
   }
 }
 #endif // !AIX
@@ -650,29 +650,72 @@ TEST_VM(os, release_multi_mappings) {
 // On Windows, test that we recognize bad ranges.
 // On debug this would assert. Test that too.
 // On other platforms, we are unable to recognize bad ranges.
-#ifdef ASSERT
-TEST_VM_ASSERT_MSG(os, release_bad_ranges, ".*bad release") {
-#else
-TEST_VM(os, release_bad_ranges) {
-#endif
-  char* p = os::reserve_memory(4 * M, mtTest);
-  ASSERT_NE(p, (char*)nullptr);
-  // Release part of range
-  ASSERT_FALSE(os::release_memory(p, M));
-  // Release part of range
-  ASSERT_FALSE(os::release_memory(p + M, M));
-  // Release more than the range (explicitly switch off NUMA here
-  // to make os::release_memory() test more strictly and to not
-  // accidentally release neighbors)
-  {
-    NUMASwitcher b(false);
-    ASSERT_FALSE(os::release_memory(p, M * 5));
-    ASSERT_FALSE(os::release_memory(p - M, M * 5));
-    ASSERT_FALSE(os::release_memory(p - M, M * 6));
-  }
-
-  ASSERT_TRUE(os::release_memory(p, 4 * M)); // Release for real
-  ASSERT_FALSE(os::release_memory(p, 4 * M)); // Again, should fail
+static char* setup_release_test_memory() {
+  char* p = os::reserve_memory(4 * M, mtTest);
+  EXPECT_NE(p, (char*)nullptr);
+  return p;
+}
+
+#ifdef ASSERT
+TEST_VM_ASSERT_MSG(os, release_bad_range_start, ".*bad release") {
+#else
+TEST_VM_FATAL_ERROR_MSG(os, release_bad_range_start, ".*Failed to release.*") {
+#endif
+  char* p = setup_release_test_memory();
+  os::release_memory(p, M); // Release part of the range
+}
+
+#ifdef ASSERT
+TEST_VM_ASSERT_MSG(os, release_bad_range_middle, ".*bad release") {
+#else
+TEST_VM_FATAL_ERROR_MSG(os, release_bad_range_middle, ".*Failed to release.*") {
+#endif
+  char* p = setup_release_test_memory();
+  os::release_memory(p + M, M); // Release middle part
+}
+
+// Release more than the range (explicitly switch off NUMA here
+// to make os::release_memory() test more strict and to not
+// accidentally release neighbors)
+#ifdef ASSERT
+TEST_VM_ASSERT_MSG(os, release_beyond_range1, ".*bad release") {
+#else
+TEST_VM_FATAL_ERROR_MSG(os, release_beyond_range1, ".*Failed to release.*") {
+#endif
+  char* p = setup_release_test_memory();
+  NUMASwitcher b(false);
+  os::release_memory(p, M * 5);
+}
+
+#ifdef ASSERT
+TEST_VM_ASSERT_MSG(os, release_beyond_range2, ".*bad release") {
+#else
+TEST_VM_FATAL_ERROR_MSG(os, release_beyond_range2, ".*Failed to release.*") {
+#endif
+  char* p = setup_release_test_memory();
+  NUMASwitcher b(false);
+  os::release_memory(p - M, M * 5);
+}
+
+#ifdef ASSERT
+TEST_VM_ASSERT_MSG(os, release_beyond_range3, ".*bad release") {
+#else
+TEST_VM_FATAL_ERROR_MSG(os, release_beyond_range3, ".*Failed to release.*") {
+#endif
+  char* p = setup_release_test_memory();
+  NUMASwitcher b(false);
+  os::release_memory(p - M, M * 6);
+}
+
+#ifdef ASSERT
+TEST_VM_ASSERT_MSG(os, release_already_released, ".*bad release") {
+#else
+TEST_VM_FATAL_ERROR_MSG(os, release_already_released, ".*Failed to release.*") {
+#endif
+  char* p = setup_release_test_memory();
+  os::release_memory(p, 4 * M); // Release for real
+  os::release_memory(p, 4 * M); // Again, should fail
 }
 #endif // _WIN32
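A note on the test restructuring in the hunk above: since os::release_memory no longer returns false on a bad release but terminates the VM, a single test can no longer probe several bad ranges in sequence. Each bad-release case therefore becomes its own death test, expecting the assert message on debug builds (TEST_VM_ASSERT_MSG) and the "Failed to release" fatal error on release builds (TEST_VM_FATAL_ERROR_MSG).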
@@ -695,11 +738,11 @@ TEST_VM(os, release_one_mapping_multi_commits) {
 
   ASSERT_TRUE(p2 == nullptr || p2 == border);
 
-  ASSERT_TRUE(os::release_memory((char*)p, total_range_len));
+  os::release_memory((char*)p, total_range_len);
   PRINT_MAPPINGS("C");
 
   if (p2 != nullptr) {
-    ASSERT_TRUE(os::release_memory((char*)p2, stripe_len));
+    os::release_memory((char*)p2, stripe_len);
     PRINT_MAPPINGS("D");
   }
 }
@@ -772,7 +815,7 @@ TEST_VM(os, find_mapping_simple) {
   if (os::win32::find_mapping(p + total_range_len, &mapping_info)) {
     ASSERT_NE(mapping_info.base, p);
   }
-  ASSERT_TRUE(os::release_memory((char*)p, total_range_len));
+  os::release_memory((char*)p, total_range_len);
   PRINT_MAPPINGS("B");
   ASSERT_FALSE(os::win32::find_mapping(p, &mapping_info));
 }
@@ -801,7 +844,7 @@ TEST_VM(os, find_mapping_2) {
   if (os::win32::find_mapping(p + total_range_len, &mapping_info)) {
     ASSERT_NE(mapping_info.base, p);
   }
-  ASSERT_TRUE(os::release_memory((char*)p, total_range_len));
+  os::release_memory((char*)p, total_range_len);
   PRINT_MAPPINGS("B");
   ASSERT_FALSE(os::win32::find_mapping(p, &mapping_info));
 }
@@ -1132,11 +1175,11 @@ TEST_VM(os, commit_memory_or_exit) {
   ASSERT_NOT_NULL(base);
   os::commit_memory_or_exit(base, size, false, "Commit failed.");
   strcpy(base, letters);
-  ASSERT_TRUE(os::uncommit_memory(base, size, false));
+  os::uncommit_memory(base, size, false);
   os::commit_memory_or_exit(base, size, page_sz, false, "Commit with alignment hint failed.");
   strcpy(base, letters);
-  ASSERT_TRUE(os::uncommit_memory(base, size, false));
-  EXPECT_TRUE(os::release_memory(base, size));
+  os::uncommit_memory(base, size, false);
+  os::release_memory(base, size);
 }
 
 #if !defined(_AIX)
@@ -1152,7 +1195,7 @@ TEST_VM(os, map_memory_to_file) {
   char* result = os::map_memory_to_file(size, fd, mtTest);
   ASSERT_NOT_NULL(result);
   EXPECT_EQ(strcmp(letters, result), 0);
-  EXPECT_TRUE(os::unmap_memory(result, size));
+  os::unmap_memory(result, size);
   ::close(fd);
 }
@@ -1169,7 +1212,7 @@ TEST_VM(os, map_unmap_memory) {
   char* result = os::map_memory(fd, path, 0, nullptr, size, mtTest, true, false);
   ASSERT_NOT_NULL(result);
   EXPECT_EQ(strcmp(letters, result), 0);
-  EXPECT_TRUE(os::unmap_memory(result, size));
+  os::unmap_memory(result, size);
   ::close(fd);
 }
@@ -1184,7 +1227,7 @@ TEST_VM(os, map_memory_to_file_aligned) {
   char* result = os::map_memory_to_file_aligned(os::vm_allocation_granularity(), os::vm_allocation_granularity(), fd, mtTest);
   ASSERT_NOT_NULL(result);
   EXPECT_EQ(strcmp(letters, result), 0);
-  EXPECT_TRUE(os::unmap_memory(result, os::vm_allocation_granularity()));
+  os::unmap_memory(result, os::vm_allocation_granularity());
   ::close(fd);
 }
@@ -379,8 +379,8 @@ TEST_VM(os_linux, pretouch_thp_and_use_concurrent) {
   for (int i = 0; i < 1000; i++)
     EXPECT_EQ(*iptr++, i);
 
-  EXPECT_TRUE(os::uncommit_memory(heap, size, false));
-  EXPECT_TRUE(os::release_memory(heap, size));
+  os::uncommit_memory(heap, size, false);
+  os::release_memory(heap, size);
   UseTransparentHugePages = useThp;
 }