From e825ccfe6652577e4e828e8e4dfe19be0ea77813 Mon Sep 17 00:00:00 2001
From: Robert Toyonaga
Date: Mon, 24 Jun 2024 13:33:20 +0000
Subject: [PATCH] 8332362: Implement os::committed_in_range for MacOS and AIX

Reviewed-by: stuefe
---
 src/hotspot/os/linux/os_linux.cpp             | 75 ---------------
 src/hotspot/os/posix/os_posix.cpp             | 91 +++++++++++++++++++
 src/hotspot/share/runtime/os.cpp              |  7 --
 .../runtime/test_committed_virtualmemory.cpp  | 43 +++++++++
 .../Thread/TestAlwaysPreTouchStacks.java      | 39 ++++++--
 5 files changed, 166 insertions(+), 89 deletions(-)

diff --git a/src/hotspot/os/linux/os_linux.cpp b/src/hotspot/os/linux/os_linux.cpp
index 52866a44b26..87150365ed5 100644
--- a/src/hotspot/os/linux/os_linux.cpp
+++ b/src/hotspot/os/linux/os_linux.cpp
@@ -3525,81 +3525,6 @@ static address get_stack_commited_bottom(address bottom, size_t size) {
   return nbot;
 }
 
-bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
-  int mincore_return_value;
-  const size_t stripe = 1024; // query this many pages each time
-  unsigned char vec[stripe + 1];
-  // set a guard
-  vec[stripe] = 'X';
-
-  const size_t page_sz = os::vm_page_size();
-  uintx pages = size / page_sz;
-
-  assert(is_aligned(start, page_sz), "Start address must be page aligned");
-  assert(is_aligned(size, page_sz), "Size must be page aligned");
-
-  committed_start = nullptr;
-
-  int loops = checked_cast<int>((pages + stripe - 1) / stripe);
-  int committed_pages = 0;
-  address loop_base = start;
-  bool found_range = false;
-
-  for (int index = 0; index < loops && !found_range; index ++) {
-    assert(pages > 0, "Nothing to do");
-    uintx pages_to_query = (pages >= stripe) ? stripe : pages;
-    pages -= pages_to_query;
-
-    // Get stable read
-    while ((mincore_return_value = mincore(loop_base, pages_to_query * page_sz, vec)) == -1 && errno == EAGAIN);
-
-    // During shutdown, some memory goes away without properly notifying NMT,
-    // E.g. ConcurrentGCThread/WatcherThread can exit without deleting thread object.
-    // Bailout and return as not committed for now.
-    if (mincore_return_value == -1 && errno == ENOMEM) {
-      return false;
-    }
-
-    // If mincore is not supported.
-    if (mincore_return_value == -1 && errno == ENOSYS) {
-      return false;
-    }
-
-    assert(vec[stripe] == 'X', "overflow guard");
-    assert(mincore_return_value == 0, "Range must be valid");
-    // Process this stripe
-    for (uintx vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) {
-      if ((vec[vecIdx] & 0x01) == 0) { // not committed
-        // End of current contiguous region
-        if (committed_start != nullptr) {
-          found_range = true;
-          break;
-        }
-      } else { // committed
-        // Start of region
-        if (committed_start == nullptr) {
-          committed_start = loop_base + page_sz * vecIdx;
-        }
-        committed_pages ++;
-      }
-    }
-
-    loop_base += pages_to_query * page_sz;
-  }
-
-  if (committed_start != nullptr) {
-    assert(committed_pages > 0, "Must have committed region");
-    assert(committed_pages <= int(size / page_sz), "Can not commit more than it has");
-    assert(committed_start >= start && committed_start < start + size, "Out of range");
-    committed_size = page_sz * committed_pages;
-    return true;
-  } else {
-    assert(committed_pages == 0, "Should not have committed region");
-    return false;
-  }
-}
-
-
 // Linux uses a growable mapping for the stack, and if the mapping for
 // the stack guard pages is not removed when we detach a thread the
 // stack cannot grow beyond the pages where the stack guard was
diff --git a/src/hotspot/os/posix/os_posix.cpp b/src/hotspot/os/posix/os_posix.cpp
index 1e7473eea1d..26bff6c8bd4 100644
--- a/src/hotspot/os/posix/os_posix.cpp
+++ b/src/hotspot/os/posix/os_posix.cpp
@@ -93,6 +93,9 @@
   #define MAP_ANONYMOUS MAP_ANON
 #endif
 
+/* Input/Output types for mincore(2) */
+typedef LINUX_ONLY(unsigned) char mincore_vec_t;
+
 static jlong initial_time_count = 0;
 
 static int clock_tics_per_sec = 100;
@@ -146,6 +149,94 @@ void os::check_dump_limit(char* buffer, size_t bufferSize) {
   VMError::record_coredump_status(buffer, success);
 }
 
+bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
+
+#ifdef _AIX
+  committed_start = start;
+  committed_size = size;
+  return true;
+#else
+
+  int mincore_return_value;
+  constexpr size_t stripe = 1024; // query this many pages each time
+  mincore_vec_t vec [stripe + 1];
+
+  // set a guard
+  DEBUG_ONLY(vec[stripe] = 'X');
+
+  size_t page_sz = os::vm_page_size();
+  uintx pages = size / page_sz;
+
+  assert(is_aligned(start, page_sz), "Start address must be page aligned");
+  assert(is_aligned(size, page_sz), "Size must be page aligned");
+
+  committed_start = nullptr;
+
+  int loops = checked_cast<int>((pages + stripe - 1) / stripe);
+  int committed_pages = 0;
+  address loop_base = start;
+  bool found_range = false;
+
+  for (int index = 0; index < loops && !found_range; index ++) {
+    assert(pages > 0, "Nothing to do");
+    uintx pages_to_query = (pages >= stripe) ? stripe : pages;
+    pages -= pages_to_query;
+
+    // Get stable read
+    int fail_count = 0;
+    while ((mincore_return_value = mincore(loop_base, pages_to_query * page_sz, vec)) == -1 && errno == EAGAIN){
+      if (++fail_count == 1000){
+        return false;
+      }
+    }
+
+    // During shutdown, some memory goes away without properly notifying NMT,
+    // E.g. ConcurrentGCThread/WatcherThread can exit without deleting thread object.
+    // Bailout and return as not committed for now.
+    if (mincore_return_value == -1 && errno == ENOMEM) {
+      return false;
+    }
+
+    // If mincore is not supported.
+    if (mincore_return_value == -1 && errno == ENOSYS) {
+      return false;
+    }
+
+    assert(vec[stripe] == 'X', "overflow guard");
+    assert(mincore_return_value == 0, "Range must be valid");
+    // Process this stripe
+    for (uintx vecIdx = 0; vecIdx < pages_to_query; vecIdx ++) {
+      if ((vec[vecIdx] & 0x01) == 0) { // not committed
+        // End of current contiguous region
+        if (committed_start != nullptr) {
+          found_range = true;
+          break;
+        }
+      } else { // committed
+        // Start of region
+        if (committed_start == nullptr) {
+          committed_start = loop_base + page_sz * vecIdx;
+        }
+        committed_pages ++;
+      }
+    }
+
+    loop_base += pages_to_query * page_sz;
+  }
+
+  if (committed_start != nullptr) {
+    assert(committed_pages > 0, "Must have committed region");
+    assert(committed_pages <= int(size / page_sz), "Can not commit more than it has");
+    assert(committed_start >= start && committed_start < start + size, "Out of range");
+    committed_size = page_sz * committed_pages;
+    return true;
+  } else {
+    assert(committed_pages == 0, "Should not have committed region");
+    return false;
+  }
+#endif
+}
+
 int os::get_native_stack(address* stack, int frames, int toSkip) {
   int frame_idx = 0;
   int num_of_frames;  // number of frames captured
diff --git a/src/hotspot/share/runtime/os.cpp b/src/hotspot/share/runtime/os.cpp
index 9860251fc33..97bf33fbaaa 100644
--- a/src/hotspot/share/runtime/os.cpp
+++ b/src/hotspot/share/runtime/os.cpp
@@ -276,13 +276,6 @@ bool os::dll_build_name(char* buffer, size_t size, const char* fname) {
   return (n != -1);
 }
 
-#if !defined(LINUX) && !defined(_WINDOWS)
-bool os::committed_in_range(address start, size_t size, address& committed_start, size_t& committed_size) {
-  committed_start = start;
-  committed_size = size;
-  return true;
-}
-#endif
 
 // Helper for dll_locate_lib.
 // Pass buffer and printbuffer as we already printed the path to buffer
diff --git a/test/hotspot/gtest/runtime/test_committed_virtualmemory.cpp b/test/hotspot/gtest/runtime/test_committed_virtualmemory.cpp
index d4959cfa008..2ffef1e211f 100644
--- a/test/hotspot/gtest/runtime/test_committed_virtualmemory.cpp
+++ b/test/hotspot/gtest/runtime/test_committed_virtualmemory.cpp
@@ -196,6 +196,42 @@ public:
     os::release_memory(base, size);
   }
+
+  static void test_committed_in_range(size_t num_pages, size_t pages_to_touch) {
+    bool result;
+    size_t committed_size;
+    address committed_start;
+    size_t index;
+
+    const size_t page_sz = os::vm_page_size();
+    const size_t size = num_pages * page_sz;
+
+    char* base = os::reserve_memory(size, !ExecMem, mtTest);
+    ASSERT_NE(base, (char*)nullptr);
+
+    result = os::commit_memory(base, size, !ExecMem);
+    ASSERT_TRUE(result);
+
+    result = os::committed_in_range((address)base, size, committed_start, committed_size);
+    ASSERT_FALSE(result);
+
+    // Touch pages
+    for (index = 0; index < pages_to_touch; index ++) {
+      base[index * page_sz] = 'a';
+    }
+
+    result = os::committed_in_range((address)base, size, committed_start, committed_size);
+    ASSERT_TRUE(result);
+    ASSERT_EQ(pages_to_touch * page_sz, committed_size);
+    ASSERT_EQ(committed_start, (address)base);
+
+    os::uncommit_memory(base, size, false);
+
+    result = os::committed_in_range((address)base, size, committed_start, committed_size);
+    ASSERT_FALSE(result);
+
+    os::release_memory(base, size);
+  }
 };
 
 TEST_VM(CommittedVirtualMemoryTracker, test_committed_virtualmemory_region) {
@@ -214,3 +250,10 @@
   }
 }
+
+#if !defined(_WINDOWS) && !defined(_AIX)
+TEST_VM(CommittedVirtualMemory, test_committed_in_range){
+  CommittedVirtualMemoryTest::test_committed_in_range(1024, 1024);
+  CommittedVirtualMemoryTest::test_committed_in_range(2, 1);
+}
+#endif
diff --git a/test/hotspot/jtreg/runtime/Thread/TestAlwaysPreTouchStacks.java b/test/hotspot/jtreg/runtime/Thread/TestAlwaysPreTouchStacks.java
index b12eff0cf84..f16e0ff9da4 100644
--- a/test/hotspot/jtreg/runtime/Thread/TestAlwaysPreTouchStacks.java
+++ b/test/hotspot/jtreg/runtime/Thread/TestAlwaysPreTouchStacks.java
@@ -1,6 +1,6 @@
 /*
  * Copyright (c) 2022 SAP SE. All rights reserved.
- * Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2022, 2024, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,14 +32,27 @@ import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 import java.util.concurrent.CyclicBarrier;
 
+import static jdk.test.lib.Platform.isLinux;
+import static jdk.test.lib.Platform.isWindows;
+
 /*
- * @test
+ * @test id=preTouch
  * @summary Test AlwaysPreTouchThreadStacks
  * @requires os.family != "aix"
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
- * @run driver TestAlwaysPreTouchStacks
+ * @run driver TestAlwaysPreTouchStacks preTouch
+ */
+
+/*
+ * @test id=noPreTouch
+ * @summary Test that only touched committed memory is reported as thread stack usage.
+ * @requires os.family != "aix"
+ * @library /test/lib
+ * @modules java.base/jdk.internal.misc
+ *          java.management
+ * @run driver TestAlwaysPreTouchStacks noPreTouch
  */
 
 public class TestAlwaysPreTouchStacks {
@@ -90,12 +103,22 @@ public class TestAlwaysPreTouchStacks {
             // should show up with fully - or almost fully - committed thread stacks.
 
         } else {
+            boolean preTouch;
+            if (args.length == 1 && args[0].equals("noPreTouch")){
+                preTouch = false;
+            } else if (args.length == 1 && args[0].equals("preTouch")){
+                preTouch = true;
+            } else {
+                throw new RuntimeException("Invalid test input. Must be 'preTouch' or 'noPreTouch'.");
+            }
             ArrayList<String> vmArgs = new ArrayList<>();
             Collections.addAll(vmArgs,
                     "-XX:+UnlockDiagnosticVMOptions",
                     "-Xmx100M",
-                    "-XX:+AlwaysPreTouchStacks",
                     "-XX:NativeMemoryTracking=summary", "-XX:+PrintNMTStatistics");
+            if (preTouch){
+                vmArgs.add("-XX:+AlwaysPreTouchStacks");
+            }
             if (System.getProperty("os.name").contains("Linux")) {
                 vmArgs.add("-XX:-UseMadvPopulateWrite");
             }
@@ -110,8 +133,8 @@ public class TestAlwaysPreTouchStacks {
                output.shouldContain("Alive: " + i);
            }
 
-            // We want to see, in the final NMT printout, a committed thread stack size very close to reserved
-            // stack size. Like this:
+            // If using -XX:+AlwaysPreTouchStacks, we want to see, in the final NMT printout,
+            // a committed thread stack size very close to reserved stack size. Like this:
             // - Thread (reserved=10332400KB, committed=10284360KB)
             //          (thread #10021)
             //          (stack: reserved=10301560KB, committed=10253520KB)   <<<<
@@ -135,8 +158,10 @@ public class TestAlwaysPreTouchStacks {
             // as thread stack. But without pre-touching, the thread stacks would be committed to about 1/5th
             // of their reserved size. Requiring them to be committed for over 3/4th shows that pretouch is
             // really working.
-            if ((double)committed < ((double)reserved * 0.75)) {
+            if (preTouch && (double)committed < ((double)reserved * 0.75)) {
                throw new RuntimeException("Expected a higher ratio between stack committed and reserved.");
+            } else if (!preTouch && (double)committed > ((double)reserved * 0.50)){
+                throw new RuntimeException("Expected a lower ratio between stack committed and reserved.");
             }
             // Added sanity tests: we expect our test threads to be still alive when NMT prints its final
             // report, so their stacks should dominate the NMT-reported total stack size.
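
For illustration only, and not part of the patch above: a minimal standalone sketch of the mincore(2) probing technique that the change moves from os_linux.cpp into the shared os_posix.cpp. The probe.cpp file name, the simplified mincore_vec_t typedef and the 8-page mapping are assumptions made for this example; only the bit-0 residency test and the Linux vs. macOS/BSD vector-type difference come from the patch itself.

// probe.cpp -- check which pages of an anonymous mapping are resident, using the
// same bit-0 test that os::committed_in_range() applies to each mincore vector entry.
// Build (assumed): c++ -std=c++17 -o probe probe.cpp
#include <sys/mman.h>
#include <unistd.h>
#include <cstddef>
#include <cstdio>

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON         // same fallback the patched os_posix.cpp relies on
#endif

#ifdef __linux__
typedef unsigned char mincore_vec_t;   // Linux declares mincore(2) with unsigned char*
#else
typedef char mincore_vec_t;            // macOS/BSD declare it with char*
#endif

int main() {
  const size_t page = (size_t)sysconf(_SC_PAGESIZE);
  const size_t pages = 8;

  // Map 8 anonymous pages; they stay unbacked until they are touched.
  char* base = (char*)mmap(nullptr, pages * page, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) { perror("mmap"); return 1; }

  base[0] = 'a';         // touch page 0
  base[3 * page] = 'a';  // touch page 3

  // One vector byte per page; bit 0 reports whether the page is resident.
  mincore_vec_t vec[pages];
  if (mincore(base, pages * page, vec) != 0) { perror("mincore"); return 1; }

  for (size_t i = 0; i < pages; i++) {
    printf("page %zu: %s\n", i, (vec[i] & 0x01) ? "resident" : "not resident");
  }

  munmap(base, pages * page);
  return 0;
}

The touched pages would typically print as resident and the rest as not resident. HotSpot's committed_in_range() runs the same query in stripes of 1024 pages, retries a bounded number of times on EAGAIN, and bails out on ENOMEM or ENOSYS, as shown in the os_posix.cpp hunk above.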