8364248: Separate commit and reservation limit detection

Reviewed-by: stuefe, ayang
This commit is contained in:
Joel Sikström 2025-08-01 07:42:45 +00:00
parent e82d7f5810
commit ae11d8f446
5 changed files with 69 additions and 62 deletions

View File

@ -713,7 +713,7 @@ bool os::get_host_name(char* buf, size_t buflen) {
}
#ifndef _LP64
// Helper, on 32bit, for os::has_allocatable_memory_limit
// Helper, on 32bit, for os::commit_memory_limit
static bool is_allocatable(size_t s) {
if (s < 2 * G) {
return true;
@ -731,31 +731,19 @@ static bool is_allocatable(size_t s) {
}
#endif // !_LP64
size_t os::commit_memory_limit() {
// On POSIX systems, the amount of memory that can be committed is limited
// by the size of the reservable memory.
size_t reserve_limit = reserve_memory_limit();
bool os::has_allocatable_memory_limit(size_t* limit) {
struct rlimit rlim;
int getrlimit_res = getrlimit(RLIMIT_AS, &rlim);
// if there was an error when calling getrlimit, assume that there is no limitation
// on virtual memory.
bool result;
if ((getrlimit_res != 0) || (rlim.rlim_cur == RLIM_INFINITY)) {
result = false;
} else {
*limit = (size_t)rlim.rlim_cur;
result = true;
}
#ifdef _LP64
return result;
return reserve_limit;
#else
// arbitrary virtual space limit for 32 bit Unices found by testing. If
// getrlimit above returned a limit, bound it with this limit. Otherwise
// directly use it.
const size_t max_virtual_limit = 3800*M;
if (result) {
*limit = MIN2(*limit, max_virtual_limit);
} else {
*limit = max_virtual_limit;
}
// Arbitrary max reserve limit for 32 bit Unices found by testing.
const size_t max_reserve_limit = 3800 * M;
// Bound the reserve limit with the arbitrary max.
size_t actual_limit = MIN2(reserve_limit, max_reserve_limit);
// bound by actually allocatable memory. The algorithm uses two bounds, an
// upper and a lower limit. The upper limit is the current highest amount of
@ -769,15 +757,15 @@ bool os::has_allocatable_memory_limit(size_t* limit) {
// the minimum amount of memory we care about allocating.
const size_t min_allocation_size = M;
size_t upper_limit = *limit;
size_t upper_limit = actual_limit;
// first check a few trivial cases
if (is_allocatable(upper_limit) || (upper_limit <= min_allocation_size)) {
*limit = upper_limit;
// The actual limit is allocatable, no need to do anything.
} else if (!is_allocatable(min_allocation_size)) {
// we found that not even min_allocation_size is allocatable. Return it
// anyway. There is no point to search for a better value any more.
*limit = min_allocation_size;
actual_limit = min_allocation_size;
} else {
// perform the binary search.
size_t lower_limit = min_allocation_size;
@ -790,12 +778,31 @@ bool os::has_allocatable_memory_limit(size_t* limit) {
upper_limit = temp_limit;
}
}
*limit = lower_limit;
actual_limit = lower_limit;
}
return true;
return actual_limit;
#endif
}
size_t os::reserve_memory_limit() {
  // Query the address-space resource limit (RLIMIT_AS) to determine how
  // much virtual memory this process may reserve.
  struct rlimit rlim;
  if (getrlimit(RLIMIT_AS, &rlim) != 0) {
    // getrlimit failed; conservatively assume there is no limit.
    return SIZE_MAX;
  }
  if (rlim.rlim_cur == RLIM_INFINITY) {
    // No address-space limit is configured.
    return SIZE_MAX;
  }
  // A finite soft limit is in effect; report it.
  return (size_t)rlim.rlim_cur;
}
void* os::get_default_process_handle() {
#ifdef __APPLE__
// MacOS X needs to use RTLD_FIRST instead of RTLD_LAZY

View File

@ -897,13 +897,6 @@ size_t os::rss() {
return rss;
}
bool os::has_allocatable_memory_limit(size_t* limit) {
MEMORYSTATUSEX ms;
ms.dwLength = sizeof(ms);
GlobalMemoryStatusEx(&ms);
*limit = (size_t)ms.ullAvailVirtual;
return true;
}
int os::active_processor_count() {
// User has overridden the number of active processors
@ -3303,6 +3296,18 @@ static char* map_or_reserve_memory_aligned(size_t size, size_t alignment, int fi
return aligned_base;
}
size_t os::commit_memory_limit() {
  // The amount of memory that can be committed is bounded by the virtual
  // memory currently available to the process, as reported by Windows.
  MEMORYSTATUSEX ms;
  ms.dwLength = sizeof(ms);
  if (GlobalMemoryStatusEx(&ms) == 0) {
    // The query failed; the struct contents are indeterminate, so do not
    // read ullAvailVirtual. Conservatively report no limit.
    return SIZE_MAX;
  }
  return (size_t)ms.ullAvailVirtual;
}
size_t os::reserve_memory_limit() {
  // Windows provides no mechanism (such as POSIX RLIMIT_AS) to cap the
  // size of the virtual address space, so report "unlimited".
  const size_t unlimited = SIZE_MAX;
  return unlimited;
}
char* os::reserve_memory_aligned(size_t size, size_t alignment, MemTag mem_tag, bool exec) {
// exec can be ignored
return map_or_reserve_memory_aligned(size, alignment, -1/* file_desc */, mem_tag);

View File

@ -30,25 +30,14 @@
#include "utilities/align.hpp"
#include "utilities/ostream.hpp"
static size_t address_space_limit() {
size_t limit = 0;
if (os::has_allocatable_memory_limit(&limit)) {
return limit;
}
// No limit
return SIZE_MAX;
}
size_t ZAddressSpaceLimit::heap() {
// Allow the heap to occupy 50% of the address space
const size_t limit = address_space_limit() / MaxVirtMemFraction;
const size_t limit = os::reserve_memory_limit() / MaxVirtMemFraction;
return align_up(limit, ZGranuleSize);
}
void ZAddressSpaceLimit::print_limits() {
const size_t limit = address_space_limit();
const size_t limit = os::reserve_memory_limit();
if (limit == SIZE_MAX) {
log_info_p(gc, init)("Address Space Size: unlimited");

View File

@ -1487,19 +1487,16 @@ jint Arguments::set_ergonomics_flags() {
}
size_t Arguments::limit_heap_by_allocatable_memory(size_t limit) {
size_t max_allocatable;
size_t result = limit;
if (os::has_allocatable_memory_limit(&max_allocatable)) {
// The AggressiveHeap check is a temporary workaround to avoid calling
// GCArguments::heap_virtual_to_physical_ratio() before a GC has been
// selected. This works because AggressiveHeap implies UseParallelGC
// where we know the ratio will be 1. Once the AggressiveHeap option is
// removed, this can be cleaned up.
size_t heap_virtual_to_physical_ratio = (AggressiveHeap ? 1 : GCConfig::arguments()->heap_virtual_to_physical_ratio());
size_t fraction = MaxVirtMemFraction * heap_virtual_to_physical_ratio;
result = MIN2(result, max_allocatable / fraction);
}
return result;
// The AggressiveHeap check is a temporary workaround to avoid calling
// GCArguments::heap_virtual_to_physical_ratio() before a GC has been
// selected. This works because AggressiveHeap implies UseParallelGC
// where we know the ratio will be 1. Once the AggressiveHeap option is
// removed, this can be cleaned up.
size_t heap_virtual_to_physical_ratio = (AggressiveHeap ? 1 : GCConfig::arguments()->heap_virtual_to_physical_ratio());
size_t fraction = MaxVirtMemFraction * heap_virtual_to_physical_ratio;
size_t max_allocatable = os::commit_memory_limit();
return MIN2(limit, max_allocatable / fraction);
}
// Use static initialization to get the default before parsing

View File

@ -340,7 +340,6 @@ class os: AllStatic {
static jlong free_swap_space();
static julong physical_memory();
static bool has_allocatable_memory_limit(size_t* limit);
static bool is_server_class_machine();
static size_t rss();
@ -449,6 +448,16 @@ class os: AllStatic {
// Returns the lowest address the process is allowed to map against.
static size_t vm_min_address();
// Returns an upper limit beyond which reserve_memory() calls are guaranteed
// to fail. It is not guaranteed that reserving less memory than this will
// succeed, however.
static size_t reserve_memory_limit();
// Returns an upper limit beyond which commit_memory() calls are guaranteed
// to fail. It is not guaranteed that committing less memory than this will
// succeed, however.
static size_t commit_memory_limit();
inline static size_t cds_core_region_alignment();
// Reserves virtual memory.