8346005: Parallel: Incorrect page size calculation with UseLargePages

Co-authored-by: Joel Sikström <jsikstro@openjdk.org>
Reviewed-by: jsikstro, fandreuzzi
Author: Albert Mingkun Yang
Date:   2025-10-21 08:13:06 +00:00
Parent: ec13c283c4
Commit: 2be273f20f
15 changed files with 144 additions and 160 deletions

View File

@@ -37,21 +37,11 @@
#include "runtime/threadSMR.hpp"
#include "utilities/align.hpp"
MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment), _must_use_large_pages(false) {
MutableNUMASpace::MutableNUMASpace(size_t page_size) : MutableSpace(page_size) {
_lgrp_spaces = new (mtGC) GrowableArray<LGRPSpace*>(0, mtGC);
_page_size = os::vm_page_size();
_adaptation_cycles = 0;
_samples_count = 0;
#ifdef LINUX
// Changing the page size can lead to freeing of memory. When using large pages
// and the memory has been both reserved and committed, Linux does not support
// freeing parts of it.
if (UseLargePages && !os::can_commit_large_page_memory()) {
_must_use_large_pages = true;
}
#endif // LINUX
size_t lgrp_limit = os::numa_get_groups_num();
uint *lgrp_ids = NEW_C_HEAP_ARRAY(uint, lgrp_limit, mtGC);
size_t lgrp_num = os::numa_get_leaf_groups(lgrp_ids, lgrp_limit);
@@ -60,7 +50,7 @@ MutableNUMASpace::MutableNUMASpace(size_t alignment) : MutableSpace(alignment),
lgrp_spaces()->reserve(checked_cast<int>(lgrp_num));
// Add new spaces for the new nodes
for (size_t i = 0; i < lgrp_num; i++) {
lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], alignment));
lgrp_spaces()->append(new LGRPSpace(lgrp_ids[i], page_size));
}
FREE_C_HEAP_ARRAY(uint, lgrp_ids);
@@ -128,7 +118,10 @@ MutableNUMASpace::LGRPSpace *MutableNUMASpace::lgrp_space_for_thread(Thread* thr
return space->lgrp_id() == (uint)lgrp_id;
});
assert(lgrp_spaces_index != -1, "must have created spaces for all lgrp_ids");
if (lgrp_spaces_index == -1) {
// Running on a CPU with no memory; pick another CPU based on %.
lgrp_spaces_index = lgrp_id % lgrp_spaces()->length();
}
return lgrp_spaces()->at(lgrp_spaces_index);
}
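
The hunk above replaces a hard assert with a graceful fallback: when the current thread's locality group has no space of its own (for example, a memory-less NUMA node), an existing space is chosen deterministically via lgrp_id modulo the number of spaces. A minimal standalone sketch of that selection; the types and main() below are hypothetical stand-ins, not HotSpot code:

```cpp
#include <cassert>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for the per-lgrp space list kept by MutableNUMASpace.
struct LgrpSpace { unsigned lgrp_id; };

// Find the space for the given lgrp id; fall back to a modulo-based pick when
// the id has no space of its own (e.g. a CPU node with no local memory).
static const LgrpSpace& space_for_lgrp(const std::vector<LgrpSpace>& spaces, unsigned lgrp_id) {
  assert(!spaces.empty() && "must have at least one space");
  int index = -1;
  for (size_t i = 0; i < spaces.size(); i++) {
    if (spaces[i].lgrp_id == lgrp_id) { index = (int)i; break; }
  }
  if (index == -1) {
    // Running on a CPU with no memory; pick another space based on %.
    index = (int)(lgrp_id % spaces.size());
  }
  return spaces[(size_t)index];
}

int main() {
  std::vector<LgrpSpace> spaces = {{0}, {1}, {2}};
  printf("lgrp 1 -> space %u\n", space_for_lgrp(spaces, 1).lgrp_id); // exact match
  printf("lgrp 7 -> space %u\n", space_for_lgrp(spaces, 7).lgrp_id); // falls back to 7 % 3 == 1
  return 0;
}
```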
@@ -146,22 +139,19 @@ size_t MutableNUMASpace::unsafe_max_tlab_alloc(Thread *thr) const {
// Bias region towards the first-touching lgrp. Set the right page sizes.
void MutableNUMASpace::bias_region(MemRegion mr, uint lgrp_id) {
HeapWord *start = align_up(mr.start(), page_size());
HeapWord *end = align_down(mr.end(), page_size());
if (end > start) {
MemRegion aligned_region(start, end);
assert((intptr_t)aligned_region.start() % page_size() == 0 &&
(intptr_t)aligned_region.byte_size() % page_size() == 0, "Bad alignment");
assert(region().contains(aligned_region), "Sanity");
// First we tell the OS which page size we want in the given range. The underlying
// large page can be broken down if we require small pages.
const size_t os_align = UseLargePages ? page_size() : os::vm_page_size();
os::realign_memory((char*)aligned_region.start(), aligned_region.byte_size(), os_align);
// Then we uncommit the pages in the range.
os::disclaim_memory((char*)aligned_region.start(), aligned_region.byte_size());
// And make them local/first-touch biased.
os::numa_make_local((char*)aligned_region.start(), aligned_region.byte_size(), checked_cast<int>(lgrp_id));
assert(is_aligned(mr.start(), page_size()), "precondition");
assert(is_aligned(mr.end(), page_size()), "precondition");
if (mr.is_empty()) {
return;
}
// First we tell the OS which page size we want in the given range. The underlying
// large page can be broken down if we require small pages.
os::realign_memory((char*) mr.start(), mr.byte_size(), page_size());
// Then we uncommit the pages in the range.
os::disclaim_memory((char*) mr.start(), mr.byte_size());
// And make them local/first-touch biased.
os::numa_make_local((char*)mr.start(), mr.byte_size(), checked_cast<int>(lgrp_id));
}
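
The rewritten bias_region() above asserts that its caller passes a page-aligned region and only special-cases the empty one; the old version aligned internally, which could silently shrink the region to nothing. A small standalone illustration of that alignment arithmetic, with simplified local align_up/align_down helpers (not the ones from utilities/align.hpp) and a made-up 2M page size:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Simplified power-of-two alignment helpers (HotSpot has its own in utilities/align.hpp).
static uintptr_t align_up(uintptr_t p, uintptr_t alignment)   { return (p + alignment - 1) & ~(alignment - 1); }
static uintptr_t align_down(uintptr_t p, uintptr_t alignment) { return p & ~(alignment - 1); }

int main() {
  const uintptr_t page  = 2 * 1024 * 1024;      // assume a 2M large page
  const uintptr_t start = 0x10000000 + 4096;    // deliberately not page-aligned
  const uintptr_t end   = start + page + 8192;

  // What the old code did internally: shrink the region to whole pages.
  uintptr_t aligned_start = align_up(start, page);
  uintptr_t aligned_end   = align_down(end, page);
  size_t    pages         = aligned_end > aligned_start ? (aligned_end - aligned_start) / page : 0;

  // Here the unaligned 2M+8K region contains no fully aligned page at all, so the
  // old code would have biased nothing; the new contract makes the caller
  // responsible for handing over an already page-aligned region.
  printf("aligned region: [%#lx, %#lx) -> %zu whole page(s)\n",
         (unsigned long)aligned_start, (unsigned long)aligned_end, pages);
  return 0;
}
```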
// Update space layout. Perform adaptation.
@@ -210,14 +200,15 @@ size_t MutableNUMASpace::current_chunk_size(int i) {
// Return the default chunk size by equally diving the space.
// page_size() aligned.
size_t MutableNUMASpace::default_chunk_size() {
return base_space_size() / lgrp_spaces()->length() * page_size();
// The number of pages may not be evenly divided.
return align_down(capacity_in_bytes() / lgrp_spaces()->length(), page_size());
}
// Produce a new chunk size. page_size() aligned.
// This function is expected to be called on sequence of i's from 0 to
// lgrp_spaces()->length().
size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
size_t pages_available = base_space_size();
size_t pages_available = capacity_in_bytes() / page_size();
for (int j = 0; j < i; j++) {
pages_available -= align_down(current_chunk_size(j), page_size()) / page_size();
}
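
default_chunk_size() above now divides the space's byte capacity evenly across the lgrp spaces and rounds down to a page boundary, and adaptive_chunk_size() derives its page budget from capacity_in_bytes() / page_size() rather than the removed base_space_size(). A worked sketch of the default-chunk arithmetic with made-up sizes:

```cpp
#include <cstddef>
#include <cstdio>

// Round down to a power-of-two boundary (simplified version of align_down).
static size_t align_down(size_t v, size_t alignment) { return v & ~(alignment - 1); }

int main() {
  const size_t page_size = 2 * 1024 * 1024;   // assume 2M large pages
  const size_t capacity  = 27 * page_size;    // 54M space: 27 pages, not evenly divisible
  const size_t num_lgrps = 4;

  // The number of pages may not divide evenly, so round each chunk down to a page.
  size_t chunk = align_down(capacity / num_lgrps, page_size);
  printf("capacity=%zuM, lgrps=%zu -> default chunk=%zuM (%zu pages)\n",
         capacity / (1024 * 1024), num_lgrps,
         chunk / (1024 * 1024), chunk / page_size);
  // 27 pages over 4 lgrps -> 6 pages (12M) each; the top chunk, which always
  // runs to end() in initialize(), picks up the 3 leftover pages on top of its share.
  return 0;
}
```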
@@ -263,20 +254,13 @@ size_t MutableNUMASpace::adaptive_chunk_size(int i, size_t limit) {
// |----bottom_region--|---intersection---|------top_region------|
void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection,
MemRegion* bottom_region, MemRegion *top_region) {
assert(is_aligned(new_region.start(), page_size()), "precondition");
assert(is_aligned(new_region.end(), page_size()), "precondition");
assert(is_aligned(intersection.start(), page_size()), "precondition");
assert(is_aligned(intersection.end(), page_size()), "precondition");
// Is there bottom?
if (new_region.start() < intersection.start()) { // Yes
// Try to coalesce small pages into a large one.
if (UseLargePages && page_size() >= alignment()) {
HeapWord* p = align_up(intersection.start(), alignment());
if (new_region.contains(p)
&& pointer_delta(p, new_region.start(), sizeof(char)) >= alignment()) {
if (intersection.contains(p)) {
intersection = MemRegion(p, intersection.end());
} else {
intersection = MemRegion(p, p);
}
}
}
*bottom_region = MemRegion(new_region.start(), intersection.start());
} else {
*bottom_region = MemRegion();
@@ -284,18 +268,6 @@ void MutableNUMASpace::select_tails(MemRegion new_region, MemRegion intersection
// Is there top?
if (intersection.end() < new_region.end()) { // Yes
// Try to coalesce small pages into a large one.
if (UseLargePages && page_size() >= alignment()) {
HeapWord* p = align_down(intersection.end(), alignment());
if (new_region.contains(p)
&& pointer_delta(new_region.end(), p, sizeof(char)) >= alignment()) {
if (intersection.contains(p)) {
intersection = MemRegion(intersection.start(), p);
} else {
intersection = MemRegion(p, p);
}
}
}
*top_region = MemRegion(intersection.end(), new_region.end());
} else {
*top_region = MemRegion();
@@ -309,6 +281,8 @@ void MutableNUMASpace::initialize(MemRegion mr,
WorkerThreads* pretouch_workers) {
assert(clear_space, "Reallocation will destroy data!");
assert(lgrp_spaces()->length() > 0, "There should be at least one space");
assert(is_aligned(mr.start(), page_size()), "precondition");
assert(is_aligned(mr.end(), page_size()), "precondition");
MemRegion old_region = region(), new_region;
set_bottom(mr.start());
@@ -316,37 +290,22 @@
// Must always clear the space
clear(SpaceDecorator::DontMangle);
// Compute chunk sizes
size_t prev_page_size = page_size();
set_page_size(alignment());
HeapWord* rounded_bottom = align_up(bottom(), page_size());
HeapWord* rounded_end = align_down(end(), page_size());
size_t base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
size_t num_pages = mr.byte_size() / page_size();
// Try small pages if the chunk size is too small
if (base_space_size_pages / lgrp_spaces()->length() == 0
&& page_size() > os::vm_page_size()) {
// Changing the page size below can lead to freeing of memory. So we fail initialization.
if (_must_use_large_pages) {
vm_exit_during_initialization("Failed initializing NUMA with large pages. Too small heap size");
}
set_page_size(os::vm_page_size());
rounded_bottom = align_up(bottom(), page_size());
rounded_end = align_down(end(), page_size());
base_space_size_pages = pointer_delta(rounded_end, rounded_bottom, sizeof(char)) / page_size();
if (num_pages < (size_t)lgrp_spaces()->length()) {
log_warning(gc)("Degraded NUMA config: #os-pages (%zu) < #CPU (%d); space-size: %zu, page-size: %zu",
num_pages, lgrp_spaces()->length(), mr.byte_size(), page_size());
// Keep only the first few CPUs.
lgrp_spaces()->trunc_to((int)num_pages);
}
guarantee(base_space_size_pages / lgrp_spaces()->length() > 0, "Space too small");
set_base_space_size(base_space_size_pages);
// Handle space resize
MemRegion top_region, bottom_region;
if (!old_region.equals(region())) {
new_region = MemRegion(rounded_bottom, rounded_end);
new_region = mr;
MemRegion intersection = new_region.intersection(old_region);
if (intersection.start() == nullptr ||
intersection.end() == nullptr ||
prev_page_size > page_size()) { // If the page size got smaller we have to change
// the page size preference for the whole space.
if (intersection.is_empty()) {
intersection = MemRegion(new_region.start(), new_region.start());
}
select_tails(new_region, intersection, &bottom_region, &top_region);
@@ -393,19 +352,18 @@ void MutableNUMASpace::initialize(MemRegion mr,
if (i == 0) { // Bottom chunk
if (i != lgrp_spaces()->length() - 1) {
new_region = MemRegion(bottom(), rounded_bottom + (chunk_byte_size >> LogHeapWordSize));
new_region = MemRegion(bottom(), chunk_byte_size >> LogHeapWordSize);
} else {
new_region = MemRegion(bottom(), end());
}
} else
if (i < lgrp_spaces()->length() - 1) { // Middle chunks
MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
new_region = MemRegion(ps->end(),
ps->end() + (chunk_byte_size >> LogHeapWordSize));
} else { // Top chunk
MutableSpace *ps = lgrp_spaces()->at(i - 1)->space();
new_region = MemRegion(ps->end(), end());
}
} else if (i < lgrp_spaces()->length() - 1) { // Middle chunks
MutableSpace* ps = lgrp_spaces()->at(i - 1)->space();
new_region = MemRegion(ps->end(),
chunk_byte_size >> LogHeapWordSize);
} else { // Top chunk
MutableSpace* ps = lgrp_spaces()->at(i - 1)->space();
new_region = MemRegion(ps->end(), end());
}
guarantee(region().contains(new_region), "Region invariant");
@@ -432,9 +390,8 @@ void MutableNUMASpace::initialize(MemRegion mr,
// Clear space (set top = bottom) but never mangle.
s->initialize(new_region, SpaceDecorator::Clear, SpaceDecorator::DontMangle, MutableSpace::DontSetupPages);
set_adaptation_cycles(samples_count());
}
set_adaptation_cycles(samples_count());
}
// Set the top of the whole space.
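
Taken together, the initialize() changes above drop the fallback that re-ran the sizing with os::vm_page_size() small pages (and the vm_exit_during_initialization path guarded by _must_use_large_pages). The page size is now fixed up front; if the space holds fewer pages than there are locality groups, a warning is logged and the lgrp space list is truncated so every remaining space can hold at least one page. A standalone sketch of that degraded-configuration decision with invented sizes:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  const size_t page_size  = 2 * 1024 * 1024;  // assume 2M large pages
  const size_t space_size = 6 * page_size;    // a small 12M space
  int num_lgrps = 8;                          // more NUMA nodes than pages

  size_t num_pages = space_size / page_size;
  if (num_pages < (size_t)num_lgrps) {
    // Mirrors the new handling: warn and keep only the first num_pages lgrp
    // spaces rather than falling back to small pages.
    printf("Degraded NUMA config: #os-pages (%zu) < #CPU (%d); space-size: %zu, page-size: %zu\n",
           num_pages, num_lgrps, space_size, page_size);
    num_lgrps = (int)num_pages;
  }
  printf("using %d lgrp space(s), each at least one page\n", num_lgrps);
  return 0;
}
```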

View File

@@ -80,8 +80,8 @@ class MutableNUMASpace : public MutableSpace {
SpaceStats _space_stats;
public:
LGRPSpace(uint l, size_t alignment) : _lgrp_id(l), _allocation_failed(false) {
_space = new MutableSpace(alignment);
LGRPSpace(uint l, size_t page_size) : _lgrp_id(l), _allocation_failed(false) {
_space = new MutableSpace(page_size);
_alloc_rate = new AdaptiveWeightedAverage(NUMAChunkResizeWeight);
}
~LGRPSpace() {
@@ -117,24 +117,14 @@ class MutableNUMASpace : public MutableSpace {
};
GrowableArray<LGRPSpace*>* _lgrp_spaces;
size_t _page_size;
unsigned _adaptation_cycles, _samples_count;
bool _must_use_large_pages;
void set_page_size(size_t psz) { _page_size = psz; }
size_t page_size() const { return _page_size; }
unsigned adaptation_cycles() { return _adaptation_cycles; }
void set_adaptation_cycles(int v) { _adaptation_cycles = v; }
unsigned samples_count() { return _samples_count; }
void increment_samples_count() { ++_samples_count; }
size_t _base_space_size;
void set_base_space_size(size_t v) { _base_space_size = v; }
size_t base_space_size() const { return _base_space_size; }
// Bias region towards the lgrp.
void bias_region(MemRegion mr, uint lgrp_id);
@@ -154,7 +144,7 @@ class MutableNUMASpace : public MutableSpace {
public:
GrowableArray<LGRPSpace*>* lgrp_spaces() const { return _lgrp_spaces; }
MutableNUMASpace(size_t alignment);
MutableNUMASpace(size_t page_size);
virtual ~MutableNUMASpace();
// Space initialization.
virtual void initialize(MemRegion mr,

View File

@@ -34,30 +34,26 @@
#include "utilities/align.hpp"
#include "utilities/macros.hpp"
MutableSpace::MutableSpace(size_t alignment) :
MutableSpace::MutableSpace(size_t page_size) :
_last_setup_region(),
_alignment(alignment),
_page_size(page_size),
_bottom(nullptr),
_top(nullptr),
_end(nullptr)
{
assert(MutableSpace::alignment() % os::vm_page_size() == 0,
"Space should be aligned");
}
_end(nullptr) {}
void MutableSpace::numa_setup_pages(MemRegion mr, size_t page_size, bool clear_space) {
if (!mr.is_empty()) {
HeapWord *start = align_up(mr.start(), page_size);
HeapWord *end = align_down(mr.end(), page_size);
if (end > start) {
size_t size = pointer_delta(end, start, sizeof(char));
if (clear_space) {
// Prefer page reallocation to migration.
os::disclaim_memory((char*)start, size);
}
os::numa_make_global((char*)start, size);
}
void MutableSpace::numa_setup_pages(MemRegion mr, bool clear_space) {
assert(is_aligned(mr.start(), page_size()), "precondition");
assert(is_aligned(mr.end(), page_size()), "precondition");
if (mr.is_empty()) {
return;
}
if (clear_space) {
// Prefer page reallocation to migration.
os::disclaim_memory((char*) mr.start(), mr.byte_size());
}
os::numa_make_global((char*) mr.start(), mr.byte_size());
}
void MutableSpace::initialize(MemRegion mr,
@@ -105,20 +101,17 @@ void MutableSpace::initialize(MemRegion mr,
}
assert(mr.contains(head) && mr.contains(tail), "Sanity");
size_t page_size = alignment();
if (UseNUMA) {
numa_setup_pages(head, page_size, clear_space);
numa_setup_pages(tail, page_size, clear_space);
numa_setup_pages(head, clear_space);
numa_setup_pages(tail, clear_space);
}
if (AlwaysPreTouch) {
size_t pretouch_page_size = UseLargePages ? page_size : os::vm_page_size();
PretouchTask::pretouch("ParallelGC PreTouch head", (char*)head.start(), (char*)head.end(),
pretouch_page_size, pretouch_workers);
page_size(), pretouch_workers);
PretouchTask::pretouch("ParallelGC PreTouch tail", (char*)tail.start(), (char*)tail.end(),
pretouch_page_size, pretouch_workers);
page_size(), pretouch_workers);
}
// Remember where we stopped so that we can continue later.
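
With the page size stored in MutableSpace, both numa_setup_pages() and the AlwaysPreTouch path above use the same page_size() value instead of re-deriving it from UseLargePages. For illustration, a minimal single-threaded pretouch loop over a committed range; this is only a conceptual stand-in for PretouchTask, which splits the range across worker threads:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// Touch one byte per page so the OS backs the whole range with real memory up front.
static void pretouch(volatile char* start, volatile char* end, size_t page_size) {
  for (volatile char* p = start; p < end; p += page_size) {
    *p = *p;  // a read-modify-write per page is enough to fault it in
  }
}

int main() {
  const size_t page_size = 4096;              // assume the base-page case
  std::vector<char> range(64 * page_size);    // pretend this is a committed heap range
  pretouch(range.data(), range.data() + range.size(), page_size);
  printf("pretouched %zu pages of %zu bytes\n", range.size() / page_size, page_size);
  return 0;
}
```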

View File

@@ -51,17 +51,20 @@ class MutableSpace: public CHeapObj<mtGC> {
// The last region which page had been setup to be interleaved.
MemRegion _last_setup_region;
size_t _alignment;
size_t _page_size;
HeapWord* _bottom;
HeapWord* volatile _top;
HeapWord* _end;
void numa_setup_pages(MemRegion mr, size_t page_size, bool clear_space);
void numa_setup_pages(MemRegion mr, bool clear_space);
void set_last_setup_region(MemRegion mr) { _last_setup_region = mr; }
MemRegion last_setup_region() const { return _last_setup_region; }
public:
protected:
size_t page_size() const { return _page_size; }
public:
virtual ~MutableSpace() = default;
MutableSpace(size_t page_size);
@@ -77,8 +80,6 @@ class MutableSpace: public CHeapObj<mtGC> {
HeapWord* volatile* top_addr() { return &_top; }
HeapWord** end_addr() { return &_end; }
size_t alignment() { return _alignment; }
MemRegion region() const { return MemRegion(bottom(), end()); }
size_t capacity_in_bytes() const { return capacity_in_words() * HeapWordSize; }

View File

@@ -47,7 +47,10 @@ ObjectStartArray::ObjectStartArray(MemRegion covered_region)
// Do not use large-pages for the backing store. The one large page region
// will be used for the heap proper.
ReservedSpace backing_store = MemoryReserver::reserve(bytes_to_reserve, mtGC);
ReservedSpace backing_store = MemoryReserver::reserve(bytes_to_reserve,
os::vm_allocation_granularity(),
os::vm_page_size(),
mtGC);
if (!backing_store.is_reserved()) {
vm_exit_during_initialization("Could not reserve space for ObjectStartArray");
}

View File

@@ -103,15 +103,10 @@ void ParallelArguments::initialize() {
FullGCForwarding::initialize_flags(heap_reserved_size_bytes());
}
// The alignment used for spaces in young gen and old gen
static size_t default_space_alignment() {
return 64 * K * HeapWordSize;
}
void ParallelArguments::initialize_alignments() {
// Initialize card size before initializing alignments
CardTable::initialize_card_size();
SpaceAlignment = default_space_alignment();
SpaceAlignment = ParallelScavengeHeap::default_space_alignment();
HeapAlignment = compute_heap_alignment();
}
@@ -123,12 +118,23 @@ void ParallelArguments::initialize_heap_flags_and_sizes_one_pass() {
void ParallelArguments::initialize_heap_flags_and_sizes() {
initialize_heap_flags_and_sizes_one_pass();
if (!UseLargePages) {
ParallelScavengeHeap::set_desired_page_size(os::vm_page_size());
return;
}
// If using large-page, need to update SpaceAlignment so that spaces are page-size aligned.
const size_t min_pages = 4; // 1 for eden + 1 for each survivor + 1 for old
const size_t page_sz = os::page_size_for_region_aligned(MinHeapSize, min_pages);
ParallelScavengeHeap::set_desired_page_size(page_sz);
// Can a page size be something else than a power of two?
assert(is_power_of_2((intptr_t)page_sz), "must be a power of 2");
size_t new_alignment = align_up(page_sz, SpaceAlignment);
if (page_sz == os::vm_page_size()) {
log_warning(gc, heap)("MinHeapSize (%zu) must be large enough for 4 * page-size; Disabling UseLargePages for heap", MinHeapSize);
return;
}
// Space is largepage-aligned.
size_t new_alignment = page_sz;
if (new_alignment != SpaceAlignment) {
SpaceAlignment = new_alignment;
// Redo everything from the start
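
os::page_size_for_region_aligned(MinHeapSize, min_pages) above conceptually picks the largest supported page size for which MinHeapSize is a multiple and which still leaves room for at least min_pages pages (one for eden, one per survivor, one for old). If only the base page size qualifies, the new code logs a warning and keeps small pages for the heap. A simplified standalone sketch of that selection; the candidate page sizes are invented here, and the real helper consults what the OS actually supports:

```cpp
#include <cstddef>
#include <cstdio>

// Pick the largest candidate page size such that region_size is a multiple of it
// and the region still holds at least min_pages pages. Conceptual sketch only.
static size_t page_size_for_region(size_t region_size, size_t min_pages,
                                   const size_t* candidates, size_t count) {
  for (size_t i = 0; i < count; i++) {                 // candidates sorted large -> small
    const size_t ps = candidates[i];
    if (region_size % ps == 0 && region_size / ps >= min_pages) {
      return ps;
    }
  }
  return candidates[count - 1];                        // fall back to the base page size
}

int main() {
  const size_t K = 1024, M = 1024 * K, G = 1024 * M;   // assumes a 64-bit size_t
  const size_t candidates[] = { G, 2 * M, 4 * K };     // e.g. 1G and 2M large pages, 4K base page
  const size_t min_pages = 4;                          // eden + two survivors + old

  printf("MinHeapSize  64M -> page size %zuK\n", page_size_for_region(64 * M, min_pages, candidates, 3) / K);
  printf("MinHeapSize   8G -> page size %zuK\n", page_size_for_region(8 * G, min_pages, candidates, 3) / K);
  printf("MinHeapSize   4M -> page size %zuK\n", page_size_for_region(4 * M, min_pages, candidates, 3) / K);
  return 0;
}
```

The last case lands on the 4K base page, which corresponds to the new warning path that disables large pages for the heap.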

View File

@@ -61,11 +61,18 @@ PSYoungGen* ParallelScavengeHeap::_young_gen = nullptr;
PSOldGen* ParallelScavengeHeap::_old_gen = nullptr;
PSAdaptiveSizePolicy* ParallelScavengeHeap::_size_policy = nullptr;
GCPolicyCounters* ParallelScavengeHeap::_gc_policy_counters = nullptr;
size_t ParallelScavengeHeap::_desired_page_size = 0;
jint ParallelScavengeHeap::initialize() {
const size_t reserved_heap_size = ParallelArguments::heap_reserved_size_bytes();
ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment);
assert(_desired_page_size != 0, "Should be initialized");
ReservedHeapSpace heap_rs = Universe::reserve_heap(reserved_heap_size, HeapAlignment, _desired_page_size);
// Adjust SpaceAlignment based on actually used large page size.
if (UseLargePages) {
SpaceAlignment = MAX2(heap_rs.page_size(), default_space_alignment());
}
assert(is_aligned(SpaceAlignment, heap_rs.page_size()), "inv");
trace_actual_reserved_page_size(reserved_heap_size, heap_rs);
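
Because the OS may grant a different large-page size than requested, SpaceAlignment is re-derived above from the page size of the reservation actually obtained, never dropping below the 64K-words default added to ParallelScavengeHeap. A small sketch of that adjustment; the 8-byte HeapWordSize and the sample page sizes are assumptions for illustration:

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdio>

// Mirrors the constant introduced in parallelScavengeHeap.hpp: 64K heap words.
constexpr size_t kHeapWordSize = 8;   // assume a 64-bit VM
constexpr size_t default_space_alignment() { return 64 * 1024 * kHeapWordSize; }

// After reserving the heap, spaces must stay aligned to whole OS pages, so take
// the larger of the granted page size and the default so alignment never shrinks.
static size_t adjusted_space_alignment(size_t granted_page_size) {
  return std::max(granted_page_size, default_space_alignment());
}

int main() {
  printf("granted 2M pages -> SpaceAlignment %zuK\n", adjusted_space_alignment(2 * 1024 * 1024) / 1024);
  printf("granted 1G pages -> SpaceAlignment %zuM\n", adjusted_space_alignment(1024UL * 1024 * 1024) / (1024 * 1024));
  printf("granted 4K pages -> SpaceAlignment %zuK\n", adjusted_space_alignment(4 * 1024) / 1024);
  return 0;
}
```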

View File

@@ -76,6 +76,9 @@ class ParallelScavengeHeap : public CollectedHeap {
static PSAdaptiveSizePolicy* _size_policy;
static GCPolicyCounters* _gc_policy_counters;
// At startup, calculate the desired OS page-size based on heap size and large-page flags.
static size_t _desired_page_size;
GCMemoryManager* _young_manager;
GCMemoryManager* _old_manager;
@@ -128,6 +131,18 @@ public:
_gc_overhead_counter(0),
_is_heap_almost_full(false) {}
// The alignment used for spaces in young gen and old gen
constexpr static size_t default_space_alignment() {
constexpr size_t alignment = 64 * K * HeapWordSize;
static_assert(is_power_of_2(alignment), "inv");
return alignment;
}
static void set_desired_page_size(size_t page_size) {
assert(is_power_of_2(page_size), "precondition");
_desired_page_size = page_size;
}
Name kind() const override {
return CollectedHeap::Parallel;
}

View File

@@ -96,7 +96,7 @@ void PSOldGen::initialize_work() {
// ObjectSpace stuff
//
_object_space = new MutableSpace(virtual_space()->alignment());
_object_space = new MutableSpace(virtual_space()->page_size());
object_space()->initialize(committed_mr,
SpaceDecorator::Clear,
SpaceDecorator::Mangle,

View File

@@ -29,8 +29,8 @@
#include "utilities/align.hpp"
PSVirtualSpace::PSVirtualSpace(ReservedSpace rs, size_t alignment) :
_alignment(alignment)
{
_alignment(alignment),
_page_size(rs.page_size()) {
set_reserved(rs);
set_committed(reserved_low_addr(), reserved_low_addr());
DEBUG_ONLY(verify());
@@ -88,7 +88,8 @@ bool PSVirtualSpace::shrink_by(size_t bytes) {
#ifndef PRODUCT
void PSVirtualSpace::verify() const {
assert(is_aligned(_alignment, os::vm_page_size()), "bad alignment");
assert(is_aligned(_page_size, os::vm_page_size()), "bad alignment");
assert(is_aligned(_alignment, _page_size), "inv");
assert(is_aligned(reserved_low_addr(), _alignment), "bad reserved_low_addr");
assert(is_aligned(reserved_high_addr(), _alignment), "bad reserved_high_addr");
assert(is_aligned(committed_low_addr(), _alignment), "bad committed_low_addr");

View File

@@ -41,6 +41,9 @@ class PSVirtualSpace : public CHeapObj<mtGC> {
// ReservedSpace passed to initialize() must be aligned to this value.
const size_t _alignment;
// OS page size used. If using Transparent Huge Pages, it's the desired large page-size.
const size_t _page_size;
// Reserved area
char* _reserved_low_addr;
char* _reserved_high_addr;
@@ -68,6 +71,7 @@ class PSVirtualSpace : public CHeapObj<mtGC> {
// Accessors (all sizes are bytes).
size_t alignment() const { return _alignment; }
size_t page_size() const { return _page_size; }
char* reserved_low_addr() const { return _reserved_low_addr; }
char* reserved_high_addr() const { return _reserved_high_addr; }
char* committed_low_addr() const { return _committed_low_addr; }

View File

@@ -83,12 +83,12 @@ void PSYoungGen::initialize_work() {
}
if (UseNUMA) {
_eden_space = new MutableNUMASpace(virtual_space()->alignment());
_eden_space = new MutableNUMASpace(virtual_space()->page_size());
} else {
_eden_space = new MutableSpace(virtual_space()->alignment());
_eden_space = new MutableSpace(virtual_space()->page_size());
}
_from_space = new MutableSpace(virtual_space()->alignment());
_to_space = new MutableSpace(virtual_space()->alignment());
_from_space = new MutableSpace(virtual_space()->page_size());
_to_space = new MutableSpace(virtual_space()->page_size());
// Generation Counters - generation 0, 3 subspaces
_gen_counters = new GenerationCounters("new", 0, 3, min_gen_size(),

View File

@@ -40,6 +40,7 @@
/* Parallel GC fields */ \
/**********************/ \
nonstatic_field(PSVirtualSpace, _alignment, const size_t) \
nonstatic_field(PSVirtualSpace, _page_size, const size_t) \
nonstatic_field(PSVirtualSpace, _reserved_low_addr, char*) \
nonstatic_field(PSVirtualSpace, _reserved_high_addr, char*) \
nonstatic_field(PSVirtualSpace, _committed_low_addr, char*) \

View File

@@ -955,7 +955,7 @@ void Universe::initialize_tlab() {
}
}
ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment, size_t desired_page_size) {
assert(alignment <= Arguments::conservative_max_heap_alignment(),
"actual alignment %zu must be within maximum heap alignment %zu",
@@ -966,15 +966,21 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
assert(!UseCompressedOops || (total_reserved <= (OopEncodingHeapMax - os::vm_page_size())),
"heap size is too big for compressed oops");
size_t page_size = os::vm_page_size();
if (UseLargePages && is_aligned(alignment, os::large_page_size())) {
page_size = os::large_page_size();
size_t page_size;
if (desired_page_size == 0) {
if (UseLargePages) {
page_size = os::large_page_size();
} else {
page_size = os::vm_page_size();
}
} else {
// Parallel is the only collector that might opt out of using large pages
// for the heap.
assert(!UseLargePages || UseParallelGC , "Wrong alignment to use large pages");
assert(UseParallelGC , "only Parallel");
// Use caller provided value.
page_size = desired_page_size;
}
assert(is_aligned(heap_size, page_size), "inv");
// Now create the space.
ReservedHeapSpace rhs = HeapReserver::reserve(total_reserved, alignment, page_size, AllocateHeapAt);
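
Universe::reserve_heap() above now accepts a desired_page_size; 0 means "no preference" and keeps the old choice between os::large_page_size() and os::vm_page_size(), while a non-zero value (today only passed by Parallel) is used as-is. A standalone sketch of that selection with stand-in page-size functions:

```cpp
#include <cstddef>
#include <cstdio>

// Stand-ins for os::vm_page_size() / os::large_page_size() outside of HotSpot.
static size_t vm_page_size()    { return 4 * 1024; }
static size_t large_page_size() { return 2 * 1024 * 1024; }

// Choose the page size used for the heap reservation. desired_page_size == 0 means
// "no preference": take large pages if enabled, the base page size otherwise.
static size_t heap_page_size(size_t desired_page_size, bool use_large_pages) {
  if (desired_page_size == 0) {
    return use_large_pages ? large_page_size() : vm_page_size();
  }
  // Only Parallel passes an explicit value today; honour it as-is.
  return desired_page_size;
}

int main() {
  printf("default, +UseLargePages : %zu\n", heap_page_size(0, true));
  printf("default, -UseLargePages : %zu\n", heap_page_size(0, false));
  printf("Parallel asked for 2M   : %zu\n", heap_page_size(2 * 1024 * 1024, true));
  return 0;
}
```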

View File

@@ -315,7 +315,7 @@ class Universe: AllStatic {
DEBUG_ONLY(static bool is_in_heap_or_null(const void* p) { return p == nullptr || is_in_heap(p); })
// Reserve Java heap and determine CompressedOops mode
static ReservedHeapSpace reserve_heap(size_t heap_size, size_t alignment);
static ReservedHeapSpace reserve_heap(size_t heap_size, size_t alignment, size_t desired_page_size = 0);
// Global OopStorages
static OopStorage* vm_weak();