8346572: Check is_reserved() before using ReservedSpace instances

Reviewed-by: tschatzl, wkemper, ayang, ysr
Stefan Karlsson 2025-01-24 09:23:29 +00:00
parent a09f06d538
commit 0df9dcb6aa
10 changed files with 93 additions and 77 deletions
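The pattern applied throughout this change: every call that returns a ReservedSpace (or ReservedHeapSpace) is now followed immediately by an is_reserved() check, before base(), size(), or any tracking/tracing call touches the instance. A minimal sketch of the pattern with a hypothetical caller, reusing the reserve/track calls that appear in the hunks below (not code from this commit):

    // Hypothetical caller illustrating the check-before-use pattern.
    static bool reserve_and_track(size_t size) {
      ReservedSpace rs = MemoryReserver::reserve(size, mtGC);
      if (!rs.is_reserved()) {
        return false;  // handle failure before rs.base()/rs.size() are used
      }
      // From here on, rs is known to hold a valid reservation.
      MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
      return true;
    }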

src/hotspot/share/gc/parallel/parMarkBitMap.cpp

@@ -50,6 +50,11 @@ ParMarkBitMap::initialize(MemRegion covered_region)
                                              rs_align,
                                              page_sz);
+  if (!rs.is_reserved()) {
+    // Failed to reserve memory for the bitmap.
+    return false;
+  }
+
   const size_t used_page_sz = rs.page_size();
   os::trace_page_sizes("Mark Bitmap", raw_bytes, raw_bytes,
                        rs.base(), rs.size(), used_page_sz);
@@ -57,25 +62,24 @@ ParMarkBitMap::initialize(MemRegion covered_region)
   MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
   _virtual_space = new PSVirtualSpace(rs, page_sz);
-  if (_virtual_space != nullptr && _virtual_space->expand_by(_reserved_byte_size)) {
-    _heap_start = covered_region.start();
-    _heap_size = covered_region.word_size();
-    BitMap::bm_word_t* map = (BitMap::bm_word_t*)_virtual_space->reserved_low_addr();
-    _beg_bits = BitMapView(map, bits);
-    return true;
+  if (!_virtual_space->expand_by(_reserved_byte_size)) {
+    // Failed to commit memory for the bitmap.
+    delete _virtual_space;
+    // Release memory reserved in the space.
+    MemoryReserver::release(rs);
+    return false;
   }
-  _heap_start = nullptr;
-  _heap_size = 0;
-  if (_virtual_space != nullptr) {
-    delete _virtual_space;
-    _virtual_space = nullptr;
-    // Release memory reserved in the space.
-    if (rs.is_reserved()) {
-      MemoryReserver::release(rs);
-    }
-  }
-  return false;
+  _heap_start = covered_region.start();
+  _heap_size = covered_region.word_size();
+  BitMap::bm_word_t* map = (BitMap::bm_word_t*)_virtual_space->reserved_low_addr();
+  _beg_bits = BitMapView(map, bits);
+  return true;
 }
 #ifdef ASSERT
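The rewrite above turns the nested success test into early-return guard clauses: each failure path cleans up and returns immediately, and because the new is_reserved() guard at the top of the function already rejected a failed reservation, the commit-failure path can call MemoryReserver::release(rs) unconditionally. A condensed sketch of the resulting control flow (names taken from the hunks above):

    // Condensed shape of the new ParMarkBitMap::initialize() flow.
    if (!rs.is_reserved()) {
      return false;                   // reservation failed
    }
    _virtual_space = new PSVirtualSpace(rs, page_sz);
    if (!_virtual_space->expand_by(_reserved_byte_size)) {
      delete _virtual_space;
      MemoryReserver::release(rs);    // safe: guarded above
      return false;                   // commit failed
    }
    return true;                      // happy path is straight-line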

src/hotspot/share/gc/parallel/psParallelCompact.cpp

@@ -248,25 +248,30 @@ ParallelCompactData::create_vspace(size_t count, size_t element_size)
                                              rs_align,
                                              page_sz);
+  if (!rs.is_reserved()) {
+    // Failed to reserve memory.
+    return nullptr;
+  }
+
   os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, rs.base(),
                        rs.size(), page_sz);
   MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
-  if (vspace != nullptr) {
-    if (vspace->expand_by(_reserved_byte_size)) {
-      return vspace;
-    }
+  if (!vspace->expand_by(_reserved_byte_size)) {
+    // Failed to commit memory.
     delete vspace;
     // Release memory reserved in the space.
-    if (rs.is_reserved()) {
-      MemoryReserver::release(rs);
-      rs = {};
-    }
+    MemoryReserver::release(rs);
+    return nullptr;
   }
-  return nullptr;
+  return vspace;
 }
 bool ParallelCompactData::initialize_region_data(size_t heap_size)
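Note that the deleted vspace != nullptr test was effectively dead code: PSVirtualSpace is a CHeapObj, and HotSpot's CHeapObj allocation exits the VM on failure rather than returning nullptr (assuming the default allocation-failure strategy), so only expand_by() can actually fail here. Condensed, the new shape is:

    // Allocation is assumed to succeed; only the commit step is checked.
    PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
    if (!vspace->expand_by(_reserved_byte_size)) {
      delete vspace;
      MemoryReserver::release(rs);  // valid: rs was checked after reserve()
      return nullptr;
    }
    return vspace;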
@@ -2477,4 +2482,3 @@ void MoveAndUpdateShadowClosure::complete_region(HeapWord* dest_addr, PSParallel
     ParCompactionManager::push_shadow_region_mt_safe(_shadow);
   }
 }
-

src/hotspot/share/gc/serial/serialBlockOffsetTable.cpp

@@ -46,14 +46,17 @@ SerialBlockOffsetTable::SerialBlockOffsetTable(MemRegion reserved,
                                                size_t init_word_size):
   _reserved(reserved) {
   size_t size = compute_size(reserved.word_size());
   ReservedSpace rs = MemoryReserver::reserve(size, mtGC);
+  if (!rs.is_reserved()) {
+    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
+  }
+
-  if (!_vs.initialize(rs, 0)) {
-    vm_exit_during_initialization("Could not reserve enough space for heap offset array");
-  }
+  const bool initialized = _vs.initialize(rs, 0 /* committed_size */);
+  assert(initialized, "Should never fail when committed_size is 0");
   _offset_base = (uint8_t*)(_vs.low_boundary() - (uintptr_t(reserved.start()) >> CardTable::card_shift()));
   resize(init_word_size);
   log_trace(gc, bot)("SerialBlockOffsetTable::SerialBlockOffsetTable: ");
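The downgrade from vm_exit_during_initialization to an assert is justified by the arguments: with a committed size of zero, VirtualSpace initialization only records the reservation's boundaries and commits no memory, so once the new is_reserved() guard has passed there is nothing left that can fail. A sketch of the reasoning (not code from this commit):

    // committed_size == 0 commits nothing, so for a valid reservation
    // the call reduces to bookkeeping and cannot fail.
    VirtualSpace vs;
    const bool ok = vs.initialize(rs, 0 /* committed_size */);
    assert(ok, "cannot fail for a reserved space and zero committed bytes");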

src/hotspot/share/gc/shared/cardTable.cpp

@@ -82,15 +82,16 @@ void CardTable::initialize(void* region0_start, void* region1_start) {
   const size_t rs_align = MAX2(_page_size, os::vm_allocation_granularity());
   ReservedSpace rs = MemoryReserver::reserve(_byte_map_size, rs_align, _page_size);
-  MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
-  os::trace_page_sizes("Card Table", num_bytes, num_bytes,
-                       rs.base(), rs.size(), _page_size);
   if (!rs.is_reserved()) {
     vm_exit_during_initialization("Could not reserve enough space for the "
                                   "card marking array");
   }
+
+  MemTracker::record_virtual_memory_tag((address)rs.base(), mtGC);
+  os::trace_page_sizes("Card Table", num_bytes, num_bytes,
+                       rs.base(), rs.size(), _page_size);
 
   // The assembler store_check code will do an unsigned shift of the oop,
   // then add it to _byte_map_base, i.e.
   //

src/hotspot/share/gc/shenandoah/shenandoahCardTable.cpp

@@ -79,13 +79,14 @@ void ShenandoahCardTable::initialize() {
 }
 
 void ShenandoahCardTable::initialize(const ReservedSpace& card_table) {
+  if (!card_table.is_reserved()) {
+    vm_exit_during_initialization("Could not reserve enough space for the card marking array");
+  }
+
   MemTracker::record_virtual_memory_tag((address)card_table.base(), mtGC);
   os::trace_page_sizes("Card Table", _byte_map_size, _byte_map_size,
                        card_table.base(), card_table.size(), _page_size);
-  if (!card_table.is_reserved()) {
-    vm_exit_during_initialization("Could not reserve enough space for the card marking array");
-  }
   os::commit_memory_or_exit(card_table.base(), _byte_map_size, card_table.alignment(), false,
                             "Cannot commit memory for card table");
 }

src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

@@ -386,6 +386,9 @@ jint ShenandoahHeap::initialize() {
   if (_collection_set == nullptr) {
     cset_rs = MemoryReserver::reserve(cset_size, cset_align, os::vm_page_size());
+    // Maybe Shenandoah wants to check that the memory got reserved here?
     _collection_set = new ShenandoahCollectionSet(this, cset_rs, sh_rs.base());
   }
 
   os::trace_page_sizes_for_requested_size("Collection Set",

src/hotspot/share/memory/universe.cpp

@@ -958,37 +958,34 @@ ReservedHeapSpace Universe::reserve_heap(size_t heap_size, size_t alignment) {
   // Now create the space.
   ReservedHeapSpace rhs = HeapReserver::reserve(total_reserved, alignment, page_size, AllocateHeapAt);
-  if (rhs.is_reserved()) {
-    assert(total_reserved == rhs.size(), "must be exactly of required size");
-    assert(is_aligned(rhs.base(),alignment),"must be exactly of required alignment");
-    assert(markWord::encode_pointer_as_mark(rhs.base()).decode_pointer() == rhs.base(),
-           "area must be distinguishable from marks for mark-sweep");
-    assert(markWord::encode_pointer_as_mark(&rhs.base()[rhs.size()]).decode_pointer() ==
-           &rhs.base()[rhs.size()],
-           "area must be distinguishable from marks for mark-sweep");
-    // We are good.
-    if (AllocateHeapAt != nullptr) {
-      log_info(gc,heap)("Successfully allocated Java heap at location %s", AllocateHeapAt);
-    }
-    if (UseCompressedOops) {
-      CompressedOops::initialize(rhs);
-    }
-    Universe::calculate_verify_data((HeapWord*)rhs.base(), (HeapWord*)rhs.end());
-    return rhs;
+  if (!rhs.is_reserved()) {
+    vm_exit_during_initialization(
+        err_msg("Could not reserve enough space for %zu KB object heap",
+                total_reserved/K));
   }
-  vm_exit_during_initialization(
-      err_msg("Could not reserve enough space for %zuKB object heap",
-              total_reserved/K));
+  assert(total_reserved == rhs.size(), "must be exactly of required size");
+  assert(is_aligned(rhs.base(),alignment),"must be exactly of required alignment");
-  // satisfy compiler
-  ShouldNotReachHere();
+  assert(markWord::encode_pointer_as_mark(rhs.base()).decode_pointer() == rhs.base(),
+         "area must be distinguishable from marks for mark-sweep");
+  assert(markWord::encode_pointer_as_mark(&rhs.base()[rhs.size()]).decode_pointer() ==
+         &rhs.base()[rhs.size()],
+         "area must be distinguishable from marks for mark-sweep");
+  // We are good.
+  if (AllocateHeapAt != nullptr) {
+    log_info(gc,heap)("Successfully allocated Java heap at location %s", AllocateHeapAt);
+  }
+  if (UseCompressedOops) {
+    CompressedOops::initialize(rhs);
+  }
+  Universe::calculate_verify_data((HeapWord*)rhs.base(), (HeapWord*)rhs.end());
+  return rhs;
 }
 
 OopStorage* Universe::vm_weak() {

src/hotspot/share/memory/virtualspace.cpp

@@ -57,7 +57,7 @@ bool VirtualSpace::initialize(ReservedSpace rs, size_t committed_size) {
 }
 
 bool VirtualSpace::initialize_with_granularity(ReservedSpace rs, size_t committed_size, size_t max_commit_granularity) {
-  if(!rs.is_reserved()) return false;  // allocation failed.
+  assert(rs.is_reserved(), "ReservedSpace should have been initialized");
   assert(_low_boundary == nullptr, "VirtualSpace already initialized");
   assert(max_commit_granularity > 0, "Granularity must be non-zero.");
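Replacing the runtime bail-out with an assert tightens the function's contract: is_reserved() is now a documented precondition of initialize_with_granularity(), and the checking burden moves to callers, as the whitebox and gtest changes below show. A hypothetical caller under the new contract:

    // Callers must check before initializing; the callee only asserts.
    ReservedSpace rs = MemoryReserver::reserve(size, mtGC);
    if (!rs.is_reserved()) {
      return false;          // report failure here, at the call site
    }
    VirtualSpace vs;
    vs.initialize(rs, 0);    // precondition: rs.is_reserved()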

src/hotspot/share/prims/whitebox.cpp

@@ -300,11 +300,9 @@ WB_END
 WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
   size_t granularity = os::vm_allocation_granularity();
   ReservedHeapSpace rhs = HeapReserver::reserve(100 * granularity, granularity, os::vm_page_size(), nullptr);
-  VirtualSpace vs;
-  vs.initialize(rhs, 50 * granularity);
 
   // Check if constraints are complied
-  if (!( UseCompressedOops && rhs.base() != nullptr &&
+  if (!( UseCompressedOops && rhs.is_reserved() &&
          CompressedOops::base() != nullptr &&
          CompressedOops::use_implicit_null_checks() )) {
     tty->print_cr("WB_ReadFromNoaccessArea method is useless:\n "
@@ -318,6 +316,10 @@ WB_ENTRY(void, WB_ReadFromNoaccessArea(JNIEnv* env, jobject o))
                   CompressedOops::use_implicit_null_checks());
     return;
   }
+
+  VirtualSpace vs;
+  vs.initialize(rhs, 50 * granularity);
+
   tty->print_cr("Reading from no access area... ");
   tty->print_cr("*(vs.low_boundary() - rhs.noaccess_prefix() / 2 ) = %c",
                 *(vs.low_boundary() - rhs.noaccess_prefix() / 2 ));
@@ -327,6 +329,11 @@ static jint wb_stress_virtual_space_resize(size_t reserved_space_size,
                                            size_t magnitude, size_t iterations) {
   size_t granularity = os::vm_allocation_granularity();
   ReservedHeapSpace rhs = HeapReserver::reserve(reserved_space_size * granularity, granularity, os::vm_page_size(), nullptr);
+  if (!rhs.is_reserved()) {
+    tty->print_cr("Failed to initialize ReservedSpace. Can't proceed.");
+    return 3;
+  }
+
   VirtualSpace vs;
   if (!vs.initialize(rhs, 0)) {
     tty->print_cr("Failed to initialize VirtualSpace. Can't proceed.");
test/hotspot/gtest/memory/test_virtualspace.cpp

@@ -544,7 +544,7 @@ class TestVirtualSpace : AllStatic {
     ReservedSpace reserved = reserve_memory(reserve_size_aligned, mode);
 
-    EXPECT_TRUE(reserved.is_reserved());
+    ASSERT_TRUE(reserved.is_reserved());
 
     VirtualSpace vs;
     bool initialized = initialize_virtual_space(vs, reserved, mode);
@@ -564,9 +564,7 @@ class TestVirtualSpace : AllStatic {
       EXPECT_LT(vs.actual_committed_size(), commit_size + commit_granularity);
     }
 
-    if (reserved.is_reserved()) {
-      MemoryReserver::release(reserved);
-    }
+    MemoryReserver::release(reserved);
   }
 
   static void test_virtual_space_actual_committed_space_one_large_page() {
@@ -580,7 +578,7 @@
                                                  large_page_size,
                                                  large_page_size);
 
-    EXPECT_TRUE(reserved.is_reserved());
+    ASSERT_TRUE(reserved.is_reserved());
 
     VirtualSpace vs;
     bool initialized = vs.initialize(reserved, 0);
@@ -590,9 +588,7 @@
 
     EXPECT_EQ(vs.actual_committed_size(), large_page_size);
 
-    if (reserved.is_reserved()) {
-      MemoryReserver::release(reserved);
-    }
+    MemoryReserver::release(reserved);
   }
 
   static void test_virtual_space_actual_committed_space() {
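The EXPECT_TRUE to ASSERT_TRUE switch matters in these tests: in googletest, EXPECT_* records a failure and keeps executing, which would have let the test initialize a VirtualSpace over an invalid reservation, while ASSERT_* generates a fatal failure and returns from the current function immediately. That early exit is also what makes the now-unconditional MemoryReserver::release(reserved) safe. A generic illustration of the difference (plain googletest, not HotSpot code):

    #include <gtest/gtest.h>

    TEST(ReservationTest, AssertStopsEarly) {
      void* p = nullptr;                 // stand-in for a failed reservation
      ASSERT_NE(p, nullptr);             // fatal failure: returns immediately
      // Not reached on failure, so p is never dereferenced below.
    }

    TEST(ReservationTest, ExpectKeepsGoing) {
      void* p = nullptr;
      EXPECT_NE(p, nullptr);             // non-fatal: execution continues
      // Still runs after the failed expectation.
    }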