8376125: Out of memory in the CDS archive error with a lot of classes

Co-authored-by: Ioi Lam <iklam@openjdk.org>
Reviewed-by: iklam, jiangli
This commit is contained in:
Xue-Lei Andrew Fan 2026-02-23 17:02:14 +00:00
parent 66ba63a4e9
commit 1ae2fee007
11 changed files with 167 additions and 62 deletions

View File

@ -906,8 +906,25 @@ FILE* os::fdopen(int fd, const char* mode) {
// Write nBytes from buf to fd. Returns the total number of bytes written,
// or OS_ERR on failure.
ssize_t os::pd_write(int fd, const void *buf, size_t nBytes) {
  ssize_t res;
#ifdef __APPLE__
  // macOS fails for individual write operations > 2GB.
  // See https://gitlab.haskell.org/ghc/ghc/-/issues/17414
  // Work around it by splitting the request into chunks of at most
  // INT_MAX bytes each and issuing them in a loop.
  ssize_t total = 0;
  while (nBytes > 0) {
    size_t bytes_to_write = MIN2(nBytes, (size_t)INT_MAX);
    // RESTARTABLE retries the syscall when it is interrupted (EINTR),
    // leaving the final result in res.
    RESTARTABLE(::write(fd, buf, bytes_to_write), res);
    if (res == OS_ERR) {
      // NOTE(review): the count of bytes written by earlier iterations is
      // discarded on a later failure; callers only observe OS_ERR.
      return OS_ERR;
    }
    // write() may perform a short write (res < bytes_to_write); advance
    // past the bytes actually written and continue with the remainder.
    buf = (const char*)buf + res;
    nBytes -= res;
    total += res;
  }
  return total;
#else
  RESTARTABLE(::write(fd, buf, nBytes), res);
  return res;
#endif
}
ssize_t os::read_at(int fd, void *buf, unsigned int nBytes, jlong offset) {

View File

@ -36,16 +36,17 @@
class AOTCompressedPointers: public AllStatic {
public:
// For space saving, we can encode the location of metadata objects in the "rw" and "ro"
// regions using a 32-bit offset from the bottom of the mapped AOT metaspace.
// Currently we allow only up to 2GB total size in the rw and ro regions (which are
// contiguous to each other).
// regions using a 32-bit offset from the bottom of the mapped AOT metaspace. Since metadata
// objects are 8-byte aligned, we store scaled offset units (offset_bytes >> 3) to address
// up to ~32GB on 64-bit platforms. We currently limit the MaxMetadataOffsetBytes to about
// 3.5 GB to be compatible with +CompactObjectHeaders.
enum class narrowPtr : u4;
static constexpr size_t MaxMetadataOffsetBytes = 0x7FFFFFFF;
static constexpr size_t MetadataOffsetShift = LP64_ONLY(3) NOT_LP64(0);
static constexpr size_t MaxMetadataOffsetBytes = LP64_ONLY(3584ULL * M) NOT_LP64(0x7FFFFFFF);
// In the future, this could return a different numerical value than
// narrowp if the encoding contains shifts.
// Convert the encoded narrowPtr to a byte offset by applying the shift.
inline static size_t get_byte_offset(narrowPtr narrowp) {
return checked_cast<size_t>(narrowp);
return ((size_t)checked_cast<u4>(narrowp)) << MetadataOffsetShift;
}
inline static narrowPtr null() {
@ -122,7 +123,8 @@ private:
static narrowPtr encode_byte_offset(size_t offset) {
assert(offset != 0, "offset 0 is in protection zone");
precond(offset <= MaxMetadataOffsetBytes);
return checked_cast<narrowPtr>(offset);
assert(is_aligned(offset, (size_t)1 << MetadataOffsetShift), "offset not aligned");
return checked_cast<narrowPtr>(offset >> MetadataOffsetShift);
}
};

View File

@ -321,8 +321,10 @@ void ArchiveBuilder::sort_klasses() {
}
address ArchiveBuilder::reserve_buffer() {
// AOTCodeCache::max_aot_code_size() accounts for aot code region.
size_t buffer_size = LP64_ONLY(CompressedClassSpaceSize) NOT_LP64(256 * M) + AOTCodeCache::max_aot_code_size();
// On 64-bit: reserve address space for archives up to the max encoded offset limit.
// On 32-bit: use 256MB + AOT code size due to limited virtual address space.
size_t buffer_size = LP64_ONLY(AOTCompressedPointers::MaxMetadataOffsetBytes)
NOT_LP64(256 * M + AOTCodeCache::max_aot_code_size());
ReservedSpace rs = MemoryReserver::reserve(buffer_size,
AOTMetaspace::core_region_alignment(),
os::vm_page_size(),

View File

@ -312,22 +312,9 @@ void DumpRegion::pack(DumpRegion* next) {
}
void WriteClosure::do_ptr(void** p) {
// Write ptr into the archive; ptr can be:
// (a) null -> written as 0
// (b) a "buffered" address -> written as is
// (c) a "source" address -> convert to "buffered" and write
// The common case is (c). E.g., when writing the vmClasses into the archive.
// We have (b) only when we don't have a corresponding source object. E.g.,
// the archived c++ vtable entries.
address ptr = *(address*)p;
if (ptr != nullptr && !ArchiveBuilder::current()->is_in_buffer_space(ptr)) {
ptr = ArchiveBuilder::current()->get_buffered_addr(ptr);
}
// null pointers do not need to be converted to offsets
if (ptr != nullptr) {
ptr = (address)ArchiveBuilder::current()->buffer_to_offset(ptr);
}
_dump_region->append_intptr_t((intptr_t)ptr, false);
AOTCompressedPointers::narrowPtr narrowp = AOTCompressedPointers::encode(ptr);
_dump_region->append_intptr_t(checked_cast<intptr_t>(narrowp), false);
}
void ReadClosure::do_ptr(void** p) {

View File

@ -140,7 +140,7 @@ void CppVtableCloner<T>::initialize(const char* name, CppVtableInfo* info) {
// We already checked (and, if necessary, adjusted n) when the vtables were allocated, so we are
// safe to do memcpy.
log_debug(aot, vtables)("Copying %3d vtable entries for %s", n, name);
log_debug(aot, vtables)("Copying %3d vtable entries for %s to " INTPTR_FORMAT, n, name, p2i(dstvtable));
memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
}

View File

@ -717,8 +717,8 @@ bool FileMapInfo::init_from_file(int fd) {
}
void FileMapInfo::seek_to_position(size_t pos) {
if (os::lseek(_fd, (long)pos, SEEK_SET) < 0) {
aot_log_error(aot)("Unable to seek to position %zu", pos);
if (os::lseek(_fd, (jlong)pos, SEEK_SET) < 0) {
aot_log_error(aot)("Unable to seek to position %zu (errno=%d: %s)", pos, errno, os::strerror(errno));
AOTMetaspace::unrecoverable_loading_error();
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2026, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -96,14 +96,16 @@ void CompactHashtableWriter::allocate_table() {
"Too many entries.");
}
_compact_buckets = ArchiveBuilder::new_ro_array<u4>(_num_buckets + 1);
_compact_entries = ArchiveBuilder::new_ro_array<u4>(entries_space);
_num_compact_buckets = checked_cast<size_t>(_num_buckets + 1); // extra slot for TABLEEND_BUCKET_TYPE
_num_compact_entries = checked_cast<size_t>(entries_space);
_compact_buckets = (u4*)ArchiveBuilder::ro_region_alloc(_num_compact_buckets * sizeof(u4));
_compact_entries = (u4*)ArchiveBuilder::ro_region_alloc(_num_compact_entries * sizeof(u4));
_stats->bucket_count = _num_buckets;
_stats->bucket_bytes = align_up(_compact_buckets->size() * BytesPerWord,
_stats->bucket_bytes = align_up(checked_cast<int>(_num_compact_buckets * sizeof(u4)),
SharedSpaceObjectAlignment);
_stats->hashentry_count = _num_entries_written;
_stats->hashentry_bytes = align_up(_compact_entries->size() * BytesPerWord,
_stats->hashentry_bytes = align_up(checked_cast<int>(_num_compact_entries * sizeof(u4)),
SharedSpaceObjectAlignment);
}
@ -114,21 +116,21 @@ void CompactHashtableWriter::dump_table(NumberSeq* summary) {
GrowableArray<Entry>* bucket = _buckets[index];
int bucket_size = bucket->length();
if (bucket_size == 1) {
_compact_buckets->at_put(index, BUCKET_INFO(offset, VALUE_ONLY_BUCKET_TYPE));
compact_buckets_set(index, BUCKET_INFO(offset, VALUE_ONLY_BUCKET_TYPE));
Entry ent = bucket->at(0);
// bucket with one entry is value_only and only has the encoded_value
_compact_entries->at_put(offset++, ent.encoded_value());
compact_entries_set(offset++, ent.encoded_value());
_num_value_only_buckets++;
} else {
// regular bucket, it could contain zero or more than one entry,
// each entry is a <hash, encoded_value> pair
_compact_buckets->at_put(index, BUCKET_INFO(offset, REGULAR_BUCKET_TYPE));
compact_buckets_set(index, BUCKET_INFO(offset, REGULAR_BUCKET_TYPE));
for (int i=0; i<bucket_size; i++) {
Entry ent = bucket->at(i);
_compact_entries->at_put(offset++, u4(ent.hash())); // write entry hash
_compact_entries->at_put(offset++, ent.encoded_value()); // write entry encoded_value
compact_entries_set(offset++, u4(ent.hash())); // write entry hash
compact_entries_set(offset++, ent.encoded_value()); // write entry encoded_value
}
if (bucket_size == 0) {
_num_empty_buckets++;
@ -140,10 +142,19 @@ void CompactHashtableWriter::dump_table(NumberSeq* summary) {
}
// Mark the end of the buckets
_compact_buckets->at_put(_num_buckets, BUCKET_INFO(offset, TABLEEND_BUCKET_TYPE));
assert(offset == (u4)_compact_entries->length(), "sanity");
compact_buckets_set(_num_buckets, BUCKET_INFO(offset, TABLEEND_BUCKET_TYPE));
assert(offset == checked_cast<u4>(_num_compact_entries), "sanity");
}
void CompactHashtableWriter::compact_buckets_set(u4 index, u4 value) {
precond(index < _num_compact_buckets);
_compact_buckets[index] = value;
}
void CompactHashtableWriter::compact_entries_set(u4 index, u4 value) {
precond(index < _num_compact_entries);
_compact_entries[index] = value;
}
// Write the compact table
void CompactHashtableWriter::dump(SimpleCompactHashtable *cht, const char* table_name) {
@ -154,7 +165,7 @@ void CompactHashtableWriter::dump(SimpleCompactHashtable *cht, const char* table
int table_bytes = _stats->bucket_bytes + _stats->hashentry_bytes;
address base_address = address(SharedBaseAddress);
cht->init(base_address, _num_entries_written, _num_buckets,
_compact_buckets->data(), _compact_entries->data());
_compact_buckets, _compact_entries);
LogMessage(aot, hashtables) msg;
if (msg.is_info()) {

View File

@ -115,8 +115,13 @@ private:
int _num_other_buckets;
GrowableArray<Entry>** _buckets;
CompactHashtableStats* _stats;
Array<u4>* _compact_buckets;
Array<u4>* _compact_entries;
u4* _compact_buckets;
size_t _num_compact_buckets;
u4* _compact_entries;
size_t _num_compact_entries;
void compact_buckets_set(u4 index, u4 value);
void compact_entries_set(u4 index, u4 value);
public:
// This is called at dump-time only

View File

@ -1288,6 +1288,12 @@
VM_INT_CONSTANTS_GC(declare_constant, \
declare_constant_with_value) \
\
/*****************/ \
/* CDS constants */ \
/*****************/ \
\
CDS_ONLY(declare_constant(AOTCompressedPointers::MetadataOffsetShift)) \
\
/******************/ \
/* Useful globals */ \
/******************/ \

View File

@ -41,6 +41,7 @@ public class FileMapInfo {
private static Address rwRegionEndAddress;
private static Address vtablesIndex;
private static Address mapped_base_address;
private static long metadataOffsetShift;
// HashMap created by mapping the vTable addresses in the rw region with
// the corresponding metadata type.
@ -97,12 +98,15 @@ public class FileMapInfo {
headerObj = VMObjectFactory.newObject(FileMapHeader.class, header);
// char* mapped_base_address = header->_mapped_base_address
// narrowPtr cloned_vtable_narrowPtr = header->_cloned_vtable_offset
// narrowPtr cloned_vtable_narrowPtr = header->_cloned_vtables
// size_t cloned_vtable_offset = AOTCompressedPointers::get_byte_offset(cloned_vtable_narrowPtr);
// CppVtableInfo** vtablesIndex = mapped_base_address + cloned_vtable_offset;
mapped_base_address = get_AddressField(FileMapHeader_type, header, "_mapped_base_address");
long cloned_vtable_narrowPtr = get_CIntegerField(FileMapHeader_type, header, "_cloned_vtables");
long cloned_vtable_offset = cloned_vtable_narrowPtr; // Currently narrowPtr is the same as offset
// narrowPtr stores scaled offset units (byte_offset >> MetadataOffsetShift).
// Apply the left shift to convert back to byte offset.
metadataOffsetShift = db.lookupIntConstant("AOTCompressedPointers::MetadataOffsetShift").longValue();
long cloned_vtable_offset = cloned_vtable_narrowPtr << metadataOffsetShift;
vtablesIndex = mapped_base_address.addOffsetTo(cloned_vtable_offset);
// CDSFileMapRegion* rw_region = &header->_region[rw];
@ -176,9 +180,9 @@ public class FileMapInfo {
// vtablesIndex points to an array like this:
// long info[] = {
// offset of the CppVtableInfo for ConstantPool,
// offset of the CppVtableInfo for InstanceKlass,
// offset of the CppVtableInfo for InstanceClassLoaderKlass,
// narrowPtr of the CppVtableInfo for ConstantPool,
// narrowPtr of the CppVtableInfo for InstanceKlass,
// narrowPtr of the CppVtableInfo for InstanceClassLoaderKlass,
// ...
// };
//
@ -189,8 +193,8 @@ public class FileMapInfo {
// };
//
// The loop below computes the following
// CppVtableInfo* t_ConstantPool = mapped_base_address + info[0];
// CppVtableInfo* t_InstanceKlass = mapped_base_address + info[1];
// CppVtableInfo* t_ConstantPool = mapped_base_address + (info[0] << metadataOffsetShift);
// CppVtableInfo* t_InstanceKlass = mapped_base_address + (info[1] << metadataOffsetShift);
// ...
//
// If we have the following objects
@ -203,21 +207,21 @@ public class FileMapInfo {
//
// To get an idea what these address look like, do this:
//
// $ java -Xlog:cds+vtables=debug -XX:+UnlockDiagnosticVMOptions -XX:ArchiveRelocationMode=0 --version
// [0.002s][debug][cds,vtables] Copying 14 vtable entries for ConstantPool to 0x800000018
// [0.002s][debug][cds,vtables] Copying 41 vtable entries for InstanceKlass to 0x800000090
// [0.002s][debug][cds,vtables] Copying 41 vtable entries for InstanceClassLoaderKlass to 0x8000001e0
// [0.002s][debug][cds,vtables] Copying 41 vtable entries for InstanceMirrorKlass to 0x800000330
// [0.002s][debug][cds,vtables] Copying 41 vtable entries for InstanceRefKlass to 0x800000480
// [0.002s][debug][cds,vtables] Copying 41 vtable entries for InstanceStackChunkKlass to 0x8000005d0
// [0.002s][debug][cds,vtables] Copying 14 vtable entries for Method to 0x800000720
// [0.002s][debug][cds,vtables] Copying 42 vtable entries for ObjArrayKlass to 0x800000798
// [0.002s][debug][cds,vtables] Copying 42 vtable entries for TypeArrayKlass to 0x8000008f0
// java 23-internal 2024-09-17
// $ java -Xlog:aot+vtables=debug -XX:+UnlockDiagnosticVMOptions -XX:ArchiveRelocationMode=0 --version
// [0.002s][debug][aot,vtables] Copying 14 vtable entries for ConstantPool to 0x800000018
// [0.002s][debug][aot,vtables] Copying 41 vtable entries for InstanceKlass to 0x800000090
// [0.002s][debug][aot,vtables] Copying 41 vtable entries for InstanceClassLoaderKlass to 0x8000001e0
// [0.002s][debug][aot,vtables] Copying 41 vtable entries for InstanceMirrorKlass to 0x800000330
// [0.002s][debug][aot,vtables] Copying 41 vtable entries for InstanceRefKlass to 0x800000480
// [0.002s][debug][aot,vtables] Copying 41 vtable entries for InstanceStackChunkKlass to 0x8000005d0
// [0.002s][debug][aot,vtables] Copying 14 vtable entries for Method to 0x800000720
// [0.002s][debug][aot,vtables] Copying 42 vtable entries for ObjArrayKlass to 0x800000798
// [0.002s][debug][aot,vtables] Copying 42 vtable entries for TypeArrayKlass to 0x8000008f0
// ...
for (int i=0; i < metadataTypeArray.length; i++) {
long vtable_offset = vtablesIndex.getJLongAt(i * addressSize); // long offset = _index[i]
long narrowPtr = vtablesIndex.getJLongAt(i * addressSize);
long vtable_offset = narrowPtr << metadataOffsetShift;
// CppVtableInfo* t = the address of the CppVtableInfo for the i-th table
Address vtableInfoAddress = mapped_base_address.addOffsetTo(vtable_offset);

View File

@ -0,0 +1,71 @@
/*
* Copyright (c) 2026 salesforce.com, inc. All Rights Reserved
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "cds/aotCompressedPointers.hpp"
#include "unittest.hpp"
#include "utilities/globalDefinitions.hpp"
#include <cstdint>
TEST_VM(ScaledOffsetsTest, constants) {
  // Sanity-check the compile-time encoding parameters of AOTCompressedPointers.
  constexpr size_t shift = AOTCompressedPointers::MetadataOffsetShift;
  constexpr size_t max_bytes = AOTCompressedPointers::MaxMetadataOffsetBytes;
#ifdef _LP64
  // 64-bit: offsets are scaled by 8 bytes (shift of 3); the byte limit is
  // 3.5 GB and must be a multiple of the scaling unit.
  ASSERT_EQ((size_t)3, shift);
  ASSERT_TRUE(is_aligned(max_bytes, (size_t)1 << shift));
  ASSERT_EQ((size_t)(3584ULL * M), max_bytes);
#else
  // 32-bit: offsets are stored unscaled and limited to 2 GB - 1.
  ASSERT_EQ((size_t)0, shift);
  ASSERT_EQ((size_t)0x7FFFFFFF, max_bytes);
#endif
}
TEST_VM(ScaledOffsetsTest, encode_decode_roundtrip) {
  // get_byte_offset() must multiply the raw narrowPtr value by the scaling
  // unit (1 << MetadataOffsetShift). encode_byte_offset() is private, so
  // only the decode direction can be exercised here.
  const size_t unit = (size_t)1 << AOTCompressedPointers::MetadataOffsetShift;
  const u4 raw_values[] = { 1, 2, 1024 };
  for (u4 raw : raw_values) {
    AOTCompressedPointers::narrowPtr np = static_cast<AOTCompressedPointers::narrowPtr>(raw);
    ASSERT_EQ((size_t)raw * unit, AOTCompressedPointers::get_byte_offset(np));
  }
#ifdef _LP64
  // The largest encodable narrowPtr must decode without truncation, and its
  // byte offset must be at least the configured maximum minus one unit.
  AOTCompressedPointers::narrowPtr np_top = static_cast<AOTCompressedPointers::narrowPtr>(UINT32_MAX);
  const uint64_t top_bytes = (uint64_t)UINT32_MAX << AOTCompressedPointers::MetadataOffsetShift;
  ASSERT_EQ(top_bytes, AOTCompressedPointers::get_byte_offset(np_top));
  ASSERT_GE(top_bytes, AOTCompressedPointers::MaxMetadataOffsetBytes - unit);
#endif
}
TEST_VM(ScaledOffsetsTest, null_handling) {
  // The null encoding is the zero narrowPtr value and must decode to byte
  // offset 0.
  const AOTCompressedPointers::narrowPtr null_np = AOTCompressedPointers::null();
  ASSERT_EQ(static_cast<AOTCompressedPointers::narrowPtr>(0), null_np);
  ASSERT_EQ((size_t)0, AOTCompressedPointers::get_byte_offset(null_np));
}