8365932: Implementation of JEP 516: Ahead-of-Time Object Caching with Any GC

Co-authored-by: Axel Boldt-Christmas <aboldtch@openjdk.org>
Co-authored-by: Joel Sikström <jsikstro@openjdk.org>
Co-authored-by: Stefan Karlsson <stefank@openjdk.org>
Reviewed-by: aboldtch, iklam, kvn
Erik Österlund 2025-11-07 15:28:51 +00:00
parent d5803aa78a
commit c8656449c2
104 changed files with 5281 additions and 1657 deletions


@@ -148,7 +148,6 @@ define CreateCDSArchive
$1_$2_DUMP_EXTRA_ARG := $$($1_$2_COOPS_OPTION) $$($1_$2_COH_OPTION)
$1_$2_DUMP_TYPE := $(if $(findstring _nocoops, $2),-NOCOOPS,)$(if $(findstring _coh, $2),-COH,)
# Only G1 supports dumping the shared heap, so explicitly use G1 if the JVM supports it.
$1_$2_CDS_DUMP_FLAGS := $(CDS_DUMP_FLAGS) $(if $(filter g1gc, $(JVM_FEATURES_$1)), -XX:+UseG1GC)
ifeq ($(OPENJDK_TARGET_OS), windows)


@@ -23,7 +23,10 @@
*/
#include "cds/aotMapLogger.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotStreamedHeapLoader.hpp"
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "classfile/systemDictionaryShared.hpp"
@@ -45,10 +48,7 @@ bool AOTMapLogger::_is_logging_at_bootstrap;
bool AOTMapLogger::_is_runtime_logging;
intx AOTMapLogger::_buffer_to_requested_delta;
intx AOTMapLogger::_requested_to_mapped_metadata_delta;
size_t AOTMapLogger::_num_root_segments;
size_t AOTMapLogger::_num_obj_arrays_logged;
GrowableArrayCHeap<AOTMapLogger::FakeOop, mtClass>* AOTMapLogger::_roots;
ArchiveHeapInfo* AOTMapLogger::_dumptime_heap_info;
class AOTMapLogger::RequestedMetadataAddr {
address _raw_addr;
@@ -86,12 +86,10 @@ void AOTMapLogger::ergo_initialize() {
}
void AOTMapLogger::dumptime_log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
ArchiveHeapInfo* heap_info,
ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info,
char* bitmap, size_t bitmap_size_in_bytes) {
_is_runtime_logging = false;
_buffer_to_requested_delta = ArchiveBuilder::current()->buffer_to_requested_delta();
_num_root_segments = mapinfo->heap_root_segments().count();
_dumptime_heap_info = heap_info;
log_file_header(mapinfo);
@@ -106,8 +104,11 @@ void AOTMapLogger::dumptime_log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
log_as_hex((address)bitmap, bitmap_end, nullptr);
#if INCLUDE_CDS_JAVA_HEAP
if (heap_info->is_used()) {
dumptime_log_heap_region(heap_info);
if (mapped_heap_info != nullptr && mapped_heap_info->is_used()) {
dumptime_log_mapped_heap_region(mapped_heap_info);
}
if (streamed_heap_info != nullptr && streamed_heap_info->is_used()) {
dumptime_log_streamed_heap_region(streamed_heap_info);
}
#endif
@@ -192,7 +193,6 @@ void AOTMapLogger::runtime_log(FileMapInfo* mapinfo, GrowableArrayCHeap<Archived
#if INCLUDE_CDS_JAVA_HEAP
if (mapinfo->has_heap_region() && CDSConfig::is_loading_heap()) {
_num_root_segments = mapinfo->heap_root_segments().count();
runtime_log_heap_region(mapinfo);
}
#endif
@@ -501,62 +501,38 @@ void AOTMapLogger::log_as_hex(address base, address top, address requested_base,
// Hence, in general, we cannot use the regular oop API (such as oopDesc::obj_field()) on these objects. There
// are a few rare cases where the regular oop API works, but these are all guarded with the raw_oop() method and
// should be used with care.
//
// Each AOT heap reader and writer has its own oop_iterator() API that retrieves all the data required to build
// fake oops for logging.
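For orientation, here is a minimal sketch of how these pieces are meant to be used on the dump-time path. It is illustrative only, mirroring log_archived_objects() further down in this file; heap_info stands for the ArchiveMappedHeapInfo* supplied by the caller:

AOTMapLogger::OopDataIterator* iter = AOTMappedHeapWriter::oop_iterator(heap_info);
while (iter->has_next()) {
  // A FakeOop wraps one OopData record plus the iterator used to follow references.
  AOTMapLogger::FakeOop fake_oop(iter, iter->next());
  Klass* k = fake_oop.real_klass(); // safe: the klass was captured into OopData
  size_t words = fake_oop.size();   // object size in heap words, also captured
}
delete iter;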
class AOTMapLogger::FakeOop {
static int _requested_shift;
static intx _buffer_to_requested_delta;
static address _buffer_start;
static address _buffer_end;
static uint64_t _buffer_start_narrow_oop; // The encoded narrow oop for the objects at _buffer_start
OopDataIterator* _iter;
OopData _data;
address _buffer_addr;
static void assert_range(address buffer_addr) {
assert(_buffer_start <= buffer_addr && buffer_addr < _buffer_end, "range check");
address* buffered_field_addr(int field_offset) {
return (address*)(buffered_addr() + field_offset);
}
address* field_addr(int field_offset) {
return (address*)(_buffer_addr + field_offset);
}
protected:
public:
RequestedMetadataAddr metadata_field(int field_offset) {
return RequestedMetadataAddr(*(address*)(field_addr(field_offset)));
return RequestedMetadataAddr(*(address*)(buffered_field_addr(field_offset)));
}
address buffered_addr() {
return _data._buffered_addr;
}
// Return an "oop" pointer so we can use APIs that accept regular oops. This
// must be used with care, as only a limited number of APIs can work with oops that
// live outside of the range of the heap.
oop raw_oop() { return cast_to_oop(_buffer_addr); }
oop raw_oop() { return _data._raw_oop; }
public:
static void init_globals(address requested_base, address requested_start, int requested_shift,
address buffer_start, address buffer_end) {
_requested_shift = requested_shift;
_buffer_to_requested_delta = requested_start - buffer_start;
_buffer_start = buffer_start;
_buffer_end = buffer_end;
FakeOop() : _data() {}
FakeOop(OopDataIterator* iter, OopData data) : _iter(iter), _data(data) {}
precond(requested_start >= requested_base);
if (UseCompressedOops) {
_buffer_start_narrow_oop = (uint64_t)(pointer_delta(requested_start, requested_base, 1)) >> _requested_shift;
assert(_buffer_start_narrow_oop < 0xffffffff, "sanity");
} else {
_buffer_start_narrow_oop = 0xdeadbeed;
}
}
FakeOop() : _buffer_addr(nullptr) {}
FakeOop(address buffer_addr) : _buffer_addr(buffer_addr) {
if (_buffer_addr != nullptr) {
assert_range(_buffer_addr);
}
}
FakeMirror& as_mirror();
FakeObjArray& as_obj_array();
FakeString& as_string();
FakeTypeArray& as_type_array();
FakeMirror as_mirror();
FakeObjArray as_obj_array();
FakeString as_string();
FakeTypeArray as_type_array();
RequestedMetadataAddr klass() {
address rk = (address)real_klass();
@@ -570,61 +546,45 @@ public:
Klass* real_klass() {
assert(UseCompressedClassPointers, "heap archiving requires UseCompressedClassPointers");
if (_is_runtime_logging) {
return raw_oop()->klass();
} else {
return ArchiveHeapWriter::real_klass_of_buffered_oop(_buffer_addr);
}
return _data._klass;
}
// in heap words
size_t size() {
if (_is_runtime_logging) {
return raw_oop()->size_given_klass(real_klass());
} else {
return ArchiveHeapWriter::size_of_buffered_oop(_buffer_addr);
}
return _data._size;
}
bool is_root_segment() {
return _data._is_root_segment;
}
bool is_array() { return real_klass()->is_array_klass(); }
bool is_null() { return _buffer_addr == nullptr; }
bool is_null() { return buffered_addr() == nullptr; }
int array_length() {
precond(is_array());
return arrayOop(raw_oop())->length();
}
intptr_t target_location() {
return _data._target_location;
}
address requested_addr() {
return _buffer_addr + _buffer_to_requested_delta;
return _data._requested_addr;
}
uint32_t as_narrow_oop_value() {
precond(UseCompressedOops);
if (_buffer_addr == nullptr) {
return 0;
}
uint64_t pd = (uint64_t)(pointer_delta(_buffer_addr, _buffer_start, 1));
return checked_cast<uint32_t>(_buffer_start_narrow_oop + (pd >> _requested_shift));
return _data._narrow_location;
}
FakeOop read_oop_at(narrowOop* addr) { // +UseCompressedOops
uint64_t n = (uint64_t)(*addr);
if (n == 0) {
return FakeOop(nullptr);
} else {
precond(n >= _buffer_start_narrow_oop);
address value = _buffer_start + ((n - _buffer_start_narrow_oop) << _requested_shift);
return FakeOop(value);
}
return FakeOop(_iter, _iter->obj_at(addr));
}
FakeOop read_oop_at(oop* addr) { // -UseCompressedOops
address requested_value = cast_from_oop<address>(*addr);
if (requested_value == nullptr) {
return FakeOop(nullptr);
} else {
return FakeOop(requested_value - _buffer_to_requested_delta);
}
return FakeOop(_iter, _iter->obj_at(addr));
}
FakeOop obj_field(int field_offset) {
@@ -644,6 +604,8 @@ public:
class AOTMapLogger::FakeMirror : public AOTMapLogger::FakeOop {
public:
FakeMirror(OopDataIterator* iter, OopData data) : FakeOop(iter, data) {}
void print_class_signature_on(outputStream* st);
Klass* real_mirrored_klass() {
@@ -662,6 +624,8 @@ class AOTMapLogger::FakeObjArray : public AOTMapLogger::FakeOop {
}
public:
FakeObjArray(OopDataIterator* iter, OopData data) : FakeOop(iter, data) {}
int length() {
return raw_objArrayOop()->length();
}
@@ -676,6 +640,8 @@ public:
class AOTMapLogger::FakeString : public AOTMapLogger::FakeOop {
public:
FakeString(OopDataIterator* iter, OopData data) : FakeOop(iter, data) {}
bool is_latin1() {
jbyte coder = raw_oop()->byte_field(java_lang_String::coder_offset());
assert(CompactStrings || coder == java_lang_String::CODER_UTF16, "Must be UTF16 without CompactStrings");
@@ -694,6 +660,8 @@ class AOTMapLogger::FakeTypeArray : public AOTMapLogger::FakeOop {
}
public:
FakeTypeArray(OopDataIterator* iter, OopData data) : FakeOop(iter, data) {}
void print_elements_on(outputStream* st) {
TypeArrayKlass::cast(real_klass())->oop_print_elements_on(raw_typeArrayOop(), st);
}
@@ -703,24 +671,24 @@ public:
jchar char_at(int i) { return raw_typeArrayOop()->char_at(i); }
}; // AOTMapLogger::FakeTypeArray
AOTMapLogger::FakeMirror& AOTMapLogger::FakeOop::as_mirror() {
AOTMapLogger::FakeMirror AOTMapLogger::FakeOop::as_mirror() {
precond(real_klass() == vmClasses::Class_klass());
return (FakeMirror&)*this;
return FakeMirror(_iter, _data);
}
AOTMapLogger::FakeObjArray& AOTMapLogger::FakeOop::as_obj_array() {
AOTMapLogger::FakeObjArray AOTMapLogger::FakeOop::as_obj_array() {
precond(real_klass()->is_objArray_klass());
return (FakeObjArray&)*this;
return FakeObjArray(_iter, _data);
}
AOTMapLogger::FakeTypeArray& AOTMapLogger::FakeOop::as_type_array() {
AOTMapLogger::FakeTypeArray AOTMapLogger::FakeOop::as_type_array() {
precond(real_klass()->is_typeArray_klass());
return (FakeTypeArray&)*this;
return FakeTypeArray(_iter, _data);
}
AOTMapLogger::FakeString& AOTMapLogger::FakeOop::as_string() {
AOTMapLogger::FakeString AOTMapLogger::FakeOop::as_string() {
precond(real_klass() == vmClasses::String_klass());
return (FakeString&)*this;
return FakeString(_iter, _data);
}
void AOTMapLogger::FakeMirror::print_class_signature_on(outputStream* st) {
@@ -823,90 +791,104 @@ public:
}
}; // AOTMapLogger::ArchivedFieldPrinter
int AOTMapLogger::FakeOop::_requested_shift;
intx AOTMapLogger::FakeOop::_buffer_to_requested_delta;
address AOTMapLogger::FakeOop::_buffer_start;
address AOTMapLogger::FakeOop::_buffer_end;
uint64_t AOTMapLogger::FakeOop::_buffer_start_narrow_oop;
void AOTMapLogger::dumptime_log_heap_region(ArchiveHeapInfo* heap_info) {
void AOTMapLogger::dumptime_log_mapped_heap_region(ArchiveMappedHeapInfo* heap_info) {
MemRegion r = heap_info->buffer_region();
address buffer_start = address(r.start()); // start of the current oop inside the buffer
address buffer_end = address(r.end());
address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)ArchiveHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? ArchiveHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
int requested_shift = CompressedOops::shift();
FakeOop::init_globals(requested_base, requested_start, requested_shift, buffer_start, buffer_end);
address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? AOTMappedHeapWriter::buffered_addr_to_requested_addr(buffer_start) : requested_base;
log_region_range("heap", buffer_start, buffer_end, requested_start);
log_oops(buffer_start, buffer_end);
log_archived_objects(AOTMappedHeapWriter::oop_iterator(heap_info));
}
void AOTMapLogger::dumptime_log_streamed_heap_region(ArchiveStreamedHeapInfo* heap_info) {
MemRegion r = heap_info->buffer_region();
address buffer_start = address(r.start()); // start of the current oop inside the buffer
address buffer_end = address(r.end());
log_region_range("heap", buffer_start, buffer_end, nullptr);
log_archived_objects(AOTStreamedHeapWriter::oop_iterator(heap_info));
}
void AOTMapLogger::runtime_log_heap_region(FileMapInfo* mapinfo) {
ResourceMark rm;
int heap_region_index = AOTMetaspace::hp;
FileMapRegion* r = mapinfo->region_at(heap_region_index);
size_t alignment = ObjectAlignmentInBytes;
size_t alignment = (size_t)ObjectAlignmentInBytes;
// Allocate a buffer and read the image of the archived heap region. This buffer is outside
// of the real Java heap, so we must use FakeOop to access the contents of the archived heap objects.
char* buffer = resource_allocate_bytes(r->used() + alignment);
address buffer_start = (address)align_up(buffer, alignment);
address buffer_end = buffer_start + r->used();
if (!mapinfo->read_region(heap_region_index, (char*)buffer_start, r->used(), /* do_commit = */ false)) {
log_error(aot)("Cannot read heap region; AOT map logging of heap objects failed");
return;
if (mapinfo->object_streaming_mode()) {
address buffer_start = (address)r->mapped_base();
address buffer_end = buffer_start + r->used();
log_region_range("heap", buffer_start, buffer_end, nullptr);
log_archived_objects(AOTStreamedHeapLoader::oop_iterator(mapinfo, buffer_start, buffer_end));
} else {
// Allocate a buffer and read the image of the archived heap region. This buffer is outside
// of the real Java heap, so we must use FakeOop to access the contents of the archived heap objects.
char* buffer = resource_allocate_bytes(r->used() + alignment);
address buffer_start = (address)align_up(buffer, alignment);
address buffer_end = buffer_start + r->used();
if (!mapinfo->read_region(heap_region_index, (char*)buffer_start, r->used(), /* do_commit = */ false)) {
log_error(aot)("Cannot read heap region; AOT map logging of heap objects failed");
return;
}
address requested_base = UseCompressedOops ? (address)mapinfo->narrow_oop_base() : AOTMappedHeapLoader::heap_region_requested_address(mapinfo);
address requested_start = requested_base + r->mapping_offset();
log_region_range("heap", buffer_start, buffer_end, requested_start);
log_archived_objects(AOTMappedHeapLoader::oop_iterator(mapinfo, buffer_start, buffer_end));
}
address requested_base = UseCompressedOops ? (address)mapinfo->narrow_oop_base() : mapinfo->heap_region_requested_address();
address requested_start = requested_base + r->mapping_offset();
int requested_shift = mapinfo->narrow_oop_shift();
FakeOop::init_globals(requested_base, requested_start, requested_shift, buffer_start, buffer_end);
log_region_range("heap", buffer_start, buffer_end, requested_start);
log_oops(buffer_start, buffer_end);
}
void AOTMapLogger::log_oops(address buffer_start, address buffer_end) {
void AOTMapLogger::log_archived_objects(OopDataIterator* iter) {
LogStreamHandle(Debug, aot, map) st;
if (!st.is_enabled()) {
return;
}
_roots = new GrowableArrayCHeap<FakeOop, mtClass>();
_num_obj_arrays_logged = 0;
for (address fop = buffer_start; fop < buffer_end; ) {
FakeOop fake_oop(fop);
st.print(PTR_FORMAT ": @@ Object ", p2i(fake_oop.requested_addr()));
print_oop_info_cr(&st, fake_oop, /*print_requested_addr=*/false);
// Roots that are not segmented
GrowableArrayCHeap<OopData, mtClass>* normal_roots = iter->roots();
for (int i = 0; i < normal_roots->length(); ++i) {
OopData data = normal_roots->at(i);
FakeOop fop(iter, data);
_roots->append(fop);
st.print(" root[%4d]: ", i);
print_oop_info_cr(&st, fop);
}
while (iter->has_next()) {
FakeOop fake_oop(iter, iter->next());
st.print(PTR_FORMAT ": @@ Object ", fake_oop.target_location());
print_oop_info_cr(&st, fake_oop, /*print_location=*/false);
LogStreamHandle(Trace, aot, map, oops) trace_st;
if (trace_st.is_enabled()) {
print_oop_details(fake_oop, &trace_st);
}
address next_fop = fop + fake_oop.size() * BytesPerWord;
log_as_hex(fop, next_fop, fake_oop.requested_addr(), /*is_heap=*/true);
fop = next_fop;
address fop = fake_oop.buffered_addr();
address end_fop = fop + fake_oop.size() * BytesPerWord;
log_as_hex(fop, end_fop, fake_oop.requested_addr(), /*is_heap=*/true);
}
delete _roots;
delete iter;
delete normal_roots;
}
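As a usage note derived from the log tag sets above (not stated elsewhere in this patch): with HotSpot unified logging, the per-object lines are emitted at -Xlog:aot+map=debug, and the per-field details printed by print_oop_details() at -Xlog:aot+map+oops=trace.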
void AOTMapLogger::print_oop_info_cr(outputStream* st, FakeOop fake_oop, bool print_requested_addr) {
void AOTMapLogger::print_oop_info_cr(outputStream* st, FakeOop fake_oop, bool print_location) {
if (fake_oop.is_null()) {
st->print_cr("null");
} else {
ResourceMark rm;
Klass* real_klass = fake_oop.real_klass();
address requested_addr = fake_oop.requested_addr();
if (print_requested_addr) {
st->print(PTR_FORMAT " ", p2i(requested_addr));
intptr_t target_location = fake_oop.target_location();
if (print_location) {
st->print(PTR_FORMAT " ", target_location);
}
if (UseCompressedOops) {
st->print("(0x%08x) ", fake_oop.as_narrow_oop_value());
@@ -919,7 +901,8 @@ void AOTMapLogger::print_oop_info_cr(outputStream* st, FakeOop fake_oop, bool pr
if (real_klass == vmClasses::String_klass()) {
st->print(" ");
fake_oop.as_string().print_on(st);
FakeString fake_str = fake_oop.as_string();
fake_str.print_on(st);
} else if (real_klass == vmClasses::Class_klass()) {
fake_oop.as_mirror().print_class_signature_on(st);
}
@@ -942,7 +925,7 @@ void AOTMapLogger::print_oop_details(FakeOop fake_oop, outputStream* st) {
fake_oop.as_type_array().print_elements_on(st);
} else if (real_klass->is_objArray_klass()) {
FakeObjArray fake_obj_array = fake_oop.as_obj_array();
bool is_logging_root_segment = _num_obj_arrays_logged < _num_root_segments;
bool is_logging_root_segment = fake_oop.is_root_segment();
for (int i = 0; i < fake_obj_array.length(); i++) {
FakeOop elm = fake_obj_array.obj_at(i);
@@ -954,7 +937,6 @@ void AOTMapLogger::print_oop_details(FakeOop fake_oop, outputStream* st) {
}
print_oop_info_cr(st, elm);
}
_num_obj_arrays_logged ++;
} else {
st->print_cr(" - fields (%zu words):", fake_oop.size());


@@ -32,7 +32,8 @@
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
class ArchiveHeapInfo;
class ArchiveMappedHeapInfo;
class ArchiveStreamedHeapInfo;
class CompileTrainingData;
class DumpRegion;
class FileMapInfo;
@@ -64,6 +65,7 @@ class AOTMapLogger : AllStatic {
MetaspaceObj::Type _type;
};
public:
// FakeOop and subtypes
class FakeOop;
class FakeMirror;
@@ -71,15 +73,48 @@ class AOTMapLogger : AllStatic {
class FakeString;
class FakeTypeArray;
#if INCLUDE_CDS_JAVA_HEAP
struct OopData {
address _buffered_addr;
address _requested_addr;
intptr_t _target_location;
uint32_t _narrow_location;
oopDesc* _raw_oop;
Klass* _klass;
size_t _size;
bool _is_root_segment;
};
class OopDataIterator : public CHeapObj<mtClassShared> {
protected:
OopData null_data() {
return { nullptr,
nullptr,
0,
0,
nullptr,
nullptr,
0,
false };
}
public:
virtual bool has_next() = 0;
virtual OopData next() = 0;
virtual OopData obj_at(narrowOop* p) = 0;
virtual OopData obj_at(oop* p) = 0;
virtual GrowableArrayCHeap<OopData, mtClass>* roots() = 0;
virtual ~OopDataIterator() {}
};
#endif
private:
class RequestedMetadataAddr;
class RuntimeGatherArchivedMetaspaceObjs;
static bool _is_logging_at_bootstrap;
static bool _is_runtime_logging;
static size_t _num_root_segments;
static size_t _num_obj_arrays_logged;
static GrowableArrayCHeap<FakeOop, mtClass>* _roots;
static ArchiveHeapInfo* _dumptime_heap_info;
static intx _buffer_to_requested_delta;
static intx _requested_to_mapped_metadata_delta;
@@ -114,12 +149,14 @@ class AOTMapLogger : AllStatic {
#if INCLUDE_CDS_JAVA_HEAP
static void dumptime_log_heap_region(ArchiveHeapInfo* heap_info);
static void dumptime_log_mapped_heap_region(ArchiveMappedHeapInfo* mapped_heap_info);
static void dumptime_log_streamed_heap_region(ArchiveStreamedHeapInfo* streamed_heap_info);
static void runtime_log_heap_region(FileMapInfo* mapinfo);
static void print_oop_info_cr(outputStream* st, FakeOop fake_oop, bool print_requested_addr = true);
static void print_oop_info_cr(outputStream* st, FakeOop fake_oop, bool print_location = true);
static void print_oop_details(FakeOop fake_oop, outputStream* st);
static void log_oops(address buf_start, address buf_end);
static void log_mapped_oops(address buf_start, address buf_end);
static void log_archived_objects(OopDataIterator* iter);
class ArchivedFieldPrinter; // to be replaced by ArchivedFieldPrinter2
#endif
@@ -128,7 +165,7 @@ public:
static bool is_logging_at_bootstrap() { return _is_logging_at_bootstrap; }
static void dumptime_log(ArchiveBuilder* builder, FileMapInfo* mapinfo,
ArchiveHeapInfo* heap_info,
ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info,
char* bitmap, size_t bitmap_size_in_bytes);
static void runtime_log(FileMapInfo* static_mapinfo, FileMapInfo* dynamic_mapinfo);
};
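To make the OopDataIterator contract declared above concrete, here is a hypothetical minimal implementation. This is a sketch only; the real iterators are provided by AOTMappedHeapWriter/AOTMappedHeapLoader and AOTStreamedHeapWriter/AOTStreamedHeapLoader:

class EmptyOopDataIterator : public AOTMapLogger::OopDataIterator {
public:
  bool has_next() override { return false; }
  AOTMapLogger::OopData next() override { return null_data(); }
  AOTMapLogger::OopData obj_at(narrowOop* p) override { return null_data(); }
  AOTMapLogger::OopData obj_at(oop* p) override { return null_data(); }
  GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
    // The caller (AOTMapLogger::log_archived_objects) deletes this array.
    return new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();
  }
};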


@@ -0,0 +1,847 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "cds/aotLogging.hpp"
#include "cds/aotMappedHeapLoader.inline.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logMessage.hpp"
#include "logging/logStream.hpp"
#include "logging/logTag.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "sanitizers/ub.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_G1GC
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapRegion.hpp"
#endif
#if INCLUDE_CDS_JAVA_HEAP
bool AOTMappedHeapLoader::_is_mapped = false;
bool AOTMappedHeapLoader::_is_loaded = false;
bool AOTMappedHeapLoader::_narrow_oop_base_initialized = false;
address AOTMappedHeapLoader::_narrow_oop_base;
int AOTMappedHeapLoader::_narrow_oop_shift;
// Support for loaded heap.
uintptr_t AOTMappedHeapLoader::_loaded_heap_bottom = 0;
uintptr_t AOTMappedHeapLoader::_loaded_heap_top = 0;
uintptr_t AOTMappedHeapLoader::_dumptime_base = UINTPTR_MAX;
uintptr_t AOTMappedHeapLoader::_dumptime_top = 0;
intx AOTMappedHeapLoader::_runtime_offset = 0;
bool AOTMappedHeapLoader::_loading_failed = false;
// Support for mapped heap.
uintptr_t AOTMappedHeapLoader::_mapped_heap_bottom = 0;
bool AOTMappedHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t AOTMappedHeapLoader::_mapped_heap_delta = 0;
// Heap roots
GrowableArrayCHeap<OopHandle, mtClassShared>* AOTMappedHeapLoader::_root_segments = nullptr;
int AOTMappedHeapLoader::_root_segment_max_size_elems;
MemRegion AOTMappedHeapLoader::_mapped_heap_memregion;
bool AOTMappedHeapLoader::_heap_pointers_need_patching;
// Every mapped region is offset by _mapped_heap_delta from its requested address.
// See FileMapInfo::heap_region_requested_address().
ATTRIBUTE_NO_UBSAN
void AOTMappedHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
assert(!_mapped_heap_relocation_initialized, "only once");
if (!UseCompressedOops) {
assert(dumptime_oop_shift == 0, "sanity");
}
assert(can_map(), "sanity");
init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
_mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
_mapped_heap_delta = delta;
_mapped_heap_relocation_initialized = true;
}
void AOTMappedHeapLoader::init_narrow_oop_decoding(address base, int shift) {
assert(!_narrow_oop_base_initialized, "only once");
_narrow_oop_base_initialized = true;
_narrow_oop_base = base;
_narrow_oop_shift = shift;
}
void AOTMappedHeapLoader::fixup_region() {
FileMapInfo* mapinfo = FileMapInfo::current_info();
if (is_mapped()) {
fixup_mapped_heap_region(mapinfo);
} else if (_loading_failed) {
fill_failed_loaded_heap();
}
}
// ------------------ Support for Region MAPPING -----------------------------------------
// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchCompressedEmbeddedPointers: public BitMapClosure {
narrowOop* _start;
public:
PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}
bool do_bit(size_t offset) {
narrowOop* p = _start + offset;
narrowOop v = *p;
assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
oop o = AOTMappedHeapLoader::decode_from_mapped_archive(v);
RawAccess<IS_NOT_NULL>::oop_store(p, o);
return true;
}
};
class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
narrowOop* _start;
uint32_t _delta;
public:
PatchCompressedEmbeddedPointersQuick(narrowOop* start, uint32_t delta) : _start(start), _delta(delta) {}
bool do_bit(size_t offset) {
narrowOop* p = _start + offset;
narrowOop v = *p;
assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
narrowOop new_v = CompressedOops::narrow_oop_cast(CompressedOops::narrow_oop_value(v) + _delta);
assert(!CompressedOops::is_null(new_v), "should never relocate to narrowOop(0)");
#ifdef ASSERT
oop o1 = AOTMappedHeapLoader::decode_from_mapped_archive(v);
oop o2 = CompressedOops::decode_not_null(new_v);
assert(o1 == o2, "quick delta must work");
#endif
RawAccess<IS_NOT_NULL>::oop_store(p, new_v);
return true;
}
};
class PatchUncompressedEmbeddedPointers: public BitMapClosure {
oop* _start;
intptr_t _delta;
public:
PatchUncompressedEmbeddedPointers(oop* start, intx runtime_offset) :
_start(start),
_delta(runtime_offset) {}
PatchUncompressedEmbeddedPointers(oop* start) :
_start(start),
_delta(AOTMappedHeapLoader::mapped_heap_delta()) {}
bool do_bit(size_t offset) {
oop* p = _start + offset;
intptr_t dumptime_oop = (intptr_t)((void*)*p);
assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
intptr_t runtime_oop = dumptime_oop + _delta;
RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
return true;
}
};
void AOTMappedHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
FileMapInfo* info,
MemRegion region) {
narrowOop dt_encoded_bottom = encoded_heap_region_dumptime_address(info);
narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
log_info(aot)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
(uint)dt_encoded_bottom, (uint)rt_encoded_bottom);
// Optimization: if dumptime shift is the same as runtime shift, we can perform a
// quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
narrowOop* patching_start = (narrowOop*)region.start() + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos();
if (_narrow_oop_shift == CompressedOops::shift()) {
uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
log_info(aot)("heap data relocation quick delta = 0x%x", quick_delta);
if (quick_delta == 0) {
log_info(aot)("heap data relocation unnecessary, quick_delta = 0");
} else {
PatchCompressedEmbeddedPointersQuick patcher(patching_start, quick_delta);
bm.iterate(&patcher);
}
} else {
log_info(aot)("heap data quick relocation not possible");
PatchCompressedEmbeddedPointers patcher(patching_start);
bm.iterate(&patcher);
}
}
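The quick path works because, when the shifts match, re-encoding every narrowOop reduces to a single 32-bit addition. A worked example with assumed values, for illustration only:

// dump time: base 0x1000, shift 0, region bottom at 0x2000
//   dt_encoded_bottom = (0x2000 - 0x1000) >> 0 = 0x1000
// run time:  base 0x4000, shift 0, region mapped at 0x7000
//   rt_encoded_bottom = (0x7000 - 0x4000) >> 0 = 0x3000
// quick_delta = 0x3000 - 0x1000 = 0x2000
// A stored narrowOop 0x1200 (dump-time object at 0x2200) becomes 0x3200,
// which decodes to 0x4000 + (0x3200 << 0) = 0x7200, the object's mapped address.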
// Patch all the non-null pointers that are embedded in the archived heap objects
// in this (mapped) region
void AOTMappedHeapLoader::patch_embedded_pointers(FileMapInfo* info,
MemRegion region, address oopmap,
size_t oopmap_size_in_bits) {
BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
if (UseCompressedOops) {
patch_compressed_embedded_pointers(bm, info, region);
} else {
PatchUncompressedEmbeddedPointers patcher((oop*)region.start() + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos());
bm.iterate(&patcher);
}
}
// ------------------ Support for Region LOADING -----------------------------------------
// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
struct LoadedArchiveHeapRegion {
int _region_index; // index for FileMapInfo::space_at(index)
size_t _region_size; // number of bytes in this region
uintptr_t _dumptime_base; // The dump-time (decoded) address of the first object in this region
intx _runtime_offset; // If an object's dump time address P is within this region, its
// runtime address is P + _runtime_offset
uintptr_t top() {
return _dumptime_base + _region_size;
}
};
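For example (addresses assumed for illustration): if _dumptime_base is 0x00007f0000000000 and allocate_loaded_archive_space() returns a buffer at 0x00000000c0000000, then _runtime_offset = 0xc0000000 - 0x7f0000000000 (a negative delta is fine, hence the signed intx), and every archived reference to a dump-time address P inside the region is translated to P + _runtime_offset.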
void AOTMappedHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
_dumptime_base = loaded_region->_dumptime_base;
_dumptime_top = loaded_region->top();
_runtime_offset = loaded_region->_runtime_offset;
}
bool AOTMappedHeapLoader::can_load() {
return Universe::heap()->can_load_archived_objects();
}
class AOTMappedHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
narrowOop* _start;
intx _offset;
uintptr_t _base;
uintptr_t _top;
public:
PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
: _start(start),
_offset(loaded_region->_runtime_offset),
_base(loaded_region->_dumptime_base),
_top(loaded_region->top()) {}
bool do_bit(size_t offset) {
assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
narrowOop* p = _start + offset;
narrowOop v = *p;
assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
uintptr_t o = cast_from_oop<uintptr_t>(AOTMappedHeapLoader::decode_from_archive(v));
assert(_base <= o && o < _top, "must be");
o += _offset;
AOTMappedHeapLoader::assert_in_loaded_heap(o);
RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
return true;
}
};
bool AOTMappedHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
MemRegion& archive_space) {
size_t total_bytes = 0;
FileMapRegion* r = mapinfo->region_at(AOTMetaspace::hp);
r->assert_is_heap_region();
if (r->used() == 0) {
return false;
}
assert(is_aligned(r->used(), HeapWordSize), "must be");
total_bytes += r->used();
loaded_region->_region_index = AOTMetaspace::hp;
loaded_region->_region_size = r->used();
loaded_region->_dumptime_base = (uintptr_t)heap_region_dumptime_address(mapinfo);
assert(is_aligned(total_bytes, HeapWordSize), "must be");
size_t word_size = total_bytes / HeapWordSize;
HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
if (buffer == nullptr) {
return false;
}
archive_space = MemRegion(buffer, word_size);
_loaded_heap_bottom = (uintptr_t)archive_space.start();
_loaded_heap_top = _loaded_heap_bottom + total_bytes;
loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;
return true;
}
bool AOTMappedHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
uintptr_t load_address) {
uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
if (bitmap_base == 0) {
_loading_failed = true;
return false; // OOM or CRC error
}
FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
// There's no easy way to free the buffer, so we will fill it with zero later
// in fill_failed_loaded_heap(), and it will eventually be GC'ed.
log_warning(aot)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
_loading_failed = true;
return false;
}
assert(r->mapped_base() == (char*)load_address, "sanity");
log_info(aot)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
" size %6zu delta %zd",
loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
loaded_region->_region_size, loaded_region->_runtime_offset);
uintptr_t oopmap = bitmap_base + r->oopmap_offset();
BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());
if (UseCompressedOops) {
PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos(), loaded_region);
bm.iterate(&patcher);
} else {
PatchUncompressedEmbeddedPointers patcher((oop*)load_address + FileMapInfo::current_info()->mapped_heap()->oopmap_start_pos(), loaded_region->_runtime_offset);
bm.iterate(&patcher);
}
return true;
}
bool AOTMappedHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
assert(can_load(), "loading the heap must be supported");
init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());
LoadedArchiveHeapRegion loaded_region;
memset(&loaded_region, 0, sizeof(loaded_region));
MemRegion archive_space;
if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
return false;
}
if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
assert(_loading_failed, "must be");
return false;
}
init_loaded_heap_relocation(&loaded_region);
_is_loaded = true;
return true;
}
objArrayOop AOTMappedHeapLoader::root_segment(int segment_idx) {
if (CDSConfig::is_dumping_heap()) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
} else {
assert(CDSConfig::is_using_archive(), "must be");
}
objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
assert(segment != nullptr, "should have been initialized");
return segment;
}
void AOTMappedHeapLoader::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
assert(_root_segment_max_size_elems > 0, "sanity");
// Try to avoid divisions for the common case.
if (idx < _root_segment_max_size_elems) {
seg_idx = 0;
int_idx = idx;
} else {
seg_idx = idx / _root_segment_max_size_elems;
int_idx = idx % _root_segment_max_size_elems;
}
assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
"sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}
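For example, with an assumed _root_segment_max_size_elems of 1000: idx = 250 stays in segment 0 at position 250 (the division-free common case), while idx = 2500 maps to seg_idx = 2, int_idx = 500, satisfying 2 * 1000 + 500 == 2500.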
void AOTMappedHeapLoader::add_root_segment(objArrayOop segment_oop) {
assert(segment_oop != nullptr, "must be");
assert(is_in_use(), "must be");
if (_root_segments == nullptr) {
_root_segments = new GrowableArrayCHeap<OopHandle, mtClassShared>(10);
}
_root_segments->push(OopHandle(Universe::vm_global(), segment_oop));
}
void AOTMappedHeapLoader::init_root_segment_sizes(int max_size_elems) {
_root_segment_max_size_elems = max_size_elems;
}
oop AOTMappedHeapLoader::get_root(int index) {
assert(!_root_segments->is_empty(), "must have loaded shared heap");
int seg_idx, int_idx;
get_segment_indexes(index, seg_idx, int_idx);
objArrayOop result = objArrayOop(root_segment(seg_idx));
return result->obj_at(int_idx);
}
void AOTMappedHeapLoader::clear_root(int index) {
int seg_idx, int_idx;
get_segment_indexes(index, seg_idx, int_idx);
root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
}
class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
HashTable<uintptr_t, bool>* _table;
public:
VerifyLoadedHeapEmbeddedPointers(HashTable<uintptr_t, bool>* table) : _table(table) {}
virtual void do_oop(narrowOop* p) {
// This should be called before the loaded region is modified, so all the embedded pointers
// must be null, or must point to a valid object in the loaded region.
narrowOop v = *p;
if (!CompressedOops::is_null(v)) {
oop o = CompressedOops::decode_not_null(v);
uintptr_t u = cast_from_oop<uintptr_t>(o);
AOTMappedHeapLoader::assert_in_loaded_heap(u);
guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
}
}
virtual void do_oop(oop* p) {
oop v = *p;
if (v != nullptr) {
uintptr_t u = cast_from_oop<uintptr_t>(v);
AOTMappedHeapLoader::assert_in_loaded_heap(u);
guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
}
}
};
void AOTMappedHeapLoader::finish_initialization(FileMapInfo* info) {
patch_heap_embedded_pointers(info);
if (is_loaded()) {
// These operations are needed only when the heap is loaded (not mapped).
finish_loaded_heap();
if (VerifyArchivedFields > 0) {
verify_loaded_heap();
}
}
if (is_in_use()) {
patch_native_pointers();
intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;
// The heap roots are stored in one or more segments that are laid out consecutively.
// The size of each segment (except for the last one) is max_size_in_{elems,bytes}.
HeapRootSegments segments = FileMapInfo::current_info()->mapped_heap()->root_segments();
init_root_segment_sizes(segments.max_size_in_elems());
intptr_t first_segment_addr = bottom + segments.base_offset();
for (size_t c = 0; c < segments.count(); c++) {
oop segment_oop = cast_to_oop(first_segment_addr + (c * segments.max_size_in_bytes()));
assert(segment_oop->is_objArray(), "Must be");
add_root_segment((objArrayOop)segment_oop);
}
StringTable::load_shared_strings_array();
}
}
void AOTMappedHeapLoader::finish_loaded_heap() {
HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
HeapWord* top = (HeapWord*)_loaded_heap_top;
MemRegion archive_space = MemRegion(bottom, top);
Universe::heap()->complete_loaded_archive_space(archive_space);
}
void AOTMappedHeapLoader::verify_loaded_heap() {
log_info(aot, heap)("Verify all oops and pointers in loaded heap");
ResourceMark rm;
HashTable<uintptr_t, bool> table;
VerifyLoadedHeapEmbeddedPointers verifier(&table);
HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
HeapWord* top = (HeapWord*)_loaded_heap_top;
for (HeapWord* p = bottom; p < top; ) {
oop o = cast_to_oop(p);
table.put(cast_from_oop<uintptr_t>(o), true);
p += o->size();
}
for (HeapWord* p = bottom; p < top; ) {
oop o = cast_to_oop(p);
o->oop_iterate(&verifier);
p += o->size();
}
}
void AOTMappedHeapLoader::fill_failed_loaded_heap() {
assert(_loading_failed, "must be");
if (_loaded_heap_bottom != 0) {
assert(_loaded_heap_top != 0, "must be");
HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
HeapWord* top = (HeapWord*)_loaded_heap_top;
Universe::heap()->fill_with_objects(bottom, top - bottom);
}
}
class PatchNativePointers: public BitMapClosure {
Metadata** _start;
public:
PatchNativePointers(Metadata** start) : _start(start) {}
bool do_bit(size_t offset) {
Metadata** p = _start + offset;
*p = (Metadata*)(address(*p) + AOTMetaspace::relocation_delta());
return true;
}
};
void AOTMappedHeapLoader::patch_native_pointers() {
if (AOTMetaspace::relocation_delta() == 0) {
return;
}
FileMapRegion* r = FileMapInfo::current_info()->region_at(AOTMetaspace::hp);
if (r->mapped_base() != nullptr && r->has_ptrmap()) {
log_info(aot, heap)("Patching native pointers in heap region");
BitMapView bm = FileMapInfo::current_info()->ptrmap_view(AOTMetaspace::hp);
PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->mapped_heap()->ptrmap_start_pos());
bm.iterate(&patcher);
}
}
// The actual address of this region during dump time.
address AOTMappedHeapLoader::heap_region_dumptime_address(FileMapInfo* info) {
FileMapRegion* r = info->region_at(AOTMetaspace::hp);
assert(CDSConfig::is_using_archive(), "runtime only");
assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
if (UseCompressedOops) {
return /*dumptime*/ (address)((uintptr_t)info->narrow_oop_base() + r->mapping_offset());
} else {
return heap_region_requested_address(info);
}
}
// The address where this region can be mapped into the runtime heap without
// patching any of the pointers that are embedded in this region.
address AOTMappedHeapLoader::heap_region_requested_address(FileMapInfo* info) {
assert(CDSConfig::is_using_archive(), "runtime only");
FileMapRegion* r = info->region_at(AOTMetaspace::hp);
assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
assert(can_use(), "cannot be used by AOTMappedHeapLoader::can_load() mode");
if (UseCompressedOops) {
// We can avoid relocation if each region's offset from the runtime CompressedOops::base()
// is the same as its offset from the CompressedOops::base() during dumptime.
// Note that CompressedOops::base() may be different between dumptime and runtime.
//
// Example:
// Dumptime base = 0x1000 and shift is 0. We have a region at address 0x2000. There's a
// narrowOop P stored in this region that points to an object at address 0x2200.
// P's encoded value is 0x1200.
//
// Runtime base = 0x4000 and shift is also 0. If we map this region at 0x5000, then
// the value P can remain 0x1200. The decoded address = (0x4000 + (0x1200 << 0)) = 0x5200,
// which is the runtime location of the referenced object.
return /*runtime*/ (address)((uintptr_t)CompressedOops::base() + r->mapping_offset());
} else {
// This was the hard-coded requested base address used at dump time. With uncompressed oops,
// the heap range is assigned by the OS so we will most likely have to relocate anyway, no matter
// what base address was picked at dump time.
return (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
}
}
bool AOTMappedHeapLoader::map_heap_region(FileMapInfo* info) {
if (map_heap_region_impl(info)) {
#ifdef ASSERT
// The "old" regions must be parsable -- we cannot have any unused space
// at the start of the lowest G1 region that contains archived objects.
assert(is_aligned(_mapped_heap_memregion.start(), G1HeapRegion::GrainBytes), "must be");
// Make sure we map at the very top of the heap - see comments in
// init_heap_region_relocation().
MemRegion heap_range = G1CollectedHeap::heap()->reserved();
assert(heap_range.contains(_mapped_heap_memregion), "must be");
address heap_end = (address)heap_range.end();
address mapped_heap_region_end = (address)_mapped_heap_memregion.end();
assert(heap_end >= mapped_heap_region_end, "must be");
assert(heap_end - mapped_heap_region_end < (intx)(G1HeapRegion::GrainBytes),
"must be at the top of the heap to avoid fragmentation");
#endif
set_mapped();
return true;
} else {
return false;
}
}
bool AOTMappedHeapLoader::map_heap_region_impl(FileMapInfo* info) {
assert(UseG1GC, "the following code assumes G1");
FileMapRegion* r = info->region_at(AOTMetaspace::hp);
size_t size = r->used();
if (size == 0) {
return false; // no archived java heap data
}
size_t word_size = size / HeapWordSize;
address requested_start = heap_region_requested_address(info);
aot_log_info(aot)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));
// allocate from java heap
HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size, (HeapWord*)requested_start);
if (start == nullptr) {
AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
return false;
}
_mapped_heap_memregion = MemRegion(start, word_size);
// Map the archived heap data. No need to call MemTracker::record_virtual_memory_tag()
// for mapped region as it is part of the reserved java heap, which is already recorded.
char* addr = (char*)_mapped_heap_memregion.start();
char* base;
if (AOTMetaspace::use_windows_memory_mapping() || UseLargePages) {
// With UseLargePages, memory mapping may fail on some OSes if the size is not
// large page aligned, so let's use read() instead. In this case, the memory region
// is already committed by G1 so we don't need to commit it again.
if (!info->read_region(AOTMetaspace::hp, addr,
align_up(_mapped_heap_memregion.byte_size(), os::vm_page_size()),
/* do_commit = */ !UseLargePages)) {
dealloc_heap_region(info);
aot_log_error(aot)("Failed to read archived heap region into " INTPTR_FORMAT, p2i(addr));
return false;
}
// The check for VerifySharedSpaces is already done inside read_region()
base = addr;
} else {
base = info->map_heap_region(r, addr, _mapped_heap_memregion.byte_size());
if (base == nullptr || base != addr) {
dealloc_heap_region(info);
AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to map at required address in java heap. "
INTPTR_FORMAT ", size = %zu bytes",
p2i(addr), _mapped_heap_memregion.byte_size());
return false;
}
if (VerifySharedSpaces && !r->check_region_crc(base)) {
dealloc_heap_region(info);
AOTMetaspace::report_loading_error("UseSharedSpaces: mapped heap region is corrupt");
return false;
}
}
r->set_mapped_base(base);
// If the requested range is different from the range allocated by GC, then
// the pointers need to be patched.
address mapped_start = (address) _mapped_heap_memregion.start();
ptrdiff_t delta = mapped_start - requested_start;
if (UseCompressedOops &&
(info->narrow_oop_mode() != CompressedOops::mode() ||
info->narrow_oop_shift() != CompressedOops::shift())) {
_heap_pointers_need_patching = true;
}
if (delta != 0) {
_heap_pointers_need_patching = true;
}
init_mapped_heap_info(mapped_start, delta, info->narrow_oop_shift());
if (_heap_pointers_need_patching) {
char* bitmap_base = info->map_bitmap_region();
if (bitmap_base == nullptr) {
AOTMetaspace::report_loading_error("CDS heap cannot be used because bitmap region cannot be mapped");
dealloc_heap_region(info);
_heap_pointers_need_patching = false;
return false;
}
}
aot_log_info(aot)("Heap data mapped at " INTPTR_FORMAT ", size = %8zu bytes",
p2i(mapped_start), _mapped_heap_memregion.byte_size());
aot_log_info(aot)("CDS heap data relocation delta = %zd bytes", delta);
return true;
}
narrowOop AOTMappedHeapLoader::encoded_heap_region_dumptime_address(FileMapInfo* info) {
assert(CDSConfig::is_using_archive(), "runtime only");
assert(UseCompressedOops, "sanity");
FileMapRegion* r = info->region_at(AOTMetaspace::hp);
return CompressedOops::narrow_oop_cast(r->mapping_offset() >> info->narrow_oop_shift());
}
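The mapping_offset >> narrow_oop_shift expression follows from heap_region_dumptime_address() above: the region's dump-time bottom was narrow_oop_base + mapping_offset, so its encoded value is ((base + mapping_offset) - base) >> shift, i.e. mapping_offset >> shift.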
void AOTMappedHeapLoader::patch_heap_embedded_pointers(FileMapInfo* info) {
if (!info->is_mapped() || !_heap_pointers_need_patching) {
return;
}
char* bitmap_base = info->map_bitmap_region();
assert(bitmap_base != nullptr, "must have already been mapped");
FileMapRegion* r = info->region_at(AOTMetaspace::hp);
patch_embedded_pointers(
info, _mapped_heap_memregion,
(address)(info->region_at(AOTMetaspace::bm)->mapped_base()) + r->oopmap_offset(),
r->oopmap_size_in_bits());
}
void AOTMappedHeapLoader::fixup_mapped_heap_region(FileMapInfo* info) {
if (is_mapped()) {
assert(!_mapped_heap_memregion.is_empty(), "sanity");
// Populate the archive regions' G1BlockOffsetTables. That ensures
// fast G1BlockOffsetTable::block_start operations for any given address
// within the archive regions when trying to find the start of an object
// (e.g. during card table scanning).
G1CollectedHeap::heap()->populate_archive_regions_bot(_mapped_heap_memregion);
}
}
// dealloc the archive regions from java heap
void AOTMappedHeapLoader::dealloc_heap_region(FileMapInfo* info) {
G1CollectedHeap::heap()->dealloc_archive_regions(_mapped_heap_memregion);
}
AOTMapLogger::OopDataIterator* AOTMappedHeapLoader::oop_iterator(FileMapInfo* info, address buffer_start, address buffer_end) {
class MappedLoaderOopIterator : public AOTMapLogger::OopDataIterator {
private:
address _current;
address _next;
address _buffer_start;
address _buffer_end;
uint64_t _buffer_start_narrow_oop;
intptr_t _buffer_to_requested_delta;
int _requested_shift;
size_t _num_root_segments;
size_t _num_obj_arrays_logged;
public:
MappedLoaderOopIterator(address buffer_start,
address buffer_end,
uint64_t buffer_start_narrow_oop,
intptr_t buffer_to_requested_delta,
int requested_shift,
size_t num_root_segments)
: _current(nullptr),
_next(buffer_start),
_buffer_start(buffer_start),
_buffer_end(buffer_end),
_buffer_start_narrow_oop(buffer_start_narrow_oop),
_buffer_to_requested_delta(buffer_to_requested_delta),
_requested_shift(requested_shift),
_num_root_segments(num_root_segments),
_num_obj_arrays_logged(0) {
}
AOTMapLogger::OopData capture(address buffered_addr) {
oopDesc* raw_oop = (oopDesc*)buffered_addr;
size_t size = raw_oop->size();
address requested_addr = buffered_addr + _buffer_to_requested_delta;
intptr_t target_location = intptr_t(requested_addr);
uint64_t pd = (uint64_t)(pointer_delta(buffered_addr, _buffer_start, 1));
uint32_t narrow_location = checked_cast<uint32_t>(_buffer_start_narrow_oop + (pd >> _requested_shift));
Klass* klass = raw_oop->klass();
return { buffered_addr,
requested_addr,
target_location,
narrow_location,
raw_oop,
klass,
size,
false };
}
bool has_next() override {
return _next < _buffer_end;
}
AOTMapLogger::OopData next() override {
_current = _next;
AOTMapLogger::OopData result = capture(_current);
if (result._klass->is_objArray_klass()) {
result._is_root_segment = _num_obj_arrays_logged++ < _num_root_segments;
}
_next = _current + result._size * BytesPerWord;
return result;
}
AOTMapLogger::OopData obj_at(narrowOop* addr) override {
uint64_t n = (uint64_t)(*addr);
if (n == 0) {
return null_data();
} else {
precond(n >= _buffer_start_narrow_oop);
address buffer_addr = _buffer_start + ((n - _buffer_start_narrow_oop) << _requested_shift);
return capture(buffer_addr);
}
}
AOTMapLogger::OopData obj_at(oop* addr) override {
address requested_value = cast_from_oop<address>(*addr);
if (requested_value == nullptr) {
return null_data();
} else {
address buffer_addr = requested_value - _buffer_to_requested_delta;
return capture(buffer_addr);
}
}
GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
return new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();
}
};
FileMapRegion* r = info->region_at(AOTMetaspace::hp);
address requested_base = UseCompressedOops ? (address)info->narrow_oop_base() : heap_region_requested_address(info);
address requested_start = requested_base + r->mapping_offset();
int requested_shift = info->narrow_oop_shift();
intptr_t buffer_to_requested_delta = requested_start - buffer_start;
uint64_t buffer_start_narrow_oop = 0xdeadbeed;
if (UseCompressedOops) {
buffer_start_narrow_oop = (uint64_t)(pointer_delta(requested_start, requested_base, 1)) >> requested_shift;
assert(buffer_start_narrow_oop < 0xffffffff, "sanity");
}
return new MappedLoaderOopIterator(buffer_start,
buffer_end,
buffer_start_narrow_oop,
buffer_to_requested_delta,
requested_shift,
info->mapped_heap()->root_segments().count());
}
#endif // INCLUDE_CDS_JAVA_HEAP


@@ -22,22 +22,27 @@
*
*/
#ifndef SHARE_CDS_ARCHIVEHEAPLOADER_HPP
#define SHARE_CDS_ARCHIVEHEAPLOADER_HPP
#ifndef SHARE_CDS_AOTMAPPEDHEAPLOADER_HPP
#define SHARE_CDS_AOTMAPPEDHEAPLOADER_HPP
#include "cds/aotMapLogger.hpp"
#include "gc/shared/gc_globals.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "memory/memRegion.hpp"
#include "oops/oopHandle.hpp"
#include "oops/oopsHierarchy.hpp"
#include "runtime/globals.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
class FileMapInfo;
struct LoadedArchiveHeapRegion;
class ArchiveHeapLoader : AllStatic {
class AOTMappedHeapLoader : AllStatic {
friend class AOTMapLogger;
public:
// At runtime, the heap region in the CDS archive can be used in two different ways,
// depending on the GC type:
@@ -55,7 +60,6 @@ public:
// Can this VM load the objects from archived heap region into the heap at start-up?
static bool can_load() NOT_CDS_JAVA_HEAP_RETURN_(false);
static void finish_initialization() NOT_CDS_JAVA_HEAP_RETURN;
static bool is_loaded() {
CDS_JAVA_HEAP_ONLY(return _is_loaded;)
NOT_CDS_JAVA_HEAP(return false;)
@@ -81,6 +85,8 @@ public:
NOT_CDS_JAVA_HEAP_RETURN_(false);
}
static void finish_initialization(FileMapInfo* info) NOT_CDS_JAVA_HEAP_RETURN;
// NarrowOops stored in the CDS archive may use a different encoding scheme
// than CompressedOops::{base,shift} -- see FileMapInfo::map_heap_region_impl.
// To decode them, do not use CompressedOops::decode_not_null. Use this
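A worked example with assumed values: if the archive was encoded with _narrow_oop_base = 0x1000 and _narrow_oop_shift = 3, a stored narrowOop v = 0x200 decodes as 0x1000 + (0x200 << 3) = 0x2000; CompressedOops::decode_not_null() would instead apply the runtime base/shift and yield a wrong address whenever the two encodings differ.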
@@ -127,6 +133,13 @@ private:
static ptrdiff_t _mapped_heap_delta;
static bool _mapped_heap_relocation_initialized;
// Heap roots
static GrowableArrayCHeap<OopHandle, mtClassShared>* _root_segments;
static int _root_segment_max_size_elems;
static MemRegion _mapped_heap_memregion;
static bool _heap_pointers_need_patching;
static void init_narrow_oop_decoding(address base, int shift);
static bool init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
MemRegion& archive_space);
@@ -141,20 +154,40 @@ private:
return (_loaded_heap_bottom <= o && o < _loaded_heap_top);
}
static objArrayOop root_segment(int segment_idx);
static void get_segment_indexes(int idx, int& seg_idx, int& int_idx);
static void add_root_segment(objArrayOop segment_oop);
static void init_root_segment_sizes(int max_size_elems);
template<bool IS_MAPPED>
inline static oop decode_from_archive_impl(narrowOop v) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
class PatchLoadedRegionPointers;
class PatchUncompressedLoadedRegionPointers;
static address heap_region_dumptime_address(FileMapInfo* info);
static address heap_region_requested_address(FileMapInfo* info);
static bool map_heap_region_impl(FileMapInfo* info);
static narrowOop encoded_heap_region_dumptime_address(FileMapInfo* info);
static void patch_heap_embedded_pointers(FileMapInfo* info);
static void fixup_mapped_heap_region(FileMapInfo* info);
static void dealloc_heap_region(FileMapInfo* info);
public:
static bool map_heap_region(FileMapInfo* info);
static bool load_heap_region(FileMapInfo* mapinfo);
static void assert_in_loaded_heap(uintptr_t o) {
assert(is_in_loaded_heap(o), "must be");
}
static oop get_root(int index);
static void clear_root(int index);
static AOTMapLogger::OopDataIterator* oop_iterator(FileMapInfo* info, address buffer_start, address buffer_end);
#endif // INCLUDE_CDS_JAVA_HEAP
};
#endif // SHARE_CDS_ARCHIVEHEAPLOADER_HPP
#endif // SHARE_CDS_AOTMAPPEDHEAPLOADER_HPP


@@ -22,10 +22,10 @@
*
*/
#ifndef SHARE_CDS_ARCHIVEHEAPLOADER_INLINE_HPP
#define SHARE_CDS_ARCHIVEHEAPLOADER_INLINE_HPP
#ifndef SHARE_CDS_AOTMAPPEDHEAPLOADER_INLINE_HPP
#define SHARE_CDS_AOTMAPPEDHEAPLOADER_INLINE_HPP
#include "cds/archiveHeapLoader.hpp"
#include "cds/aotMappedHeapLoader.hpp"
#include "oops/compressedOops.inline.hpp"
#include "utilities/align.hpp"
@@ -33,7 +33,7 @@
#if INCLUDE_CDS_JAVA_HEAP
template<bool IS_MAPPED>
inline oop ArchiveHeapLoader::decode_from_archive_impl(narrowOop v) {
inline oop AOTMappedHeapLoader::decode_from_archive_impl(narrowOop v) {
assert(!CompressedOops::is_null(v), "narrow oop value can never be zero");
assert(_narrow_oop_base_initialized, "relocation information must have been initialized");
uintptr_t p = ((uintptr_t)_narrow_oop_base) + ((uintptr_t)v << _narrow_oop_shift);
@@ -49,14 +49,14 @@ inline oop ArchiveHeapLoader::decode_from_archive_impl(narrowOop v) {
return result;
}
inline oop ArchiveHeapLoader::decode_from_archive(narrowOop v) {
inline oop AOTMappedHeapLoader::decode_from_archive(narrowOop v) {
return decode_from_archive_impl<false>(v);
}
inline oop ArchiveHeapLoader::decode_from_mapped_archive(narrowOop v) {
inline oop AOTMappedHeapLoader::decode_from_mapped_archive(narrowOop v) {
return decode_from_archive_impl<true>(v);
}
#endif
#endif // SHARE_CDS_ARCHIVEHEAPLOADER_INLINE_HPP
#endif // SHARE_CDS_AOTMAPPEDHEAPLOADER_INLINE_HPP


@@ -22,16 +22,18 @@
*
*/
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/modules.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
@@ -51,24 +53,25 @@
#if INCLUDE_CDS_JAVA_HEAP
GrowableArrayCHeap<u1, mtClassShared>* ArchiveHeapWriter::_buffer = nullptr;
GrowableArrayCHeap<u1, mtClassShared>* AOTMappedHeapWriter::_buffer = nullptr;
// The following are offsets from buffer_bottom()
size_t ArchiveHeapWriter::_buffer_used;
size_t AOTMappedHeapWriter::_buffer_used;
// Heap root segments
HeapRootSegments ArchiveHeapWriter::_heap_root_segments;
HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;
address ArchiveHeapWriter::_requested_bottom;
address ArchiveHeapWriter::_requested_top;
address AOTMappedHeapWriter::_requested_bottom;
address AOTMappedHeapWriter::_requested_top;
GrowableArrayCHeap<ArchiveHeapWriter::NativePointerInfo, mtClassShared>* ArchiveHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* ArchiveHeapWriter::_source_objs;
GrowableArrayCHeap<ArchiveHeapWriter::HeapObjOrder, mtClassShared>* ArchiveHeapWriter::_source_objs_order;
GrowableArrayCHeap<AOTMappedHeapWriter::NativePointerInfo, mtClassShared>* AOTMappedHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* AOTMappedHeapWriter::_source_objs;
GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedHeapWriter::_source_objs_order;
ArchiveHeapWriter::BufferOffsetToSourceObjectTable*
ArchiveHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
AOTMappedHeapWriter::BufferOffsetToSourceObjectTable*
AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr;
typedef HashTable<
size_t, // offset of a filler from ArchiveHeapWriter::buffer_bottom()
@ -79,11 +82,12 @@ typedef HashTable<
static FillersTable* _fillers;
static int _num_native_ptrs = 0;
void ArchiveHeapWriter::init() {
void AOTMappedHeapWriter::init() {
if (CDSConfig::is_dumping_heap()) {
Universe::heap()->collect(GCCause::_java_lang_system_gc);
_buffer_offset_to_source_obj_table = new BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
_dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
_fillers = new FillersTable();
_requested_bottom = nullptr;
_requested_top = nullptr;
@ -95,17 +99,20 @@ void ArchiveHeapWriter::init() {
}
}
void ArchiveHeapWriter::delete_tables_with_raw_oops() {
void AOTMappedHeapWriter::delete_tables_with_raw_oops() {
delete _source_objs;
_source_objs = nullptr;
delete _dumped_interned_strings;
_dumped_interned_strings = nullptr;
}
void ArchiveHeapWriter::add_source_obj(oop src_obj) {
void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
_source_objs->append(src_obj);
}
void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
ArchiveHeapInfo* heap_info) {
void AOTMappedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
ArchiveMappedHeapInfo* heap_info) {
assert(CDSConfig::is_dumping_heap(), "sanity");
allocate_buffer();
copy_source_objs_to_buffer(roots);
@ -113,16 +120,16 @@ void ArchiveHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
relocate_embedded_oops(roots, heap_info);
}
bool ArchiveHeapWriter::is_too_large_to_archive(oop o) {
bool AOTMappedHeapWriter::is_too_large_to_archive(oop o) {
return is_too_large_to_archive(o->size());
}
bool ArchiveHeapWriter::is_string_too_large_to_archive(oop string) {
bool AOTMappedHeapWriter::is_string_too_large_to_archive(oop string) {
typeArrayOop value = java_lang_String::value_no_keepalive(string);
return is_too_large_to_archive(value);
}
bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
assert(size > 0, "no zero-size object");
assert(size * HeapWordSize > size, "no overflow");
static_assert(MIN_GC_REGION_ALIGNMENT > 0, "must be positive");
@ -135,20 +142,39 @@ bool ArchiveHeapWriter::is_too_large_to_archive(size_t size) {
}
}
// Keep track of the contents of the archived interned string table. This table
// is used only by CDSHeapVerifier.
void AOTMappedHeapWriter::add_to_dumped_interned_strings(oop string) {
assert_at_safepoint(); // DumpedInternedStrings uses raw oops
assert(!is_string_too_large_to_archive(string), "must be");
bool created;
_dumped_interned_strings->put_if_absent(string, true, &created);
if (created) {
// Prevent string deduplication from changing the value field to
// something not in the archive.
java_lang_String::set_deduplication_forbidden(string);
_dumped_interned_strings->maybe_grow();
}
}
bool AOTMappedHeapWriter::is_dumped_interned_string(oop o) {
return _dumped_interned_strings->get(o) != nullptr;
}
// Various lookup functions between source_obj, buffered_obj and requested_obj
bool ArchiveHeapWriter::is_in_requested_range(oop o) {
bool AOTMappedHeapWriter::is_in_requested_range(oop o) {
assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
address a = cast_from_oop<address>(o);
return (_requested_bottom <= a && a < _requested_top);
}
oop ArchiveHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
oop AOTMappedHeapWriter::requested_obj_from_buffer_offset(size_t offset) {
oop req_obj = cast_to_oop(_requested_bottom + offset);
assert(is_in_requested_range(req_obj), "must be");
return req_obj;
}
oop ArchiveHeapWriter::source_obj_to_requested_obj(oop src_obj) {
oop AOTMappedHeapWriter::source_obj_to_requested_obj(oop src_obj) {
assert(CDSConfig::is_dumping_heap(), "dump-time only");
HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
if (p != nullptr) {
@ -158,7 +184,7 @@ oop ArchiveHeapWriter::source_obj_to_requested_obj(oop src_obj) {
}
}
oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
oop AOTMappedHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
OopHandle* oh = _buffer_offset_to_source_obj_table->get(buffered_address_to_offset(buffered_addr));
if (oh != nullptr) {
return oh->resolve();
@ -167,7 +193,7 @@ oop ArchiveHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
}
}
Klass* ArchiveHeapWriter::real_klass_of_buffered_oop(address buffered_addr) {
Klass* AOTMappedHeapWriter::real_klass_of_buffered_oop(address buffered_addr) {
oop p = buffered_addr_to_source_obj(buffered_addr);
if (p != nullptr) {
return p->klass();
@ -179,7 +205,7 @@ Klass* ArchiveHeapWriter::real_klass_of_buffered_oop(address buffered_addr) {
}
}
size_t ArchiveHeapWriter::size_of_buffered_oop(address buffered_addr) {
size_t AOTMappedHeapWriter::size_of_buffered_oop(address buffered_addr) {
oop p = buffered_addr_to_source_obj(buffered_addr);
if (p != nullptr) {
return p->size();
@ -205,29 +231,29 @@ size_t ArchiveHeapWriter::size_of_buffered_oop(address buffered_addr) {
return 0;
}
address ArchiveHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
address AOTMappedHeapWriter::buffered_addr_to_requested_addr(address buffered_addr) {
return _requested_bottom + buffered_address_to_offset(buffered_addr);
}
address ArchiveHeapWriter::requested_address() {
address AOTMappedHeapWriter::requested_address() {
assert(_buffer != nullptr, "must be initialized");
return _requested_bottom;
}
void ArchiveHeapWriter::allocate_buffer() {
void AOTMappedHeapWriter::allocate_buffer() {
int initial_buffer_size = 100000;
_buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
_buffer_used = 0;
ensure_buffer_space(1); // so that buffer_bottom() works
}
void ArchiveHeapWriter::ensure_buffer_space(size_t min_bytes) {
void AOTMappedHeapWriter::ensure_buffer_space(size_t min_bytes) {
// We usually have very small heaps. If we get a huge one it's probably caused by a bug.
guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
_buffer->at_grow(to_array_index(min_bytes));
}
objArrayOop ArchiveHeapWriter::allocate_root_segment(size_t offset, int element_count) {
objArrayOop AOTMappedHeapWriter::allocate_root_segment(size_t offset, int element_count) {
HeapWord* mem = offset_to_buffered_address<HeapWord *>(offset);
memset(mem, 0, objArrayOopDesc::object_size(element_count));
@ -242,7 +268,7 @@ objArrayOop ArchiveHeapWriter::allocate_root_segment(size_t offset, int element_
return objArrayOop(cast_to_oop(mem));
}
void ArchiveHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
void AOTMappedHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop root) {
// Do not use arrayOop->obj_at_put(i, o) as arrayOop is outside the real heap!
if (UseCompressedOops) {
*segment->obj_at_addr<narrowOop>(index) = CompressedOops::encode(root);
@ -251,7 +277,7 @@ void ArchiveHeapWriter::root_segment_at_put(objArrayOop segment, int index, oop
}
}
void ArchiveHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
void AOTMappedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
// Depending on the number of classes we are archiving, a single roots array may be
// larger than MIN_GC_REGION_ALIGNMENT. Roots are allocated first in the buffer, which
// allows us to chop the large array into a series of "segments". Current layout
@ -324,7 +350,7 @@ static int oop_sorting_rank(oop o) {
}
}
int ArchiveHeapWriter::compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b) {
int AOTMappedHeapWriter::compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder* b) {
int rank_a = a->_rank;
int rank_b = b->_rank;
@ -336,7 +362,7 @@ int ArchiveHeapWriter::compare_objs_by_oop_fields(HeapObjOrder* a, HeapObjOrder*
}
}
void ArchiveHeapWriter::sort_source_objs() {
void AOTMappedHeapWriter::sort_source_objs() {
log_info(aot)("sorting heap objects");
int len = _source_objs->length();
_source_objs_order = new GrowableArrayCHeap<HeapObjOrder, mtClassShared>(len);
@ -352,7 +378,7 @@ void ArchiveHeapWriter::sort_source_objs() {
log_info(aot)("sorting heap objects done");
}
void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
// There could be multiple root segments, which we want to be aligned by region.
// Putting them ahead of objects makes sure we waste no space.
copy_roots_to_buffer(roots);
@ -379,12 +405,12 @@ void ArchiveHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtCla
_buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
}
size_t ArchiveHeapWriter::filler_array_byte_size(int length) {
size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
size_t byte_size = objArrayOopDesc::object_size(length) * HeapWordSize;
return byte_size;
}
int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
int AOTMappedHeapWriter::filler_array_length(size_t fill_bytes) {
assert(is_object_aligned(fill_bytes), "must be");
size_t elemSize = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
@ -400,7 +426,7 @@ int ArchiveHeapWriter::filler_array_length(size_t fill_bytes) {
return -1;
}
HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
HeapWord* AOTMappedHeapWriter::init_filler_array_at_buffer_top(int array_length, size_t fill_bytes) {
assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
Klass* oak = Universe::objectArrayKlass(); // already relocated to point to archived klass
HeapWord* mem = offset_to_buffered_address<HeapWord*>(_buffer_used);
@ -416,7 +442,7 @@ HeapWord* ArchiveHeapWriter::init_filler_array_at_buffer_top(int array_length, s
return mem;
}
void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
void AOTMappedHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
// We fill only with arrays (so we don't need to use a single HeapWord filler if the
// leftover space is smaller than a zero-sized array object). Therefore, we need to
// make sure there's enough space of min_filler_byte_size in the current region after
@ -449,7 +475,7 @@ void ArchiveHeapWriter::maybe_fill_gc_region_gap(size_t required_byte_size) {
}
}
size_t ArchiveHeapWriter::get_filler_size_at(address buffered_addr) {
size_t AOTMappedHeapWriter::get_filler_size_at(address buffered_addr) {
size_t* p = _fillers->get(buffered_address_to_offset(buffered_addr));
if (p != nullptr) {
assert(*p > 0, "filler must be larger than zero bytes");
@ -465,7 +491,7 @@ void update_buffered_object_field(address buffered_obj, int field_offset, T valu
*field_addr = value;
}
size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
assert(!is_too_large_to_archive(src_obj), "already checked");
size_t byte_size = src_obj->size() * HeapWordSize;
assert(byte_size > 0, "no zero-size objects");
@ -510,7 +536,7 @@ size_t ArchiveHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
return buffered_obj_offset;
}
void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
void AOTMappedHeapWriter::set_requested_address(ArchiveMappedHeapInfo* info) {
assert(!info->is_used(), "only set once");
size_t heap_region_byte_size = _buffer_used;
@ -541,12 +567,12 @@ void ArchiveHeapWriter::set_requested_address(ArchiveHeapInfo* info) {
info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
offset_to_buffered_address<HeapWord*>(_buffer_used)));
info->set_heap_root_segments(_heap_root_segments);
info->set_root_segments(_heap_root_segments);
}
// Oop relocation
template <typename T> T* ArchiveHeapWriter::requested_addr_to_buffered_addr(T* p) {
template <typename T> T* AOTMappedHeapWriter::requested_addr_to_buffered_addr(T* p) {
assert(is_in_requested_range(cast_to_oop(p)), "must be");
address addr = address(p);
@ -555,56 +581,44 @@ template <typename T> T* ArchiveHeapWriter::requested_addr_to_buffered_addr(T* p
return offset_to_buffered_address<T*>(offset);
}
template <typename T> oop ArchiveHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
template <typename T> oop AOTMappedHeapWriter::load_source_oop_from_buffer(T* buffered_addr) {
oop o = load_oop_from_buffer(buffered_addr);
assert(!in_buffer(cast_from_oop<address>(o)), "must point to source oop");
return o;
}
template <typename T> void ArchiveHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
oop request_oop) {
assert(is_in_requested_range(request_oop), "must be");
template <typename T> void AOTMappedHeapWriter::store_requested_oop_in_buffer(T* buffered_addr,
oop request_oop) {
assert(request_oop == nullptr || is_in_requested_range(request_oop), "must be");
store_oop_in_buffer(buffered_addr, request_oop);
}
inline void ArchiveHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
inline void AOTMappedHeapWriter::store_oop_in_buffer(oop* buffered_addr, oop requested_obj) {
*buffered_addr = requested_obj;
}
inline void ArchiveHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
narrowOop val = CompressedOops::encode_not_null(requested_obj);
inline void AOTMappedHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, oop requested_obj) {
narrowOop val = CompressedOops::encode(requested_obj);
*buffered_addr = val;
}
oop ArchiveHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
oop AOTMappedHeapWriter::load_oop_from_buffer(oop* buffered_addr) {
return *buffered_addr;
}
oop ArchiveHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
oop AOTMappedHeapWriter::load_oop_from_buffer(narrowOop* buffered_addr) {
return CompressedOops::decode(*buffered_addr);
}
template <typename T> void ArchiveHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap) {
oop source_referent = load_source_oop_from_buffer<T>(field_addr_in_buffer);
if (source_referent != nullptr) {
if (java_lang_Class::is_instance(source_referent)) {
Klass* k = java_lang_Class::as_Klass(source_referent);
if (RegeneratedClasses::has_been_regenerated(k)) {
source_referent = RegeneratedClasses::get_regenerated_object(k)->java_mirror();
}
// When the source object points to a "real" mirror, the buffered object should point
// to the "scratch" mirror, which has all unarchivable fields scrubbed (to be reinstated
// at run time).
source_referent = HeapShared::scratch_java_mirror(source_referent);
assert(source_referent != nullptr, "must be");
}
oop request_referent = source_obj_to_requested_obj(source_referent);
store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
template <typename T> void AOTMappedHeapWriter::relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap) {
oop request_referent = source_obj_to_requested_obj(source_referent);
store_requested_oop_in_buffer<T>(field_addr_in_buffer, request_referent);
if (request_referent != nullptr) {
mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
}
}
template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
template <typename T> void AOTMappedHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
T* request_p = (T*)(buffered_addr_to_requested_addr((address)buffered_addr));
address requested_region_bottom;
@ -620,7 +634,7 @@ template <typename T> void ArchiveHeapWriter::mark_oop_pointer(T* buffered_addr,
oopmap->set_bit(idx);
}
void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
void AOTMappedHeapWriter::update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass) {
assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
address buffered_addr = requested_addr_to_buffered_addr(cast_from_oop<address>(requested_obj));
@ -653,7 +667,7 @@ void ArchiveHeapWriter::update_header_for_requested_obj(oop requested_obj, oop s
fake_oop->set_mark(fake_oop->mark().set_age(0));
}
class ArchiveHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
class AOTMappedHeapWriter::EmbeddedOopRelocator: public BasicOopIterateClosure {
oop _src_obj;
address _buffered_obj;
CHeapBitMap* _oopmap;
@ -672,12 +686,9 @@ private:
template <class T> void do_oop_work(T *p) {
int field_offset = pointer_delta_as_int((char*)p, cast_from_oop<char*>(_src_obj));
T* field_addr = (T*)(_buffered_obj + field_offset);
if (_is_java_lang_ref && AOTReferenceObjSupport::skip_field(field_offset)) {
// Do not copy these fields. Set them to null
*field_addr = (T)0x0;
} else {
ArchiveHeapWriter::relocate_field_in_buffer<T>(field_addr, _oopmap);
}
oop referent = load_source_oop_from_buffer<T>(field_addr);
referent = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, referent);
AOTMappedHeapWriter::relocate_field_in_buffer<T>(field_addr, referent, _oopmap);
}
};
@ -693,8 +704,8 @@ static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bit
}
// Update all oop fields embedded in the buffered objects
void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
ArchiveHeapInfo* heap_info) {
void AOTMappedHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots,
ArchiveMappedHeapInfo* heap_info) {
size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
size_t heap_region_byte_size = _buffer_used;
heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);
@ -709,6 +720,7 @@ void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassSh
address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
EmbeddedOopRelocator relocator(src_obj, buffered_obj, heap_info->oopmap());
src_obj->oop_iterate(&relocator);
mark_native_pointers(src_obj);
};
// Relocate HeapShared::roots(), which is created in copy_roots_to_buffer() and
@ -721,15 +733,19 @@ void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassSh
address buffered_obj = offset_to_buffered_address<address>(seg_offset);
int length = _heap_root_segments.size_in_elems(seg_idx);
if (UseCompressedOops) {
for (int i = 0; i < length; i++) {
narrowOop* addr = (narrowOop*)(buffered_obj + objArrayOopDesc::obj_at_offset<narrowOop>(i));
relocate_field_in_buffer<narrowOop>(addr, heap_info->oopmap());
}
} else {
for (int i = 0; i < length; i++) {
oop* addr = (oop*)(buffered_obj + objArrayOopDesc::obj_at_offset<oop>(i));
relocate_field_in_buffer<oop>(addr, heap_info->oopmap());
size_t elem_size = UseCompressedOops ? sizeof(narrowOop) : sizeof(oop);
for (int i = 0; i < length; i++) {
// There is no source object; these are native oops - load, translate and
// write back
size_t elem_offset = objArrayOopDesc::base_offset_in_bytes() + elem_size * i;
HeapWord* elem_addr = (HeapWord*)(buffered_obj + elem_offset);
oop obj = NativeAccess<>::oop_load(elem_addr);
obj = HeapShared::maybe_remap_referent(false /* is_reference_field */, elem_offset, obj);
if (UseCompressedOops) {
relocate_field_in_buffer<narrowOop>((narrowOop*)elem_addr, obj, heap_info->oopmap());
} else {
relocate_field_in_buffer<oop>((oop*)elem_addr, obj, heap_info->oopmap());
}
}
}
@ -741,7 +757,7 @@ void ArchiveHeapWriter::relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassSh
log_bitmap_usage("ptrmap", heap_info->ptrmap(), total_bytes / sizeof(address));
}
void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
void AOTMappedHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
Metadata* ptr = src_obj->metadata_field_acquire(field_offset);
if (ptr != nullptr) {
NativePointerInfo info;
@ -753,7 +769,13 @@ void ArchiveHeapWriter::mark_native_pointer(oop src_obj, int field_offset) {
}
}
void ArchiveHeapWriter::compute_ptrmap(ArchiveHeapInfo* heap_info) {
void AOTMappedHeapWriter::mark_native_pointers(oop orig_obj) {
HeapShared::do_metadata_offsets(orig_obj, [&](int offset) {
mark_native_pointer(orig_obj, offset);
});
}
void AOTMappedHeapWriter::compute_ptrmap(ArchiveMappedHeapInfo* heap_info) {
int num_non_null_ptrs = 0;
Metadata** bottom = (Metadata**) _requested_bottom;
Metadata** top = (Metadata**) _requested_top; // exclusive
@ -800,4 +822,118 @@ void ArchiveHeapWriter::compute_ptrmap(ArchiveHeapInfo* heap_info) {
num_non_null_ptrs, size_t(heap_info->ptrmap()->size()));
}
AOTMapLogger::OopDataIterator* AOTMappedHeapWriter::oop_iterator(ArchiveMappedHeapInfo* heap_info) {
class MappedWriterOopIterator : public AOTMapLogger::OopDataIterator {
private:
address _current;
address _next;
address _buffer_start;
address _buffer_end;
uint64_t _buffer_start_narrow_oop;
intptr_t _buffer_to_requested_delta;
int _requested_shift;
size_t _num_root_segments;
size_t _num_obj_arrays_logged;
public:
MappedWriterOopIterator(address buffer_start,
address buffer_end,
uint64_t buffer_start_narrow_oop,
intptr_t buffer_to_requested_delta,
int requested_shift,
size_t num_root_segments)
: _current(nullptr),
_next(buffer_start),
_buffer_start(buffer_start),
_buffer_end(buffer_end),
_buffer_start_narrow_oop(buffer_start_narrow_oop),
_buffer_to_requested_delta(buffer_to_requested_delta),
_requested_shift(requested_shift),
_num_root_segments(num_root_segments),
_num_obj_arrays_logged(0) {
}
AOTMapLogger::OopData capture(address buffered_addr) {
oopDesc* raw_oop = (oopDesc*)buffered_addr;
size_t size = size_of_buffered_oop(buffered_addr);
address requested_addr = buffered_addr_to_requested_addr(buffered_addr);
intptr_t target_location = (intptr_t)requested_addr;
uint64_t pd = (uint64_t)(pointer_delta(buffered_addr, _buffer_start, 1));
uint32_t narrow_location = checked_cast<uint32_t>(_buffer_start_narrow_oop + (pd >> _requested_shift));
Klass* klass = real_klass_of_buffered_oop(buffered_addr);
return { buffered_addr,
requested_addr,
target_location,
narrow_location,
raw_oop,
klass,
size,
false };
}
bool has_next() override {
return _next < _buffer_end;
}
AOTMapLogger::OopData next() override {
_current = _next;
AOTMapLogger::OopData result = capture(_current);
if (result._klass->is_objArray_klass()) {
result._is_root_segment = _num_obj_arrays_logged++ < _num_root_segments;
}
_next = _current + result._size * BytesPerWord;
return result;
}
AOTMapLogger::OopData obj_at(narrowOop* addr) override {
uint64_t n = (uint64_t)(*addr);
if (n == 0) {
return null_data();
} else {
precond(n >= _buffer_start_narrow_oop);
address buffer_addr = _buffer_start + ((n - _buffer_start_narrow_oop) << _requested_shift);
return capture(buffer_addr);
}
}
AOTMapLogger::OopData obj_at(oop* addr) override {
address requested_value = cast_from_oop<address>(*addr);
if (requested_value == nullptr) {
return null_data();
} else {
address buffer_addr = requested_value - _buffer_to_requested_delta;
return capture(buffer_addr);
}
}
GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
return new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();
}
};
MemRegion r = heap_info->buffer_region();
address buffer_start = address(r.start());
address buffer_end = address(r.end());
address requested_base = UseCompressedOops ? (address)CompressedOops::base() : (address)AOTMappedHeapWriter::NOCOOPS_REQUESTED_BASE;
address requested_start = UseCompressedOops ? buffered_addr_to_requested_addr(buffer_start) : requested_base;
int requested_shift = CompressedOops::shift();
intptr_t buffer_to_requested_delta = requested_start - buffer_start;
uint64_t buffer_start_narrow_oop = 0xdeadbeed;
if (UseCompressedOops) {
buffer_start_narrow_oop = (uint64_t)(pointer_delta(requested_start, requested_base, 1)) >> requested_shift;
assert(buffer_start_narrow_oop < 0xffffffff, "sanity");
}
return new MappedWriterOopIterator(buffer_start,
buffer_end,
buffer_start_narrow_oop,
buffer_to_requested_delta,
requested_shift,
heap_info->root_segments().count());
}
#endif // INCLUDE_CDS_JAVA_HEAP
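
For intuition, the narrow-oop arithmetic in oop_iterator() above reduces to plain shifts and offsets. Below is a minimal, self-contained sketch with made-up example values (the real base and shift come from CompressedOops; all names here are illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical example values, for illustration only.
  const uintptr_t requested_base  = 0x0;         // stands in for CompressedOops::base()
  const int       requested_shift = 3;           // stands in for CompressedOops::shift()
  const uintptr_t requested_start = 0x70000000;  // requested address of the first buffered object

  // Narrow oop of the first buffered object, as computed at the end of oop_iterator():
  uint64_t buffer_start_narrow_oop =
      (uint64_t)(requested_start - requested_base) >> requested_shift;
  assert(buffer_start_narrow_oop == 0x0E000000);

  // An object at buffered offset 0x40 then encodes as the base narrow oop plus
  // the shifted offset, mirroring MappedWriterOopIterator::capture():
  size_t buffered_offset = 0x40;
  uint32_t narrow = (uint32_t)(buffer_start_narrow_oop + (buffered_offset >> requested_shift));
  assert(narrow == 0x0E000008);
  return 0;
}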

View File

@ -22,9 +22,10 @@
*
*/
#ifndef SHARE_CDS_ARCHIVEHEAPWRITER_HPP
#define SHARE_CDS_ARCHIVEHEAPWRITER_HPP
#ifndef SHARE_CDS_AOTMAPPEDHEAPWRITER_HPP
#define SHARE_CDS_AOTMAPPEDHEAPWRITER_HPP
#include "cds/aotMapLogger.hpp"
#include "cds/heapShared.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
@ -37,32 +38,25 @@
class MemRegion;
class ArchiveHeapInfo {
MemRegion _buffer_region; // Contains the archived objects to be written into the CDS archive.
CHeapBitMap _oopmap;
CHeapBitMap _ptrmap;
HeapRootSegments _heap_root_segments;
#if INCLUDE_CDS_JAVA_HEAP
class DumpedInternedStrings :
public ResizeableHashTable<oop, bool,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::string_oop_hash>
{
public:
ArchiveHeapInfo() : _buffer_region(), _oopmap(128, mtClassShared), _ptrmap(128, mtClassShared) {}
bool is_used() { return !_buffer_region.is_empty(); }
MemRegion buffer_region() { return _buffer_region; }
void set_buffer_region(MemRegion r) { _buffer_region = r; }
char* buffer_start() { return (char*)_buffer_region.start(); }
size_t buffer_byte_size() { return _buffer_region.byte_size(); }
CHeapBitMap* oopmap() { return &_oopmap; }
CHeapBitMap* ptrmap() { return &_ptrmap; }
void set_heap_root_segments(HeapRootSegments segments) { _heap_root_segments = segments; };
HeapRootSegments heap_root_segments() { return _heap_root_segments; }
DumpedInternedStrings(unsigned size, unsigned max_size) :
ResizeableHashTable<oop, bool,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::string_oop_hash>(size, max_size) {}
};
#if INCLUDE_CDS_JAVA_HEAP
class ArchiveHeapWriter : AllStatic {
// ArchiveHeapWriter manipulates three types of addresses:
class AOTMappedHeapWriter : AllStatic {
friend class HeapShared;
friend class AOTMappedHeapLoader;
// AOTMappedHeapWriter manipulates three types of addresses:
//
// "source" vs "buffered" vs "requested"
//
@ -117,6 +111,9 @@ public:
// Shenandoah heap region size can never be smaller than 256K.
static constexpr int MIN_GC_REGION_ALIGNMENT = 256 * K;
static const int INITIAL_TABLE_SIZE = 15889; // prime number
static const int MAX_TABLE_SIZE = 1000000;
private:
class EmbeddedOopRelocator;
struct NativePointerInfo {
@ -138,6 +135,7 @@ private:
static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
static DumpedInternedStrings *_dumped_interned_strings;
// We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap.
// See comments near the body of ArchiveHeapWriter::compare_objs_by_oop_fields().
@ -202,9 +200,10 @@ private:
static int filler_array_length(size_t fill_bytes);
static HeapWord* init_filler_array_at_buffer_top(int array_length, size_t fill_bytes);
static void set_requested_address(ArchiveHeapInfo* info);
static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveHeapInfo* info);
static void compute_ptrmap(ArchiveHeapInfo *info);
static void set_requested_address(ArchiveMappedHeapInfo* info);
static void mark_native_pointers(oop orig_obj);
static void relocate_embedded_oops(GrowableArrayCHeap<oop, mtClassShared>* roots, ArchiveMappedHeapInfo* info);
static void compute_ptrmap(ArchiveMappedHeapInfo *info);
static bool is_in_requested_range(oop o);
static oop requested_obj_from_buffer_offset(size_t offset);
@ -217,7 +216,7 @@ private:
template <typename T> static void store_requested_oop_in_buffer(T* buffered_addr, oop request_oop);
template <typename T> static T* requested_addr_to_buffered_addr(T* p);
template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer, CHeapBitMap* oopmap);
template <typename T> static void relocate_field_in_buffer(T* field_addr_in_buffer, oop source_referent, CHeapBitMap* oopmap);
template <typename T> static void mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap);
static void update_header_for_requested_obj(oop requested_obj, oop src_obj, Klass* src_klass);
@ -232,7 +231,9 @@ public:
static bool is_too_large_to_archive(size_t size);
static bool is_too_large_to_archive(oop obj);
static bool is_string_too_large_to_archive(oop string);
static void write(GrowableArrayCHeap<oop, mtClassShared>*, ArchiveHeapInfo* heap_info);
static bool is_dumped_interned_string(oop o);
static void add_to_dumped_interned_strings(oop string);
static void write(GrowableArrayCHeap<oop, mtClassShared>*, ArchiveMappedHeapInfo* heap_info);
static address requested_address(); // requested address of the lowest archived heap object
static size_t get_filler_size_at(address buffered_addr);
@ -242,6 +243,8 @@ public:
static address buffered_addr_to_requested_addr(address buffered_addr);
static Klass* real_klass_of_buffered_oop(address buffered_addr);
static size_t size_of_buffered_oop(address buffered_addr);
static AOTMapLogger::OopDataIterator* oop_iterator(ArchiveMappedHeapInfo* heap_info);
};
#endif // INCLUDE_CDS_JAVA_HEAP
#endif // SHARE_CDS_ARCHIVEHEAPWRITER_HPP
#endif // SHARE_CDS_AOTMAPPEDHEAPWRITER_HPP

View File

@ -30,11 +30,10 @@
#include "cds/aotLinkedClassBulkLoader.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMapLogger.hpp"
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/cds_globals.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsProtectionDomain.hpp"
@ -45,7 +44,7 @@
#include "cds/dynamicArchive.hpp"
#include "cds/filemap.hpp"
#include "cds/finalImageRecipes.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/lambdaFormInvokers.hpp"
#include "cds/lambdaProxyClassDictionary.hpp"
#include "classfile/classLoaderDataGraph.hpp"
@ -339,7 +338,10 @@ void AOTMetaspace::post_initialize(TRAPS) {
// Close any open file descriptors. However, mmap'ed pages will remain in memory.
static_mapinfo->close();
static_mapinfo->unmap_region(AOTMetaspace::bm);
if (HeapShared::is_loading() && HeapShared::is_loading_mapping_mode()) {
static_mapinfo->unmap_region(AOTMetaspace::bm);
}
if (dynamic_mapinfo != nullptr) {
dynamic_mapinfo->close();
@ -395,7 +397,7 @@ void AOTMetaspace::read_extra_data(JavaThread* current, const char* filename) {
CLEAR_PENDING_EXCEPTION;
} else {
#if INCLUDE_CDS_JAVA_HEAP
if (ArchiveHeapWriter::is_string_too_large_to_archive(str)) {
if (HeapShared::is_string_too_large_to_archive(str)) {
log_warning(aot, heap)("[line %d] extra interned string ignored; size too large: %d",
reader.last_line_no(), utf8_length);
continue;
@ -638,7 +640,8 @@ void AOTMetaspace::rewrite_bytecodes_and_calculate_fingerprints(Thread* thread,
class VM_PopulateDumpSharedSpace : public VM_Operation {
private:
ArchiveHeapInfo _heap_info;
ArchiveMappedHeapInfo _mapped_heap_info;
ArchiveStreamedHeapInfo _streamed_heap_info;
FileMapInfo* _map_info;
StaticArchiveBuilder& _builder;
@ -653,12 +656,13 @@ private:
public:
VM_PopulateDumpSharedSpace(StaticArchiveBuilder& b) :
VM_Operation(), _heap_info(), _map_info(nullptr), _builder(b) {}
VM_Operation(), _mapped_heap_info(), _streamed_heap_info(), _map_info(nullptr), _builder(b) {}
bool skip_operation() const { return false; }
VMOp_Type type() const { return VMOp_PopulateDumpSharedSpace; }
ArchiveHeapInfo* heap_info() { return &_heap_info; }
ArchiveMappedHeapInfo* mapped_heap_info() { return &_mapped_heap_info; }
ArchiveStreamedHeapInfo* streamed_heap_info() { return &_streamed_heap_info; }
FileMapInfo* map_info() const { return _map_info; }
void doit(); // outline because gdb sucks
bool allow_nested_vm_operations() const { return true; }
@ -1100,8 +1104,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_dumping_heap()) {
ArchiveHeapWriter::init();
HeapShared::init_heap_writer();
if (CDSConfig::is_dumping_full_module_graph()) {
ClassLoaderDataShared::ensure_module_entry_tables_exist();
HeapShared::reset_archived_object_states(CHECK);
@ -1124,9 +1127,11 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
// See discussion in JDK-8342481.
}
// Do this at the very end, when no Java code will be executed. Otherwise
// some new strings may be added to the intern table.
StringTable::allocate_shared_strings_array(CHECK);
if (HeapShared::is_writing_mapping_mode()) {
// Do this at the very end, when no Java code will be executed. Otherwise
// some new strings may be added to the intern table.
StringTable::allocate_shared_strings_array(CHECK);
}
} else {
log_info(aot)("Not dumping heap, reset CDSConfig::_is_using_optimized_module_handling");
CDSConfig::stop_using_optimized_module_handling();
@ -1147,7 +1152,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
CDSConfig::disable_dumping_aot_code();
}
bool status = write_static_archive(&builder, op.map_info(), op.heap_info());
bool status = write_static_archive(&builder, op.map_info(), op.mapped_heap_info(), op.streamed_heap_info());
if (status && CDSConfig::is_dumping_preimage_static_archive()) {
tty->print_cr("%s AOTConfiguration recorded: %s",
CDSConfig::has_temp_aot_config_file() ? "Temporary" : "", AOTConfiguration);
@ -1161,7 +1166,10 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS
}
}
bool AOTMetaspace::write_static_archive(ArchiveBuilder* builder, FileMapInfo* map_info, ArchiveHeapInfo* heap_info) {
bool AOTMetaspace::write_static_archive(ArchiveBuilder* builder,
FileMapInfo* map_info,
ArchiveMappedHeapInfo* mapped_heap_info,
ArchiveStreamedHeapInfo* streamed_heap_info) {
// relocate the data so that it can be mapped to AOTMetaspace::requested_base_address()
// without runtime relocation.
builder->relocate_to_requested();
@ -1170,7 +1178,7 @@ bool AOTMetaspace::write_static_archive(ArchiveBuilder* builder, FileMapInfo* ma
if (!map_info->is_open()) {
return false;
}
builder->write_archive(map_info, heap_info);
builder->write_archive(map_info, mapped_heap_info, streamed_heap_info);
return true;
}
@ -1344,7 +1352,7 @@ bool AOTMetaspace::try_link_class(JavaThread* current, InstanceKlass* ik) {
void VM_PopulateDumpSharedSpace::dump_java_heap_objects() {
if (CDSConfig::is_dumping_heap()) {
HeapShared::write_heap(&_heap_info);
HeapShared::write_heap(&_mapped_heap_info, &_streamed_heap_info);
} else {
CDSConfig::log_reasons_for_not_dumping_heap();
}
@ -1746,9 +1754,29 @@ MapArchiveResult AOTMetaspace::map_archives(FileMapInfo* static_mapinfo, FileMap
CompressedKlassPointers::establish_protection_zone(klass_range_start, prot_zone_size);
}
// map_or_load_heap_region() compares the current narrow oop and klass encodings
// with the archived ones, so it must be done after all encodings are determined.
static_mapinfo->map_or_load_heap_region();
if (static_mapinfo->can_use_heap_region()) {
if (static_mapinfo->object_streaming_mode()) {
HeapShared::initialize_loading_mode(HeapArchiveMode::_streaming);
} else {
// map_or_load_heap_region() compares the current narrow oop and klass encodings
// with the archived ones, so it must be done after all encodings are determined.
static_mapinfo->map_or_load_heap_region();
HeapShared::initialize_loading_mode(HeapArchiveMode::_mapping);
}
} else {
FileMapRegion* r = static_mapinfo->region_at(AOTMetaspace::hp);
if (r->used() > 0) {
if (static_mapinfo->object_streaming_mode()) {
AOTMetaspace::report_loading_error("Cannot use CDS heap data.");
} else {
if (!UseCompressedOops && !AOTMappedHeapLoader::can_map()) {
AOTMetaspace::report_loading_error("Cannot use CDS heap data. Selected GC not compatible -XX:-UseCompressedOops");
} else {
AOTMetaspace::report_loading_error("Cannot use CDS heap data. UseEpsilonGC, UseG1GC, UseSerialGC, UseParallelGC, or UseShenandoahGC are required.");
}
}
}
}
}
#endif // _LP64
log_info(aot)("initial optimized module handling: %s", CDSConfig::is_using_optimized_module_handling() ? "enabled" : "disabled");
@ -2081,11 +2109,11 @@ void AOTMetaspace::initialize_shared_spaces() {
ReadClosure rc(&array, (intptr_t)SharedBaseAddress);
serialize(&rc);
// Finish up archived heap initialization. These must be
// done after ReadClosure.
static_mapinfo->patch_heap_embedded_pointers();
ArchiveHeapLoader::finish_initialization();
// Finish initializing the heap dump mode used in the archive
// Heap initialization can be done only after vtables are initialized by ReadClosure.
HeapShared::finalize_initialization(static_mapinfo);
Universe::load_archived_object_instances();
AOTCodeCache::initialize();
if (dynamic_mapinfo != nullptr) {
@ -2138,7 +2166,9 @@ void AOTMetaspace::initialize_shared_spaces() {
CountSharedSymbols cl;
SymbolTable::shared_symbols_do(&cl);
tty->print_cr("Number of shared symbols: %zu", cl.total());
tty->print_cr("Number of shared strings: %zu", StringTable::shared_entry_count());
if (HeapShared::is_loading_mapping_mode()) {
tty->print_cr("Number of shared strings: %zu", StringTable::shared_entry_count());
}
tty->print_cr("VM version: %s\r\n", static_mapinfo->vm_version());
if (FileMapInfo::current_info() == nullptr || _archive_loading_failed) {
tty->print_cr("archive is invalid");

View File

@ -33,7 +33,8 @@
#include "utilities/macros.hpp"
class ArchiveBuilder;
class ArchiveHeapInfo;
class ArchiveMappedHeapInfo;
class ArchiveStreamedHeapInfo;
class FileMapInfo;
class Method;
class outputStream;
@ -184,7 +185,10 @@ public:
private:
static void read_extra_data(JavaThread* current, const char* filename) NOT_CDS_RETURN;
static void fork_and_dump_final_static_archive(TRAPS);
static bool write_static_archive(ArchiveBuilder* builder, FileMapInfo* map_info, ArchiveHeapInfo* heap_info);
static bool write_static_archive(ArchiveBuilder* builder,
FileMapInfo* map_info,
ArchiveMappedHeapInfo* mapped_heap_info,
ArchiveStreamedHeapInfo* streamed_heap_info);
static FileMapInfo* open_static_archive();
static FileMapInfo* open_dynamic_archive();
// use_requested_addr: If true (default), attempt to map at the address the

View File

@ -153,6 +153,9 @@ void AOTReferenceObjSupport::stabilize_cached_reference_objects(TRAPS) {
_keep_alive_objs_array = OopHandle(Universe::vm_global(), result.get_oop());
}
// Trigger a GC to prune eligible referents that were not kept alive
Universe::heap()->collect(GCCause::_java_lang_system_gc);
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,245 @@
/*
* Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_CDS_AOTSTREAMEDHEAPLOADER_HPP
#define SHARE_CDS_AOTSTREAMEDHEAPLOADER_HPP
#include "cds/aotMapLogger.hpp"
#include "cds/filemap.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.hpp"
#if INCLUDE_CDS_JAVA_HEAP
// The streaming archive heap loader loads Java objects using normal allocations. It requires the objects
// to already be laid out in DFS order at dump time, given the set of roots into the archived heap.
// Because the objects are in DFS order, walking them linearly through the archive is equivalent to
// performing a DFS traversal, but without pushing and popping anything.
//
// The advantage of this pre-ordering, other than the obvious locality improvement, is that we can have
// a separate thread, the AOTThread, perform this walk, in a way that allows us to split the archived
// heap into three separate zones. The first zone contains objects that have been transitively materialized,
// the second zone contains objects that are currently being materialized, and the last zone contains
// objects that have not been, and are not about to be, touched by the AOT thread.
// Whenever a new root is traversed by the AOT thread, the zones are shifted atomically under a lock.
//
// Visualization of the three zones:
//
// +--------------------------------------+-------------------------+----------------------------------+
// |      transitively materialized       | currently materializing |       not yet materialized       |
// +--------------------------------------+-------------------------+----------------------------------+
//
// Splitting the memory into these three zones allows the bootstrapping thread, and potentially other
// threads, to traverse a root under a lock and know how to coordinate with the concurrent AOT thread.
// Whenever the traversal finds an object in the "transitively materialized" zone, we know that object
// needs no processing at all. As for the "currently materializing" zone, we know that if we just stay
// out of the way and let the AOT thread finish its current root, the transitive closure of such objects
// will be materialized; the AOT thread can materialize faster than the rest, as it doesn't need to
// perform any traversal. Finally, for objects in the "not yet materialized" zone, we know that we can
// trace through them without stepping on the feet of the AOT thread, which has published that it won't
// be tracing anything in there.
//
// What we get from this is fast iterative traversal by the AOT thread (IterativeObjectLoader),
// while allowing laziness and concurrency with the rest of the program (TracingObjectLoader).
// This way the AOT thread can take the bulk of the work of materializing the Java objects off
// the critical bootstrapping thread.
//
// When we start materializing objects, we have not yet come to the point in the bootstrapping where
// GC is allowed. This is a double-edged sword. On the one hand, we can materialize objects faster
// when we know there is no GC to coordinate with; on the other hand, if we need to perform
// a GC when allocating memory for archived objects, we will bring down the entire JVM. To deal with this,
// the AOT thread asks the GC for a budget of bytes it is allowed to allocate before GC is allowed.
// When we get to the point in the bootstrapping where GC is allowed, we resume materializing objects
// that didn't fit in the budget. Before we let the application run, we force materialization of any
// remaining objects that have not been materialized by the AOT thread yet, so that we don't get
// surprising OOMs due to object materialization while the program is running.
//
// The object format of the archived heap is similar to that of a normal object. However, references
// are encoded as DFS indices, which ultimately map to the object's position in the buffer, since the
// objects are laid out in DFS order. The DFS indices start at 1 for the first object, so the number 0
// represents null. The DFS index is the core identifier of an object in this approach. From this index
// it is possible to find the offset of the archived object in the buffer, as well as the mapping to a
// Java heap object once it has been materialized (a small decoding sketch follows at the end of this
// comment).
//
// The table mapping DFS indices to Java heap objects is filled in when an object is allocated.
// Materializing objects involves allocating the object, initializing it, and linking it with other
// objects. Since linking the object requires whatever is being referenced to be at least allocated,
// the iterative traversal will first allocate all of the objects in the zone being worked on, and then
// perform initialization and linking in a second pass. What these passes have in common is that they
// are trivially parallelizable, should we ever need to do that. The tracing materialization links
// objects when going "back" in the DFS traversal.
//
// The forwarding information for the mechanism contains raw oops before GC is allowed, and as we
// enable GC in the bootstrapping, all raw oops are handleified using OopStorage. All handles are
// handed back from the AOT thread when materialization has finished. The switch from raw oops to
// using OopStorage handles happens under a lock while no iteration or tracing is allowed.
//
// The initialization code is also performed in a faster way when the GC is not allowed. In particular,
// before GC is allowed, we perform raw memcpy of the archived object into the Java heap. Then the
// object is initialized with IS_DEST_UNINITIALIZED stores. The assumption made here is that before
// any GC activity is allowed, we shouldn't have to worry about concurrent GC threads scanning the
// memory and getting tripped up by that. Once GC is enabled, we switch to a more careful approach
// that uses a pre-computed bitmap to find the holes where oops go, copying only the non-oop data
// with memcpy while the oops are set separately with HeapAccess stores that cope well with
// concurrent activity (sketched at the end of this comment).
//
// The marked bit pattern of the mark word of archived heap objects is used for signalling which string
// objects should be interned. At dump time, some referenced strings were interned; this is really an
// identity property, so we don't need to dump the entire string table just to communicate it. Instead,
// we intern strings on-the-fly, exploiting to our advantage the dynamic object-level linking that this
// approach has chosen.
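//
// As a minimal sketch of the index scheme described above (the helper
// materialize() is hypothetical and stands in for the loaders below; the
// lookup functions are the ones declared in this class):
//
//   oop resolve_index(int object_index) {
//     if (object_index == 0) {
//       return nullptr;                                     // 0 encodes null
//     }
//     if (oop heap_obj = heap_object_for_object_index(object_index)) {
//       return heap_obj;                                    // already materialized
//     }
//     oopDesc* archived = archive_object_for_object_index(object_index);
//     return materialize(archived);                         // allocate, initialize, link
//   }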
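//
// Likewise, the bitmap-guided careful copy used once GC is enabled can be
// pictured as follows (a sketch with hypothetical helpers; the real code is
// templated over use_coops and a linker closure):
//
//   for (BitMap::idx_t bit = start_bit; bit < end_bit; bit++) {
//     if (_oopmap.at(bit)) {
//       // An oop slot: decode the archived DFS index and store the referent
//       // with a GC-aware access, so concurrent collectors see a valid oop.
//       int index = archived_index_at(archive_object, bit);
//       HeapAccess<>::oop_store(heap_slot_at(heap_object, bit), resolve_index(index));
//     } else {
//       // A non-oop payload slot: a plain word copy is safe.
//       copy_slot(heap_object, archive_object, bit);
//     }
//   }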
class FileMapInfo;
class OopStorage;
class Thread;
struct AOTHeapTraversalEntry;
struct alignas(AOTHeapTraversalEntry* /* Requirement of Stack<AOTHeapTraversalEntry> */) AOTHeapTraversalEntry {
int _pointee_object_index;
int _base_object_index;
int _heap_field_offset_bytes;
};
class AOTStreamedHeapLoader {
friend class InflateReferenceOopClosure;
private:
static FileMapRegion* _heap_region;
static FileMapRegion* _bitmap_region;
static OopStorage* _oop_storage;
static int* _roots_archive;
static OopHandle _roots;
static BitMapView _oopmap;
static bool _is_in_use;
static bool _allow_gc;
static bool _objects_are_handles;
static int _previous_batch_last_object_index;
static int _current_batch_last_object_index;
static size_t _allocated_words;
static int _current_root_index;
static size_t _num_archived_objects;
static int _num_roots;
static size_t _heap_region_used;
static bool _loading_all_objects;
static size_t* _object_index_to_buffer_offset_table;
static void** _object_index_to_heap_object_table;
static int* _root_highest_object_index_table;
static bool _waiting_for_iterator;
static bool _swapping_root_format;
template <typename LinkerT>
class InPlaceLinkingOopClosure;
static oop allocate_object(oopDesc* archive_object, markWord mark, size_t size, TRAPS);
static int object_index_for_root_index(int root_index);
static int highest_object_index_for_root_index(int root_index);
static size_t buffer_offset_for_object_index(int object_index);
static oopDesc* archive_object_for_object_index(int object_index);
static size_t buffer_offset_for_archive_object(oopDesc* archive_object);
template <bool use_coops>
static BitMap::idx_t obj_bit_idx_for_buffer_offset(size_t buffer_offset);
template <bool use_coops, typename LinkerT>
static void copy_payload_carefully(oopDesc* archive_object,
oop heap_object,
BitMap::idx_t header_bit,
BitMap::idx_t start_bit,
BitMap::idx_t end_bit,
LinkerT linker);
template <bool use_coops, typename LinkerT>
static void copy_object_impl(oopDesc* archive_object,
oop heap_object,
size_t size,
LinkerT linker);
static void copy_object_eager_linking(oopDesc* archive_object, oop heap_object, size_t size);
static void switch_object_index_to_handle(int object_index);
static oop heap_object_for_object_index(int object_index);
static void set_heap_object_for_object_index(int object_index, oop heap_object);
static int archived_string_value_object_index(oopDesc* archive_object);
static bool materialize_early(TRAPS);
static void materialize_late(TRAPS);
static void cleanup();
static void log_statistics();
class TracingObjectLoader {
static oop materialize_object(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS);
static oop materialize_object_inner(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS);
static void copy_object_lazy_linking(int object_index,
oopDesc* archive_object,
oop heap_object,
size_t size,
Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack);
static void drain_stack(Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS);
static oop materialize_object_transitive(int object_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS);
static void wait_for_iterator();
public:
static oop materialize_root(int root_index, Stack<AOTHeapTraversalEntry, mtClassShared>& dfs_stack, TRAPS);
};
class IterativeObjectLoader {
static void initialize_range(int first_object_index, int last_object_index, TRAPS);
static size_t materialize_range(int first_object_index, int last_object_index, TRAPS);
public:
static bool has_more();
static void materialize_next_batch(TRAPS);
};
static void install_root(int root_index, oop heap_object);
static void await_gc_enabled();
static void await_finished_processing();
public:
static void initialize();
static void enable_gc();
static void materialize_thread_object();
static oop materialize_root(int root_index);
static oop get_root(int root_index);
static void clear_root(int index);
static void materialize_objects();
static void finish_materialize_objects();
static bool is_in_use() { return _is_in_use; }
static void finish_initialization(FileMapInfo* info);
static AOTMapLogger::OopDataIterator* oop_iterator(FileMapInfo* info, address buffer_start, address buffer_end);
};
#endif // INCLUDE_CDS_JAVA_HEAP
#endif // SHARE_CDS_AOTSTREAMEDHEAPLOADER_HPP
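
As a rough, self-contained illustration of the zone check described in the header comment above (all names here are hypothetical; the real loader coordinates under a lock using _previous_batch_last_object_index and _current_batch_last_object_index):

#include <cassert>

enum class Zone { TransitivelyMaterialized, CurrentlyMaterializing, NotYetMaterialized };

// Classify a DFS object index against the two batch bounds published by the
// AOT thread. Everything at or below the previous batch's last index is done;
// indices in the current batch are being handled by the AOT thread; anything
// beyond is free for a tracing thread to materialize on its own.
static Zone classify(int object_index, int previous_batch_last, int current_batch_last) {
  if (object_index <= previous_batch_last) return Zone::TransitivelyMaterialized;
  if (object_index <= current_batch_last)  return Zone::CurrentlyMaterializing;
  return Zone::NotYetMaterialized;
}

int main() {
  // With batch bounds published as (previous = 100, current = 250):
  assert(classify( 42, 100, 250) == Zone::TransitivelyMaterialized); // needs no processing
  assert(classify(180, 100, 250) == Zone::CurrentlyMaterializing);   // let the AOT thread finish
  assert(classify(300, 100, 250) == Zone::NotYetMaterialized);       // safe to trace through
  return 0;
}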

View File

@ -0,0 +1,614 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/modules.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "oops/objArrayOop.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oopHandle.inline.hpp"
#include "oops/typeArrayKlass.hpp"
#include "oops/typeArrayOop.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_CDS_JAVA_HEAP
GrowableArrayCHeap<u1, mtClassShared>* AOTStreamedHeapWriter::_buffer = nullptr;
// The following are offsets from buffer_bottom()
size_t AOTStreamedHeapWriter::_buffer_used;
size_t AOTStreamedHeapWriter::_roots_offset;
size_t AOTStreamedHeapWriter::_forwarding_offset;
size_t AOTStreamedHeapWriter::_root_highest_object_index_table_offset;
GrowableArrayCHeap<oop, mtClassShared>* AOTStreamedHeapWriter::_source_objs;
AOTStreamedHeapWriter::BufferOffsetToSourceObjectTable* AOTStreamedHeapWriter::_buffer_offset_to_source_obj_table;
AOTStreamedHeapWriter::SourceObjectToDFSOrderTable* AOTStreamedHeapWriter::_dfs_order_table;
int* AOTStreamedHeapWriter::_roots_highest_dfs;
size_t* AOTStreamedHeapWriter::_dfs_to_archive_object_table;
static const int max_table_capacity = 0x3fffffff;
void AOTStreamedHeapWriter::init() {
if (CDSConfig::is_dumping_heap()) {
_buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(8, max_table_capacity);
int initial_source_objs_capacity = 10000;
_source_objs = new GrowableArrayCHeap<oop, mtClassShared>(initial_source_objs_capacity);
}
}
void AOTStreamedHeapWriter::delete_tables_with_raw_oops() {
delete _source_objs;
_source_objs = nullptr;
delete _dfs_order_table;
_dfs_order_table = nullptr;
}
void AOTStreamedHeapWriter::add_source_obj(oop src_obj) {
_source_objs->append(src_obj);
}
class FollowOopIterateClosure: public BasicOopIterateClosure {
Stack<oop, mtClassShared>* _dfs_stack;
oop _src_obj;
bool _is_java_lang_ref;
public:
FollowOopIterateClosure(Stack<oop, mtClassShared>* dfs_stack, oop src_obj, bool is_java_lang_ref) :
_dfs_stack(dfs_stack),
_src_obj(src_obj),
_is_java_lang_ref(is_java_lang_ref) {}
void do_oop(narrowOop *p) { do_oop_work(p); }
void do_oop( oop *p) { do_oop_work(p); }
private:
template <class T> void do_oop_work(T *p) {
size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
oop obj = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, HeapAccess<>::oop_load(p));
if (obj != nullptr) {
_dfs_stack->push(obj);
}
}
};
int AOTStreamedHeapWriter::cmp_dfs_order(oop* o1, oop* o2) {
int* o1_dfs = _dfs_order_table->get(*o1);
int* o2_dfs = _dfs_order_table->get(*o2);
return *o1_dfs - *o2_dfs;
}
void AOTStreamedHeapWriter::order_source_objs(GrowableArrayCHeap<oop, mtClassShared>* roots) {
Stack<oop, mtClassShared> dfs_stack;
_dfs_order_table = new (mtClassShared) SourceObjectToDFSOrderTable(8, max_table_capacity);
_roots_highest_dfs = NEW_C_HEAP_ARRAY(int, (size_t)roots->length(), mtClassShared);
_dfs_to_archive_object_table = NEW_C_HEAP_ARRAY(size_t, (size_t)_source_objs->length() + 1, mtClassShared);
for (int i = 0; i < _source_objs->length(); ++i) {
oop obj = _source_objs->at(i);
_dfs_order_table->put(cast_from_oop<void*>(obj), -1);
_dfs_order_table->maybe_grow();
}
int dfs_order = 0;
for (int i = 0; i < roots->length(); ++i) {
oop root = roots->at(i);
if (root == nullptr) {
log_info(aot, heap)("null root at %d", i);
continue;
}
dfs_stack.push(root);
while (!dfs_stack.is_empty()) {
oop obj = dfs_stack.pop();
assert(obj != nullptr, "null root");
int* dfs_number = _dfs_order_table->get(cast_from_oop<void*>(obj));
if (*dfs_number != -1) {
// Already visited in the traversal
continue;
}
_dfs_order_table->put(cast_from_oop<void*>(obj), ++dfs_order);
_dfs_order_table->maybe_grow();
FollowOopIterateClosure cl(&dfs_stack, obj, AOTReferenceObjSupport::check_if_ref_obj(obj));
obj->oop_iterate(&cl);
}
_roots_highest_dfs[i] = dfs_order;
}
_source_objs->sort(cmp_dfs_order);
}
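// The traversal above in isolation: a minimal standalone sketch (plain C++,
// hypothetical graph of int ids, no HotSpot types) of the same scheme. An
// explicit-stack DFS assigns 1-based visit numbers from each root, records
// the highest number reached per root, and objects are then sorted by visit
// number, mirroring order_source_objs().
#include <algorithm>
#include <cstdio>
#include <unordered_map>
#include <vector>

int main() {
  std::unordered_map<int, std::vector<int>> refs = {
    {10, {20, 30}}, {20, {30}}, {30, {}}, {40, {30}}
  };
  std::vector<int> roots = {10, 40};
  std::vector<int> objects = {10, 20, 30, 40};
  std::unordered_map<int, int> dfs_order; // 0 means not visited yet
  std::vector<int> roots_highest_dfs;
  int order = 0;
  for (int root : roots) {
    std::vector<int> stack = {root};
    while (!stack.empty()) {
      int obj = stack.back();
      stack.pop_back();
      if (dfs_order[obj] != 0) continue; // already visited
      dfs_order[obj] = ++order;
      for (int ref : refs[obj]) stack.push_back(ref);
    }
    roots_highest_dfs.push_back(order); // everything reachable from this
  }                                     // root has a number <= order
  std::sort(objects.begin(), objects.end(),
            [&](int a, int b) { return dfs_order[a] < dfs_order[b]; });
  for (int obj : objects) printf("obj %d -> dfs %d\n", obj, dfs_order[obj]);
}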
void AOTStreamedHeapWriter::write(GrowableArrayCHeap<oop, mtClassShared>* roots,
ArchiveStreamedHeapInfo* heap_info) {
assert(CDSConfig::is_dumping_heap(), "sanity");
allocate_buffer();
order_source_objs(roots);
copy_source_objs_to_buffer(roots);
map_embedded_oops(heap_info);
populate_archive_heap_info(heap_info);
}
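// Taken together, the copy_* steps called from write() give the buffer a
// simple linear layout (reconstructed from the writer code in this file;
// the offsets are the fields recorded into ArchiveStreamedHeapInfo):
//
//   0 ........................... archived objects in DFS order (complex
//                                 objects are preceded by an explicit size
//                                 word)
//   _roots_offset ............... int root count, then one DFS index per root
//   _forwarding_offset .......... one size_t per DFS index mapping it to a
//                                 buffer offset; entry 0 is the null entry
//   _root_highest_object_index_table_offset
//                                 one int per root: the highest DFS index
//                                 reachable from that root, padded with -1
//                                 to a 64-bit boundary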
void AOTStreamedHeapWriter::allocate_buffer() {
int initial_buffer_size = 100000;
_buffer = new GrowableArrayCHeap<u1, mtClassShared>(initial_buffer_size);
_buffer_used = 0;
ensure_buffer_space(1); // so that buffer_bottom() works
}
void AOTStreamedHeapWriter::ensure_buffer_space(size_t min_bytes) {
// We usually have very small heaps. If we get a huge one it's probably caused by a bug.
guarantee(min_bytes <= max_jint, "we don't support archiving more than 2G of objects");
_buffer->at_grow(to_array_index(min_bytes));
}
void AOTStreamedHeapWriter::copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
int length = roots->length();
size_t byte_size = align_up(sizeof(int) + sizeof(int) * (size_t)length, (size_t)HeapWordSize);
size_t new_used = _buffer_used + byte_size;
ensure_buffer_space(new_used);
int* mem = offset_to_buffered_address<int*>(_buffer_used);
memset(mem, 0, byte_size);
*mem = length;
for (int i = 0; i < length; i++) {
// Do not use obj_at_put(i, o) here: the buffer is outside of the real heap,
// so roots are stored as plain DFS indices rather than oops.
oop o = roots->at(i);
int dfs_index = o == nullptr ? 0 : *_dfs_order_table->get(cast_from_oop<void*>(o));
mem[i + 1] = dfs_index;
}
log_info(aot, heap)("archived obj roots[%d] = %zu bytes, mem = " INTPTR_FORMAT, length, byte_size, p2i(mem));
_roots_offset = _buffer_used;
_buffer_used = new_used;
}
template <typename T>
void AOTStreamedHeapWriter::write(T value) {
size_t new_used = _buffer_used + sizeof(T);
ensure_buffer_space(new_used);
T* mem = offset_to_buffered_address<T*>(_buffer_used);
*mem = value;
_buffer_used = new_used;
}
void AOTStreamedHeapWriter::copy_forwarding_to_buffer() {
_forwarding_offset = _buffer_used;
write<size_t>(0); // The first entry is the null entry
// Write a mapping from object index to buffer offset
for (int i = 1; i <= _source_objs->length(); i++) {
size_t buffer_offset = _dfs_to_archive_object_table[i];
write(buffer_offset);
}
}
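// On the reading side, this section resolves any DFS index with one array
// lookup. A hedged standalone sketch (object_for_dfs_index and the fake
// buffer are illustrative names, not the real AOTStreamedHeapLoader API):
#include <cstddef>
#include <cstdint>
#include <cstdio>

static const uint8_t* object_for_dfs_index(const uint8_t* buffer,
                                           size_t forwarding_offset,
                                           int dfs_index) {
  if (dfs_index == 0) {
    return nullptr; // entry 0 is the reserved null entry
  }
  const size_t* forwarding =
      reinterpret_cast<const size_t*>(buffer + forwarding_offset);
  return buffer + forwarding[dfs_index]; // buffer offset of the object
}

int main() {
  // Fake 64-byte "buffer": two 16-byte objects, forwarding table at offset 32.
  alignas(alignof(size_t)) uint8_t buffer[64] = {};
  size_t* forwarding = reinterpret_cast<size_t*>(buffer + 32);
  forwarding[0] = 0;  // null entry
  forwarding[1] = 0;  // DFS index 1 -> buffer offset 0
  forwarding[2] = 16; // DFS index 2 -> buffer offset 16
  printf("dfs 2 -> offset %td\n", object_for_dfs_index(buffer, 32, 2) - buffer);
}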
void AOTStreamedHeapWriter::copy_roots_max_dfs_to_buffer(int roots_length) {
_root_highest_object_index_table_offset = _buffer_used;
for (int i = 0; i < roots_length; ++i) {
int highest_dfs = _roots_highest_dfs[i];
write(highest_dfs);
}
if ((roots_length % 2) != 0) {
write(-1); // Align up to a 64-bit word
}
}
static bool is_interned_string(oop obj) {
if (!java_lang_String::is_instance(obj)) {
return false;
}
ResourceMark rm;
int len;
jchar* name = java_lang_String::as_unicode_string_or_null(obj, len);
if (name == nullptr) {
fatal("Insufficient memory for dumping");
}
return StringTable::lookup(name, len) == obj;
}
static BitMap::idx_t bit_idx_for_buffer_offset(size_t buffer_offset) {
if (UseCompressedOops) {
return BitMap::idx_t(buffer_offset / sizeof(narrowOop));
} else {
return BitMap::idx_t(buffer_offset / sizeof(HeapWord));
}
}
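// Worked example of the granularity above: with compressed oops, a field at
// buffer offset 40 maps to bit 40 / sizeof(narrowOop) = 10; without compressed
// oops on a 64-bit VM, the same offset maps to bit 40 / sizeof(HeapWord) = 5.
// One bit per potential oop slot keeps the oopmap dense for either oop size.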
bool AOTStreamedHeapWriter::is_dumped_interned_string(oop obj) {
return is_interned_string(obj) && HeapShared::get_cached_oop_info(obj) != nullptr;
}
void AOTStreamedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
for (int i = 0; i < _source_objs->length(); i++) {
oop src_obj = _source_objs->at(i);
HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
assert(info != nullptr, "must be");
size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
info->set_buffer_offset(buffer_offset);
OopHandle handle(Universe::vm_global(), src_obj);
_buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
_buffer_offset_to_source_obj_table->maybe_grow();
int dfs_order = i + 1;
_dfs_to_archive_object_table[dfs_order] = buffer_offset;
}
copy_roots_to_buffer(roots);
copy_forwarding_to_buffer();
copy_roots_max_dfs_to_buffer(roots->length());
log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots",
_buffer_used, _source_objs->length() + 1, roots->length());
}
template <typename T>
void update_buffered_object_field(address buffered_obj, int field_offset, T value) {
T* field_addr = cast_to_oop(buffered_obj)->field_addr<T>(field_offset);
*field_addr = value;
}
static bool needs_explicit_size(oop src_obj) {
Klass* klass = src_obj->klass();
int lh = klass->layout_helper();
// Simple instances or arrays don't need an explicit size
if (Klass::layout_helper_is_instance(lh)) {
return Klass::layout_helper_needs_slow_path(lh);
}
return !Klass::layout_helper_is_array(lh);
}
size_t AOTStreamedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
if (needs_explicit_size(src_obj)) {
// Explicitly write the object size for more complex objects, so the loader
// does not have to treat the buffered bytes as real heap objects just to
// read their size. Most of the time, the layout helper of the class is enough.
write<size_t>(src_obj->size());
}
size_t byte_size = src_obj->size() * HeapWordSize;
assert(byte_size > 0, "no zero-size objects");
size_t new_used = _buffer_used + byte_size;
assert(new_used > _buffer_used, "no wrap around");
ensure_buffer_space(new_used);
if (is_interned_string(src_obj)) {
java_lang_String::hash_code(src_obj); // Sets the hash code field(s)
java_lang_String::set_deduplication_forbidden(src_obj); // Allows faster interning at runtime
assert(java_lang_String::hash_is_set(src_obj), "hash must be set");
}
address from = cast_from_oop<address>(src_obj);
address to = offset_to_buffered_address<address>(_buffer_used);
assert(is_object_aligned(_buffer_used), "sanity");
assert(is_object_aligned(byte_size), "sanity");
memcpy(to, from, byte_size);
if (java_lang_Module::is_instance(src_obj)) {
// These native pointers will be restored explicitly at run time.
Modules::check_archived_module_oop(src_obj);
update_buffered_object_field<ModuleEntry*>(to, java_lang_Module::module_entry_offset(), nullptr);
} else if (java_lang_ClassLoader::is_instance(src_obj)) {
#ifdef ASSERT
// We only archive these loaders
if (src_obj != SystemDictionary::java_platform_loader() &&
src_obj != SystemDictionary::java_system_loader()) {
assert(src_obj->klass()->name()->equals("jdk/internal/loader/ClassLoaders$BootClassLoader"), "must be");
}
#endif
update_buffered_object_field<ClassLoaderData*>(to, java_lang_ClassLoader::loader_data_offset(), nullptr);
}
size_t buffered_obj_offset = _buffer_used;
_buffer_used = new_used;
return buffered_obj_offset;
}
// Oop mapping
inline void AOTStreamedHeapWriter::store_oop_in_buffer(oop* buffered_addr, int dfs_index) {
*(ssize_t*)buffered_addr = dfs_index;
}
inline void AOTStreamedHeapWriter::store_oop_in_buffer(narrowOop* buffered_addr, int dfs_index) {
*(int32_t*)buffered_addr = (int32_t)dfs_index;
}
template <typename T> void AOTStreamedHeapWriter::mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap) {
// Mark the pointer in the oopmap
size_t buffered_offset = buffered_address_to_offset((address)buffered_addr);
BitMap::idx_t idx = bit_idx_for_buffer_offset(buffered_offset);
oopmap->set_bit(idx);
}
template <typename T> void AOTStreamedHeapWriter::map_oop_field_in_buffer(oop obj, T* field_addr_in_buffer, CHeapBitMap* oopmap) {
if (obj == nullptr) {
store_oop_in_buffer(field_addr_in_buffer, 0);
} else {
int dfs_index = *_dfs_order_table->get(obj);
store_oop_in_buffer(field_addr_in_buffer, dfs_index);
}
mark_oop_pointer<T>(field_addr_in_buffer, oopmap);
}
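// This is the heart of the streamed format: an oop field in the buffer never
// holds an address, only the DFS index of its referent (0 for null), which
// keeps the archive position-independent and GC-agnostic. A minimal standalone
// round-trip sketch with toy types (the real decode happens in
// AOTStreamedHeapLoader):
#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  // Writer side: each oop slot holds a DFS index, matching
  // store_oop_in_buffer() above; 0 encodes null.
  std::vector<int32_t> slots = {2, 0, 1};
  // Loader side: once objects are materialized somewhere, the index in
  // each slot is swapped for the real address.
  int32_t obj1 = 41, obj2 = 42;
  std::vector<int32_t*> materialized = {nullptr, &obj1, &obj2};
  std::vector<int32_t*> patched;
  for (int32_t slot : slots) {
    patched.push_back(materialized[slot]);
  }
  assert(*patched[0] == 42);     // slot held index 2
  assert(patched[1] == nullptr); // slot held the null index
  assert(*patched[2] == 41);     // slot held index 1
  return 0;
}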
void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_addr, oop src_obj, Klass* src_klass) {
assert(UseCompressedClassPointers, "Archived heap only supported for compressed klasses");
narrowKlass nk = ArchiveBuilder::current()->get_requested_narrow_klass(src_klass);
markWord mw = markWord::prototype();
oopDesc* fake_oop = (oopDesc*)buffered_addr;
// We need to retain the identity_hash, because it may have been used by some hashtables
// in the shared heap. This also has the side effect of pre-initializing the
// identity_hash for all shared objects, so they are less likely to be written
// into during run time, increasing the potential of memory sharing.
if (src_obj != nullptr) {
intptr_t src_hash = src_obj->identity_hash();
mw = mw.copy_set_hash(src_hash);
}
if (is_interned_string(src_obj)) {
// Set the marked bit in the mark word of interned strings so the loader
// knows to link them into the string table at runtime.
mw = mw.set_marked();
}
if (UseCompactObjectHeaders) {
fake_oop->set_mark(mw.set_narrow_klass(nk));
} else {
fake_oop->set_mark(mw);
fake_oop->set_narrow_klass(nk);
}
}
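// The header update above folds three ingredients into the archived mark word:
// the preserved identity hash, a marked bit for interned strings, and (with
// compact headers) the narrow klass. A hedged bit-packing sketch with made-up
// field positions, purely for illustration; HotSpot's real markWord layout
// depends on platform and flags such as UseCompactObjectHeaders.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kMarkedPattern = 0x3; // low bits: "marked" (interned string)
  const int kHashShift = 8;            // identity hash field (hypothetical)
  const int kKlassShift = 40;          // narrow klass field (hypothetical)
  uint64_t mark = 0;                       // markWord::prototype()
  mark |= (uint64_t)0x1234 << kHashShift;  // copy_set_hash(src_hash)
  mark |= kMarkedPattern;                  // set_marked()
  mark |= (uint64_t)0x77 << kKlassShift;   // set_narrow_klass(nk)
  printf("archived mark word: 0x%llx\n", (unsigned long long)mark);
}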
class AOTStreamedHeapWriter::EmbeddedOopMapper: public BasicOopIterateClosure {
oop _src_obj;
address _buffered_obj;
CHeapBitMap* _oopmap;
bool _is_java_lang_ref;
public:
EmbeddedOopMapper(oop src_obj, address buffered_obj, CHeapBitMap* oopmap)
: _src_obj(src_obj),
_buffered_obj(buffered_obj),
_oopmap(oopmap),
_is_java_lang_ref(AOTReferenceObjSupport::check_if_ref_obj(src_obj)) {}
void do_oop(narrowOop *p) { EmbeddedOopMapper::do_oop_work(p); }
void do_oop( oop *p) { EmbeddedOopMapper::do_oop_work(p); }
private:
template <typename T>
void do_oop_work(T *p) {
size_t field_offset = pointer_delta(p, _src_obj, sizeof(char));
oop obj = HeapShared::maybe_remap_referent(_is_java_lang_ref, field_offset, HeapAccess<>::oop_load(p));
AOTStreamedHeapWriter::map_oop_field_in_buffer<T>(obj, (T*)(_buffered_obj + field_offset), _oopmap);
}
};
static void log_bitmap_usage(const char* which, BitMap* bitmap, size_t total_bits) {
// The whole heap is covered by total_bits, but there are only non-zero bits within [start ... end).
size_t start = bitmap->find_first_set_bit(0);
size_t end = bitmap->size();
log_info(aot)("%s = %7zu ... %7zu (%3zu%% ... %3zu%% = %3zu%%)", which,
start, end,
start * 100 / total_bits,
end * 100 / total_bits,
(end - start) * 100 / total_bits);
}
// Update all oop fields embedded in the buffered objects
void AOTStreamedHeapWriter::map_embedded_oops(ArchiveStreamedHeapInfo* heap_info) {
size_t oopmap_unit = (UseCompressedOops ? sizeof(narrowOop) : sizeof(oop));
size_t heap_region_byte_size = _buffer_used;
heap_info->oopmap()->resize(heap_region_byte_size / oopmap_unit);
for (int i = 0; i < _source_objs->length(); i++) {
oop src_obj = _source_objs->at(i);
HeapShared::CachedOopInfo* info = HeapShared::get_cached_oop_info(src_obj);
assert(info != nullptr, "must be");
address buffered_obj = offset_to_buffered_address<address>(info->buffer_offset());
update_header_for_buffered_addr(buffered_obj, src_obj, src_obj->klass());
EmbeddedOopMapper mapper(src_obj, buffered_obj, heap_info->oopmap());
src_obj->oop_iterate(&mapper);
HeapShared::remap_dumped_metadata(src_obj, buffered_obj);
}
size_t total_bytes = (size_t)_buffer->length();
log_bitmap_usage("oopmap", heap_info->oopmap(), total_bytes / oopmap_unit);
}
size_t AOTStreamedHeapWriter::source_obj_to_buffered_offset(oop src_obj) {
HeapShared::CachedOopInfo* p = HeapShared::get_cached_oop_info(src_obj);
return p->buffer_offset();
}
address AOTStreamedHeapWriter::source_obj_to_buffered_addr(oop src_obj) {
return offset_to_buffered_address<address>(source_obj_to_buffered_offset(src_obj));
}
oop AOTStreamedHeapWriter::buffered_offset_to_source_obj(size_t buffered_offset) {
OopHandle* oh = _buffer_offset_to_source_obj_table->get(buffered_offset);
if (oh != nullptr) {
return oh->resolve();
} else {
return nullptr;
}
}
oop AOTStreamedHeapWriter::buffered_addr_to_source_obj(address buffered_addr) {
return buffered_offset_to_source_obj(buffered_address_to_offset(buffered_addr));
}
void AOTStreamedHeapWriter::populate_archive_heap_info(ArchiveStreamedHeapInfo* info) {
assert(!info->is_used(), "only set once");
size_t heap_region_byte_size = _buffer_used;
assert(heap_region_byte_size > 0, "must have archived at least one object!");
info->set_buffer_region(MemRegion(offset_to_buffered_address<HeapWord*>(0),
offset_to_buffered_address<HeapWord*>(_buffer_used)));
info->set_roots_offset(_roots_offset);
info->set_num_roots((size_t)HeapShared::pending_roots()->length());
info->set_forwarding_offset(_forwarding_offset);
info->set_root_highest_object_index_table_offset(_root_highest_object_index_table_offset);
info->set_num_archived_objects((size_t)_source_objs->length());
}
AOTMapLogger::OopDataIterator* AOTStreamedHeapWriter::oop_iterator(ArchiveStreamedHeapInfo* heap_info) {
class StreamedWriterOopIterator : public AOTMapLogger::OopDataIterator {
private:
int _current;
int _next;
address _buffer_start;
int _num_archived_objects;
int _num_archived_roots;
int* _roots;
public:
StreamedWriterOopIterator(address buffer_start,
int num_archived_objects,
int num_archived_roots,
int* roots)
: _current(0),
_next(1),
_buffer_start(buffer_start),
_num_archived_objects(num_archived_objects),
_num_archived_roots(num_archived_roots),
_roots(roots) {
}
AOTMapLogger::OopData capture(int dfs_index) {
size_t buffered_offset = _dfs_to_archive_object_table[dfs_index];
address buffered_addr = _buffer_start + buffered_offset;
oop src_obj = AOTStreamedHeapWriter::buffered_offset_to_source_obj(buffered_offset);
assert(src_obj != nullptr, "why is this null?");
oopDesc* raw_oop = (oopDesc*)buffered_addr;
Klass* klass = src_obj->klass();
size_t size = src_obj->size();
intptr_t target_location = (intptr_t)buffered_offset;
uint32_t narrow_location = checked_cast<uint32_t>(dfs_index);
address requested_addr = (address)buffered_offset;
return { buffered_addr,
requested_addr,
target_location,
narrow_location,
raw_oop,
klass,
size,
false };
}
bool has_next() override {
return _next <= _num_archived_objects;
}
AOTMapLogger::OopData next() override {
_current = _next;
AOTMapLogger::OopData result = capture(_current);
_next = _current + 1;
return result;
}
AOTMapLogger::OopData obj_at(narrowOop* addr) override {
int dfs_index = (int)(*addr);
if (dfs_index == 0) {
return null_data();
} else {
return capture(dfs_index);
}
}
AOTMapLogger::OopData obj_at(oop* addr) override {
int dfs_index = (int)cast_from_oop<uintptr_t>(*addr);
if (dfs_index == 0) {
return null_data();
} else {
return capture(dfs_index);
}
}
GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* roots() override {
GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>* result = new GrowableArrayCHeap<AOTMapLogger::OopData, mtClass>();
for (int i = 0; i < _num_archived_roots; ++i) {
int object_index = _roots[i];
result->append(capture(object_index));
}
return result;
}
};
MemRegion r = heap_info->buffer_region();
address buffer_start = address(r.start());
size_t roots_offset = heap_info->roots_offset();
int* roots = ((int*)(buffer_start + roots_offset)) + 1;
return new StreamedWriterOopIterator(buffer_start, (int)heap_info->num_archived_objects(), (int)heap_info->num_roots(), roots);
}
#endif // INCLUDE_CDS_JAVA_HEAP


@ -0,0 +1,162 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_CDS_AOTSTREAMEDHEAPWRITER_HPP
#define SHARE_CDS_AOTSTREAMEDHEAPWRITER_HPP
#include "cds/aotMapLogger.hpp"
#include "cds/heapShared.hpp"
#include "memory/allocation.hpp"
#include "memory/allStatic.hpp"
#include "oops/oopHandle.hpp"
#include "utilities/bitMap.hpp"
#include "utilities/exceptions.hpp"
#include "utilities/growableArray.hpp"
#include "utilities/macros.hpp"
#include "utilities/resizableHashTable.hpp"
class MemRegion;
#if INCLUDE_CDS_JAVA_HEAP
class AOTStreamedHeapWriter : AllStatic {
class EmbeddedOopMapper;
static GrowableArrayCHeap<u1, mtClassShared>* _buffer;
// The number of bytes that have been written into _buffer (may be smaller than _buffer->length()).
static size_t _buffer_used;
// The offset of the copy of the heap roots inside _buffer.
static size_t _roots_offset;
// Offset to the forwarding information
static size_t _forwarding_offset;
// Offset to dfs bounds information
static size_t _root_highest_object_index_table_offset;
static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
typedef ResizeableHashTable<size_t, OopHandle,
AnyObj::C_HEAP,
mtClassShared> BufferOffsetToSourceObjectTable;
static BufferOffsetToSourceObjectTable* _buffer_offset_to_source_obj_table;
typedef ResizeableHashTable<void*, int,
AnyObj::C_HEAP,
mtClassShared> SourceObjectToDFSOrderTable;
static SourceObjectToDFSOrderTable* _dfs_order_table;
static int* _roots_highest_dfs;
static size_t* _dfs_to_archive_object_table;
static int cmp_dfs_order(oop* o1, oop* o2);
static void allocate_buffer();
static void ensure_buffer_space(size_t min_bytes);
// Both Java byte arrays and GrowableArray use int indices and lengths. Do a safe typecast with a range check
static int to_array_index(size_t i) {
assert(i <= (size_t)max_jint, "must be");
return (int)i;
}
static int to_array_length(size_t n) {
return to_array_index(n);
}
template <typename T> static T offset_to_buffered_address(size_t offset) {
return (T)(_buffer->adr_at(to_array_index(offset)));
}
static address buffer_bottom() {
return offset_to_buffered_address<address>(0);
}
// The exclusive end of the last object that was copied into the buffer.
static address buffer_top() {
return buffer_bottom() + _buffer_used;
}
static bool in_buffer(address buffered_addr) {
return (buffer_bottom() <= buffered_addr) && (buffered_addr < buffer_top());
}
static size_t buffered_address_to_offset(address buffered_addr) {
assert(in_buffer(buffered_addr), "sanity");
return buffered_addr - buffer_bottom();
}
static void order_source_objs(GrowableArrayCHeap<oop, mtClassShared>* roots);
static void copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
static void copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
static size_t copy_one_source_obj_to_buffer(oop src_obj);
template <typename T>
static void write(T value);
static void copy_forwarding_to_buffer();
static void copy_roots_max_dfs_to_buffer(int roots_length);
static void map_embedded_oops(ArchiveStreamedHeapInfo* info);
static bool is_in_requested_range(oop o);
static oop requested_obj_from_buffer_offset(size_t offset);
static oop load_oop_from_buffer(oop* buffered_addr);
static oop load_oop_from_buffer(narrowOop* buffered_addr);
inline static void store_oop_in_buffer(oop* buffered_addr, int dfs_index);
inline static void store_oop_in_buffer(narrowOop* buffered_addr, int dfs_index);
template <typename T> static void mark_oop_pointer(T* buffered_addr, CHeapBitMap* oopmap);
template <typename T> static void map_oop_field_in_buffer(oop obj, T* field_addr_in_buffer, CHeapBitMap* oopmap);
static void update_header_for_buffered_addr(address buffered_addr, oop src_obj, Klass* src_klass);
static void populate_archive_heap_info(ArchiveStreamedHeapInfo* info);
public:
static void init() NOT_CDS_JAVA_HEAP_RETURN;
static void delete_tables_with_raw_oops();
static void add_source_obj(oop src_obj);
static void write(GrowableArrayCHeap<oop, mtClassShared>*, ArchiveStreamedHeapInfo* heap_info);
static address buffered_heap_roots_addr() {
return offset_to_buffered_address<address>(_roots_offset);
}
static size_t buffered_addr_to_buffered_offset(address buffered_addr) {
assert(buffered_addr != nullptr, "should not be null");
return size_t(buffered_addr) - size_t(buffer_bottom());
}
static bool is_dumped_interned_string(oop obj);
static size_t source_obj_to_buffered_offset(oop src_obj);
static address source_obj_to_buffered_addr(oop src_obj);
static oop buffered_offset_to_source_obj(size_t buffered_offset);
static oop buffered_addr_to_source_obj(address buffered_addr);
static AOTMapLogger::OopDataIterator* oop_iterator(ArchiveStreamedHeapInfo* heap_info);
};
#endif // INCLUDE_CDS_JAVA_HEAP
#endif // SHARE_CDS_AOTSTREAMEDHEAPWRITER_HPP


@ -0,0 +1,114 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "cds/aotStreamedHeapLoader.hpp"
#include "cds/aotThread.hpp"
#include "cds/heapShared.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/javaThreadStatus.hpp"
#include "classfile/vmClasses.hpp"
#include "classfile/vmSymbols.hpp"
#include "jfr/jfr.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/osThread.hpp"
#include "runtime/thread.hpp"
#include "runtime/threads.hpp"
#include "utilities/exceptions.hpp"
AOTThread* AOTThread::_aot_thread;
bool AOTThread::_started;
// Starting the AOTThread is tricky. We wish to start it as early as possible, as
// that increases the amount of work this thread can do ahead of the application
// thread that is concurrently starting. But there are complications when starting
// a thread this early. The java.lang.Thread class is not initialized and we may
// not execute any Java bytecodes yet. This is an internal thread, so we try to
// keep the bookkeeping minimal and use a logical ThreadIdentifier for JFR and
// monitor identity. The real thread object is created just after the main thread
// creates its Thread object, once the Thread class has been initialized.
void AOTThread::initialize() {
#if INCLUDE_CDS_JAVA_HEAP
EXCEPTION_MARK;
// Spin up a thread without a thread oop, because the java.lang classes
// have not yet been initialized, and hence we can't allocate the Thread
// object yet.
AOTThread* thread = new AOTThread(&aot_thread_entry);
_aot_thread = thread;
#if INCLUDE_JVMTI
// The line below hides JVMTI events from this thread (cf. should_hide_jvmti_events())
// This is important because this thread runs before JVMTI monitors are set up appropriately.
// Therefore, callbacks would not work as intended. JVMTI has no business peeking at how we
// materialize primordial objects from the AOT cache.
thread->toggle_is_disable_suspend();
#endif
JavaThread::vm_exit_on_osthread_failure(thread);
_started = true;
// Note that the Thread class is not initialized yet at this point. We
// can run a bit concurrently until the Thread class is initialized; then
// materialize_thread_object is called to inflate the thread object.
// The thread needs an identifier. This thread is fine with a temporary ID
// assignment; it will terminate soon anyway.
int64_t tid = ThreadIdentifier::next();
thread->set_monitor_owner_id(tid);
{
MutexLocker mu(THREAD, Threads_lock);
Threads::add(thread);
}
JFR_ONLY(Jfr::on_java_thread_start(THREAD, thread);)
os::start_thread(thread);
#endif
}
void AOTThread::materialize_thread_object() {
#if INCLUDE_CDS_JAVA_HEAP
if (!_started) {
// No thread object to materialize
return;
}
EXCEPTION_MARK;
HandleMark hm(THREAD);
Handle thread_oop = JavaThread::create_system_thread_object("AOTThread", CHECK);
java_lang_Thread::release_set_thread(thread_oop(), _aot_thread);
_aot_thread->set_threadOopHandles(thread_oop());
#endif
}
void AOTThread::aot_thread_entry(JavaThread* jt, TRAPS) {
#if INCLUDE_CDS_JAVA_HEAP
AOTStreamedHeapLoader::materialize_objects();
_aot_thread = nullptr; // AOT thread will get destroyed after this point
#endif
}


@ -0,0 +1,52 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_CDS_AOTTHREAD_HPP
#define SHARE_CDS_AOTTHREAD_HPP
#include "runtime/javaThread.hpp"
#include "utilities/macros.hpp"
// A JavaThread, hidden from external view, that materializes archived objects
class AOTThread : public JavaThread {
private:
static bool _started;
static AOTThread* _aot_thread;
static void aot_thread_entry(JavaThread* thread, TRAPS);
AOTThread(ThreadFunction entry_point) : JavaThread(entry_point) {}
public:
static void initialize();
// Hide this thread from external view.
virtual bool is_hidden_from_external_view() const { return true; }
static void materialize_thread_object();
static bool aot_thread_initialized() { return _started; }
bool is_aot_thread() const { return true; }
};
#endif // SHARE_CDS_AOTTHREAD_HPP


@ -28,7 +28,6 @@
#include "cds/aotMapLogger.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cppVtables.hpp"
@ -1175,11 +1174,13 @@ void ArchiveBuilder::print_stats() {
_alloc_stats.print_stats(int(_ro_region.used()), int(_rw_region.used()));
}
void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_info) {
void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) {
// Make sure NUM_CDS_REGIONS (exported in cds.h) agrees with
// AOTMetaspace::n_regions (internal to hotspot).
assert(NUM_CDS_REGIONS == AOTMetaspace::n_regions, "sanity");
ResourceMark rm;
write_region(mapinfo, AOTMetaspace::rw, &_rw_region, /*read_only=*/false,/*allow_exec=*/false);
write_region(mapinfo, AOTMetaspace::ro, &_ro_region, /*read_only=*/true, /*allow_exec=*/false);
write_region(mapinfo, AOTMetaspace::ac, &_ac_region, /*read_only=*/false,/*allow_exec=*/false);
@ -1188,14 +1189,19 @@ void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_i
ArchivePtrMarker::initialize_rw_ro_maps(&_rw_ptrmap, &_ro_ptrmap);
size_t bitmap_size_in_bytes;
char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::rw_ptrmap(), ArchivePtrMarker::ro_ptrmap(), heap_info,
char* bitmap = mapinfo->write_bitmap_region(ArchivePtrMarker::rw_ptrmap(),
ArchivePtrMarker::ro_ptrmap(),
mapped_heap_info,
streamed_heap_info,
bitmap_size_in_bytes);
if (heap_info->is_used()) {
_total_heap_region_size = mapinfo->write_heap_region(heap_info);
if (mapped_heap_info != nullptr && mapped_heap_info->is_used()) {
_total_heap_region_size = mapinfo->write_mapped_heap_region(mapped_heap_info);
} else if (streamed_heap_info != nullptr && streamed_heap_info->is_used()) {
_total_heap_region_size = mapinfo->write_streamed_heap_region(streamed_heap_info);
}
print_region_stats(mapinfo, heap_info);
print_region_stats(mapinfo, mapped_heap_info, streamed_heap_info);
mapinfo->set_requested_base((char*)AOTMetaspace::requested_base_address());
mapinfo->set_header_crc(mapinfo->compute_header_crc());
@ -1210,7 +1216,7 @@ void ArchiveBuilder::write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_i
}
if (log_is_enabled(Info, aot, map)) {
AOTMapLogger::dumptime_log(this, mapinfo, heap_info, bitmap, bitmap_size_in_bytes);
AOTMapLogger::dumptime_log(this, mapinfo, mapped_heap_info, streamed_heap_info, bitmap, bitmap_size_in_bytes);
}
CDS_JAVA_HEAP_ONLY(HeapShared::destroy_archived_object_cache());
FREE_C_HEAP_ARRAY(char, bitmap);
@ -1226,7 +1232,9 @@ void ArchiveBuilder::count_relocated_pointer(bool tagged, bool nulled) {
_relocated_ptr_info._num_nulled_ptrs += nulled ? 1 : 0;
}
void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo, ArchiveHeapInfo* heap_info) {
void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo,
ArchiveMappedHeapInfo* mapped_heap_info,
ArchiveStreamedHeapInfo* streamed_heap_info) {
// Print statistics of all the regions
const size_t bitmap_used = mapinfo->region_at(AOTMetaspace::bm)->used();
const size_t bitmap_reserved = mapinfo->region_at(AOTMetaspace::bm)->used_aligned();
@ -1244,22 +1252,22 @@ void ArchiveBuilder::print_region_stats(FileMapInfo *mapinfo, ArchiveHeapInfo* h
print_bitmap_region_stats(bitmap_used, total_reserved);
if (heap_info->is_used()) {
print_heap_region_stats(heap_info, total_reserved);
if (mapped_heap_info != nullptr && mapped_heap_info->is_used()) {
print_heap_region_stats(mapped_heap_info->buffer_start(), mapped_heap_info->buffer_byte_size(), total_reserved);
} else if (streamed_heap_info != nullptr && streamed_heap_info->is_used()) {
print_heap_region_stats(streamed_heap_info->buffer_start(), streamed_heap_info->buffer_byte_size(), total_reserved);
}
aot_log_debug(aot)("total : %9zu [100.0%% of total] out of %9zu bytes [%5.1f%% used]",
total_bytes, total_reserved, total_u_perc);
total_bytes, total_reserved, total_u_perc);
}
void ArchiveBuilder::print_bitmap_region_stats(size_t size, size_t total_size) {
aot_log_debug(aot)("bm space: %9zu [ %4.1f%% of total] out of %9zu bytes [100.0%% used]",
size, size/double(total_size)*100.0, size);
size, size/double(total_size)*100.0, size);
}
void ArchiveBuilder::print_heap_region_stats(ArchiveHeapInfo *info, size_t total_size) {
char* start = info->buffer_start();
size_t size = info->buffer_byte_size();
void ArchiveBuilder::print_heap_region_stats(char* start, size_t size, size_t total_size) {
char* top = start + size;
aot_log_debug(aot)("hp space: %9zu [ %4.1f%% of total] out of %9zu bytes [100.0%% used] at " INTPTR_FORMAT,
size, size/double(total_size)*100.0, size, p2i(start));


@ -39,7 +39,8 @@
#include "utilities/hashTable.hpp"
#include "utilities/resizableHashTable.hpp"
class ArchiveHeapInfo;
class ArchiveMappedHeapInfo;
class ArchiveStreamedHeapInfo;
class CHeapBitMap;
class FileMapInfo;
class Klass;
@ -245,9 +246,11 @@ private:
size_t _num_nulled_ptrs;
} _relocated_ptr_info;
void print_region_stats(FileMapInfo *map_info, ArchiveHeapInfo* heap_info);
void print_region_stats(FileMapInfo *map_info,
ArchiveMappedHeapInfo* mapped_heap_info,
ArchiveStreamedHeapInfo* streamed_heap_info);
void print_bitmap_region_stats(size_t size, size_t total_size);
void print_heap_region_stats(ArchiveHeapInfo* heap_info, size_t total_size);
void print_heap_region_stats(char* start, size_t size, size_t total_size);
// For global access.
static ArchiveBuilder* _current;
@ -434,7 +437,9 @@ public:
void make_klasses_shareable();
void make_training_data_shareable();
void relocate_to_requested();
void write_archive(FileMapInfo* mapinfo, ArchiveHeapInfo* heap_info);
void write_archive(FileMapInfo* mapinfo,
ArchiveMappedHeapInfo* mapped_heap_info,
ArchiveStreamedHeapInfo* streamed_heap_info);
void write_region(FileMapInfo* mapinfo, int region_idx, DumpRegion* dump_region,
bool read_only, bool allow_exec);
@ -502,6 +507,7 @@ public:
return (Symbol*)current()->get_buffered_addr((address)src_symbol);
}
static void log_as_hex(address base, address top, address requested_base, bool is_heap = false);
void print_stats();
void report_out_of_space(const char* name, size_t needed_bytes);


@ -1,468 +0,0 @@
/*
* Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "cds/aotMetaspace.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "sanitizers/ub.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/copy.hpp"
#if INCLUDE_CDS_JAVA_HEAP
bool ArchiveHeapLoader::_is_mapped = false;
bool ArchiveHeapLoader::_is_loaded = false;
bool ArchiveHeapLoader::_narrow_oop_base_initialized = false;
address ArchiveHeapLoader::_narrow_oop_base;
int ArchiveHeapLoader::_narrow_oop_shift;
// Support for loaded heap.
uintptr_t ArchiveHeapLoader::_loaded_heap_bottom = 0;
uintptr_t ArchiveHeapLoader::_loaded_heap_top = 0;
uintptr_t ArchiveHeapLoader::_dumptime_base = UINTPTR_MAX;
uintptr_t ArchiveHeapLoader::_dumptime_top = 0;
intx ArchiveHeapLoader::_runtime_offset = 0;
bool ArchiveHeapLoader::_loading_failed = false;
// Support for mapped heap.
uintptr_t ArchiveHeapLoader::_mapped_heap_bottom = 0;
bool ArchiveHeapLoader::_mapped_heap_relocation_initialized = false;
ptrdiff_t ArchiveHeapLoader::_mapped_heap_delta = 0;
// Every mapped region is offset by _mapped_heap_delta from its requested address.
// See FileMapInfo::heap_region_requested_address().
ATTRIBUTE_NO_UBSAN
void ArchiveHeapLoader::init_mapped_heap_info(address mapped_heap_bottom, ptrdiff_t delta, int dumptime_oop_shift) {
assert(!_mapped_heap_relocation_initialized, "only once");
if (!UseCompressedOops) {
assert(dumptime_oop_shift == 0, "sanity");
}
assert(can_map(), "sanity");
init_narrow_oop_decoding(CompressedOops::base() + delta, dumptime_oop_shift);
_mapped_heap_bottom = (intptr_t)mapped_heap_bottom;
_mapped_heap_delta = delta;
_mapped_heap_relocation_initialized = true;
}
void ArchiveHeapLoader::init_narrow_oop_decoding(address base, int shift) {
assert(!_narrow_oop_base_initialized, "only once");
_narrow_oop_base_initialized = true;
_narrow_oop_base = base;
_narrow_oop_shift = shift;
}
void ArchiveHeapLoader::fixup_region() {
FileMapInfo* mapinfo = FileMapInfo::current_info();
if (is_mapped()) {
mapinfo->fixup_mapped_heap_region();
} else if (_loading_failed) {
fill_failed_loaded_heap();
}
if (is_in_use()) {
if (!CDSConfig::is_using_full_module_graph()) {
// Need to remove all the archived java.lang.Module objects from HeapShared::roots().
ClassLoaderDataShared::clear_archived_oops();
}
}
}
// ------------------ Support for Region MAPPING -----------------------------------------
// Patch all the embedded oop pointers inside an archived heap region,
// to be consistent with the runtime oop encoding.
class PatchCompressedEmbeddedPointers: public BitMapClosure {
narrowOop* _start;
public:
PatchCompressedEmbeddedPointers(narrowOop* start) : _start(start) {}
bool do_bit(size_t offset) {
narrowOop* p = _start + offset;
narrowOop v = *p;
assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
oop o = ArchiveHeapLoader::decode_from_mapped_archive(v);
RawAccess<IS_NOT_NULL>::oop_store(p, o);
return true;
}
};
class PatchCompressedEmbeddedPointersQuick: public BitMapClosure {
narrowOop* _start;
uint32_t _delta;
public:
PatchCompressedEmbeddedPointersQuick(narrowOop* start, uint32_t delta) : _start(start), _delta(delta) {}
bool do_bit(size_t offset) {
narrowOop* p = _start + offset;
narrowOop v = *p;
assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
narrowOop new_v = CompressedOops::narrow_oop_cast(CompressedOops::narrow_oop_value(v) + _delta);
assert(!CompressedOops::is_null(new_v), "should never relocate to narrowOop(0)");
#ifdef ASSERT
oop o1 = ArchiveHeapLoader::decode_from_mapped_archive(v);
oop o2 = CompressedOops::decode_not_null(new_v);
assert(o1 == o2, "quick delta must work");
#endif
RawAccess<IS_NOT_NULL>::oop_store(p, new_v);
return true;
}
};
class PatchUncompressedEmbeddedPointers: public BitMapClosure {
oop* _start;
intptr_t _delta;
public:
PatchUncompressedEmbeddedPointers(oop* start, intx runtime_offset) :
_start(start),
_delta(runtime_offset) {}
PatchUncompressedEmbeddedPointers(oop* start) :
_start(start),
_delta(ArchiveHeapLoader::mapped_heap_delta()) {}
bool do_bit(size_t offset) {
oop* p = _start + offset;
intptr_t dumptime_oop = (intptr_t)((void*)*p);
assert(dumptime_oop != 0, "null oops should have been filtered out at dump time");
intptr_t runtime_oop = dumptime_oop + _delta;
RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(runtime_oop));
return true;
}
};
void ArchiveHeapLoader::patch_compressed_embedded_pointers(BitMapView bm,
FileMapInfo* info,
MemRegion region) {
narrowOop dt_encoded_bottom = info->encoded_heap_region_dumptime_address();
narrowOop rt_encoded_bottom = CompressedOops::encode_not_null(cast_to_oop(region.start()));
log_info(aot)("patching heap embedded pointers: narrowOop 0x%8x -> 0x%8x",
(uint)dt_encoded_bottom, (uint)rt_encoded_bottom);
// Optimization: if dumptime shift is the same as runtime shift, we can perform a
// quick conversion from "dumptime narrowOop" -> "runtime narrowOop".
narrowOop* patching_start = (narrowOop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos();
if (_narrow_oop_shift == CompressedOops::shift()) {
uint32_t quick_delta = (uint32_t)rt_encoded_bottom - (uint32_t)dt_encoded_bottom;
log_info(aot)("heap data relocation quick delta = 0x%x", quick_delta);
if (quick_delta == 0) {
log_info(aot)("heap data relocation unnecessary, quick_delta = 0");
} else {
PatchCompressedEmbeddedPointersQuick patcher(patching_start, quick_delta);
bm.iterate(&patcher);
}
} else {
log_info(aot)("heap data quick relocation not possible");
PatchCompressedEmbeddedPointers patcher(patching_start);
bm.iterate(&patcher);
}
}
// Patch all the non-null pointers that are embedded in the archived heap objects
// in this (mapped) region
void ArchiveHeapLoader::patch_embedded_pointers(FileMapInfo* info,
MemRegion region, address oopmap,
size_t oopmap_size_in_bits) {
BitMapView bm((BitMap::bm_word_t*)oopmap, oopmap_size_in_bits);
if (UseCompressedOops) {
patch_compressed_embedded_pointers(bm, info, region);
} else {
PatchUncompressedEmbeddedPointers patcher((oop*)region.start() + FileMapInfo::current_info()->heap_oopmap_start_pos());
bm.iterate(&patcher);
}
}
// ------------------ Support for Region LOADING -----------------------------------------
// The CDS archive remembers each heap object by its address at dump time, but
// the heap object may be loaded at a different address at run time. This structure is used
// to translate the dump time addresses for all objects in FileMapInfo::space_at(region_index)
// to their runtime addresses.
struct LoadedArchiveHeapRegion {
int _region_index; // index for FileMapInfo::space_at(index)
size_t _region_size; // number of bytes in this region
uintptr_t _dumptime_base; // The dump-time (decoded) address of the first object in this region
intx _runtime_offset; // If an object's dump time address P is within this region, its
// runtime address is P + _runtime_offset
uintptr_t top() {
return _dumptime_base + _region_size;
}
};
void ArchiveHeapLoader::init_loaded_heap_relocation(LoadedArchiveHeapRegion* loaded_region) {
_dumptime_base = loaded_region->_dumptime_base;
_dumptime_top = loaded_region->top();
_runtime_offset = loaded_region->_runtime_offset;
}
bool ArchiveHeapLoader::can_load() {
return Universe::heap()->can_load_archived_objects();
}
class ArchiveHeapLoader::PatchLoadedRegionPointers: public BitMapClosure {
narrowOop* _start;
intx _offset;
uintptr_t _base;
uintptr_t _top;
public:
PatchLoadedRegionPointers(narrowOop* start, LoadedArchiveHeapRegion* loaded_region)
: _start(start),
_offset(loaded_region->_runtime_offset),
_base(loaded_region->_dumptime_base),
_top(loaded_region->top()) {}
bool do_bit(size_t offset) {
assert(UseCompressedOops, "PatchLoadedRegionPointers for uncompressed oops is unimplemented");
narrowOop* p = _start + offset;
narrowOop v = *p;
assert(!CompressedOops::is_null(v), "null oops should have been filtered out at dump time");
uintptr_t o = cast_from_oop<uintptr_t>(ArchiveHeapLoader::decode_from_archive(v));
assert(_base <= o && o < _top, "must be");
o += _offset;
ArchiveHeapLoader::assert_in_loaded_heap(o);
RawAccess<IS_NOT_NULL>::oop_store(p, cast_to_oop(o));
return true;
}
};
bool ArchiveHeapLoader::init_loaded_region(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
MemRegion& archive_space) {
size_t total_bytes = 0;
FileMapRegion* r = mapinfo->region_at(AOTMetaspace::hp);
r->assert_is_heap_region();
if (r->used() == 0) {
return false;
}
assert(is_aligned(r->used(), HeapWordSize), "must be");
total_bytes += r->used();
loaded_region->_region_index = AOTMetaspace::hp;
loaded_region->_region_size = r->used();
loaded_region->_dumptime_base = (uintptr_t)mapinfo->heap_region_dumptime_address();
assert(is_aligned(total_bytes, HeapWordSize), "must be");
size_t word_size = total_bytes / HeapWordSize;
HeapWord* buffer = Universe::heap()->allocate_loaded_archive_space(word_size);
if (buffer == nullptr) {
return false;
}
archive_space = MemRegion(buffer, word_size);
_loaded_heap_bottom = (uintptr_t)archive_space.start();
_loaded_heap_top = _loaded_heap_bottom + total_bytes;
loaded_region->_runtime_offset = _loaded_heap_bottom - loaded_region->_dumptime_base;
return true;
}
bool ArchiveHeapLoader::load_heap_region_impl(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_region,
uintptr_t load_address) {
uintptr_t bitmap_base = (uintptr_t)mapinfo->map_bitmap_region();
if (bitmap_base == 0) {
_loading_failed = true;
return false; // OOM or CRC error
}
FileMapRegion* r = mapinfo->region_at(loaded_region->_region_index);
if (!mapinfo->read_region(loaded_region->_region_index, (char*)load_address, r->used(), /* do_commit = */ false)) {
// There's no easy way to free the buffer, so we will fill it with zero later
// in fill_failed_loaded_heap(), and it will eventually be GC'ed.
log_warning(aot)("Loading of heap region %d has failed. Archived objects are disabled", loaded_region->_region_index);
_loading_failed = true;
return false;
}
assert(r->mapped_base() == (char*)load_address, "sanity");
log_info(aot)("Loaded heap region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT
" size %6zu delta %zd",
loaded_region->_region_index, load_address, load_address + loaded_region->_region_size,
loaded_region->_region_size, loaded_region->_runtime_offset);
uintptr_t oopmap = bitmap_base + r->oopmap_offset();
BitMapView bm((BitMap::bm_word_t*)oopmap, r->oopmap_size_in_bits());
if (UseCompressedOops) {
PatchLoadedRegionPointers patcher((narrowOop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region);
bm.iterate(&patcher);
} else {
PatchUncompressedEmbeddedPointers patcher((oop*)load_address + FileMapInfo::current_info()->heap_oopmap_start_pos(), loaded_region->_runtime_offset);
bm.iterate(&patcher);
}
return true;
}
bool ArchiveHeapLoader::load_heap_region(FileMapInfo* mapinfo) {
assert(can_load(), "loaded heap must be supported");
init_narrow_oop_decoding(mapinfo->narrow_oop_base(), mapinfo->narrow_oop_shift());
LoadedArchiveHeapRegion loaded_region;
memset(&loaded_region, 0, sizeof(loaded_region));
MemRegion archive_space;
if (!init_loaded_region(mapinfo, &loaded_region, archive_space)) {
return false;
}
if (!load_heap_region_impl(mapinfo, &loaded_region, (uintptr_t)archive_space.start())) {
assert(_loading_failed, "must be");
return false;
}
init_loaded_heap_relocation(&loaded_region);
_is_loaded = true;
return true;
}
class VerifyLoadedHeapEmbeddedPointers: public BasicOopIterateClosure {
HashTable<uintptr_t, bool>* _table;
public:
VerifyLoadedHeapEmbeddedPointers(HashTable<uintptr_t, bool>* table) : _table(table) {}
virtual void do_oop(narrowOop* p) {
// This should be called before the loaded region is modified, so all the embedded pointers
// must be null, or must point to a valid object in the loaded region.
narrowOop v = *p;
if (!CompressedOops::is_null(v)) {
oop o = CompressedOops::decode_not_null(v);
uintptr_t u = cast_from_oop<uintptr_t>(o);
ArchiveHeapLoader::assert_in_loaded_heap(u);
guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
}
}
virtual void do_oop(oop* p) {
oop v = *p;
if (v != nullptr) {
uintptr_t u = cast_from_oop<uintptr_t>(v);
ArchiveHeapLoader::assert_in_loaded_heap(u);
guarantee(_table->contains(u), "must point to beginning of object in loaded archived region");
}
}
};
void ArchiveHeapLoader::finish_initialization() {
if (is_loaded()) {
// These operations are needed only when the heap is loaded (not mapped).
finish_loaded_heap();
if (VerifyArchivedFields > 0) {
verify_loaded_heap();
}
}
if (is_in_use()) {
patch_native_pointers();
intptr_t bottom = is_loaded() ? _loaded_heap_bottom : _mapped_heap_bottom;
// The heap roots are stored in one or more segments that are laid out consecutively.
// The size of each segment (except for the last one) is max_size_in_{elems,bytes}.
HeapRootSegments segments = FileMapInfo::current_info()->heap_root_segments();
HeapShared::init_root_segment_sizes(segments.max_size_in_elems());
intptr_t first_segment_addr = bottom + segments.base_offset();
for (size_t c = 0; c < segments.count(); c++) {
oop segment_oop = cast_to_oop(first_segment_addr + (c * segments.max_size_in_bytes()));
assert(segment_oop->is_objArray(), "Must be");
HeapShared::add_root_segment((objArrayOop)segment_oop);
}
}
}
void ArchiveHeapLoader::finish_loaded_heap() {
HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
HeapWord* top = (HeapWord*)_loaded_heap_top;
MemRegion archive_space = MemRegion(bottom, top);
Universe::heap()->complete_loaded_archive_space(archive_space);
}
void ArchiveHeapLoader::verify_loaded_heap() {
log_info(aot, heap)("Verify all oops and pointers in loaded heap");
ResourceMark rm;
HashTable<uintptr_t, bool> table;
VerifyLoadedHeapEmbeddedPointers verifier(&table);
HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
HeapWord* top = (HeapWord*)_loaded_heap_top;
for (HeapWord* p = bottom; p < top; ) {
oop o = cast_to_oop(p);
table.put(cast_from_oop<uintptr_t>(o), true);
p += o->size();
}
for (HeapWord* p = bottom; p < top; ) {
oop o = cast_to_oop(p);
o->oop_iterate(&verifier);
p += o->size();
}
}
void ArchiveHeapLoader::fill_failed_loaded_heap() {
assert(_loading_failed, "must be");
if (_loaded_heap_bottom != 0) {
assert(_loaded_heap_top != 0, "must be");
HeapWord* bottom = (HeapWord*)_loaded_heap_bottom;
HeapWord* top = (HeapWord*)_loaded_heap_top;
Universe::heap()->fill_with_objects(bottom, top - bottom);
}
}
class PatchNativePointers: public BitMapClosure {
Metadata** _start;
public:
PatchNativePointers(Metadata** start) : _start(start) {}
bool do_bit(size_t offset) {
Metadata** p = _start + offset;
*p = (Metadata*)(address(*p) + AOTMetaspace::relocation_delta());
return true;
}
};
void ArchiveHeapLoader::patch_native_pointers() {
if (AOTMetaspace::relocation_delta() == 0) {
return;
}
FileMapRegion* r = FileMapInfo::current_info()->region_at(AOTMetaspace::hp);
if (r->mapped_base() != nullptr && r->has_ptrmap()) {
log_info(aot, heap)("Patching native pointers in heap region");
BitMapView bm = FileMapInfo::current_info()->ptrmap_view(AOTMetaspace::hp);
PatchNativePointers patcher((Metadata**)r->mapped_base() + FileMapInfo::current_info()->heap_ptrmap_start_pos());
bm.iterate(&patcher);
}
}
#endif // INCLUDE_CDS_JAVA_HEAP


@ -25,7 +25,6 @@
#include "cds/aotLogging.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/classListParser.hpp"


@ -24,11 +24,10 @@
#include "cds/aotLogging.hpp"
#include "cds/aotMapLogger.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/classListWriter.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/moduleEntry.hpp"
#include "code/aotCodeCache.hpp"
@ -893,11 +892,6 @@ static const char* check_options_incompatible_with_dumping_heap() {
return "UseCompressedClassPointers must be true";
}
// Almost all GCs support heap region dump, except ZGC (so far).
if (UseZGC) {
return "ZGC is not supported";
}
return nullptr;
#else
return "JVM not configured for writing Java heap objects";
@ -969,7 +963,7 @@ bool CDSConfig::is_dumping_heap() {
}
bool CDSConfig::is_loading_heap() {
return ArchiveHeapLoader::is_in_use();
return HeapShared::is_archived_heap_in_use();
}
bool CDSConfig::is_using_full_module_graph() {
@ -981,7 +975,7 @@ bool CDSConfig::is_using_full_module_graph() {
return false;
}
if (is_using_archive() && ArchiveHeapLoader::can_use()) {
if (is_using_archive() && HeapShared::can_use_archived_heap()) {
// Classes used by the archived full module graph are loaded in JVMTI early phase.
assert(!(JvmtiExport::should_post_class_file_load_hook() && JvmtiExport::has_early_class_hook_env()),
"CDS should be disabled if early class hooks are enabled");


@ -22,9 +22,8 @@
*
*/
#include "cds/archiveHeapLoader.hpp"
#include "cds/cdsEnumKlass.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/vmClasses.hpp"
#include "memory/resourceArea.hpp"
@ -109,7 +108,7 @@ void CDSEnumKlass::archive_static_field(int level, KlassSubGraphInfo* subgraph_i
}
bool CDSEnumKlass::initialize_enum_klass(InstanceKlass* k, TRAPS) {
if (!ArchiveHeapLoader::is_in_use()) {
if (!HeapShared::is_archived_heap_in_use()) {
return false;
}
@ -121,14 +120,14 @@ bool CDSEnumKlass::initialize_enum_klass(InstanceKlass* k, TRAPS) {
log_info(aot, heap)("Initializing Enum class: %s", k->external_name());
}
oop mirror = k->java_mirror();
int i = 0;
for (JavaFieldStream fs(k); !fs.done(); fs.next()) {
if (fs.access_flags().is_static()) {
int root_index = info->enum_klass_static_field_root_index_at(i++);
fieldDescriptor& fd = fs.field_descriptor();
assert(fd.field_type() == T_OBJECT || fd.field_type() == T_ARRAY, "must be");
mirror->obj_field_put(fd.offset(), HeapShared::get_root(root_index, /*clear=*/true));
oop root_object = HeapShared::get_root(root_index, /*clear=*/true);
k->java_mirror()->obj_field_put(fd.offset(), root_object);
}
}
return true;


@ -28,6 +28,7 @@
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
@ -386,7 +387,7 @@ inline bool CDSHeapVerifier::do_entry(OopHandle& orig_obj_handle, HeapShared::Ca
if (java_lang_String::is_instance(orig_obj) && HeapShared::is_dumped_interned_string(orig_obj)) {
// It's quite often for static fields to have interned strings. These are most likely not
// problematic (and are hard to filter). So we will ignore them.
return true; /* keep on iterating */
return true;
}
StaticFieldInfo* info = _table.get(orig_obj);


@ -76,6 +76,12 @@
"Dump the names all loaded classes, that could be stored into " \
"the CDS archive, in the specified file") \
\
product(bool, AOTStreamableObjects, false, DIAGNOSTIC, \
"Archive the Java heap in a generic streamable object format") \
\
product(bool, AOTEagerlyLoadObjects, false, DIAGNOSTIC, \
"Load streamable objects synchronously without concurrency") \
\
product(ccstr, SharedClassListFile, nullptr, \
"Override the default CDS class list") \
\


@ -28,11 +28,11 @@
#include "cds/aotLogging.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.inline.hpp"
#include "cds/cds_globals.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/dynamicArchive.hpp"
#include "cds/heapShared.hpp"
#include "cds/lambdaFormInvokers.hpp"
#include "cds/lambdaProxyClassDictionary.hpp"
#include "cds/regeneratedClasses.hpp"
@ -353,8 +353,7 @@ void DynamicArchiveBuilder::write_archive(char* serialized_data, AOTClassLocatio
assert(dynamic_info != nullptr, "Sanity");
dynamic_info->open_as_output();
ArchiveHeapInfo no_heap_for_dynamic_dump;
ArchiveBuilder::write_archive(dynamic_info, &no_heap_for_dynamic_dump);
ArchiveBuilder::write_archive(dynamic_info, nullptr, nullptr);
address base = _requested_dynamic_archive_bottom;
address top = _requested_dynamic_archive_top;


@ -24,16 +24,16 @@
#include "cds/aotClassLocation.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.inline.hpp"
#include "cds/cds_globals.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/dynamicArchive.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/classFileStream.hpp"
#include "classfile/classLoader.hpp"
@ -217,6 +217,7 @@ void FileMapHeader::populate(FileMapInfo *info, size_t core_region_alignment,
_compact_strings = CompactStrings;
_compact_headers = UseCompactObjectHeaders;
if (CDSConfig::is_dumping_heap()) {
_object_streaming_mode = HeapShared::is_writing_streaming_mode();
_narrow_oop_mode = CompressedOops::mode();
_narrow_oop_base = CompressedOops::base();
_narrow_oop_shift = CompressedOops::shift();
@ -283,40 +284,51 @@ void FileMapHeader::print(outputStream* st) {
}
st->print_cr("============ end regions ======== ");
st->print_cr("- core_region_alignment: %zu", _core_region_alignment);
st->print_cr("- obj_alignment: %d", _obj_alignment);
st->print_cr("- narrow_oop_base: " INTPTR_FORMAT, p2i(_narrow_oop_base));
st->print_cr("- narrow_oop_shift %d", _narrow_oop_shift);
st->print_cr("- compact_strings: %d", _compact_strings);
st->print_cr("- compact_headers: %d", _compact_headers);
st->print_cr("- max_heap_size: %zu", _max_heap_size);
st->print_cr("- narrow_oop_mode: %d", _narrow_oop_mode);
st->print_cr("- compressed_oops: %d", _compressed_oops);
st->print_cr("- compressed_class_ptrs: %d", _compressed_class_ptrs);
st->print_cr("- narrow_klass_pointer_bits: %d", _narrow_klass_pointer_bits);
st->print_cr("- narrow_klass_shift: %d", _narrow_klass_shift);
st->print_cr("- cloned_vtables_offset: 0x%zx", _cloned_vtables_offset);
st->print_cr("- early_serialized_data_offset: 0x%zx", _early_serialized_data_offset);
st->print_cr("- serialized_data_offset: 0x%zx", _serialized_data_offset);
st->print_cr("- jvm_ident: %s", _jvm_ident);
st->print_cr("- class_location_config_offset: 0x%zx", _class_location_config_offset);
st->print_cr("- verify_local: %d", _verify_local);
st->print_cr("- verify_remote: %d", _verify_remote);
st->print_cr("- has_platform_or_app_classes: %d", _has_platform_or_app_classes);
st->print_cr("- requested_base_address: " INTPTR_FORMAT, p2i(_requested_base_address));
st->print_cr("- mapped_base_address: " INTPTR_FORMAT, p2i(_mapped_base_address));
st->print_cr("- heap_root_segments.roots_count: %d" , _heap_root_segments.roots_count());
st->print_cr("- heap_root_segments.base_offset: 0x%zx", _heap_root_segments.base_offset());
st->print_cr("- heap_root_segments.count: %zu", _heap_root_segments.count());
st->print_cr("- heap_root_segments.max_size_elems: %d", _heap_root_segments.max_size_in_elems());
st->print_cr("- heap_root_segments.max_size_bytes: %zu", _heap_root_segments.max_size_in_bytes());
st->print_cr("- _heap_oopmap_start_pos: %zu", _heap_oopmap_start_pos);
st->print_cr("- _heap_ptrmap_start_pos: %zu", _heap_ptrmap_start_pos);
st->print_cr("- _rw_ptrmap_start_pos: %zu", _rw_ptrmap_start_pos);
st->print_cr("- _ro_ptrmap_start_pos: %zu", _ro_ptrmap_start_pos);
st->print_cr("- use_optimized_module_handling: %d", _use_optimized_module_handling);
st->print_cr("- has_full_module_graph %d", _has_full_module_graph);
st->print_cr("- has_aot_linked_classes %d", _has_aot_linked_classes);
st->print_cr("- core_region_alignment: %zu", _core_region_alignment);
st->print_cr("- obj_alignment: %d", _obj_alignment);
st->print_cr("- narrow_oop_base: " INTPTR_FORMAT, p2i(_narrow_oop_base));
st->print_cr("- narrow_oop_shift %d", _narrow_oop_shift);
st->print_cr("- compact_strings: %d", _compact_strings);
st->print_cr("- compact_headers: %d", _compact_headers);
st->print_cr("- max_heap_size: %zu", _max_heap_size);
st->print_cr("- narrow_oop_mode: %d", _narrow_oop_mode);
st->print_cr("- compressed_oops: %d", _compressed_oops);
st->print_cr("- compressed_class_ptrs: %d", _compressed_class_ptrs);
st->print_cr("- narrow_klass_pointer_bits: %d", _narrow_klass_pointer_bits);
st->print_cr("- narrow_klass_shift: %d", _narrow_klass_shift);
st->print_cr("- cloned_vtables_offset: 0x%zx", _cloned_vtables_offset);
st->print_cr("- early_serialized_data_offset: 0x%zx", _early_serialized_data_offset);
st->print_cr("- serialized_data_offset: 0x%zx", _serialized_data_offset);
st->print_cr("- jvm_ident: %s", _jvm_ident);
st->print_cr("- class_location_config_offset: 0x%zx", _class_location_config_offset);
st->print_cr("- verify_local: %d", _verify_local);
st->print_cr("- verify_remote: %d", _verify_remote);
st->print_cr("- has_platform_or_app_classes: %d", _has_platform_or_app_classes);
st->print_cr("- requested_base_address: " INTPTR_FORMAT, p2i(_requested_base_address));
st->print_cr("- mapped_base_address: " INTPTR_FORMAT, p2i(_mapped_base_address));
st->print_cr("- object_streaming_mode: %d", _object_streaming_mode);
st->print_cr("- mapped_heap_header");
st->print_cr(" - root_segments");
st->print_cr(" - roots_count: %d", _mapped_heap_header.root_segments().roots_count());
st->print_cr(" - base_offset: 0x%zx", _mapped_heap_header.root_segments().base_offset());
st->print_cr(" - count: %zu", _mapped_heap_header.root_segments().count());
st->print_cr(" - max_size_elems: %d", _mapped_heap_header.root_segments().max_size_in_elems());
st->print_cr(" - max_size_bytes: %zu", _mapped_heap_header.root_segments().max_size_in_bytes());
st->print_cr(" - oopmap_start_pos: %zu", _mapped_heap_header.oopmap_start_pos());
st->print_cr(" - oopmap_ptrmap_pos: %zu", _mapped_heap_header.ptrmap_start_pos());
st->print_cr("- streamed_heap_header");
st->print_cr(" - forwarding_offset: %zu", _streamed_heap_header.forwarding_offset());
st->print_cr(" - roots_offset: %zu", _streamed_heap_header.roots_offset());
st->print_cr(" - num_roots: %zu", _streamed_heap_header.num_roots());
st->print_cr(" - root_highest_object_index_table_offset: %zu", _streamed_heap_header.root_highest_object_index_table_offset());
st->print_cr(" - num_archived_objects: %zu", _streamed_heap_header.num_archived_objects());
st->print_cr("- _rw_ptrmap_start_pos: %zu", _rw_ptrmap_start_pos);
st->print_cr("- _ro_ptrmap_start_pos: %zu", _ro_ptrmap_start_pos);
st->print_cr("- use_optimized_module_handling: %d", _use_optimized_module_handling);
st->print_cr("- has_full_module_graph %d", _has_full_module_graph);
st->print_cr("- has_aot_linked_classes %d", _has_aot_linked_classes);
}
bool FileMapInfo::validate_class_location() {
@ -896,12 +908,14 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
assert(CDSConfig::is_dumping_heap(), "sanity");
#if INCLUDE_CDS_JAVA_HEAP
assert(!CDSConfig::is_dumping_dynamic_archive(), "must be");
requested_base = (char*)ArchiveHeapWriter::requested_address();
if (UseCompressedOops) {
mapping_offset = (size_t)((address)requested_base - CompressedOops::base());
assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be");
if (HeapShared::is_writing_mapping_mode()) {
requested_base = (char*)AOTMappedHeapWriter::requested_address();
if (UseCompressedOops) {
mapping_offset = (size_t)((address)requested_base - CompressedOops::base());
assert((mapping_offset >> CompressedOops::shift()) << CompressedOops::shift() == mapping_offset, "must be");
}
} else {
mapping_offset = 0; // not used in streaming mode
requested_base = nullptr;
}
#endif // INCLUDE_CDS_JAVA_HEAP
} else {
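// Illustrative standalone sketch of the mapping_offset computation in the hunk
// above; the encoding base and shift are assumed example values, not taken from
// a real VM. The alignment assert holds exactly when the offset's low `shift`
// bits are zero, so it survives a round trip through the narrow-oop shift.
#include <cassert>
#include <cstddef>
#include <cstdint>

int main() {
  const uintptr_t coops_base  = 0x0000000700000000ULL; // assumed example encoding base
  const int       coops_shift = 3;                     // assumed example shift
  uintptr_t requested_base = coops_base + 0x20000000ULL;
  size_t mapping_offset = (size_t)(requested_base - coops_base);
  // Same invariant as the assert above.
  assert(((mapping_offset >> coops_shift) << coops_shift) == mapping_offset);
  return 0;
}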
@ -954,7 +968,10 @@ size_t FileMapInfo::remove_bitmap_zeros(CHeapBitMap* map) {
return first_set;
}
char* FileMapInfo::write_bitmap_region(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap, ArchiveHeapInfo* heap_info,
char* FileMapInfo::write_bitmap_region(CHeapBitMap* rw_ptrmap,
CHeapBitMap* ro_ptrmap,
ArchiveMappedHeapInfo* mapped_heap_info,
ArchiveStreamedHeapInfo* streamed_heap_info,
size_t &size_in_bytes) {
size_t removed_rw_leading_zeros = remove_bitmap_zeros(rw_ptrmap);
size_t removed_ro_leading_zeros = remove_bitmap_zeros(ro_ptrmap);
@ -962,22 +979,27 @@ char* FileMapInfo::write_bitmap_region(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_p
header()->set_ro_ptrmap_start_pos(removed_ro_leading_zeros);
size_in_bytes = rw_ptrmap->size_in_bytes() + ro_ptrmap->size_in_bytes();
if (heap_info->is_used()) {
if (mapped_heap_info != nullptr && mapped_heap_info->is_used()) {
// Remove leading and trailing zeros
size_t removed_oop_leading_zeros = remove_bitmap_zeros(heap_info->oopmap());
size_t removed_ptr_leading_zeros = remove_bitmap_zeros(heap_info->ptrmap());
header()->set_heap_oopmap_start_pos(removed_oop_leading_zeros);
header()->set_heap_ptrmap_start_pos(removed_ptr_leading_zeros);
assert(HeapShared::is_writing_mapping_mode(), "unexpected dumping mode");
size_t removed_oop_leading_zeros = remove_bitmap_zeros(mapped_heap_info->oopmap());
size_t removed_ptr_leading_zeros = remove_bitmap_zeros(mapped_heap_info->ptrmap());
mapped_heap_info->set_oopmap_start_pos(removed_oop_leading_zeros);
mapped_heap_info->set_ptrmap_start_pos(removed_ptr_leading_zeros);
size_in_bytes += heap_info->oopmap()->size_in_bytes();
size_in_bytes += heap_info->ptrmap()->size_in_bytes();
size_in_bytes += mapped_heap_info->oopmap()->size_in_bytes();
size_in_bytes += mapped_heap_info->ptrmap()->size_in_bytes();
} else if (streamed_heap_info != nullptr && streamed_heap_info->is_used()) {
assert(HeapShared::is_writing_streaming_mode(), "unexpected dumping mode");
size_in_bytes += streamed_heap_info->oopmap()->size_in_bytes();
}
// The bitmap region contains up to 4 parts:
// rw_ptrmap: metaspace pointers inside the read-write region
// ro_ptrmap: metaspace pointers inside the read-only region
// heap_info->oopmap(): Java oop pointers in the heap region
// heap_info->ptrmap(): metaspace pointers in the heap region
// rw_ptrmap: metaspace pointers inside the read-write region
// ro_ptrmap: metaspace pointers inside the read-only region
// *_heap_info->oopmap(): Java oop pointers in the heap region
// mapped_heap_info->ptrmap(): metaspace pointers in the heap region
char* buffer = NEW_C_HEAP_ARRAY(char, size_in_bytes, mtClassShared);
size_t written = 0;
@ -987,28 +1009,45 @@ char* FileMapInfo::write_bitmap_region(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_p
region_at(AOTMetaspace::ro)->init_ptrmap(written, ro_ptrmap->size());
written = write_bitmap(ro_ptrmap, buffer, written);
if (heap_info->is_used()) {
if (mapped_heap_info != nullptr && mapped_heap_info->is_used()) {
assert(HeapShared::is_writing_mapping_mode(), "unexpected dumping mode");
FileMapRegion* r = region_at(AOTMetaspace::hp);
r->init_oopmap(written, heap_info->oopmap()->size());
written = write_bitmap(heap_info->oopmap(), buffer, written);
r->init_oopmap(written, mapped_heap_info->oopmap()->size());
written = write_bitmap(mapped_heap_info->oopmap(), buffer, written);
r->init_ptrmap(written, heap_info->ptrmap()->size());
written = write_bitmap(heap_info->ptrmap(), buffer, written);
r->init_ptrmap(written, mapped_heap_info->ptrmap()->size());
written = write_bitmap(mapped_heap_info->ptrmap(), buffer, written);
} else if (streamed_heap_info != nullptr && streamed_heap_info->is_used()) {
assert(HeapShared::is_writing_streaming_mode(), "unexpected dumping mode");
FileMapRegion* r = region_at(AOTMetaspace::hp);
r->init_oopmap(written, streamed_heap_info->oopmap()->size());
written = write_bitmap(streamed_heap_info->oopmap(), buffer, written);
}
write_region(AOTMetaspace::bm, (char*)buffer, size_in_bytes, /*read_only=*/true, /*allow_exec=*/false);
return buffer;
}
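// Minimal sketch of how the bitmap region is laid out as the (up to) four parts
// described in the comment above: each bitmap is appended at the current write
// cursor and its offset recorded. Sizes here are assumed example values only.
#include <cstddef>
#include <cstdio>

int main() {
  size_t rw_ptrmap_bytes = 128, ro_ptrmap_bytes = 256;    // always present
  size_t heap_oopmap_bytes = 512, heap_ptrmap_bytes = 64; // mapping mode only
  size_t written = 0;
  size_t rw_off     = written; written += rw_ptrmap_bytes;
  size_t ro_off     = written; written += ro_ptrmap_bytes;
  size_t oopmap_off = written; written += heap_oopmap_bytes;
  size_t ptrmap_off = written; written += heap_ptrmap_bytes;
  printf("rw@%zu ro@%zu oopmap@%zu ptrmap@%zu total=%zu\n",
         rw_off, ro_off, oopmap_off, ptrmap_off, written);
  return 0;
}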
size_t FileMapInfo::write_heap_region(ArchiveHeapInfo* heap_info) {
#if INCLUDE_CDS_JAVA_HEAP
size_t FileMapInfo::write_mapped_heap_region(ArchiveMappedHeapInfo* heap_info) {
char* buffer_start = heap_info->buffer_start();
size_t buffer_size = heap_info->buffer_byte_size();
write_region(AOTMetaspace::hp, buffer_start, buffer_size, false, false);
header()->set_heap_root_segments(heap_info->heap_root_segments());
header()->set_mapped_heap_header(heap_info->create_header());
return buffer_size;
}
size_t FileMapInfo::write_streamed_heap_region(ArchiveStreamedHeapInfo* heap_info) {
char* buffer_start = heap_info->buffer_start();
size_t buffer_size = heap_info->buffer_byte_size();
write_region(AOTMetaspace::hp, buffer_start, buffer_size, true, false);
header()->set_streamed_heap_header(heap_info->create_header());
return buffer_size;
}
#endif // INCLUDE_CDS_JAVA_HEAP
// Dump bytes to file -- at the current file position.
void FileMapInfo::write_bytes(const void* buffer, size_t nbytes) {
@ -1076,7 +1115,7 @@ void FileMapInfo::close() {
* Same as os::map_memory() but also pretouches if AlwaysPreTouch is enabled.
*/
static char* map_memory(int fd, const char* file_name, size_t file_offset,
char *addr, size_t bytes, bool read_only,
char* addr, size_t bytes, bool read_only,
bool allow_exec, MemTag mem_tag) {
char* mem = os::map_memory(fd, file_name, file_offset, addr, bytes,
mem_tag, AlwaysPreTouch ? false : read_only,
@ -1087,6 +1126,17 @@ static char* map_memory(int fd, const char* file_name, size_t file_offset,
return mem;
}
char* FileMapInfo::map_heap_region(FileMapRegion* r, char* addr, size_t bytes) {
return ::map_memory(_fd,
_full_path,
r->file_offset(),
addr,
bytes,
r->read_only(),
r->allow_exec(),
mtJavaHeap);
}
// JVM/TI RedefineClasses() support:
// Remap the shared readonly space to shared readwrite, private.
bool FileMapInfo::remap_shared_readonly_as_readwrite() {
@ -1254,35 +1304,40 @@ MapArchiveResult FileMapInfo::map_region(int i, intx addr_delta, char* mapped_ba
}
// The return value is the location of the archive relocation bitmap.
char* FileMapInfo::map_bitmap_region() {
FileMapRegion* r = region_at(AOTMetaspace::bm);
char* FileMapInfo::map_auxiliary_region(int region_index, bool read_only) {
FileMapRegion* r = region_at(region_index);
if (r->mapped_base() != nullptr) {
return r->mapped_base();
}
bool read_only = true, allow_exec = false;
const char* region_name = shared_region_name[region_index];
bool allow_exec = false;
char* requested_addr = nullptr; // allow OS to pick any location
char* bitmap_base = map_memory(_fd, _full_path, r->file_offset(),
char* mapped_base = map_memory(_fd, _full_path, r->file_offset(),
requested_addr, r->used_aligned(), read_only, allow_exec, mtClassShared);
if (bitmap_base == nullptr) {
AOTMetaspace::report_loading_error("failed to map relocation bitmap");
if (mapped_base == nullptr) {
AOTMetaspace::report_loading_error("failed to map %d region", region_index);
return nullptr;
}
if (VerifySharedSpaces && !r->check_region_crc(bitmap_base)) {
aot_log_error(aot)("relocation bitmap CRC error");
if (!os::unmap_memory(bitmap_base, r->used_aligned())) {
fatal("os::unmap_memory of relocation bitmap failed");
if (VerifySharedSpaces && !r->check_region_crc(mapped_base)) {
aot_log_error(aot)("region %d CRC error", region_index);
if (!os::unmap_memory(mapped_base, r->used_aligned())) {
fatal("os::unmap_memory of region %d failed", region_index);
}
return nullptr;
}
r->set_mapped_from_file(true);
r->set_mapped_base(bitmap_base);
aot_log_info(aot)("Mapped %s region #%d at base " INTPTR_FORMAT " top " INTPTR_FORMAT " (%s)",
r->set_mapped_base(mapped_base);
aot_log_info(aot)("Mapped %s region #%d at base %zu top %zu (%s)",
is_static() ? "static " : "dynamic",
AOTMetaspace::bm, p2i(r->mapped_base()), p2i(r->mapped_end()),
shared_region_name[AOTMetaspace::bm]);
return bitmap_base;
region_index, p2i(r->mapped_base()), p2i(r->mapped_end()),
region_name);
return mapped_base;
}
char* FileMapInfo::map_bitmap_region() {
return map_auxiliary_region(AOTMetaspace::bm, /*read_only=*/false);
}
bool FileMapInfo::map_aot_code_region(ReservedSpace rs) {
@ -1429,59 +1484,48 @@ size_t FileMapInfo::readonly_total() {
}
#if INCLUDE_CDS_JAVA_HEAP
MemRegion FileMapInfo::_mapped_heap_memregion;
bool FileMapInfo::has_heap_region() {
return (region_at(AOTMetaspace::hp)->used() > 0);
}
// Returns the address range of the archived heap region computed using the
// current oop encoding mode. This range may be different than the one seen at
// dump time due to encoding mode differences. The result is used in determining
// if/how these regions should be relocated at run time.
MemRegion FileMapInfo::get_heap_region_requested_range() {
FileMapRegion* r = region_at(AOTMetaspace::hp);
size_t size = r->used();
assert(size > 0, "must have non-empty heap region");
static void on_heap_region_loading_error() {
if (CDSConfig::is_using_aot_linked_classes()) {
// It's too late to recover -- we have already committed to use the archived metaspace objects, but
// the archived heap objects cannot be loaded, so we don't have the archived FMG to guarantee that
// all AOT-linked classes are visible.
//
// We get here because the heap is too small. The app will fail anyway. So let's quit.
aot_log_error(aot)("%s has aot-linked classes but the archived "
"heap objects cannot be loaded. Try increasing your heap size.",
CDSConfig::type_of_archive_being_loaded());
AOTMetaspace::unrecoverable_loading_error();
}
CDSConfig::stop_using_full_module_graph();
}
address start = heap_region_requested_address();
address end = start + size;
aot_log_info(aot)("Requested heap region [" INTPTR_FORMAT " - " INTPTR_FORMAT "] = %8zu bytes",
p2i(start), p2i(end), size);
void FileMapInfo::stream_heap_region() {
assert(object_streaming_mode(), "This should only be done for the streaming approach");
return MemRegion((HeapWord*)start, (HeapWord*)end);
if (map_auxiliary_region(AOTMetaspace::hp, /*readonly=*/true) != nullptr) {
HeapShared::initialize_streaming();
} else {
on_heap_region_loading_error();
}
}
void FileMapInfo::map_or_load_heap_region() {
assert(!object_streaming_mode(), "This should only be done for the mapping approach");
bool success = false;
if (can_use_heap_region()) {
if (ArchiveHeapLoader::can_map()) {
success = map_heap_region();
} else if (ArchiveHeapLoader::can_load()) {
success = ArchiveHeapLoader::load_heap_region(this);
} else {
if (!UseCompressedOops && !ArchiveHeapLoader::can_map()) {
AOTMetaspace::report_loading_error("Cannot use CDS heap data. Selected GC not compatible -XX:-UseCompressedOops");
} else {
AOTMetaspace::report_loading_error("Cannot use CDS heap data. UseEpsilonGC, UseG1GC, UseSerialGC, UseParallelGC, or UseShenandoahGC are required.");
}
}
if (AOTMappedHeapLoader::can_map()) {
success = AOTMappedHeapLoader::map_heap_region(this);
} else if (AOTMappedHeapLoader::can_load()) {
success = AOTMappedHeapLoader::load_heap_region(this);
}
if (!success) {
if (CDSConfig::is_using_aot_linked_classes()) {
// It's too late to recover -- we have already committed to use the archived metaspace objects, but
// the archived heap objects cannot be loaded, so we don't have the archived FMG to guarantee that
// all AOT-linked classes are visible.
//
// We get here because the heap is too small. The app will fail anyway. So let's quit.
aot_log_error(aot)("%s has aot-linked classes but the archived "
"heap objects cannot be loaded. Try increasing your heap size.",
CDSConfig::type_of_archive_being_loaded());
AOTMetaspace::unrecoverable_loading_error();
}
CDSConfig::stop_using_full_module_graph("archive heap loading failed");
on_heap_region_loading_error();
}
}
@ -1489,6 +1533,10 @@ bool FileMapInfo::can_use_heap_region() {
if (!has_heap_region()) {
return false;
}
if (!object_streaming_mode() && !Universe::heap()->can_load_archived_objects() && !UseG1GC) {
// Incompatible object format
return false;
}
if (JvmtiExport::should_post_class_file_load_hook() && JvmtiExport::has_early_class_hook_env()) {
ShouldNotReachHere(); // CDS should have been disabled.
// The archived objects are mapped at JVM start-up, but we don't know if
@ -1503,7 +1551,7 @@ bool FileMapInfo::can_use_heap_region() {
}
// We pre-compute narrow Klass IDs with the runtime mapping start intended to be the base, and a shift of
// ArchiveBuilder::precomputed_narrow_klass_shift. We enforce this encoding at runtime (see
// HeapShared::precomputed_narrow_klass_shift. We enforce this encoding at runtime (see
// CompressedKlassPointers::initialize_for_given_encoding()). Therefore, the following assertions must
// hold:
address archive_narrow_klass_base = (address)header()->mapped_base_address();
@ -1512,21 +1560,28 @@ bool FileMapInfo::can_use_heap_region() {
aot_log_info(aot)("CDS archive was created with max heap size = %zuM, and the following configuration:",
max_heap_size()/M);
aot_log_info(aot)(" narrow_klass_base at mapping start address, narrow_klass_pointer_bits = %d, narrow_klass_shift = %d",
archive_narrow_klass_pointer_bits, archive_narrow_klass_shift);
aot_log_info(aot)(" narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
narrow_oop_mode(), p2i(narrow_oop_base()), narrow_oop_shift());
if (UseCompressedOops) {
aot_log_info(aot)(" narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
narrow_oop_mode(), p2i(narrow_oop_base()), narrow_oop_shift());
}
aot_log_info(aot)("The current max heap size = %zuM, G1HeapRegion::GrainBytes = %zu",
MaxHeapSize/M, G1HeapRegion::GrainBytes);
aot_log_info(aot)(" narrow_klass_base = " PTR_FORMAT ", arrow_klass_pointer_bits = %d, narrow_klass_shift = %d",
p2i(CompressedKlassPointers::base()), CompressedKlassPointers::narrow_klass_pointer_bits(), CompressedKlassPointers::shift());
aot_log_info(aot)(" narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
CompressedOops::mode(), p2i(CompressedOops::base()), CompressedOops::shift());
aot_log_info(aot)(" heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
UseCompressedOops ? p2i(CompressedOops::begin()) :
UseG1GC ? p2i((address)G1CollectedHeap::heap()->reserved().start()) : 0L,
UseCompressedOops ? p2i(CompressedOops::end()) :
UseG1GC ? p2i((address)G1CollectedHeap::heap()->reserved().end()) : 0L);
if (UseCompressedOops) {
aot_log_info(aot)(" narrow_oop_mode = %d, narrow_oop_base = " PTR_FORMAT ", narrow_oop_shift = %d",
CompressedOops::mode(), p2i(CompressedOops::base()), CompressedOops::shift());
}
if (!object_streaming_mode()) {
aot_log_info(aot)(" heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
UseCompressedOops ? p2i(CompressedOops::begin()) :
UseG1GC ? p2i((address)G1CollectedHeap::heap()->reserved().start()) : 0L,
UseCompressedOops ? p2i(CompressedOops::end()) :
UseG1GC ? p2i((address)G1CollectedHeap::heap()->reserved().end()) : 0L);
}
int err = 0;
if ( archive_narrow_klass_base != CompressedKlassPointers::base() ||
@ -1570,204 +1625,10 @@ bool FileMapInfo::can_use_heap_region() {
return true;
}
// The actual address of this region during dump time.
address FileMapInfo::heap_region_dumptime_address() {
FileMapRegion* r = region_at(AOTMetaspace::hp);
assert(CDSConfig::is_using_archive(), "runtime only");
assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
if (UseCompressedOops) {
return /*dumptime*/ (address)((uintptr_t)narrow_oop_base() + r->mapping_offset());
} else {
return heap_region_requested_address();
}
}
// The address where this region can be mapped into the runtime heap without
// patching any of the pointers that are embedded in this region.
address FileMapInfo::heap_region_requested_address() {
assert(CDSConfig::is_using_archive(), "runtime only");
FileMapRegion* r = region_at(AOTMetaspace::hp);
assert(is_aligned(r->mapping_offset(), sizeof(HeapWord)), "must be");
assert(ArchiveHeapLoader::can_use(), "GC must support mapping or loading");
if (UseCompressedOops) {
// We can avoid relocation if each region's offset from the runtime CompressedOops::base()
// is the same as its offset from the CompressedOops::base() during dumptime.
// Note that CompressedOops::base() may be different between dumptime and runtime.
//
// Example:
// Dumptime base = 0x1000 and shift is 0. We have a region at address 0x2000. There's a
// narrowOop P stored in this region that points to an object at address 0x2200.
// P's encoded value is 0x1200.
//
// Runtime base = 0x4000 and shift is also 0. If we map this region at 0x5000, then
// the value P can remain 0x1200. The decoded address = (0x4000 + (0x1200 << 0)) = 0x5200,
// which is the runtime location of the referenced object.
return /*runtime*/ (address)((uintptr_t)CompressedOops::base() + r->mapping_offset());
} else {
// This was the hard-coded requested base address used at dump time. With uncompressed oops,
// the heap range is assigned by the OS so we will most likely have to relocate anyway, no matter
// what base address was picked at dump time.
return (address)ArchiveHeapWriter::NOCOOPS_REQUESTED_BASE;
}
}
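// Standalone sketch of the relocation-avoidance example in the comment above
// (dumptime base 0x1000, runtime base 0x4000, shift 0 in both cases; all values
// come from that worked example, not from a real VM):
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t dump_base = 0x1000, run_base = 0x4000;
  const int shift = 0;
  uintptr_t dump_region = 0x2000; // region address at dump time
  uint32_t  P = 0x1200;           // encoded narrowOop stored in the region
  // Map the region at run time at the same offset from the new base:
  uintptr_t run_region = run_base + (dump_region - dump_base); // 0x5000
  // The stored encoding still decodes to the relocated referent:
  uintptr_t decoded = run_base + ((uintptr_t)P << shift);      // 0x5200
  assert(decoded == run_region + 0x200);
  return 0;
}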
bool FileMapInfo::map_heap_region() {
if (map_heap_region_impl()) {
#ifdef ASSERT
// The "old" regions must be parsable -- we cannot have any unused space
// at the start of the lowest G1 region that contains archived objects.
assert(is_aligned(_mapped_heap_memregion.start(), G1HeapRegion::GrainBytes), "must be");
// Make sure we map at the very top of the heap - see comments in
// init_heap_region_relocation().
MemRegion heap_range = G1CollectedHeap::heap()->reserved();
assert(heap_range.contains(_mapped_heap_memregion), "must be");
address heap_end = (address)heap_range.end();
address mapped_heap_region_end = (address)_mapped_heap_memregion.end();
assert(heap_end >= mapped_heap_region_end, "must be");
assert(heap_end - mapped_heap_region_end < (intx)(G1HeapRegion::GrainBytes),
"must be at the top of the heap to avoid fragmentation");
#endif
ArchiveHeapLoader::set_mapped();
return true;
} else {
return false;
}
}
bool FileMapInfo::map_heap_region_impl() {
assert(UseG1GC, "the following code assumes G1");
FileMapRegion* r = region_at(AOTMetaspace::hp);
size_t size = r->used();
if (size == 0) {
return false; // no archived java heap data
}
size_t word_size = size / HeapWordSize;
address requested_start = heap_region_requested_address();
aot_log_info(aot)("Preferred address to map heap data (to avoid relocation) is " INTPTR_FORMAT, p2i(requested_start));
// allocate from java heap
HeapWord* start = G1CollectedHeap::heap()->alloc_archive_region(word_size, (HeapWord*)requested_start);
if (start == nullptr) {
AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to allocate java heap region for archive heap.");
return false;
}
_mapped_heap_memregion = MemRegion(start, word_size);
// Map the archived heap data. No need to call MemTracker::record_virtual_memory_tag()
// for mapped region as it is part of the reserved java heap, which is already recorded.
char* addr = (char*)_mapped_heap_memregion.start();
char* base;
if (AOTMetaspace::use_windows_memory_mapping() || UseLargePages) {
// With UseLargePages, memory mapping may fail on some OSes if the size is not
// large page aligned, so let's use read() instead. In this case, the memory region
// is already commited by G1 so we don't need to commit it again.
if (!read_region(AOTMetaspace::hp, addr,
align_up(_mapped_heap_memregion.byte_size(), os::vm_page_size()),
/* do_commit = */ !UseLargePages)) {
dealloc_heap_region();
aot_log_error(aot)("Failed to read archived heap region into " INTPTR_FORMAT, p2i(addr));
return false;
}
// Checks for VerifySharedSpaces is already done inside read_region()
base = addr;
} else {
base = map_memory(_fd, _full_path, r->file_offset(),
addr, _mapped_heap_memregion.byte_size(), r->read_only(),
r->allow_exec(), mtJavaHeap);
if (base == nullptr || base != addr) {
dealloc_heap_region();
AOTMetaspace::report_loading_error("UseSharedSpaces: Unable to map at required address in java heap. "
INTPTR_FORMAT ", size = %zu bytes",
p2i(addr), _mapped_heap_memregion.byte_size());
return false;
}
if (VerifySharedSpaces && !r->check_region_crc(base)) {
dealloc_heap_region();
AOTMetaspace::report_loading_error("UseSharedSpaces: mapped heap region is corrupt");
return false;
}
}
r->set_mapped_base(base);
// If the requested range is different from the range allocated by GC, then
// the pointers need to be patched.
address mapped_start = (address) _mapped_heap_memregion.start();
ptrdiff_t delta = mapped_start - requested_start;
if (UseCompressedOops &&
(narrow_oop_mode() != CompressedOops::mode() ||
narrow_oop_shift() != CompressedOops::shift())) {
_heap_pointers_need_patching = true;
}
if (delta != 0) {
_heap_pointers_need_patching = true;
}
ArchiveHeapLoader::init_mapped_heap_info(mapped_start, delta, narrow_oop_shift());
if (_heap_pointers_need_patching) {
char* bitmap_base = map_bitmap_region();
if (bitmap_base == nullptr) {
AOTMetaspace::report_loading_error("CDS heap cannot be used because bitmap region cannot be mapped");
dealloc_heap_region();
_heap_pointers_need_patching = false;
return false;
}
}
aot_log_info(aot)("Heap data mapped at " INTPTR_FORMAT ", size = %8zu bytes",
p2i(mapped_start), _mapped_heap_memregion.byte_size());
aot_log_info(aot)("CDS heap data relocation delta = %zd bytes", delta);
return true;
}
narrowOop FileMapInfo::encoded_heap_region_dumptime_address() {
assert(CDSConfig::is_using_archive(), "runtime only");
assert(UseCompressedOops, "sanity");
FileMapRegion* r = region_at(AOTMetaspace::hp);
return CompressedOops::narrow_oop_cast(r->mapping_offset() >> narrow_oop_shift());
}
void FileMapInfo::patch_heap_embedded_pointers() {
if (!ArchiveHeapLoader::is_mapped() || !_heap_pointers_need_patching) {
return;
}
char* bitmap_base = map_bitmap_region();
assert(bitmap_base != nullptr, "must have already been mapped");
FileMapRegion* r = region_at(AOTMetaspace::hp);
ArchiveHeapLoader::patch_embedded_pointers(
this, _mapped_heap_memregion,
(address)(region_at(AOTMetaspace::bm)->mapped_base()) + r->oopmap_offset(),
r->oopmap_size_in_bits());
}
void FileMapInfo::fixup_mapped_heap_region() {
if (ArchiveHeapLoader::is_mapped()) {
assert(!_mapped_heap_memregion.is_empty(), "sanity");
// Populate the archive regions' G1BlockOffsetTables. That ensures
// fast G1BlockOffsetTable::block_start operations for any given address
// within the archive regions when trying to find start of an object
// (e.g. during card table scanning).
G1CollectedHeap::heap()->populate_archive_regions_bot(_mapped_heap_memregion);
}
}
// dealloc the archive regions from java heap
void FileMapInfo::dealloc_heap_region() {
G1CollectedHeap::heap()->dealloc_archive_regions(_mapped_heap_memregion);
}
#endif // INCLUDE_CDS_JAVA_HEAP
// Unmap a memory region in the address space.
void FileMapInfo::unmap_regions(int regions[], int num_regions) {
for (int r = 0; r < num_regions; r++) {
int idx = regions[r];
@ -1775,8 +1636,6 @@ void FileMapInfo::unmap_regions(int regions[], int num_regions) {
}
}
// Unmap a memory region in the address space.
void FileMapInfo::unmap_region(int i) {
FileMapRegion* r = region_at(i);
char* mapped_base = r->mapped_base();
@ -1808,7 +1667,6 @@ void FileMapInfo::assert_mark(bool check) {
FileMapInfo* FileMapInfo::_current_info = nullptr;
FileMapInfo* FileMapInfo::_dynamic_archive_info = nullptr;
bool FileMapInfo::_heap_pointers_need_patching = false;
bool FileMapInfo::_memory_mapping_failed = false;
// Open the shared archive file, read and validate the header

View File

@ -27,12 +27,14 @@
#include "cds/aotMetaspace.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/heapShared.hpp"
#include "include/cds.h"
#include "logging/logLevel.hpp"
#include "memory/allocation.hpp"
#include "oops/array.hpp"
#include "oops/compressedOops.hpp"
#include "utilities/align.hpp"
#include "utilities/bitMap.hpp"
// To understand the layout of the CDS archive file:
//
@ -43,7 +45,6 @@
static const int JVM_IDENT_MAX = 256;
class AOTClassLocationConfig;
class ArchiveHeapInfo;
class BitMapView;
class CHeapBitMap;
class ClassFileStream;
@ -114,6 +115,7 @@ private:
bool _compact_headers; // value of UseCompactObjectHeaders
uintx _max_heap_size; // java max heap size during dumping
CompressedOops::Mode _narrow_oop_mode; // compressed oop encoding mode
bool _object_streaming_mode; // dump was created for object streaming
bool _compressed_oops; // save the flag UseCompressedOops
bool _compressed_class_ptrs; // save the flag UseCompressedClassPointers
int _narrow_klass_pointer_bits; // save number of bits in narrowKlass
@ -139,12 +141,12 @@ private:
// some expensive operations.
bool _has_aot_linked_classes; // Was the CDS archive created with -XX:+AOTClassLinking
bool _has_full_module_graph; // Does this CDS archive contain the full archived module graph?
HeapRootSegments _heap_root_segments; // Heap root segments info
size_t _heap_oopmap_start_pos; // The first bit in the oopmap corresponds to this position in the heap.
size_t _heap_ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the heap.
size_t _rw_ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the rw region
size_t _ro_ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the ro region
ArchiveMappedHeapHeader _mapped_heap_header;
ArchiveStreamedHeapHeader _streamed_heap_header;
// The following are parameters that affect MethodData layout.
u1 _compiler_type;
uint _type_profile_level;
@ -192,6 +194,7 @@ public:
char* cloned_vtables() const { return from_mapped_offset<char*>(_cloned_vtables_offset); }
char* early_serialized_data() const { return from_mapped_offset<char*>(_early_serialized_data_offset); }
char* serialized_data() const { return from_mapped_offset<char*>(_serialized_data_offset); }
bool object_streaming_mode() const { return _object_streaming_mode; }
const char* jvm_ident() const { return _jvm_ident; }
char* requested_base_address() const { return _requested_base_address; }
char* mapped_base_address() const { return _mapped_base_address; }
@ -201,23 +204,25 @@ public:
bool compressed_class_pointers() const { return _compressed_class_ptrs; }
int narrow_klass_pointer_bits() const { return _narrow_klass_pointer_bits; }
int narrow_klass_shift() const { return _narrow_klass_shift; }
HeapRootSegments heap_root_segments() const { return _heap_root_segments; }
bool has_full_module_graph() const { return _has_full_module_graph; }
size_t heap_oopmap_start_pos() const { return _heap_oopmap_start_pos; }
size_t heap_ptrmap_start_pos() const { return _heap_ptrmap_start_pos; }
size_t rw_ptrmap_start_pos() const { return _rw_ptrmap_start_pos; }
size_t ro_ptrmap_start_pos() const { return _ro_ptrmap_start_pos; }
// Heap archiving
const ArchiveMappedHeapHeader* mapped_heap() const { return &_mapped_heap_header; }
const ArchiveStreamedHeapHeader* streamed_heap() const { return &_streamed_heap_header; }
void set_streamed_heap_header(ArchiveStreamedHeapHeader header) { _streamed_heap_header = header; }
void set_mapped_heap_header(ArchiveMappedHeapHeader header) { _mapped_heap_header = header; }
void set_has_platform_or_app_classes(bool v) { _has_platform_or_app_classes = v; }
void set_cloned_vtables(char* p) { set_as_offset(p, &_cloned_vtables_offset); }
void set_early_serialized_data(char* p) { set_as_offset(p, &_early_serialized_data_offset); }
void set_serialized_data(char* p) { set_as_offset(p, &_serialized_data_offset); }
void set_mapped_base_address(char* p) { _mapped_base_address = p; }
void set_heap_root_segments(HeapRootSegments segments) { _heap_root_segments = segments; }
void set_heap_oopmap_start_pos(size_t n) { _heap_oopmap_start_pos = n; }
void set_heap_ptrmap_start_pos(size_t n) { _heap_ptrmap_start_pos = n; }
void set_rw_ptrmap_start_pos(size_t n) { _rw_ptrmap_start_pos = n; }
void set_ro_ptrmap_start_pos(size_t n) { _ro_ptrmap_start_pos = n; }
void copy_base_archive_name(const char* name);
void set_class_location_config(AOTClassLocationConfig* table) {
@ -273,7 +278,6 @@ private:
static FileMapInfo* _current_info;
static FileMapInfo* _dynamic_archive_info;
static bool _heap_pointers_need_patching;
static bool _memory_mapping_failed;
public:
@ -303,11 +307,12 @@ public:
address narrow_oop_base() const { return header()->narrow_oop_base(); }
int narrow_oop_shift() const { return header()->narrow_oop_shift(); }
uintx max_heap_size() const { return header()->max_heap_size(); }
HeapRootSegments heap_root_segments() const { return header()->heap_root_segments(); }
size_t core_region_alignment() const { return header()->core_region_alignment(); }
size_t heap_oopmap_start_pos() const { return header()->heap_oopmap_start_pos(); }
size_t heap_ptrmap_start_pos() const { return header()->heap_ptrmap_start_pos(); }
const ArchiveMappedHeapHeader* mapped_heap() const { return header()->mapped_heap(); }
const ArchiveStreamedHeapHeader* streamed_heap() const { return header()->streamed_heap(); }
bool object_streaming_mode() const { return header()->object_streaming_mode(); }
CompressedOops::Mode narrow_oop_mode() const { return header()->narrow_oop_mode(); }
char* cloned_vtables() const { return header()->cloned_vtables(); }
@ -324,6 +329,7 @@ public:
bool is_mapped() const { return _is_mapped; }
void set_is_mapped(bool v) { _is_mapped = v; }
const char* full_path() const { return _full_path; }
char* map_heap_region(FileMapRegion* r, char* addr, size_t bytes);
void set_requested_base(char* b) { header()->set_requested_base(b); }
char* requested_base_address() const { return header()->requested_base_address(); }
@ -363,23 +369,29 @@ public:
void write_region(int region, char* base, size_t size,
bool read_only, bool allow_exec);
size_t remove_bitmap_zeros(CHeapBitMap* map);
char* write_bitmap_region(CHeapBitMap* rw_ptrmap, CHeapBitMap* ro_ptrmap, ArchiveHeapInfo* heap_info,
char* write_bitmap_region(CHeapBitMap* rw_ptrmap,
CHeapBitMap* ro_ptrmap,
ArchiveMappedHeapInfo* mapped_heap_info,
ArchiveStreamedHeapInfo* streamed_heap_info,
size_t &size_in_bytes);
size_t write_heap_region(ArchiveHeapInfo* heap_info);
size_t write_mapped_heap_region(ArchiveMappedHeapInfo* heap_info) NOT_CDS_JAVA_HEAP_RETURN_(0);
size_t write_streamed_heap_region(ArchiveStreamedHeapInfo* heap_info) NOT_CDS_JAVA_HEAP_RETURN_(0);
void write_bytes(const void* buffer, size_t count);
void write_bytes_aligned(const void* buffer, size_t count);
size_t read_bytes(void* buffer, size_t count);
static size_t readonly_total();
MapArchiveResult map_regions(int regions[], int num_regions, char* mapped_base_address, ReservedSpace rs);
void unmap_regions(int regions[], int num_regions);
// Object loading support
void stream_heap_region() NOT_CDS_JAVA_HEAP_RETURN;
void map_or_load_heap_region() NOT_CDS_JAVA_HEAP_RETURN;
void fixup_mapped_heap_region() NOT_CDS_JAVA_HEAP_RETURN;
void patch_heap_embedded_pointers() NOT_CDS_JAVA_HEAP_RETURN;
bool has_heap_region() NOT_CDS_JAVA_HEAP_RETURN_(false);
MemRegion get_heap_region_requested_range() NOT_CDS_JAVA_HEAP_RETURN_(MemRegion());
bool read_region(int i, char* base, size_t size, bool do_commit);
char* map_bitmap_region();
bool map_aot_code_region(ReservedSpace rs);
char* map_forwarding_region();
void unmap_region(int i);
void close();
bool is_open() { return _file_open; }
@ -434,25 +446,17 @@ public:
const char* vm_version() {
return header()->jvm_ident();
}
bool can_use_heap_region();
private:
bool open_for_read();
void seek_to_position(size_t pos);
bool map_heap_region_impl() NOT_CDS_JAVA_HEAP_RETURN_(false);
void dealloc_heap_region() NOT_CDS_JAVA_HEAP_RETURN;
bool can_use_heap_region();
bool load_heap_region() NOT_CDS_JAVA_HEAP_RETURN_(false);
bool map_heap_region() NOT_CDS_JAVA_HEAP_RETURN_(false);
void init_heap_region_relocation();
MapArchiveResult map_region(int i, intx addr_delta, char* mapped_base_address, ReservedSpace rs);
bool relocate_pointers_in_core_regions(intx addr_delta);
static MemRegion _mapped_heap_memregion;
char* map_auxiliary_region(int region_index, bool read_only);
public:
address heap_region_dumptime_address() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
address heap_region_requested_address() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
narrowOop encoded_heap_region_dumptime_address();
private:

View File

@ -26,17 +26,20 @@
#include "cds/aotClassInitializer.hpp"
#include "cds/aotClassLocation.hpp"
#include "cds/aotLogging.hpp"
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/aotOopChecker.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/aotStreamedHeapLoader.hpp"
#include "cds/aotStreamedHeapWriter.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/archiveUtils.hpp"
#include "cds/cds_globals.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/cdsEnumKlass.hpp"
#include "cds/cdsHeapVerifier.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
@ -64,6 +67,7 @@
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"
#include "runtime/fieldDescriptor.inline.hpp"
#include "runtime/globals_extension.hpp"
#include "runtime/init.hpp"
#include "runtime/javaCalls.hpp"
#include "runtime/mutexLocker.hpp"
@ -91,7 +95,57 @@ struct ArchivableStaticFieldInfo {
}
};
DumpedInternedStrings *HeapShared::_dumped_interned_strings = nullptr;
// Anything that goes in the header must be thoroughly purged of uninitialized memory,
// as it will be written to disk. Therefore, the constructors memset the memory to 0.
// This is not the prettiest thing, but we need to know that every byte is initialized,
// including any potential padding between fields.
ArchiveMappedHeapHeader::ArchiveMappedHeapHeader(size_t ptrmap_start_pos,
size_t oopmap_start_pos,
HeapRootSegments root_segments) {
memset((char*)this, 0, sizeof(*this));
_ptrmap_start_pos = ptrmap_start_pos;
_oopmap_start_pos = oopmap_start_pos;
_root_segments = root_segments;
}
ArchiveMappedHeapHeader::ArchiveMappedHeapHeader() {
memset((char*)this, 0, sizeof(*this));
}
ArchiveMappedHeapHeader ArchiveMappedHeapInfo::create_header() {
return ArchiveMappedHeapHeader{_ptrmap_start_pos,
_oopmap_start_pos,
_root_segments};
}
ArchiveStreamedHeapHeader::ArchiveStreamedHeapHeader(size_t forwarding_offset,
size_t roots_offset,
size_t num_roots,
size_t root_highest_object_index_table_offset,
size_t num_archived_objects) {
memset((char*)this, 0, sizeof(*this));
_forwarding_offset = forwarding_offset;
_roots_offset = roots_offset;
_num_roots = num_roots;
_root_highest_object_index_table_offset = root_highest_object_index_table_offset;
_num_archived_objects = num_archived_objects;
}
ArchiveStreamedHeapHeader::ArchiveStreamedHeapHeader() {
memset((char*)this, 0, sizeof(*this));
}
ArchiveStreamedHeapHeader ArchiveStreamedHeapInfo::create_header() {
return ArchiveStreamedHeapHeader{_forwarding_offset,
_roots_offset,
_num_roots,
_root_highest_object_index_table_offset,
_num_archived_objects};
}
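// Small sketch of why these headers memset themselves before assigning fields:
// with plain member initialization, compiler-inserted padding would hold garbage
// and make the bytes written to the archive nondeterministic. The struct and its
// field sizes are assumed for illustration only.
#include <cstdint>
#include <cstring>

struct ExampleHeader {
  uint8_t  _mode;   // 1 byte; typically followed by 7 padding bytes on LP64
  uint64_t _offset;
  ExampleHeader(uint8_t mode, uint64_t offset) {
    memset((char*)this, 0, sizeof(*this)); // zeroes the padding bytes too
    _mode = mode;
    _offset = offset;
  }
};

int main() {
  ExampleHeader h(1, 42);
  // Every byte of h, including padding, is now deterministic and safe to
  // write to disk and later CRC-check.
  return (h._mode == 1 && h._offset == 42) ? 0 : 1;
}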
HeapArchiveMode HeapShared::_heap_load_mode = HeapArchiveMode::_uninitialized;
HeapArchiveMode HeapShared::_heap_write_mode = HeapArchiveMode::_uninitialized;
size_t HeapShared::_alloc_count[HeapShared::ALLOC_STAT_SLOTS];
size_t HeapShared::_alloc_size[HeapShared::ALLOC_STAT_SLOTS];
@ -142,8 +196,6 @@ static ArchivableStaticFieldInfo fmg_archive_subgraph_entry_fields[] = {
KlassSubGraphInfo* HeapShared::_dump_time_special_subgraph;
ArchivedKlassSubGraphInfoRecord* HeapShared::_run_time_special_subgraph;
GrowableArrayCHeap<oop, mtClassShared>* HeapShared::_pending_roots = nullptr;
GrowableArrayCHeap<OopHandle, mtClassShared>* HeapShared::_root_segments = nullptr;
int HeapShared::_root_segment_max_size_elems;
OopHandle HeapShared::_scratch_basic_type_mirrors[T_VOID+1];
MetaspaceObjToOopHandleTable* HeapShared::_scratch_objects_table = nullptr;
@ -239,10 +291,147 @@ void HeapShared::reset_archived_object_states(TRAPS) {
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
bool HeapShared::is_archived_heap_in_use() {
if (HeapShared::is_loading()) {
if (HeapShared::is_loading_streaming_mode()) {
return AOTStreamedHeapLoader::is_in_use();
} else {
return AOTMappedHeapLoader::is_in_use();
}
}
return false;
}
bool HeapShared::can_use_archived_heap() {
FileMapInfo* static_mapinfo = FileMapInfo::current_info();
if (static_mapinfo == nullptr) {
return false;
}
if (!static_mapinfo->has_heap_region()) {
return false;
}
if (!static_mapinfo->object_streaming_mode() &&
!Universe::heap()->can_load_archived_objects() &&
!UseG1GC) {
// Incompatible object format
return false;
}
return true;
}
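// Sketch of the compatibility predicate above, with plain bools standing in for
// the queried VM state; the function and parameter names are illustrative only.
#include <cassert>

static bool can_use_archived_heap(bool has_heap_region, bool streaming_mode,
                                  bool gc_can_load, bool use_g1) {
  if (!has_heap_region) return false;
  // Mapped-mode archives use the G1 object layout; other GCs must be able to
  // load (copy in) archived objects instead. Streaming works with any GC.
  if (!streaming_mode && !gc_can_load && !use_g1) return false;
  return true;
}

int main() {
  assert(!can_use_archived_heap(false, false, false, false)); // no heap region
  assert( can_use_archived_heap(true,  true,  false, false)); // streaming works anywhere
  assert(!can_use_archived_heap(true,  false, false, false)); // mapped + incompatible GC
  return 0;
}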
bool HeapShared::is_too_large_to_archive(size_t size) {
if (HeapShared::is_writing_streaming_mode()) {
return false;
} else {
return AOTMappedHeapWriter::is_too_large_to_archive(size);
}
}
bool HeapShared::is_too_large_to_archive(oop obj) {
if (HeapShared::is_writing_streaming_mode()) {
return false;
} else {
return AOTMappedHeapWriter::is_too_large_to_archive(obj);
}
}
bool HeapShared::is_string_too_large_to_archive(oop string) {
typeArrayOop value = java_lang_String::value_no_keepalive(string);
return is_too_large_to_archive(value);
}
void HeapShared::initialize_loading_mode(HeapArchiveMode mode) {
assert(_heap_load_mode == HeapArchiveMode::_uninitialized, "already set?");
assert(mode != HeapArchiveMode::_uninitialized, "sanity");
_heap_load_mode = mode;
}
void HeapShared::initialize_writing_mode() {
assert(!FLAG_IS_ERGO(AOTStreamableObjects), "Should not have been ergonomically set yet");
if (!CDSConfig::is_dumping_archive()) {
// We use FLAG_IS_CMDLINE below because we are specifically looking to warn
// a user that explicitly sets the flag on the command line for a JVM that is
// not dumping an archive.
if (FLAG_IS_CMDLINE(AOTStreamableObjects)) {
log_warning(cds)("-XX:%cAOTStreamableObjects was specified, "
"AOTStreamableObjects is only used for writing "
"the AOT cache.",
AOTStreamableObjects ? '+' : '-');
}
}
// The below checks use !FLAG_IS_DEFAULT instead of FLAG_IS_CMDLINE
// because the one step AOT cache creation transfers the AOTStreamableObjects
// flag value from the training JVM to the assembly JVM using an environment
// variable that sets the flag as ERGO in the assembly JVM.
if (FLAG_IS_DEFAULT(AOTStreamableObjects)) {
// By default, the value of AOTStreamableObjects should match !UseCompressedOops.
FLAG_SET_DEFAULT(AOTStreamableObjects, !UseCompressedOops);
} else if (!AOTStreamableObjects && UseZGC) {
// Never write mapped heap with ZGC
if (CDSConfig::is_dumping_archive()) {
log_warning(cds)("Heap archiving without streaming not supported for -XX:+UseZGC");
}
FLAG_SET_ERGO(AOTStreamableObjects, true);
}
if (CDSConfig::is_dumping_archive()) {
// Select default mode
assert(_heap_write_mode == HeapArchiveMode::_uninitialized, "already initialized?");
_heap_write_mode = AOTStreamableObjects ? HeapArchiveMode::_streaming : HeapArchiveMode::_mapping;
}
}
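// Sketch of the flag ergonomics above, with plain bools standing in for the VM
// flags: by default streaming tracks !UseCompressedOops, and ZGC always forces
// streaming when dumping. Names are illustrative, not the real flag machinery.
#include <cassert>

static bool resolve_streamable(bool flag_is_default, bool requested,
                               bool use_compressed_oops, bool use_zgc) {
  bool streamable = requested;
  if (flag_is_default) {
    streamable = !use_compressed_oops; // default tracks !UseCompressedOops
  } else if (!streamable && use_zgc) {
    streamable = true;                 // the mapped heap is never written with ZGC
  }
  return streamable;
}

int main() {
  assert(resolve_streamable(true,  false, true,  false) == false); // coops default
  assert(resolve_streamable(true,  false, false, false) == true);  // no-coops default
  assert(resolve_streamable(false, false, true,  true)  == true);  // ZGC forces streaming
  return 0;
}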
void HeapShared::initialize_streaming() {
assert(is_loading_streaming_mode(), "shouldn't call this");
if (can_use_archived_heap()) {
AOTStreamedHeapLoader::initialize();
}
}
void HeapShared::enable_gc() {
if (AOTStreamedHeapLoader::is_in_use()) {
AOTStreamedHeapLoader::enable_gc();
}
}
void HeapShared::materialize_thread_object() {
if (AOTStreamedHeapLoader::is_in_use()) {
AOTStreamedHeapLoader::materialize_thread_object();
}
}
void HeapShared::add_to_dumped_interned_strings(oop string) {
assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
AOTMappedHeapWriter::add_to_dumped_interned_strings(string);
}
void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
if (HeapShared::is_loading()) {
if (HeapShared::is_loading_streaming_mode()) {
// Heap initialization can be done only after vtables are initialized by ReadClosure.
AOTStreamedHeapLoader::finish_initialization(static_mapinfo);
} else {
// Finish up archived heap initialization. These must be
// done after ReadClosure.
AOTMappedHeapLoader::finish_initialization(static_mapinfo);
}
}
}
HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
OopHandle oh(Universe::vm_global(), obj);
CachedOopInfo* result = _archived_object_cache->get(oh);
oh.release(Universe::vm_global());
return result;
}
bool HeapShared::has_been_archived(oop obj) {
assert(CDSConfig::is_dumping_heap(), "dump-time only");
OopHandle oh(&obj);
return archived_object_cache()->get(oh) != nullptr;
return get_cached_oop_info(obj) != nullptr;
}
int HeapShared::append_root(oop obj) {
@ -256,59 +445,45 @@ int HeapShared::append_root(oop obj) {
return _pending_roots->append(obj);
}
objArrayOop HeapShared::root_segment(int segment_idx) {
if (CDSConfig::is_dumping_heap()) {
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
} else {
assert(CDSConfig::is_using_archive(), "must be");
}
objArrayOop segment = (objArrayOop)_root_segments->at(segment_idx).resolve();
assert(segment != nullptr, "should have been initialized");
return segment;
}
void HeapShared::get_segment_indexes(int idx, int& seg_idx, int& int_idx) {
assert(_root_segment_max_size_elems > 0, "sanity");
// Try to avoid divisions for the common case.
if (idx < _root_segment_max_size_elems) {
seg_idx = 0;
int_idx = idx;
} else {
seg_idx = idx / _root_segment_max_size_elems;
int_idx = idx % _root_segment_max_size_elems;
}
assert(idx == seg_idx * _root_segment_max_size_elems + int_idx,
"sanity: %d index maps to %d segment and %d internal", idx, seg_idx, int_idx);
}
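// Standalone sketch of the segment-index arithmetic being removed above (it now
// lives with the mapped-heap loader): a flat root index splits into a segment
// index and an offset within that segment. The capacity is an assumed value.
#include <cassert>

int main() {
  const int max_size_elems = 1000;    // assumed segment capacity
  int idx = 2345;                     // flat root index
  int seg_idx = idx / max_size_elems; // which root segment
  int int_idx = idx % max_size_elems; // offset within that segment
  assert(idx == seg_idx * max_size_elems + int_idx);
  assert(seg_idx == 2 && int_idx == 345);
  return 0;
}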
// Returns an objArray that contains all the roots of the archived objects
oop HeapShared::get_root(int index, bool clear) {
assert(index >= 0, "sanity");
assert(!CDSConfig::is_dumping_heap() && CDSConfig::is_using_archive(), "runtime only");
assert(!_root_segments->is_empty(), "must have loaded shared heap");
int seg_idx, int_idx;
get_segment_indexes(index, seg_idx, int_idx);
oop result = root_segment(seg_idx)->obj_at(int_idx);
assert(is_archived_heap_in_use(), "getting roots into heap that is not used");
oop result;
if (HeapShared::is_loading_streaming_mode()) {
result = AOTStreamedHeapLoader::get_root(index);
} else {
assert(HeapShared::is_loading_mapping_mode(), "must be");
result = AOTMappedHeapLoader::get_root(index);
}
if (clear) {
clear_root(index);
}
return result;
}
void HeapShared::finish_materialize_objects() {
if (AOTStreamedHeapLoader::is_in_use()) {
AOTStreamedHeapLoader::finish_materialize_objects();
}
}
void HeapShared::clear_root(int index) {
assert(index >= 0, "sanity");
assert(CDSConfig::is_using_archive(), "must be");
if (ArchiveHeapLoader::is_in_use()) {
int seg_idx, int_idx;
get_segment_indexes(index, seg_idx, int_idx);
if (is_archived_heap_in_use()) {
if (log_is_enabled(Debug, aot, heap)) {
oop old = root_segment(seg_idx)->obj_at(int_idx);
log_debug(aot, heap)("Clearing root %d: was " PTR_FORMAT, index, p2i(old));
log_debug(aot, heap)("Clearing root %d: was %zu", index, p2i(get_root(index, false /* clear */)));
}
if (HeapShared::is_loading_streaming_mode()) {
AOTStreamedHeapLoader::clear_root(index);
} else {
assert(HeapShared::is_loading_mapping_mode(), "must be");
AOTMappedHeapLoader::clear_root(index);
}
root_segment(seg_idx)->obj_at_put(int_idx, nullptr);
}
}
@ -320,81 +495,84 @@ bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgra
return true;
}
if (ArchiveHeapWriter::is_too_large_to_archive(obj->size())) {
if (is_too_large_to_archive(obj)) {
log_debug(aot, heap)("Cannot archive, object (" PTR_FORMAT ") is too large: %zu",
p2i(obj), obj->size());
debug_trace();
return false;
} else {
AOTOopChecker::check(obj); // Make sure contents of this oop are safe.
count_allocation(obj->size());
ArchiveHeapWriter::add_source_obj(obj);
CachedOopInfo info = make_cached_oop_info(obj, referrer);
OopHandle oh(Universe::vm_global(), obj);
archived_object_cache()->put_when_absent(oh, info);
archived_object_cache()->maybe_grow();
mark_native_pointers(obj);
Klass* k = obj->klass();
if (k->is_instance_klass()) {
// Whenever we see a non-array Java object of type X, we mark X to be aot-initialized.
// This ensures that during the production run, whenever Java code sees a cached object
// of type X, we know that X is already initialized. (see TODO comment below ...)
if (InstanceKlass::cast(k)->is_enum_subclass()
// We can't rerun <clinit> of enum classes (see cdsEnumKlass.cpp) so
// we must store them as AOT-initialized.
|| (subgraph_info == _dump_time_special_subgraph))
// TODO: we do this only for the special subgraph for now. Extending this to
// other subgraphs would require more refactoring of the core library (such as
// move some initialization logic into runtimeSetup()).
//
// For the other subgraphs, we have a weaker mechanism to ensure that
// all classes in a subgraph are initialized before the subgraph is programmatically
// returned from jdk.internal.misc.CDS::initializeFromArchive().
// See HeapShared::initialize_from_archived_subgraph().
{
AOTArtifactFinder::add_aot_inited_class(InstanceKlass::cast(k));
}
if (java_lang_Class::is_instance(obj)) {
Klass* mirror_k = java_lang_Class::as_Klass(obj);
if (mirror_k != nullptr) {
AOTArtifactFinder::add_cached_class(mirror_k);
}
} else if (java_lang_invoke_ResolvedMethodName::is_instance(obj)) {
Method* m = java_lang_invoke_ResolvedMethodName::vmtarget(obj);
if (m != nullptr) {
if (RegeneratedClasses::has_been_regenerated(m)) {
m = RegeneratedClasses::get_regenerated_object(m);
}
InstanceKlass* method_holder = m->method_holder();
AOTArtifactFinder::add_cached_class(method_holder);
}
}
}
if (log_is_enabled(Debug, aot, heap)) {
ResourceMark rm;
LogTarget(Debug, aot, heap) log;
LogStream out(log);
out.print("Archived heap object " PTR_FORMAT " : %s ",
p2i(obj), obj->klass()->external_name());
if (java_lang_Class::is_instance(obj)) {
Klass* k = java_lang_Class::as_Klass(obj);
if (k != nullptr) {
out.print("%s", k->external_name());
} else {
out.print("primitive");
}
}
out.cr();
}
return true;
}
AOTOopChecker::check(obj); // Make sure contents of this oop are safe.
count_allocation(obj->size());
if (HeapShared::is_writing_streaming_mode()) {
AOTStreamedHeapWriter::add_source_obj(obj);
} else {
AOTMappedHeapWriter::add_source_obj(obj);
}
OopHandle oh(Universe::vm_global(), obj);
CachedOopInfo info = make_cached_oop_info(obj, referrer);
archived_object_cache()->put_when_absent(oh, info);
archived_object_cache()->maybe_grow();
Klass* k = obj->klass();
if (k->is_instance_klass()) {
// Whenever we see a non-array Java object of type X, we mark X to be aot-initialized.
// This ensures that during the production run, whenever Java code sees a cached object
// of type X, we know that X is already initialized. (see TODO comment below ...)
if (InstanceKlass::cast(k)->is_enum_subclass()
// We can't rerun <clinit> of enum classes (see cdsEnumKlass.cpp) so
// we must store them as AOT-initialized.
|| (subgraph_info == _dump_time_special_subgraph))
// TODO: we do this only for the special subgraph for now. Extending this to
// other subgraphs would require more refactoring of the core library (such as
// move some initialization logic into runtimeSetup()).
//
// For the other subgraphs, we have a weaker mechanism to ensure that
// all classes in a subgraph are initialized before the subgraph is programmatically
// returned from jdk.internal.misc.CDS::initializeFromArchive().
// See HeapShared::initialize_from_archived_subgraph().
{
AOTArtifactFinder::add_aot_inited_class(InstanceKlass::cast(k));
}
if (java_lang_Class::is_instance(obj)) {
Klass* mirror_k = java_lang_Class::as_Klass(obj);
if (mirror_k != nullptr) {
AOTArtifactFinder::add_cached_class(mirror_k);
}
} else if (java_lang_invoke_ResolvedMethodName::is_instance(obj)) {
Method* m = java_lang_invoke_ResolvedMethodName::vmtarget(obj);
if (m != nullptr) {
if (RegeneratedClasses::has_been_regenerated(m)) {
m = RegeneratedClasses::get_regenerated_object(m);
}
InstanceKlass* method_holder = m->method_holder();
AOTArtifactFinder::add_cached_class(method_holder);
}
}
}
if (log_is_enabled(Debug, aot, heap)) {
ResourceMark rm;
LogTarget(Debug, aot, heap) log;
LogStream out(log);
out.print("Archived heap object " PTR_FORMAT " : %s ",
p2i(obj), obj->klass()->external_name());
if (java_lang_Class::is_instance(obj)) {
Klass* k = java_lang_Class::as_Klass(obj);
if (k != nullptr) {
out.print("%s", k->external_name());
} else {
out.print("primitive");
}
}
out.cr();
}
return true;
}
class MetaspaceObjToOopHandleTable: public HashTable<MetaspaceObj*, OopHandle,
@ -437,9 +615,9 @@ objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
return (objArrayOop)_scratch_objects_table->get_oop(src);
}
void HeapShared::init_dumping() {
_scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
_pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
}
void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
@ -641,7 +819,7 @@ void HeapShared::copy_java_mirror(oop orig_mirror, oop scratch_m) {
static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
if (SystemDictionaryShared::is_builtin_loader(src_ik->class_loader_data())) {
objArrayOop rr = src_ik->constants()->resolved_references_or_null();
if (rr != nullptr && !ArchiveHeapWriter::is_too_large_to_archive(rr)) {
if (rr != nullptr && !HeapShared::is_too_large_to_archive(rr)) {
return HeapShared::scratch_resolved_references(src_ik->constants());
}
}
@ -649,6 +827,7 @@ static objArrayOop get_archived_resolved_references(InstanceKlass* src_ik) {
}
void HeapShared::archive_strings() {
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
oop shared_strings_array = StringTable::init_shared_strings_array();
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, shared_strings_array);
assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
@ -661,15 +840,6 @@ int HeapShared::archive_exception_instance(oop exception) {
return append_root(exception);
}
void HeapShared::mark_native_pointers(oop orig_obj) {
if (java_lang_Class::is_instance(orig_obj)) {
ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::klass_offset());
ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_Class::array_klass_offset());
} else if (java_lang_invoke_ResolvedMethodName::is_instance(orig_obj)) {
ArchiveHeapWriter::mark_native_pointer(orig_obj, java_lang_invoke_ResolvedMethodName::vmtarget_offset());
}
}
void HeapShared::get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers) {
OopHandle oh(&src_obj);
CachedOopInfo* info = archived_object_cache()->get(oh);
@ -698,7 +868,7 @@ void HeapShared::start_scanning_for_oops() {
// Cache for recording where the archived objects are copied to
create_archived_object_cache();
if (UseCompressedOops || UseG1GC) {
if (HeapShared::is_writing_mapping_mode() && (UseG1GC || UseCompressedOops)) {
aot_log_info(aot)("Heap range = [" PTR_FORMAT " - " PTR_FORMAT "]",
UseCompressedOops ? p2i(CompressedOops::begin()) :
p2i((address)G1CollectedHeap::heap()->reserved().start()),
@ -714,19 +884,26 @@ void HeapShared::start_scanning_for_oops() {
}
void HeapShared::end_scanning_for_oops() {
archive_strings();
if (is_writing_mapping_mode()) {
archive_strings();
}
delete_seen_objects_table();
}
void HeapShared::write_heap(ArchiveHeapInfo *heap_info) {
void HeapShared::write_heap(ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) {
{
NoSafepointVerifier nsv;
CDSHeapVerifier::verify();
check_special_subgraph_classes();
}
StringTable::write_shared_table();
ArchiveHeapWriter::write(_pending_roots, heap_info);
if (HeapShared::is_writing_mapping_mode()) {
StringTable::write_shared_table();
AOTMappedHeapWriter::write(_pending_roots, mapped_heap_info);
} else {
assert(HeapShared::is_writing_streaming_mode(), "are there more modes?");
AOTStreamedHeapWriter::write(_pending_roots, streamed_heap_info);
}
ArchiveBuilder::OtherROAllocMark mark;
write_subgraph_info_table();
@ -1067,19 +1244,6 @@ void HeapShared::write_subgraph_info_table() {
}
}
void HeapShared::add_root_segment(objArrayOop segment_oop) {
assert(segment_oop != nullptr, "must be");
assert(ArchiveHeapLoader::is_in_use(), "must be");
if (_root_segments == nullptr) {
_root_segments = new GrowableArrayCHeap<OopHandle, mtClassShared>(10);
}
_root_segments->push(OopHandle(Universe::vm_global(), segment_oop));
}
void HeapShared::init_root_segment_sizes(int max_size_elems) {
_root_segment_max_size_elems = max_size_elems;
}
void HeapShared::serialize_tables(SerializeClosure* soc) {
#ifndef PRODUCT
@ -1100,10 +1264,10 @@ static void verify_the_heap(Klass* k, const char* which) {
log_info(aot, heap)("Verify heap %s initializing static field(s) in %s",
which, k->external_name());
VM_Verify verify_op;
VMThread::execute(&verify_op);
if (VerifyArchivedFields > 1 && is_init_completed()) {
if (VerifyArchivedFields == 1) {
VM_Verify verify_op;
VMThread::execute(&verify_op);
} else if (VerifyArchivedFields == 2 && is_init_completed()) {
// At this time, the oop->klass() of some archived objects in the heap may not
// have been loaded into the system dictionary yet. Nevertheless, oop->klass() should
// have enough information (object size, oop maps, etc) so that a GC can be safely
@ -1129,7 +1293,7 @@ static void verify_the_heap(Klass* k, const char* which) {
// this case, we will not load the ArchivedKlassSubGraphInfoRecord and will clear its roots.
void HeapShared::resolve_classes(JavaThread* current) {
assert(CDSConfig::is_using_archive(), "runtime only!");
if (!ArchiveHeapLoader::is_in_use()) {
if (!is_archived_heap_in_use()) {
return; // nothing to do
}
resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields);
@ -1188,7 +1352,7 @@ void HeapShared::initialize_java_lang_invoke(TRAPS) {
// should be initialized before any Java code can access the Fruit class. Note that
// HashSet itself doesn't necessary need to be an aot-initialized class.
void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
if (!ArchiveHeapLoader::is_in_use()) {
if (!is_archived_heap_in_use()) {
return;
}
@ -1220,7 +1384,7 @@ void HeapShared::init_classes_for_special_subgraph(Handle class_loader, TRAPS) {
void HeapShared::initialize_from_archived_subgraph(JavaThread* current, Klass* k) {
JavaThread* THREAD = current;
if (!ArchiveHeapLoader::is_in_use()) {
if (!is_archived_heap_in_use()) {
return; // nothing to do
}
@ -1356,9 +1520,6 @@ void HeapShared::resolve_or_init(Klass* k, bool do_init, TRAPS) {
void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record) {
verify_the_heap(k, "before");
// Load the subgraph entry fields from the record and store them back to
// the corresponding fields within the mirror.
oop m = k->java_mirror();
Array<int>* entry_field_records = record->entry_field_records();
if (entry_field_records != nullptr) {
int efr_len = entry_field_records->length();
@ -1366,7 +1527,10 @@ void HeapShared::init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphI
for (int i = 0; i < efr_len; i += 2) {
int field_offset = entry_field_records->at(i);
int root_index = entry_field_records->at(i+1);
// Load the subgraph entry fields from the record and store them back to
// the corresponding fields within the mirror.
oop v = get_root(root_index, /*clear=*/true);
oop m = k->java_mirror();
if (k->has_aot_initialized_mirror()) {
assert(v == m->obj_field(field_offset), "must be aot-initialized");
} else {
@ -1445,7 +1609,7 @@ class HeapShared::OopFieldPusher: public BasicOopIterateClosure {
template <class T> void do_oop_work(T *p) {
int field_offset = pointer_delta_as_int((char*)p, cast_from_oop<char*>(_referencing_obj));
oop obj = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(_referencing_obj, field_offset);
if (!CompressedOops::is_null(obj)) {
if (obj != nullptr) {
if (_is_java_lang_ref && AOTReferenceObjSupport::skip_field(field_offset)) {
// Do not follow these fields. They will be cleared to null.
return;
@ -1494,7 +1658,7 @@ HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer
}
void HeapShared::init_box_classes(TRAPS) {
if (ArchiveHeapLoader::is_in_use()) {
if (is_archived_heap_in_use()) {
vmClasses::Boolean_klass()->initialize(CHECK);
vmClasses::Character_klass()->initialize(CHECK);
vmClasses::Float_klass()->initialize(CHECK);
@ -1739,8 +1903,8 @@ class VerifySharedOopClosure: public BasicOopIterateClosure {
protected:
template <class T> void do_oop_work(T *p) {
oop obj = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(obj)) {
oop obj = HeapAccess<>::oop_load(p);
if (obj != nullptr) {
HeapShared::verify_reachable_objects_from(obj);
}
}
@ -2041,7 +2205,7 @@ bool HeapShared::is_a_test_class_in_unnamed_module(Klass* ik) {
void HeapShared::initialize_test_class_from_archive(JavaThread* current) {
Klass* k = _test_class;
if (k != nullptr && ArchiveHeapLoader::is_in_use()) {
if (k != nullptr && is_archived_heap_in_use()) {
JavaThread* THREAD = current;
ExceptionMark em(THREAD);
const ArchivedKlassSubGraphInfoRecord* record =
@ -2062,11 +2226,18 @@ void HeapShared::initialize_test_class_from_archive(JavaThread* current) {
void HeapShared::init_for_dumping(TRAPS) {
if (CDSConfig::is_dumping_heap()) {
setup_test_class(ArchiveHeapTestClass);
_dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
init_subgraph_entry_fields(CHECK);
}
}
void HeapShared::init_heap_writer() {
if (HeapShared::is_writing_streaming_mode()) {
AOTStreamedHeapWriter::init();
} else {
AOTMappedHeapWriter::init();
}
}
void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
bool is_full_module_graph) {
_num_total_subgraph_recordings = 0;
@ -2117,23 +2288,12 @@ void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
#endif
}
// Keep track of the contents of the archived interned string table. This table
// is used only by CDSHeapVerifier.
void HeapShared::add_to_dumped_interned_strings(oop string) {
assert_at_safepoint(); // DumpedInternedStrings uses raw oops
assert(!ArchiveHeapWriter::is_string_too_large_to_archive(string), "must be");
bool created;
_dumped_interned_strings->put_if_absent(string, true, &created);
if (created) {
// Prevent string deduplication from changing the value field to
// something not in the archive.
java_lang_String::set_deduplication_forbidden(string);
_dumped_interned_strings->maybe_grow();
}
}
bool HeapShared::is_dumped_interned_string(oop o) {
return _dumped_interned_strings->get(o) != nullptr;
if (is_writing_mapping_mode()) {
return AOTMappedHeapWriter::is_dumped_interned_string(o);
} else {
return AOTStreamedHeapWriter::is_dumped_interned_string(o);
}
}
// These tables should be used only within the CDS safepoint, so
@ -2142,10 +2302,12 @@ bool HeapShared::is_dumped_interned_string(oop o) {
void HeapShared::delete_tables_with_raw_oops() {
assert(_seen_objects_table == nullptr, "should have been deleted");
delete _dumped_interned_strings;
_dumped_interned_strings = nullptr;
ArchiveHeapWriter::delete_tables_with_raw_oops();
if (is_writing_mapping_mode()) {
AOTMappedHeapWriter::delete_tables_with_raw_oops();
} else {
assert(is_writing_streaming_mode(), "what other mode?");
AOTStreamedHeapWriter::delete_tables_with_raw_oops();
}
}
void HeapShared::debug_trace() {
@ -2242,6 +2404,35 @@ void HeapShared::print_stats() {
avg_size(_total_obj_size, _total_obj_count));
}
bool HeapShared::is_metadata_field(oop src_obj, int offset) {
bool result = false;
do_metadata_offsets(src_obj, [&](int metadata_offset) {
if (metadata_offset == offset) {
result = true;
}
});
return result;
}
void HeapShared::remap_dumped_metadata(oop src_obj, address archived_object) {
do_metadata_offsets(src_obj, [&](int offset) {
Metadata** buffered_field_addr = (Metadata**)(archived_object + offset);
Metadata* native_ptr = *buffered_field_addr;
if (native_ptr == nullptr) {
return;
}
if (RegeneratedClasses::has_been_regenerated(native_ptr)) {
native_ptr = RegeneratedClasses::get_regenerated_object(native_ptr);
}
address buffered_native_ptr = ArchiveBuilder::current()->get_buffered_addr((address)native_ptr);
address requested_native_ptr = ArchiveBuilder::current()->to_requested(buffered_native_ptr);
*buffered_field_addr = (Metadata*)requested_native_ptr;
});
}
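// A minimal standalone sketch of the dump-time remapping above (hypothetical
// addresses, simplified, 64-bit assumed): a live Metadata* is translated first
// into its copy in the dump buffer, then into the "requested" address the
// archive is laid out for; each step is a constant offset in this model.
#include <cstdint>
#include <cstdio>

int main() {
  const intptr_t runtime_to_buffered   = 0x1000;   // where the buffered copy sits
  const intptr_t buffered_to_requested = -0x4000;  // buffer -> requested layout

  uintptr_t native_ptr    = 0x7f0000000000;        // Metadata* seen at dump time
  uintptr_t buffered_ptr  = native_ptr + runtime_to_buffered;
  uintptr_t requested_ptr = buffered_ptr + buffered_to_requested;

  // The field inside the buffered object is patched to the requested address,
  // so it is already correct if the archive maps at its requested base.
  printf("patched field value: 0x%zx\n", (size_t)requested_ptr);
  return 0;
}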
bool HeapShared::is_archived_boot_layer_available(JavaThread* current) {
TempNewSymbol klass_name = SymbolTable::new_symbol(ARCHIVED_BOOT_LAYER_CLASS);
InstanceKlass* k = SystemDictionary::find_instance_klass(current, klass_name, Handle());

View File

@ -47,7 +47,6 @@ class MetaspaceObjToOopHandleTable;
class ResourceBitMap;
struct ArchivableStaticFieldInfo;
class ArchiveHeapInfo;
#define ARCHIVED_BOOT_LAYER_CLASS "jdk/internal/module/ArchivedBootLayer"
#define ARCHIVED_BOOT_LAYER_FIELD "archivedBootLayer"
@ -137,12 +136,152 @@ class ArchivedKlassSubGraphInfoRecord {
};
#endif // INCLUDE_CDS_JAVA_HEAP
struct LoadedArchiveHeapRegion;
enum class HeapArchiveMode {
_uninitialized,
_mapping,
_streaming
};
class ArchiveMappedHeapHeader {
size_t _ptrmap_start_pos; // The first bit in the ptrmap corresponds to this position in the heap.
size_t _oopmap_start_pos; // The first bit in the oopmap corresponds to this position in the heap.
HeapRootSegments _root_segments; // Heap root segments info
public:
ArchiveMappedHeapHeader();
ArchiveMappedHeapHeader(size_t ptrmap_start_pos,
size_t oopmap_start_pos,
HeapRootSegments root_segments);
size_t ptrmap_start_pos() const { return _ptrmap_start_pos; }
size_t oopmap_start_pos() const { return _oopmap_start_pos; }
HeapRootSegments root_segments() const { return _root_segments; }
// This class is trivially copyable and assignable.
ArchiveMappedHeapHeader(const ArchiveMappedHeapHeader&) = default;
ArchiveMappedHeapHeader& operator=(const ArchiveMappedHeapHeader&) = default;
};
class ArchiveStreamedHeapHeader {
size_t _forwarding_offset; // Offset of forwarding information in the heap region.
size_t _roots_offset; // Start position for the roots
size_t _root_highest_object_index_table_offset; // Offset of root dfs depth information
size_t _num_roots; // Number of embedded roots
size_t _num_archived_objects; // The number of archived heap objects
public:
ArchiveStreamedHeapHeader();
ArchiveStreamedHeapHeader(size_t forwarding_offset,
size_t roots_offset,
size_t num_roots,
size_t root_highest_object_index_table_offset,
size_t num_archived_objects);
size_t forwarding_offset() const { return _forwarding_offset; }
size_t roots_offset() const { return _roots_offset; }
size_t num_roots() const { return _num_roots; }
size_t root_highest_object_index_table_offset() const { return _root_highest_object_index_table_offset; }
size_t num_archived_objects() const { return _num_archived_objects; }
// This class is trivially copyable and assignable.
ArchiveStreamedHeapHeader(const ArchiveStreamedHeapHeader&) = default;
ArchiveStreamedHeapHeader& operator=(const ArchiveStreamedHeapHeader&) = default;
};
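// A minimal sketch of why these headers are kept trivially copyable: they can
// be blitted byte-for-byte into and out of the archive image (hypothetical
// field subset; the real headers carry more state):
#include <cstddef>
#include <cstring>
#include <type_traits>

struct ModelHeader {
  size_t ptrmap_start_pos;
  size_t oopmap_start_pos;
};
static_assert(std::is_trivially_copyable<ModelHeader>::value, "must be blittable");

int main() {
  char image[sizeof(ModelHeader)];
  ModelHeader h = {16, 32};
  memcpy(image, &h, sizeof(h));  // safe only because the type is trivially copyable
  return 0;
}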
class ArchiveMappedHeapInfo {
MemRegion _buffer_region; // Contains the archived objects to be written into the CDS archive.
CHeapBitMap _oopmap;
CHeapBitMap _ptrmap;
HeapRootSegments _root_segments;
size_t _oopmap_start_pos; // How many zeros were removed from the beginning of the bit map?
size_t _ptrmap_start_pos; // How many zeros were removed from the beginning of the bit map?
public:
ArchiveMappedHeapInfo() :
_buffer_region(),
_oopmap(128, mtClassShared),
_ptrmap(128, mtClassShared),
_root_segments(),
_oopmap_start_pos(),
_ptrmap_start_pos() {}
bool is_used() { return !_buffer_region.is_empty(); }
MemRegion buffer_region() { return _buffer_region; }
void set_buffer_region(MemRegion r) { _buffer_region = r; }
char* buffer_start() { return (char*)_buffer_region.start(); }
size_t buffer_byte_size() { return _buffer_region.byte_size(); }
CHeapBitMap* oopmap() { return &_oopmap; }
CHeapBitMap* ptrmap() { return &_ptrmap; }
void set_oopmap_start_pos(size_t start_pos) { _oopmap_start_pos = start_pos; }
void set_ptrmap_start_pos(size_t start_pos) { _ptrmap_start_pos = start_pos; }
void set_root_segments(HeapRootSegments segments) { _root_segments = segments; }
HeapRootSegments root_segments() { return _root_segments; }
ArchiveMappedHeapHeader create_header();
};
class ArchiveStreamedHeapInfo {
MemRegion _buffer_region; // Contains the archived objects to be written into the CDS archive.
CHeapBitMap _oopmap;
size_t _roots_offset; // Offset of the HeapShared::roots() object, from the bottom
// of the archived heap objects, in bytes.
size_t _num_roots;
size_t _forwarding_offset; // Offset of forwarding information from the bottom
size_t _root_highest_object_index_table_offset; // Offset to root dfs depth information
size_t _num_archived_objects; // The number of archived objects written into the CDS archive.
public:
ArchiveStreamedHeapInfo()
: _buffer_region(),
_oopmap(128, mtClassShared),
_roots_offset(),
_forwarding_offset(),
_root_highest_object_index_table_offset(),
_num_archived_objects() {}
bool is_used() { return !_buffer_region.is_empty(); }
void set_buffer_region(MemRegion r) { _buffer_region = r; }
MemRegion buffer_region() { return _buffer_region; }
char* buffer_start() { return (char*)_buffer_region.start(); }
size_t buffer_byte_size() { return _buffer_region.byte_size(); }
CHeapBitMap* oopmap() { return &_oopmap; }
void set_roots_offset(size_t n) { _roots_offset = n; }
size_t roots_offset() { return _roots_offset; }
void set_num_roots(size_t n) { _num_roots = n; }
size_t num_roots() { return _num_roots; }
void set_forwarding_offset(size_t n) { _forwarding_offset = n; }
void set_root_highest_object_index_table_offset(size_t n) { _root_highest_object_index_table_offset = n; }
void set_num_archived_objects(size_t n) { _num_archived_objects = n; }
size_t num_archived_objects() { return _num_archived_objects; }
ArchiveStreamedHeapHeader create_header();
};
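// A minimal sketch of the info -> header handoff, assuming create_header()
// simply snapshots the offsets recorded by the writer (hypothetical field
// subset; the real implementation lives in the .cpp file):
#include <cstdio>

struct ModelStreamedHeader { size_t roots_offset; size_t num_roots; };

struct ModelStreamedInfo {
  size_t roots_offset = 0;
  size_t num_roots = 0;
  ModelStreamedHeader create_header() const {
    return ModelStreamedHeader{roots_offset, num_roots};
  }
};

int main() {
  ModelStreamedInfo info;
  info.roots_offset = 4096;  // filled in by the streamed heap writer
  info.num_roots = 17;
  ModelStreamedHeader h = info.create_header();
  printf("roots at +%zu, %zu roots\n", h.roots_offset, h.num_roots);
  return 0;
}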
class HeapShared: AllStatic {
friend class VerifySharedOopClosure;
public:
static void initialize_loading_mode(HeapArchiveMode mode) NOT_CDS_JAVA_HEAP_RETURN;
static void initialize_writing_mode() NOT_CDS_JAVA_HEAP_RETURN;
inline static bool is_loading() NOT_CDS_JAVA_HEAP_RETURN_(false);
inline static bool is_loading_streaming_mode() NOT_CDS_JAVA_HEAP_RETURN_(false);
inline static bool is_loading_mapping_mode() NOT_CDS_JAVA_HEAP_RETURN_(false);
inline static bool is_writing() NOT_CDS_JAVA_HEAP_RETURN_(false);
inline static bool is_writing_streaming_mode() NOT_CDS_JAVA_HEAP_RETURN_(false);
inline static bool is_writing_mapping_mode() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_subgraph_root_class(InstanceKlass* ik);
// Scratch objects for archiving Klass::java_mirror()
@ -151,9 +290,22 @@ public:
static oop scratch_java_mirror(oop java_mirror) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static bool is_archived_boot_layer_available(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_archived_heap_in_use() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool can_use_archived_heap() NOT_CDS_JAVA_HEAP_RETURN_(false);
static bool is_too_large_to_archive(size_t size);
static bool is_string_too_large_to_archive(oop string);
static bool is_too_large_to_archive(oop obj);
static void initialize_streaming() NOT_CDS_JAVA_HEAP_RETURN;
static void enable_gc() NOT_CDS_JAVA_HEAP_RETURN;
static void materialize_thread_object() NOT_CDS_JAVA_HEAP_RETURN;
static void add_to_dumped_interned_strings(oop string) NOT_CDS_JAVA_HEAP_RETURN;
static void finalize_initialization(FileMapInfo* static_mapinfo) NOT_CDS_JAVA_HEAP_RETURN;
private:
#if INCLUDE_CDS_JAVA_HEAP
static DumpedInternedStrings *_dumped_interned_strings;
static HeapArchiveMode _heap_load_mode;
static HeapArchiveMode _heap_write_mode;
// statistics
constexpr static int ALLOC_STAT_SLOTS = 16;
@ -282,8 +434,6 @@ private:
static ArchivedKlassSubGraphInfoRecord* _run_time_special_subgraph; // for initializing classes during run time.
static GrowableArrayCHeap<oop, mtClassShared>* _pending_roots;
static GrowableArrayCHeap<OopHandle, mtClassShared>* _root_segments;
static int _root_segment_max_size_elems;
static OopHandle _scratch_basic_type_mirrors[T_VOID+1];
static MetaspaceObjToOopHandleTable* _scratch_objects_table;
@ -326,16 +476,6 @@ private:
static void resolve_or_init(Klass* k, bool do_init, TRAPS);
static void init_archived_fields_for(Klass* k, const ArchivedKlassSubGraphInfoRecord* record);
static int init_loaded_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
MemRegion& archive_space);
static void sort_loaded_regions(LoadedArchiveHeapRegion* loaded_regions, int num_loaded_regions,
uintptr_t buffer);
static bool load_regions(FileMapInfo* mapinfo, LoadedArchiveHeapRegion* loaded_regions,
int num_loaded_regions, uintptr_t buffer);
static void init_loaded_heap_relocation(LoadedArchiveHeapRegion* reloc_info,
int num_loaded_regions);
static void fill_failed_loaded_region();
static void mark_native_pointers(oop orig_obj);
static bool has_been_archived(oop orig_obj);
static void prepare_resolved_references();
static void archive_strings();
@ -380,10 +520,7 @@ private:
return _archived_object_cache;
}
static CachedOopInfo* get_cached_oop_info(oop orig_obj) {
OopHandle oh(&orig_obj);
return _archived_object_cache->get(oh);
}
static CachedOopInfo* get_cached_oop_info(oop orig_obj);
static int archive_exception_instance(oop exception);
@ -391,14 +528,19 @@ private:
KlassSubGraphInfo* subgraph_info,
oop orig_obj);
static void add_to_dumped_interned_strings(oop string);
static bool is_dumped_interned_string(oop o);
// Scratch objects for archiving Klass::java_mirror()
static void set_scratch_java_mirror(Klass* k, oop mirror);
static void remove_scratch_objects(Klass* k);
static bool is_metadata_field(oop src_obj, int offset);
template <typename T> static void do_metadata_offsets(oop src_obj, T callback);
static void remap_dumped_metadata(oop src_obj, address archived_object);
inline static void remap_loaded_metadata(oop obj);
inline static oop maybe_remap_referent(bool is_java_lang_ref, size_t field_offset, oop referent);
static void get_pointer_info(oop src_obj, bool& has_oop_pointers, bool& has_native_pointers);
static void set_has_native_pointers(oop src_obj);
static uintptr_t archive_location(oop src_obj);
// We use the HeapShared::roots() array to make sure that objects stored in the
// archived heap region are not prematurely collected. These roots include:
@ -432,7 +574,9 @@ private:
#endif // INCLUDE_CDS_JAVA_HEAP
public:
static void write_heap(ArchiveHeapInfo* heap_info) NOT_CDS_JAVA_HEAP_RETURN;
static void finish_materialize_objects() NOT_CDS_JAVA_HEAP_RETURN;
static void write_heap(ArchiveMappedHeapInfo* mapped_heap_info, ArchiveStreamedHeapInfo* streamed_heap_info) NOT_CDS_JAVA_HEAP_RETURN;
static objArrayOop scratch_resolved_references(ConstantPool* src);
static void add_scratch_resolved_references(ConstantPool* src, objArrayOop dest) NOT_CDS_JAVA_HEAP_RETURN;
static void init_dumping() NOT_CDS_JAVA_HEAP_RETURN;
@ -448,9 +592,8 @@ private:
static void initialize_from_archived_subgraph(JavaThread* current, Klass* k) NOT_CDS_JAVA_HEAP_RETURN;
static void init_for_dumping(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void init_heap_writer() NOT_CDS_JAVA_HEAP_RETURN;
static void write_subgraph_info_table() NOT_CDS_JAVA_HEAP_RETURN;
static void add_root_segment(objArrayOop segment_oop) NOT_CDS_JAVA_HEAP_RETURN;
static void init_root_segment_sizes(int max_size_elems) NOT_CDS_JAVA_HEAP_RETURN;
static void serialize_tables(SerializeClosure* soc) NOT_CDS_JAVA_HEAP_RETURN;
#ifndef PRODUCT
@ -472,22 +615,13 @@ private:
static void scan_java_class(Klass* k);
static void scan_java_mirror(oop orig_mirror);
static void copy_and_rescan_aot_inited_mirror(InstanceKlass* ik);
};
#if INCLUDE_CDS_JAVA_HEAP
class DumpedInternedStrings :
public ResizeableHashTable<oop, bool,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::string_oop_hash>
{
public:
DumpedInternedStrings(unsigned size, unsigned max_size) :
ResizeableHashTable<oop, bool,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::string_oop_hash>(size, max_size) {}
static void log_heap_roots();
static intptr_t log_target_location(oop source_oop);
static void log_oop_info(outputStream* st, oop source_oop, address archived_object_start, address archived_object_end);
static void log_oop_info(outputStream* st, oop source_oop);
static void log_oop_details(oop source_oop, address buffered_addr);
};
#endif
#endif // SHARE_CDS_HEAPSHARED_HPP

View File

@ -0,0 +1,114 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_CDS_HEAPSHARED_INLINE_HPP
#define SHARE_CDS_HEAPSHARED_INLINE_HPP
#include "cds/heapShared.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/regeneratedClasses.hpp"
#include "classfile/javaClasses.hpp"
#include "utilities/macros.hpp"
#if INCLUDE_CDS_JAVA_HEAP
inline bool HeapShared::is_loading() {
return _heap_load_mode != HeapArchiveMode::_uninitialized;
}
inline bool HeapShared::is_loading_streaming_mode() {
assert(_heap_load_mode != HeapArchiveMode::_uninitialized, "not initialized yet");
return _heap_load_mode == HeapArchiveMode::_streaming;
}
inline bool HeapShared::is_loading_mapping_mode() {
assert(_heap_load_mode != HeapArchiveMode::_uninitialized, "not initialized yet");
return _heap_load_mode == HeapArchiveMode::_mapping;
}
inline bool HeapShared::is_writing() {
return _heap_write_mode != HeapArchiveMode::_uninitialized;
}
inline bool HeapShared::is_writing_streaming_mode() {
assert(_heap_write_mode != HeapArchiveMode::_uninitialized, "not initialized yet");
return _heap_write_mode == HeapArchiveMode::_streaming;
}
inline bool HeapShared::is_writing_mapping_mode() {
assert(_heap_write_mode != HeapArchiveMode::_uninitialized, "not initialized yet");
return _heap_write_mode == HeapArchiveMode::_mapping;
}
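// A minimal standalone model of the predicate contract above: asking which
// concrete mode is active before a mode has been chosen is a programming
// error (hypothetical names, simplified):
#include <cassert>

enum class ModelMode { Uninitialized, Mapping, Streaming };
static ModelMode g_load_mode = ModelMode::Uninitialized;

static bool model_is_loading() { return g_load_mode != ModelMode::Uninitialized; }

static bool model_is_loading_streaming_mode() {
  assert(g_load_mode != ModelMode::Uninitialized && "not initialized yet");
  return g_load_mode == ModelMode::Streaming;
}

int main() {
  g_load_mode = ModelMode::Mapping;  // set once, cf. initialize_loading_mode()
  return (model_is_loading() && !model_is_loading_streaming_mode()) ? 0 : 1;
}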
// Keep the knowledge about which objects have what metadata in one single place
template <typename T>
void HeapShared::do_metadata_offsets(oop src_obj, T callback) {
if (java_lang_Class::is_instance(src_obj)) {
assert(java_lang_Class::klass_offset() < java_lang_Class::array_klass_offset(),
"metadata offsets must be sorted");
callback(java_lang_Class::klass_offset());
callback(java_lang_Class::array_klass_offset());
} else if (java_lang_invoke_ResolvedMethodName::is_instance(src_obj)) {
callback(java_lang_invoke_ResolvedMethodName::vmtarget_offset());
}
}
inline void HeapShared::remap_loaded_metadata(oop src_obj) {
do_metadata_offsets(src_obj, [&](int offset) {
Metadata* metadata = src_obj->metadata_field(offset);
if (metadata != nullptr) {
metadata = (Metadata*)(address(metadata) + AOTMetaspace::relocation_delta());
src_obj->metadata_field_put(offset, metadata);
}
});
}
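// The load-time analogue above is a single constant slide: every archived
// Metadata* is shifted by the distance between the requested base and the
// base the archive actually mapped at. A minimal sketch (hypothetical bases,
// 64-bit assumed):
#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t requested_base = 0x800000000;  // base the archive was written for
  uintptr_t actual_base    = 0x7f0000000;  // base it really mapped at
  intptr_t  delta          = (intptr_t)(actual_base - requested_base);

  uintptr_t archived_field = 0x800001234;  // Metadata* as stored in the archive
  uintptr_t runtime_value  = archived_field + delta;
  printf("relocated: 0x%zx\n", (size_t)runtime_value);
  return 0;
}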
inline oop HeapShared::maybe_remap_referent(bool is_java_lang_ref, size_t field_offset, oop referent) {
if (referent == nullptr) {
return nullptr;
}
if (is_java_lang_ref && AOTReferenceObjSupport::skip_field((int)field_offset)) {
return nullptr;
}
if (java_lang_Class::is_instance(referent)) {
Klass* k = java_lang_Class::as_Klass(referent);
if (RegeneratedClasses::has_been_regenerated(k)) {
referent = RegeneratedClasses::get_regenerated_object(k)->java_mirror();
}
// When the source object points to a "real" mirror, the buffered object should point
// to the "scratch" mirror, which has all unarchivable fields scrubbed (to be reinstated
// at run time).
referent = HeapShared::scratch_java_mirror(referent);
assert(referent != nullptr, "must be");
}
return referent;
}
#endif // INCLUDE_CDS_JAVA_HEAP
#endif // SHARE_CDS_HEAPSHARED_INLINE_HPP

View File

@ -38,6 +38,7 @@
#include "memory/resourceArea.hpp"
#include "runtime/atomicAccess.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/mutex.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/safepointVerifiers.hpp"
@ -149,7 +150,8 @@ ClassLoaderData* ClassLoaderDataGraph::add_to_graph(Handle loader, bool has_clas
// We mustn't GC until we've installed the ClassLoaderData in the Graph since the CLD
// contains oops in _handles that must be walked. GC doesn't walk CLD from the
// loader oop in all collections, particularly young collections.
NoSafepointVerifier no_safepoints;
// Before is_init_completed(), GC is not allowed to run.
NoSafepointVerifier no_safepoints(is_init_completed());
cld = new ClassLoaderData(loader, has_class_mirror_holder);
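// A minimal model of the conditionally-armed verifier used above: the check is
// inert while is_init_completed() is false, since GC cannot run that early
// (simplified; the real class hooks into HotSpot's safepoint machinery):
#include <cassert>

struct ModelNoSafepointVerifier {
  bool _armed;
  explicit ModelNoSafepointVerifier(bool armed) : _armed(armed) {}
  void on_safepoint() const { assert(!_armed && "safepoint inside no-safepoint scope"); }
};

int main() {
  bool init_completed = false;             // bootstrap phase
  ModelNoSafepointVerifier nsv(init_completed);
  nsv.on_safepoint();                      // tolerated during bootstrap
  return 0;
}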

View File

@ -24,7 +24,7 @@
#include "cds/aotLogging.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "cds/serializeClosure.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataShared.hpp"
@ -152,6 +152,35 @@ void ArchivedClassLoaderData::clear_archived_oops() {
// ------------------------------
void ClassLoaderDataShared::load_archived_platform_and_system_class_loaders() {
#if INCLUDE_CDS_JAVA_HEAP
// The streaming object loader prefers to load the class-loader-related objects
// before the CLD constructor, which holds a NoSafepointVerifier, is run.
if (!HeapShared::is_loading_streaming_mode()) {
return;
}
// Ensure these class loaders are eagerly materialized before their CLDs are created.
HeapShared::get_root(_platform_loader_root_index, false /* clear */);
HeapShared::get_root(_system_loader_root_index, false /* clear */);
if (Universe::is_module_initialized() || !CDSConfig::is_using_full_module_graph()) {
return;
}
// When using the full module graph, we need to load unnamed modules too.
ModuleEntry* platform_loader_module_entry = _archived_platform_loader_data.unnamed_module();
if (platform_loader_module_entry != nullptr) {
platform_loader_module_entry->preload_archived_oops();
}
ModuleEntry* system_loader_module_entry = _archived_system_loader_data.unnamed_module();
if (system_loader_module_entry != nullptr) {
system_loader_module_entry->preload_archived_oops();
}
#endif
}
static ClassLoaderData* null_class_loader_data() {
ClassLoaderData* loader_data = ClassLoaderData::the_null_class_loader_data();
assert(loader_data != nullptr, "must be");

View File

@ -38,6 +38,7 @@ class ClassLoaderDataShared : AllStatic {
static bool _full_module_graph_loaded;
CDS_JAVA_HEAP_ONLY(static void ensure_module_entry_table_exists(oop class_loader);)
public:
static void load_archived_platform_and_system_class_loaders() NOT_CDS_JAVA_HEAP_RETURN;
static void restore_archived_modules_for_preloading_classes(JavaThread* current) NOT_CDS_JAVA_HEAP_RETURN;
#if INCLUDE_CDS_JAVA_HEAP
static void ensure_module_entry_tables_exist();

View File

@ -25,9 +25,8 @@
#include "cds/aotMetaspace.hpp"
#include "cds/aotReferenceObjSupport.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/javaClasses.inline.hpp"
@ -978,7 +977,7 @@ void java_lang_Class::fixup_mirror(Klass* k, TRAPS) {
}
if (k->in_aot_cache() && k->has_archived_mirror_index()) {
if (ArchiveHeapLoader::is_in_use()) {
if (HeapShared::is_archived_heap_in_use()) {
bool present = restore_archived_mirror(k, Handle(), Handle(), Handle(), CHECK);
assert(present, "Missing archived mirror for %s", k->external_name());
return;

View File

@ -546,6 +546,10 @@ void ModuleEntry::load_from_archive(ClassLoaderData* loader_data) {
JFR_ONLY(INIT_ID(this);)
}
void ModuleEntry::preload_archived_oops() {
(void)HeapShared::get_root(_archived_module_index, false /* clear */);
}
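// A minimal sketch of the root-index indirection used here: fetching with
// clear=false materializes the object but keeps the strong root alive (the
// preloading case above), while clear=true also releases the root once the
// value has reached its final field (hypothetical types, simplified):
#include <cstdio>
#include <vector>

struct ModelRoots {
  std::vector<const char*> slots;
  const char* get_root(int i, bool clear) {
    const char* v = slots[i];
    if (clear) slots[i] = nullptr;  // allow the object to be collected later
    return v;
  }
};

int main() {
  ModelRoots r{{"platform-loader-oop", "module-oop"}};
  (void)r.get_root(0, /*clear=*/false);           // preload: keep the root
  const char* m = r.get_root(1, /*clear=*/true);  // final consumer: release it
  printf("%s\n", m);
  return 0;
}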
void ModuleEntry::restore_archived_oops(ClassLoaderData* loader_data) {
assert(CDSConfig::is_using_archive(), "runtime only");
Handle module_handle(Thread::current(), HeapShared::get_root(_archived_module_index, /*clear=*/true));

View File

@ -206,6 +206,7 @@ public:
static Array<ModuleEntry*>* write_growable_array(GrowableArray<ModuleEntry*>* array);
static GrowableArray<ModuleEntry*>* restore_growable_array(Array<ModuleEntry*>* archived_array);
void load_from_archive(ClassLoaderData* loader_data);
void preload_archived_oops();
void restore_archived_oops(ClassLoaderData* loader_data);
void clear_archived_oops();
static void verify_archived_module_entries() PRODUCT_RETURN;

View File

@ -739,6 +739,8 @@ void Modules::init_archived_modules(JavaThread* current, Handle h_platform_loade
ModuleEntryTable::patch_javabase_entries(current, java_base_module);
}
ClassLoaderDataShared::load_archived_platform_and_system_class_loaders();
ClassLoaderData* platform_loader_data = SystemDictionary::register_loader(h_platform_loader);
SystemDictionary::set_platform_loader(platform_loader_data);
ClassLoaderDataShared::restore_java_platform_loader_from_archive(platform_loader_data);

View File

@ -22,11 +22,10 @@
*
*/
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.inline.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/compactHashtable.hpp"
#include "classfile/javaClasses.inline.hpp"
@ -80,7 +79,7 @@ OopHandle StringTable::_shared_strings_array;
int StringTable::_shared_strings_array_root_index;
inline oop StringTable::read_string_from_compact_hashtable(address base_address, u4 index) {
assert(ArchiveHeapLoader::is_in_use(), "sanity");
assert(AOTMappedHeapLoader::is_in_use(), "sanity");
objArrayOop array = (objArrayOop)(_shared_strings_array.resolve());
oop s;
@ -316,13 +315,13 @@ void StringTable::create_table() {
_local_table = new StringTableHash(start_size_log_2, END_SIZE, REHASH_LEN, true);
_oop_storage = OopStorageSet::create_weak("StringTable Weak", mtSymbol);
_oop_storage->register_num_dead_callback(&gc_notification);
}
#if INCLUDE_CDS_JAVA_HEAP
if (ArchiveHeapLoader::is_in_use()) {
_shared_strings_array = OopHandle(Universe::vm_global(), HeapShared::get_root(_shared_strings_array_root_index));
}
#endif
void StringTable::load_shared_strings_array() {
_shared_strings_array = OopHandle(Universe::vm_global(), HeapShared::get_root(_shared_strings_array_root_index));
}
#endif
void StringTable::item_added() {
AtomicAccess::inc(&_items_count);
@ -932,10 +931,14 @@ void StringtableDCmd::execute(DCmdSource source, TRAPS) {
// Sharing
#if INCLUDE_CDS_JAVA_HEAP
size_t StringTable::shared_entry_count() {
assert(HeapShared::is_loading_mapping_mode(), "should not reach here");
return _shared_table.entry_count();
}
oop StringTable::lookup_shared(const StringWrapper& name, unsigned int hash) {
if (!AOTMappedHeapLoader::is_in_use()) {
return nullptr;
}
assert(hash == hash_wrapped_string(name),
"hash must be computed using java_lang_String::hash_code");
// len is required but is already part of StringWrapper, so 0 is used
@ -943,6 +946,9 @@ oop StringTable::lookup_shared(const StringWrapper& name, unsigned int hash) {
}
oop StringTable::lookup_shared(const jchar* name, int len) {
if (!AOTMappedHeapLoader::is_in_use()) {
return nullptr;
}
StringWrapper wrapped_name(name, len);
// len is required but is already part of StringWrapper, so 0 is used
return _shared_table.lookup(wrapped_name, java_lang_String::hash_code(name, len), 0);
@ -955,6 +961,8 @@ void StringTable::allocate_shared_strings_array(TRAPS) {
return;
}
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
CompileBroker::wait_for_no_active_tasks();
precond(CDSConfig::allow_only_single_java_thread());
@ -977,7 +985,7 @@ void StringTable::allocate_shared_strings_array(TRAPS) {
log_info(aot)("allocated string table for %d strings", total);
if (!ArchiveHeapWriter::is_too_large_to_archive(single_array_size)) {
if (!HeapShared::is_too_large_to_archive(single_array_size)) {
// The entire table can fit in a single array
objArrayOop array = oopFactory::new_objArray(vmClasses::Object_klass(), total, CHECK);
_shared_strings_array = OopHandle(Universe::vm_global(), array);
@ -988,7 +996,7 @@ void StringTable::allocate_shared_strings_array(TRAPS) {
size_t primary_array_size = objArrayOopDesc::object_size(primary_array_length);
size_t secondary_array_size = objArrayOopDesc::object_size(_secondary_array_max_length);
if (ArchiveHeapWriter::is_too_large_to_archive(secondary_array_size)) {
if (HeapShared::is_too_large_to_archive(secondary_array_size)) {
// This can only happen if you have an extremely large number of classes that
// refer to more than 16384 * 16384 = ~268M interned strings! Not a practical concern
// but bail out for safety.
@ -1014,7 +1022,7 @@ void StringTable::allocate_shared_strings_array(TRAPS) {
primaryHandle()->obj_at_put(i, secondary);
log_info(aot)("string table array (secondary)[%d] length = %d", i, len);
assert(!ArchiveHeapWriter::is_too_large_to_archive(secondary), "sanity");
assert(!HeapShared::is_too_large_to_archive(secondary), "sanity");
}
assert(total == 0, "must be");
@ -1024,10 +1032,11 @@ void StringTable::allocate_shared_strings_array(TRAPS) {
#ifndef PRODUCT
void StringTable::verify_secondary_array_index_bits() {
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
int max;
for (max = 1; ; max++) {
size_t next_size = objArrayOopDesc::object_size(1 << (max + 1));
if (ArchiveHeapWriter::is_too_large_to_archive(next_size)) {
if (HeapShared::is_too_large_to_archive(next_size)) {
break;
}
}
@ -1050,6 +1059,7 @@ void StringTable::verify_secondary_array_index_bits() {
// [2] Store the index and hashcode into _shared_table.
oop StringTable::init_shared_strings_array() {
assert(CDSConfig::is_dumping_heap(), "must be");
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
objArrayOop array = (objArrayOop)(_shared_strings_array.resolve());
verify_secondary_array_index_bits();
@ -1057,11 +1067,11 @@ oop StringTable::init_shared_strings_array() {
int index = 0;
auto copy_into_array = [&] (WeakHandle* val) {
oop string = val->peek();
if (string != nullptr && !ArchiveHeapWriter::is_string_too_large_to_archive(string)) {
if (string != nullptr && !HeapShared::is_string_too_large_to_archive(string)) {
// If string is too large, don't put it into the string table.
// - If there are no other refernences to it, it won't be stored into the archive,
// - If there are no other references to it, it won't be stored into the archive,
// so we are all good.
// - If there's a referece to it, we will report an error inside HeapShared.cpp and
// - If there's a reference to it, we will report an error inside HeapShared.cpp and
// dumping will fail.
HeapShared::add_to_dumped_interned_strings(string);
if (!_is_two_dimensional_shared_strings_array) {
@ -1095,7 +1105,7 @@ void StringTable::write_shared_table() {
int index = 0;
auto copy_into_shared_table = [&] (WeakHandle* val) {
oop string = val->peek();
if (string != nullptr && !ArchiveHeapWriter::is_string_too_large_to_archive(string)) {
if (string != nullptr && !HeapShared::is_string_too_large_to_archive(string)) {
unsigned int hash = java_lang_String::hash_code(string);
writer.add(hash, index);
index ++;
@ -1109,6 +1119,7 @@ void StringTable::write_shared_table() {
}
void StringTable::set_shared_strings_array_index(int root_index) {
assert(HeapShared::is_writing_mapping_mode(), "should not reach here");
_shared_strings_array_root_index = root_index;
}
@ -1118,7 +1129,7 @@ void StringTable::serialize_shared_table_header(SerializeClosure* soc) {
if (soc->writing()) {
// Sanity. Make sure we don't use the shared table at dump time
_shared_table.reset();
} else if (!ArchiveHeapLoader::is_in_use()) {
} else if (!AOTMappedHeapLoader::is_in_use()) {
_shared_table.reset();
}

View File

@ -128,7 +128,7 @@ private:
// [2] _is_two_dimensional_shared_strings_array = true: _shared_strings_array is an Object[][]
// This happens when there are too many elements in the shared table. We store them
// using two levels of objArrays, such that none of the arrays are too big for
// ArchiveHeapWriter::is_too_large_to_archive(). In this case, the index is splited into two
// AOTMappedHeapWriter::is_too_large_to_archive(). In this case, the index is split into two
// parts. Each shared string is stored as _shared_strings_array[primary_index][secondary_index]:
//
// [bits 31 .. 14][ bits 13 .. 0 ]
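// A minimal sketch of the index split described above, with the secondary
// width inferred from the 16384-element secondary arrays (low 14 bits pick
// the slot, the remaining high bits pick the secondary array):
#include <cstdio>

const unsigned SECONDARY_BITS = 14;
const unsigned SECONDARY_MASK = (1u << SECONDARY_BITS) - 1;

int main() {
  unsigned index     = 50000;                   // as stored in the shared table
  unsigned primary   = index >> SECONDARY_BITS; // which Object[] in the Object[][]
  unsigned secondary = index & SECONDARY_MASK;  // slot within that Object[]
  printf("string = array[%u][%u]\n", primary, secondary);
  return 0;
}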
@ -147,6 +147,7 @@ private:
static oop lookup_shared(const jchar* name, int len) NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static size_t shared_entry_count() NOT_CDS_JAVA_HEAP_RETURN_(0);
static void allocate_shared_strings_array(TRAPS) NOT_CDS_JAVA_HEAP_RETURN;
static void load_shared_strings_array() NOT_CDS_JAVA_HEAP_RETURN;
static oop init_shared_strings_array() NOT_CDS_JAVA_HEAP_RETURN_(nullptr);
static void write_shared_table() NOT_CDS_JAVA_HEAP_RETURN;
static void set_shared_strings_array_index(int root_index) NOT_CDS_JAVA_HEAP_RETURN;

View File

@ -1185,6 +1185,7 @@ void SystemDictionary::preload_class(Handle class_loader, InstanceKlass* ik, TRA
ClassLoaderData* loader_data = ClassLoaderData::class_loader_data(class_loader());
oop java_mirror = ik->archived_java_mirror();
precond(java_mirror != nullptr);
assert(java_lang_Class::module(java_mirror) != nullptr, "must have been archived");
Handle pd(THREAD, java_lang_Class::protection_domain(java_mirror));
PackageEntry* pkg_entry = ik->package();
@ -1202,7 +1203,6 @@ void SystemDictionary::preload_class(Handle class_loader, InstanceKlass* ik, TRA
update_dictionary(THREAD, ik, loader_data);
}
assert(java_lang_Class::module(java_mirror) != nullptr, "must have been archived");
assert(ik->is_loaded(), "Must be in at least loaded state");
}

View File

@ -23,10 +23,12 @@
*/
#include "cds/aotLinkedClassBulkLoader.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataShared.hpp"
#include "classfile/dictionary.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
@ -132,21 +134,28 @@ void vmClasses::resolve_all(TRAPS) {
CollectedHeap::set_filler_object_klass(vmClasses::Object_klass());
#if INCLUDE_CDS
if (CDSConfig::is_using_archive()) {
// It's unsafe to access the archived heap regions before they
// are fixed up, so we must do the fixup as early as possible
// before the archived java objects are accessed by functions
// such as java_lang_Class::restore_archived_mirror and
// ConstantPool::restore_unshareable_info (restores the archived
// resolved_references array object).
//
// ArchiveHeapLoader::fixup_regions fills the empty
// spaces in the archived heap regions and may use
// vmClasses::Object_klass(), so we can do this only after
// Object_klass is resolved. See the above resolve_through()
// call. No mirror objects are accessed/restored in the above call.
// Mirrors are restored after java.lang.Class is loaded.
ArchiveHeapLoader::fixup_region();
#if INCLUDE_CDS_JAVA_HEAP
if (HeapShared::is_loading() && HeapShared::is_loading_mapping_mode()) {
// It's unsafe to access the archived heap regions before they
// are fixed up, so we must do the fixup as early as possible
// before the archived java objects are accessed by functions
// such as java_lang_Class::restore_archived_mirror and
// ConstantPool::restore_unshareable_info (restores the archived
// resolved_references array object).
//
// AOTMappedHeapLoader::fixup_regions fills the empty
// spaces in the archived heap regions and may use
// vmClasses::Object_klass(), so we can do this only after
// Object_klass is resolved. See the above resolve_through()
// call. No mirror objects are accessed/restored in the above call.
// Mirrors are restored after java.lang.Class is loaded.
AOTMappedHeapLoader::fixup_region();
}
if (HeapShared::is_archived_heap_in_use() && !CDSConfig::is_using_full_module_graph()) {
// Need to remove all the archived java.lang.Module objects from HeapShared::roots().
ClassLoaderDataShared::clear_archived_oops();
}
#endif // INCLUDE_CDS_JAVA_HEAP
// Initialize the constant pool for the Object_class
assert(Object_klass()->in_aot_cache(), "must be");
Object_klass()->constants()->restore_unshareable_info(CHECK);

View File

@ -631,6 +631,10 @@ void CollectedHeap::before_exit() {
stop();
}
size_t CollectedHeap::bootstrap_max_memory() const {
return MaxNewSize;
}
#ifndef PRODUCT
bool CollectedHeap::promotion_should_fail(volatile size_t* count) {

View File

@ -500,6 +500,7 @@ protected:
virtual bool can_load_archived_objects() const { return false; }
virtual HeapWord* allocate_loaded_archive_space(size_t size) { return nullptr; }
virtual void complete_loaded_archive_space(MemRegion archive_space) { }
virtual size_t bootstrap_max_memory() const;
virtual bool is_oop(oop object) const;
// Non product verification and debugging.

View File

@ -22,6 +22,7 @@
*
*/
#include "cds/cdsConfig.hpp"
#include "classfile/altHashing.hpp"
#include "gc/shared/stringdedup/stringDedupConfig.hpp"
#include "logging/log.hpp"

View File

@ -22,6 +22,8 @@
*
*/
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/altHashing.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/stringTable.hpp"
@ -514,6 +516,7 @@ void StringDedup::Table::install(typeArrayOop obj, uint hash_code) {
// access to a String that is incompletely constructed; the value could be
// set before the coder.
bool StringDedup::Table::try_deduplicate_shared(oop java_string) {
assert(!HeapShared::is_loading_streaming_mode(), "should not reach here");
typeArrayOop value = java_lang_String::value(java_string);
assert(value != nullptr, "precondition");
assert(TypeArrayKlass::cast(value->klass())->element_type() == T_BYTE, "precondition");
@ -559,6 +562,7 @@ bool StringDedup::Table::try_deduplicate_shared(oop java_string) {
}
bool StringDedup::Table::try_deduplicate_found_shared(oop java_string, oop found) {
assert(!HeapShared::is_loading_streaming_mode(), "should not reach here");
_cur_stat.inc_known_shared();
typeArrayOop found_value = java_lang_String::value(found);
if (found_value == java_lang_String::value(java_string)) {
@ -609,7 +613,8 @@ bool StringDedup::Table::deduplicate_if_permitted(oop java_string,
void StringDedup::Table::deduplicate(oop java_string) {
assert(java_lang_String::is_instance(java_string), "precondition");
_cur_stat.inc_inspected();
if ((StringTable::shared_entry_count() > 0) &&
if (AOTMappedHeapLoader::is_in_use() &&
(StringTable::shared_entry_count() > 0) &&
try_deduplicate_shared(java_string)) {
return; // Done if deduplicated against shared StringTable.
}

View File

@ -25,7 +25,7 @@
*/
#include "cds/archiveHeapWriter.hpp"
#include "cds/aotMappedHeapWriter.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/classUnloadingContext.hpp"
#include "gc/shared/fullGCForwarding.hpp"
@ -2770,7 +2770,7 @@ HeapWord* ShenandoahHeap::allocate_loaded_archive_space(size_t size) {
//
// CDS would guarantee no objects straddle multiple regions, as long as regions are as large
// as MIN_GC_REGION_ALIGNMENT.
guarantee(ShenandoahHeapRegion::region_size_bytes() >= ArchiveHeapWriter::MIN_GC_REGION_ALIGNMENT, "Must be");
guarantee(ShenandoahHeapRegion::region_size_bytes() >= AOTMappedHeapWriter::MIN_GC_REGION_ALIGNMENT, "Must be");
ShenandoahAllocRequest req = ShenandoahAllocRequest::for_cds(size);
return allocate_memory(req);

View File

@ -208,6 +208,13 @@ void ZArguments::initialize() {
FLAG_SET_DEFAULT(LogEventsBufferEntries, 250);
}
if (VerifyArchivedFields > 0) {
// ZGC doesn't support verifying at arbitrary points, because in its normal state the
// contents of the heap are not in a verifiable shape; only at particular points do they
// become verifiable. So instead of verifying eagerly, we trigger a GC that does its own
// verification when it is suitable to do so.
FLAG_SET_DEFAULT(VerifyArchivedFields, 2);
}
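// A condensed model of the two VerifyArchivedFields levels, matching the
// verify_the_heap() hunk earlier in this commit: level 1 verifies on the
// spot, level 2 (what ZGC forces) defers until is_init_completed():
#include <cstdio>

void model_verify_the_heap(int verify_archived_fields, bool init_completed) {
  if (verify_archived_fields == 1) {
    printf("verify now\n");
  } else if (verify_archived_fields == 2 && init_completed) {
    printf("verify after init completed\n");
  }
}

int main() {
  model_verify_the_heap(2, false);  // ZGC during bootstrap: nothing happens
  model_verify_the_heap(2, true);   // after init: verification runs
  return 0;
}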
// Verification before startup and after exit not (yet) supported
FLAG_SET_DEFAULT(VerifyDuringStartup, false);
FLAG_SET_DEFAULT(VerifyBeforeExit, false);

View File

@ -34,6 +34,7 @@
#include "gc/z/zGeneration.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zHeuristics.hpp"
#include "gc/z/zJNICritical.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zObjArrayAllocator.hpp"
@ -298,6 +299,10 @@ void ZCollectedHeap::unregister_nmethod(nmethod* nm) {
ZNMethod::purge_nmethod(nm);
}
size_t ZCollectedHeap::bootstrap_max_memory() const {
return MaxHeapSize - ZHeuristics::significant_young_overhead();
}
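// A minimal sketch of the bootstrap memory budget hook: the base class returns
// a generic bound and a collector may tighten it (values illustrative; the
// real override subtracts ZHeuristics' young-generation overhead estimate):
#include <cstdio>

struct ModelHeap {
  virtual ~ModelHeap() = default;
  virtual size_t bootstrap_max_memory() const { return 256u << 20; }  // cf. MaxNewSize
};

struct ModelZHeap : ModelHeap {
  size_t bootstrap_max_memory() const override {
    size_t max_heap       = size_t(1024) << 20;
    size_t young_overhead = size_t(128) << 20;  // stand-in for the heuristic
    return max_heap - young_overhead;
  }
};

int main() {
  ModelZHeap h;
  printf("bootstrap budget: %zu MB\n", h.bootstrap_max_memory() >> 20);
  return 0;
}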
void ZCollectedHeap::verify_nmethod(nmethod* nm) {
// Does nothing
}

View File

@ -115,6 +115,8 @@ public:
void pin_object(JavaThread* thread, oop obj) override;
void unpin_object(JavaThread* thread, oop obj) override;
size_t bootstrap_max_memory() const override;
void print_heap_on(outputStream* st) const override;
void print_gc_on(outputStream* st) const override;
bool print_location(outputStream* st, void* addr) const override;

View File

@ -32,6 +32,7 @@
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zStat.hpp"
#include "logging/log.hpp"
#include "runtime/init.hpp"
ZDirector* ZDirector::_director;
@ -916,6 +917,12 @@ void ZDirector::run_thread() {
// Main loop
while (wait_for_tick()) {
ZDirectorStats stats = sample_stats();
if (!is_init_completed()) {
// Not allowed to start GCs yet
continue;
}
if (!start_gc(stats)) {
adjust_gc(stats);
}

View File

@ -477,11 +477,10 @@ traceid JfrThreadLocal::external_thread_id(const Thread* t) {
return JfrRecorder::is_recording() ? thread_id(t) : jvm_thread_id(t);
}
static inline traceid load_java_thread_id(const Thread* t) {
static inline traceid load_java_thread_id(const JavaThread* t) {
assert(t != nullptr, "invariant");
assert(t->is_Java_thread(), "invariant");
oop threadObj = JavaThread::cast(t)->threadObj();
return threadObj != nullptr ? AccessThreadTraceId::id(threadObj) : 0;
oop threadObj = t->threadObj();
return threadObj != nullptr ? AccessThreadTraceId::id(threadObj) : static_cast<traceid>(t->monitor_owner_id());
}
#ifdef ASSERT
@ -502,7 +501,7 @@ traceid JfrThreadLocal::assign_thread_id(const Thread* t, JfrThreadLocal* tl) {
if (tid == 0) {
assert(can_assign(t), "invariant");
if (t->is_Java_thread()) {
tid = load_java_thread_id(t);
tid = load_java_thread_id(JavaThread::cast(t));
tl->_jvm_thread_id = tid;
AtomicAccess::store(&tl->_vthread_id, tid);
return tid;

View File

@ -23,10 +23,9 @@
*/
#include "cds/aotMetaspace.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/dynamicArchive.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classLoaderDataShared.hpp"
@ -323,7 +322,7 @@ void Universe::archive_exception_instances() {
}
void Universe::load_archived_object_instances() {
if (ArchiveHeapLoader::is_in_use()) {
if (HeapShared::is_archived_heap_in_use()) {
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
int index = _archived_basic_type_mirror_indices[i];
if (!is_reference_type((BasicType)i) && index >= 0) {
@ -559,10 +558,8 @@ void Universe::genesis(TRAPS) {
void Universe::initialize_basic_type_mirrors(TRAPS) {
#if INCLUDE_CDS_JAVA_HEAP
if (CDSConfig::is_using_archive() &&
ArchiveHeapLoader::is_in_use() &&
HeapShared::is_archived_heap_in_use() &&
_basic_type_mirrors[T_INT].resolve() != nullptr) {
assert(ArchiveHeapLoader::can_use(), "Sanity");
// check that all basic type mirrors are mapped also
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
if (!is_reference_type((BasicType)i)) {
@ -571,7 +568,7 @@ void Universe::initialize_basic_type_mirrors(TRAPS) {
}
}
} else
// _basic_type_mirrors[T_INT], etc, are null if archived heap is not mapped.
// _basic_type_mirrors[T_INT], etc, are null if not using an archived heap
#endif
{
for (int i = T_BOOLEAN; i < T_VOID+1; i++) {
@ -911,6 +908,21 @@ jint universe_init() {
return JNI_EINVAL;
}
// Add main_thread to the threads list to finish barrier setup with
// on_thread_attach. This should happen before we start building Java objects
// in the AOT heap loader, which invokes barriers.
{
JavaThread* main_thread = JavaThread::current();
MutexLocker mu(Threads_lock);
Threads::add(main_thread);
}
HeapShared::initialize_writing_mode();
// Create the string table before the AOT object archive is loaded,
// as loading the archive might need to access the string table.
StringTable::create_table();
#if INCLUDE_CDS
if (CDSConfig::is_using_archive()) {
// Read the data structures supporting the shared spaces (shared
@ -933,7 +945,6 @@ jint universe_init() {
#endif
SymbolTable::create_table();
StringTable::create_table();
if (strlen(VerifySubSet) > 0) {
Universe::initialize_verify_flags();
@ -1178,6 +1189,7 @@ bool universe_post_init() {
MemoryService::add_metaspace_memory_pools();
MemoryService::set_universe_heap(Universe::heap());
#if INCLUDE_CDS
AOTMetaspace::post_initialize(CHECK_false);
#endif

View File

@ -24,10 +24,8 @@
#include "cds/aotConstantPoolResolver.hpp"
#include "cds/archiveBuilder.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/archiveHeapWriter.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/javaClasses.inline.hpp"
@ -358,7 +356,7 @@ objArrayOop ConstantPool::prepare_resolved_references_for_archiving() {
int index = object_to_cp_index(i);
if (tag_at(index).is_string()) {
assert(java_lang_String::is_instance(obj), "must be");
if (!ArchiveHeapWriter::is_string_too_large_to_archive(obj)) {
if (!HeapShared::is_string_too_large_to_archive(obj)) {
scratch_rr->obj_at_put(i, obj);
}
continue;
@ -398,7 +396,7 @@ void ConstantPool::restore_unshareable_info(TRAPS) {
if (vmClasses::Object_klass_is_loaded()) {
ClassLoaderData* loader_data = pool_holder()->class_loader_data();
#if INCLUDE_CDS_JAVA_HEAP
if (ArchiveHeapLoader::is_in_use() &&
if (HeapShared::is_archived_heap_in_use() &&
_cache->archived_references() != nullptr) {
oop archived = _cache->archived_references();
// Create handle for the archived resolved reference array object

View File

@ -22,9 +22,8 @@
*
*/
#include "cds/archiveHeapLoader.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderData.inline.hpp"
#include "classfile/classLoaderDataGraph.inline.hpp"
@ -900,7 +899,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec
if (this->has_archived_mirror_index()) {
ResourceMark rm(THREAD);
log_debug(aot, mirror)("%s has raw archived mirror", external_name());
if (ArchiveHeapLoader::is_in_use()) {
if (HeapShared::is_archived_heap_in_use()) {
bool present = java_lang_Class::restore_archived_mirror(this, loader, module_handle,
protection_domain,
CHECK);

View File

@ -35,7 +35,7 @@ class Klass;
// Evaluating "String arg[10]" will create an objArrayOop.
class objArrayOopDesc : public arrayOopDesc {
friend class ArchiveHeapWriter;
friend class AOTMappedHeapWriter;
friend class ObjArrayKlass;
friend class Runtime1;
friend class psPromotionManager;

View File

@ -33,7 +33,6 @@ CheckOopFunctionPointer check_oop_function = nullptr;
void oop::register_oop() {
assert (CheckUnhandledOops, "should only call when CheckUnhandledOops");
if (!Universe::is_fully_initialized()) return;
// This gets expensive, which is why checking unhandled oops is on a switch.
Thread* t = Thread::current_or_null();
if (t != nullptr && t->is_Java_thread()) {
@ -43,7 +42,6 @@ void oop::register_oop() {
void oop::unregister_oop() {
assert (CheckUnhandledOops, "should only call when CheckUnhandledOops");
if (!Universe::is_fully_initialized()) return;
// This gets expensive, which is why checking unhandled oops is on a switch.
Thread* t = Thread::current_or_null();
if (t != nullptr && t->is_Java_thread()) {

View File

@ -3491,6 +3491,10 @@ enum VM_Creation_State {
volatile VM_Creation_State vm_created = NOT_CREATED;
bool is_vm_created() {
return AtomicAccess::load(&vm_created) == COMPLETE;
}
// Indicate whether it is safe to recreate VM. Recreation is only
// possible after a failed initial creation attempt in some cases.
volatile int safe_to_recreate_vm = 1;

View File

@ -22,6 +22,7 @@
*
*/
#include "cds/aotThread.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "classfile/moduleEntry.hpp"
#include "classfile/vmClasses.hpp"
@ -1485,6 +1486,13 @@ void JvmtiExport::post_thread_start(JavaThread *thread) {
}
assert(thread->thread_state() == _thread_in_vm, "must be in vm state");
if (thread->is_aot_thread()) {
// The AOT thread is hidden from view and, due to bootstrapping complexity,
// has no thread oop when it starts, so we check for it before checking for
// bound virtual threads. On exit it is filtered out because it is hidden.
return;
}
EVT_TRIG_TRACE(JVMTI_EVENT_THREAD_START, ("[%s] Trg Thread Start event triggered",
JvmtiTrace::safe_get_thread_name(thread)));

View File

@ -22,6 +22,7 @@
*
*/
#include "cds/aotThread.hpp"
#include "memory/allocation.inline.hpp"
#include "prims/jvmtiRawMonitor.hpp"
#include "runtime/atomicAccess.hpp"
@ -29,6 +30,7 @@
#include "runtime/javaThread.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/threads.hpp"
#include "runtime/threadSMR.hpp"
JvmtiRawMonitor::QNode::QNode(Thread* thread) : _next(nullptr), _prev(nullptr),
_event(thread->_ParkEvent),
@ -39,10 +41,15 @@ GrowableArray<JvmtiRawMonitor*>* JvmtiPendingMonitors::_monitors =
new (mtServiceability) GrowableArray<JvmtiRawMonitor*>(1, mtServiceability);
void JvmtiPendingMonitors::transition_raw_monitors() {
assert((Threads::number_of_threads()==1),
"Java thread has not been created yet or more than one java thread "
"is running. Raw monitor transition will not work");
JavaThread* current_java_thread = JavaThread::current();
#ifdef ASSERT
for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
assert(thread == current_java_thread || thread->is_aot_thread(),
"Didn't expect concurrent application threads at this point");
}
#endif
{
ThreadToNativeFromVM ttnfvm(current_java_thread);
for (int i = 0; i < count(); i++) {

View File

@ -23,11 +23,11 @@
*/
#include "cds.h"
#include "cds/aotMappedHeapLoader.hpp"
#include "cds/aotMetaspace.hpp"
#include "cds/archiveHeapLoader.hpp"
#include "cds/cdsConstants.hpp"
#include "cds/filemap.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/classLoaderStats.hpp"
@ -2205,6 +2205,9 @@ WB_ENTRY(jboolean, WB_CDSMemoryMappingFailed(JNIEnv* env, jobject wb))
WB_END
WB_ENTRY(jboolean, WB_IsSharedInternedString(JNIEnv* env, jobject wb, jobject str))
if (!HeapShared::is_loading_mapping_mode()) {
return false;
}
ResourceMark rm(THREAD);
oop str_oop = JNIHandles::resolve(str);
int length;
@ -2217,7 +2220,7 @@ WB_ENTRY(jboolean, WB_IsSharedClass(JNIEnv* env, jobject wb, jclass clazz))
WB_END
WB_ENTRY(jboolean, WB_AreSharedStringsMapped(JNIEnv* env))
return ArchiveHeapLoader::is_mapped();
return AOTMappedHeapLoader::is_mapped();
WB_END
WB_ENTRY(void, WB_LinkClass(JNIEnv* env, jobject wb, jclass clazz))
@ -2230,7 +2233,7 @@ WB_ENTRY(void, WB_LinkClass(JNIEnv* env, jobject wb, jclass clazz))
WB_END
WB_ENTRY(jboolean, WB_AreOpenArchiveHeapObjectsMapped(JNIEnv* env))
return ArchiveHeapLoader::is_mapped();
return AOTMappedHeapLoader::is_mapped();
WB_END
WB_ENTRY(jboolean, WB_IsCDSIncluded(JNIEnv* env))
@ -2260,10 +2263,21 @@ WB_ENTRY(jboolean, WB_IsJVMCISupportedByGC(JNIEnv* env))
#endif
WB_END
WB_ENTRY(jboolean, WB_CanWriteJavaHeapArchive(JNIEnv* env))
static bool canWriteJavaHeapArchive() {
return !CDSConfig::are_vm_options_incompatible_with_dumping_heap();
}
WB_ENTRY(jboolean, WB_CanWriteJavaHeapArchive(JNIEnv* env))
return canWriteJavaHeapArchive();
WB_END
WB_ENTRY(jboolean, WB_CanWriteMappedJavaHeapArchive(JNIEnv* env))
return canWriteJavaHeapArchive() && !AOTStreamableObjects;
WB_END
WB_ENTRY(jboolean, WB_CanWriteStreamedJavaHeapArchive(JNIEnv* env))
return canWriteJavaHeapArchive() && AOTStreamableObjects;
WB_END
WB_ENTRY(jboolean, WB_IsJFRIncluded(JNIEnv* env))
#if INCLUDE_JFR
@ -3040,6 +3054,8 @@ static JNINativeMethod methods[] = {
{CC"isC2OrJVMCIIncluded", CC"()Z", (void*)&WB_isC2OrJVMCIIncluded },
{CC"isJVMCISupportedByGC", CC"()Z", (void*)&WB_IsJVMCISupportedByGC},
{CC"canWriteJavaHeapArchive", CC"()Z", (void*)&WB_CanWriteJavaHeapArchive },
{CC"canWriteMappedJavaHeapArchive", CC"()Z", (void*)&WB_CanWriteMappedJavaHeapArchive },
{CC"canWriteStreamedJavaHeapArchive", CC"()Z", (void*)&WB_CanWriteStreamedJavaHeapArchive },
{CC"cdsMemoryMappingFailed", CC"()Z", (void*)&WB_CDSMemoryMappingFailed },
{CC"clearInlineCaches0", CC"(Z)V", (void*)&WB_ClearInlineCaches },

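The pair of queries added above splits the old canWriteJavaHeapArchive check so that tests can tell which archiving mechanism a dumping VM would use. A minimal sketch of a probe built on them (the class name is hypothetical; it assumes the usual WhiteBox launcher flags -Xbootclasspath/a:WhiteBox.jar -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI and the matching Java-side WhiteBox methods):

import jdk.test.whitebox.WhiteBox;

public class HeapArchiveModeProbe {
    public static void main(String[] args) {
        WhiteBox wb = WhiteBox.getWhiteBox();
        if (wb.canWriteMappedJavaHeapArchive()) {
            // -XX:-AOTStreamableObjects: heap objects are written as a directly mappable image.
            System.out.println("would dump a mapped Java heap archive");
        } else if (wb.canWriteStreamedJavaHeapArchive()) {
            // -XX:+AOTStreamableObjects: heap objects are streamed and materialized at run time.
            System.out.println("would dump a streamed Java heap archive");
        } else {
            // The VM options are incompatible with dumping the heap at all.
            System.out.println("cannot dump a Java heap archive");
        }
    }
}

Tests later in this change (e.g. ArchivedIntegerCacheTest and PrintSharedArchiveAndExit) use exactly this gating to skip expectations that only hold for the mapped mechanism.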
View File

@ -121,7 +121,7 @@ size_t JavaThread::_stack_size_at_create = 0;
#define HOTSPOT_THREAD_PROBE_stop HOTSPOT_THREAD_STOP
#define DTRACE_THREAD_PROBE(probe, javathread) \
{ \
if (!javathread->is_aot_thread()) { \
ResourceMark rm(this); \
int len = 0; \
const char* name = (javathread)->name(); \
@ -763,7 +763,7 @@ void JavaThread::run() {
void JavaThread::thread_main_inner() {
assert(JavaThread::current() == this, "sanity check");
assert(_threadObj.peek() != nullptr, "just checking");
assert(_threadObj.peek() != nullptr || is_aot_thread(), "just checking");
// Execute thread entry point unless this thread has a pending exception.
// Note: Due to JVMTI StopThread we can have pending exceptions already!

View File

@ -79,6 +79,7 @@ Monitor* CompileThread_lock = nullptr;
Monitor* Compilation_lock = nullptr;
Mutex* CompileStatistics_lock = nullptr;
Mutex* DirectivesStack_lock = nullptr;
Monitor* AOTHeapLoading_lock = nullptr;
Monitor* Terminator_lock = nullptr;
Monitor* InitCompleted_lock = nullptr;
Monitor* BeforeExit_lock = nullptr;
@ -330,7 +331,9 @@ void mutex_init() {
MUTEX_DEFL(Threads_lock , PaddedMonitor, CompileThread_lock, true);
MUTEX_DEFL(Compile_lock , PaddedMutex , MethodCompileQueue_lock);
MUTEX_DEFL(JNICritical_lock , PaddedMonitor, AdapterHandlerLibrary_lock); // used for JNI critical regions
MUTEX_DEFL(Module_lock , PaddedMutex , AdapterHandlerLibrary_lock);
MUTEX_DEFL(AOTHeapLoading_lock , PaddedMonitor, Module_lock);
MUTEX_DEFL(JNICritical_lock , PaddedMonitor, AOTHeapLoading_lock); // used for JNI critical regions
MUTEX_DEFL(Heap_lock , PaddedMonitor, JNICritical_lock);
MUTEX_DEFL(PerfDataMemAlloc_lock , PaddedMutex , Heap_lock);
@ -353,7 +356,6 @@ void mutex_init() {
MUTEX_DEFL(PSOldGenExpand_lock , PaddedMutex , Heap_lock, true);
}
#endif
MUTEX_DEFL(Module_lock , PaddedMutex , ClassLoaderDataGraph_lock);
MUTEX_DEFL(SystemDictionary_lock , PaddedMonitor, Module_lock);
#if INCLUDE_JVMCI
// JVMCIRuntime_lock must be acquired before JVMCI_lock to avoid deadlock

View File

@ -81,6 +81,7 @@ extern Monitor* TrainingReplayQueue_lock; // a lock held when class are a
extern Monitor* CompileTaskWait_lock; // a lock held when CompileTasks are waited/notified
extern Mutex* CompileStatistics_lock; // a lock held when updating compilation statistics
extern Mutex* DirectivesStack_lock; // a lock held when mutating the dirstack and ref counting directives
extern Monitor* AOTHeapLoading_lock; // a lock used to guard materialization of AOT heap objects
extern Monitor* Terminator_lock; // a lock used to guard termination of the vm
extern Monitor* InitCompleted_lock; // a lock used to signal threads waiting on init completed
extern Monitor* BeforeExit_lock; // a lock used to guard cleanups and shutdown hooks

View File

@ -30,13 +30,19 @@
#ifdef ASSERT
NoSafepointVerifier::NoSafepointVerifier() : _thread(Thread::current()) {
NoSafepointVerifier::NoSafepointVerifier(bool active) : _thread(Thread::current()), _active(active) {
if (!_active) {
return;
}
if (_thread->is_Java_thread()) {
JavaThread::cast(_thread)->inc_no_safepoint_count();
}
}
NoSafepointVerifier::~NoSafepointVerifier() {
if (!_active) {
return;
}
if (_thread->is_Java_thread()) {
JavaThread::cast(_thread)->dec_no_safepoint_count();
}

View File

@ -38,8 +38,9 @@ class NoSafepointVerifier : public StackObj {
private:
Thread *_thread;
bool _active;
public:
NoSafepointVerifier() NOT_DEBUG_RETURN;
explicit NoSafepointVerifier(bool active = true) NOT_DEBUG_RETURN;
~NoSafepointVerifier() NOT_DEBUG_RETURN;
};

View File

@ -313,6 +313,7 @@ class Thread: public ThreadShadow {
virtual bool is_JfrRecorder_thread() const { return false; }
virtual bool is_AttachListener_thread() const { return false; }
virtual bool is_monitor_deflation_thread() const { return false; }
virtual bool is_aot_thread() const { return false; }
// Convenience cast functions
CompilerThread* as_Compiler_thread() const {

View File

@ -27,7 +27,7 @@
#include "cds/aotMetaspace.hpp"
#include "cds/cds_globals.hpp"
#include "cds/cdsConfig.hpp"
#include "cds/heapShared.hpp"
#include "cds/heapShared.inline.hpp"
#include "classfile/classLoader.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/javaThreadStatus.hpp"
@ -102,6 +102,7 @@
#include "services/management.hpp"
#include "services/threadIdTable.hpp"
#include "services/threadService.hpp"
#include "utilities/debug.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/macros.hpp"
@ -383,6 +384,8 @@ void Threads::initialize_java_lang_classes(JavaThread* main_thread, TRAPS) {
initialize_class(vmSymbols::java_lang_reflect_Method(), CHECK);
initialize_class(vmSymbols::java_lang_ref_Finalizer(), CHECK);
HeapShared::materialize_thread_object();
// Phase 1 of the system initialization in the library, java.lang.System class initialization
call_initPhase1(CHECK);
@ -563,7 +566,8 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// Initialize OopStorage for threadObj
JavaThread::_thread_oop_storage = OopStorageSet::create_strong("Thread OopStorage", mtThread);
// Attach the main thread to this os thread
// Attach the main thread to this os thread. It is added to the threads list inside
// universe_init(), within init_globals().
JavaThread* main_thread = new JavaThread();
main_thread->set_thread_state(_thread_in_vm);
main_thread->initialize_thread_current();
@ -577,7 +581,9 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// Set the _monitor_owner_id now since we will run Java code before the Thread instance
// is even created. The same value will be assigned to the Thread instance on init.
main_thread->set_monitor_owner_id(ThreadIdentifier::next());
const int64_t main_thread_tid = ThreadIdentifier::next();
guarantee(main_thread_tid == 3, "Must equal the PRIMORDIAL_TID used in Threads.java");
main_thread->set_monitor_owner_id(main_thread_tid);
if (!Thread::set_as_starting_thread(main_thread)) {
vm_shutdown_during_initialization(
@ -613,14 +619,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// of hangs during error reporting.
WatcherThread::start();
// Add main_thread to threads list to finish barrier setup with
// on_thread_attach. Should be before starting to build Java objects in
// init_globals2, which invokes barriers.
{
MutexLocker mu(Threads_lock);
Threads::add(main_thread);
}
status = init_globals2();
if (status != JNI_OK) {
Threads::remove(main_thread, false);
@ -704,6 +702,9 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// No more stub generation allowed after that point.
StubCodeDesc::freeze();
// Prepare AOT heap loader for GC.
HeapShared::enable_gc();
#ifdef ADDRESS_SANITIZER
Asan::initialize();
#endif
@ -899,6 +900,9 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
// take a while to process their first tick).
WatcherThread::run_all_tasks();
// Finish materializing AOT objects
HeapShared::finish_materialize_objects();
create_vm_timer.end();
#ifdef ASSERT
_vm_complete = true;

View File

@ -581,12 +581,13 @@ inline void ExceptionMark::check_no_pending_exception() {
}
}
extern bool is_vm_created();
ExceptionMark::~ExceptionMark() {
if (_thread->has_pending_exception()) {
Handle exception(_thread, _thread->pending_exception());
_thread->clear_pending_exception(); // Needed to avoid infinite recursion
if (is_init_completed()) {
if (is_vm_created()) {
ResourceMark rm;
exception->print();
fatal("ExceptionMark destructor expects no pending exceptions");

View File

@ -615,7 +615,7 @@
#define COMPILER_HEADER(basename) XSTR(COMPILER_HEADER_STEM(basename).hpp)
#define COMPILER_HEADER_INLINE(basename) XSTR(COMPILER_HEADER_STEM(basename).inline.hpp)
#if INCLUDE_CDS && INCLUDE_G1GC && defined(_LP64)
#if INCLUDE_CDS && defined(_LP64)
#define INCLUDE_CDS_JAVA_HEAP 1
#define CDS_JAVA_HEAP_ONLY(x) x
#define NOT_CDS_JAVA_HEAP(x)

View File

@ -7,6 +7,7 @@ runtime/symbols/TestSharedArchiveConfigFile.java 0000000 generic-all
gc/arguments/TestG1HeapSizeFlags.java 0000000 generic-all
gc/arguments/TestParallelHeapSizeFlags.java 0000000 generic-all
gc/arguments/TestSerialHeapSizeFlags.java 0000000 generic-all
gc/arguments/TestVerifyBeforeAndAfterGCFlags.java 0000000 generic-all
gc/arguments/TestCompressedClassFlags.java 0000000 generic-all
gc/TestAllocateHeapAtMultiple.java 0000000 generic-all

View File

@ -85,6 +85,8 @@ requires.properties= \
vm.cds.supports.aot.class.linking \
vm.cds.supports.aot.code.caching \
vm.cds.write.archived.java.heap \
vm.cds.write.mapped.java.heap \
vm.cds.write.streamed.java.heap \
vm.continuations \
vm.jvmti \
vm.graal.enabled \

View File

@ -313,7 +313,8 @@ tier3_gc_gcold = \
tier1_gc_gcbasher = \
gc/stress/gcbasher/TestGCBasherWithG1.java \
gc/stress/gcbasher/TestGCBasherWithSerial.java \
gc/stress/gcbasher/TestGCBasherWithParallel.java
gc/stress/gcbasher/TestGCBasherWithParallel.java \
gc/stress/gcbasher/TestGCBasherWithZ.java
tier1_gc_shenandoah = \
gc/shenandoah/options/ \

View File

@ -24,7 +24,7 @@
package gc;
/*
* @test TestPLABAdaptToMinTLABSizeG1
* @test id=G1
* @bug 8289137
* @summary Make sure that Young/OldPLABSize adapt to MinTLABSize setting.
* @requires vm.gc.G1
@ -35,7 +35,7 @@ package gc;
*/
/*
* @test TestPLABAdaptToMinTLABSizeParallel
* @test id=Parallel
* @bug 8289137
* @summary Make sure that Young/OldPLABSize adapt to MinTLABSize setting.
* @requires vm.gc.Parallel

View File

@ -33,7 +33,7 @@ import java.io.IOException;
* @requires vm.gc.Z
* @requires vm.flavor == "server" & !vm.emulatedClient
* @summary Stress ZGC
* @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx384m -server -XX:+UseZGC gc.stress.gcbasher.TestGCBasherWithZ 120000
* @run main/othervm/timeout=200 -Xlog:gc*=info -Xmx384m -XX:+UseZGC gc.stress.gcbasher.TestGCBasherWithZ 120000
*/
/*
@ -43,7 +43,7 @@ import java.io.IOException;
* @requires vm.gc.Z
* @requires vm.flavor == "server" & !vm.emulatedClient & vm.opt.ClassUnloading != false
* @summary Stress ZGC with nmethod barrier forced deoptimization enabled.
* @run main/othervm/timeout=200 -Xlog:gc*=info,nmethod+barrier=trace -Xmx384m -server -XX:+UseZGC
* @run main/othervm/timeout=200 -Xlog:gc*=info,nmethod+barrier=trace -Xmx384m -XX:+UseZGC
* -XX:+UnlockDiagnosticVMOptions -XX:+DeoptimizeNMethodBarriersALot -XX:-Inline
* gc.stress.gcbasher.TestGCBasherWithZ 120000
*/

View File

@ -39,19 +39,27 @@ import java.util.ArrayList;
public class AOTMapTest {
public static void main(String[] args) throws Exception {
doTest(false);
doTest(false, false);
doTest(false, true);
if (Platform.is64bit()) {
// There's no oop/klass compression on 32-bit.
doTest(true);
doTest(true, false);
doTest(true, true);
}
}
public static void doTest(boolean compressed) throws Exception {
public static void doTest(boolean compressed, boolean streamHeap) throws Exception {
ArrayList<String> vmArgs = new ArrayList<>();
// Use the same heap size as make/Images.gmk
vmArgs.add("-Xmx128M");
vmArgs.add("-XX:+UnlockDiagnosticVMOptions");
if (streamHeap) {
vmArgs.add("-XX:+AOTStreamableObjects");
} else {
vmArgs.add("-XX:-AOTStreamableObjects");
}
if (Platform.is64bit()) {
// These options are available only on 64-bit.

View File

@ -25,7 +25,7 @@
* @test
* @summary Check to make sure that shared strings in the bootstrap CDS archive
* are actually shared
* @requires vm.cds.write.archived.java.heap
* @requires vm.cds.write.mapped.java.heap
* @requires vm.flagless
* @library /test/lib
* @build SharedStringsWb jdk.test.whitebox.WhiteBox

View File

@ -24,7 +24,7 @@
/**
* @test SharedStringsDedup
* @summary Test -Xshare:auto with shared strings and -XX:+UseStringDeduplication
* @requires vm.cds.write.archived.java.heap
* @requires vm.cds.write.mapped.java.heap
* @library /test/lib
* @run driver SharedStringsDedup
*/

View File

@ -24,7 +24,7 @@
/**
* @test SharedStringsAuto
* @summary Test -Xshare:auto with shared strings.
* @requires vm.cds.write.archived.java.heap
* @requires vm.cds.write.mapped.java.heap
* @library /test/lib
* @run driver SharedStringsRunAuto
*/

View File

@ -43,8 +43,9 @@ public class SharedSymbolTableBucketSize {
+ Integer.valueOf(bucket_size));
CDSTestUtils.checkMappingFailure(output);
String s = output.firstMatch("Average bucket size : .*");
Float f = Float.parseFloat(s.substring(25));
String regex = "Average bucket size : ([0-9]+\\.[0-9]+).*";
String s = output.firstMatch(regex, 1);
Float f = Float.parseFloat(s);
int size = Math.round(f);
if (size != bucket_size) {
throw new Exception("FAILED: incorrect bucket size " + size +

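The replacement above trades a brittle fixed substring offset for a regex capture group. For reference, a self-contained sketch of the same extraction with java.util.regex (the sample log line is invented; OutputAnalyzer.firstMatch(regex, 1) performs the equivalent lookup over the child VM's output):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class BucketSizeParseExample {
    public static void main(String[] args) {
        String line = "Average bucket size : 4.0"; // hypothetical log output
        Matcher m = Pattern.compile("Average bucket size : ([0-9]+\\.[0-9]+).*").matcher(line);
        if (m.find()) {
            // Group 1 captures just the floating-point number after the label.
            int size = Math.round(Float.parseFloat(m.group(1)));
            System.out.println("bucket size = " + size);
        }
    }
}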
View File

@ -55,6 +55,7 @@
* @requires vm.cds.default.archive.available
* @requires vm.cds.write.archived.java.heap
* @requires vm.bits == 64
* @requires !vm.gc.Z
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management
@ -68,6 +69,7 @@
* @requires vm.cds.default.archive.available
* @requires vm.cds.write.archived.java.heap
* @requires vm.bits == 64
* @requires !vm.gc.Z
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.management

View File

@ -121,24 +121,23 @@ public class TestParallelGCWithCDS {
out.shouldNotContain(errMsg);
out.shouldHaveExitValue(0);
if (!dumpWithParallel && execWithParallel) {
// We dumped with G1, so we have an archived heap. At exec time, try to load them into
// a small ParallelGC heap that may be too small.
System.out.println("2. Exec with " + execGC);
out = TestCommon.exec(helloJar,
execGC,
small1,
small2,
"-Xmx4m",
coops,
"-Xlog:cds",
"Hello");
if (out.getExitValue() == 0) {
out.shouldContain(HELLO);
out.shouldNotContain(errMsg);
} else {
out.shouldNotHaveFatalError();
}
// Regardless of which GC dumped the heap, there will be an object archive: either
// created with mapping if dumped with G1, or with streaming if dumped with Parallel GC.
// At exec time, try to load it into a small ParallelGC heap that may be too small.
System.out.println("2. Exec with " + execGC);
out = TestCommon.exec(helloJar,
execGC,
small1,
small2,
"-Xmx4m",
coops,
"-Xlog:cds",
"Hello");
if (out.getExitValue() == 0) {
out.shouldContain(HELLO);
out.shouldNotContain(errMsg);
} else {
out.shouldNotHaveFatalError();
}
}
}

View File

@ -140,9 +140,8 @@ public class TestSerialGCWithCDS {
out.shouldNotContain(errMsg);
int n = 2;
if (dumpWithSerial == false && execWithSerial == true) {
// We dumped with G1, so we have an archived heap. At exec time, try to load them into
// a small SerialGC heap that may be too small.
if (execWithSerial == true) {
// At exec time, try to load archived objects into a small SerialGC heap that may be too small.
String[] sizes = {
"4m", // usually this will successfully load the archived heap
"2m", // usually this will fail to load the archived heap, but the app can still launch

View File

@ -0,0 +1,159 @@
/*
* Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test Loading and writing AOT archived heap objects with ZGC
* @requires vm.cds
* @requires vm.gc.Z
* @requires vm.gc.G1
*
* @comment don't run this test if any -XX:+Use???GC options are specified, since they will
* interfere with the test.
* @requires vm.gc == null
*
* @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @compile test-classes/Hello.java
* @run driver TestZGCWithAOTHeap
*/
import jdk.test.lib.Platform;
import jdk.test.lib.process.OutputAnalyzer;
public class TestZGCWithAOTHeap {
public final static String HELLO = "Hello World";
static String helloJar;
public static void main(String... args) throws Exception {
helloJar = JarBuilder.build("hello", "Hello");
// Check if we can use ZGC during dump time, or run time, or both.
test(false, true, true, true);
test(true, false, true, true);
test(true, true, true, true);
test(false, true, false, true);
test(false, true, true, false);
test(true, false, true, false);
test(true, true, true, false);
test(false, true, false, false);
}
final static String G1 = "-XX:+UseG1GC";
final static String Z = "-XX:+UseZGC";
static void test(boolean dumpWithZ, boolean execWithZ, boolean shouldStream, boolean shouldUseCOH) throws Exception {
String unlockDiagnostic = "-XX:+UnlockDiagnosticVMOptions";
String dumpGC = dumpWithZ ? Z : G1;
String execGC = execWithZ ? Z : G1;
String generalErrMsg = "Cannot use CDS heap data.";
String coopsErrMsg = generalErrMsg + " Selected GC not compatible -XX:-UseCompressedOops";
String coops = "-XX:-UseCompressedOops";
String coh = shouldUseCOH ? "-XX:+UseCompactObjectHeaders" : "-XX:-UseCompactObjectHeaders";
String stream = shouldStream ? "-XX:+AOTStreamableObjects" : "-XX:-AOTStreamableObjects";
String eagerLoading = "-XX:+AOTEagerlyLoadObjects";
OutputAnalyzer out;
System.out.println("0. Dump with " + dumpGC + ", " + coops + ", " + coh + ", " + stream);
out = TestCommon.dump(helloJar,
new String[] {"Hello"},
dumpGC,
coops,
coh,
"-XX:+UnlockDiagnosticVMOptions",
stream,
"-Xlog:cds,aot,aot+heap");
out.shouldContain("Dumping shared data to file:");
out.shouldHaveExitValue(0);
System.out.println("1. Exec with " + execGC + ", " + coops + ", " + coh);
out = TestCommon.exec(helloJar,
unlockDiagnostic,
execGC,
coops,
coh,
"-Xlog:cds,aot,aot+heap",
"Hello");
if (!shouldStream && execWithZ) {
// Only when dumping without streaming and executing with ZGC do we expect there
// to be a problem. With -XX:+AOTClassLinking, the problem is worse.
if (out.getExitValue() == 0) {
out.shouldContain(HELLO);
out.shouldContain(generalErrMsg);
} else {
out.shouldHaveExitValue(1);
}
} else {
out.shouldContain(HELLO);
out.shouldNotContain(generalErrMsg);
out.shouldHaveExitValue(0);
}
// Regardless of how the heap was dumped, there will be an object archive: either created
// with mapping (when dumping without -XX:+AOTStreamableObjects) or with streaming.
// At exec time, try to load it into a small ZGC heap that may be too small.
System.out.println("2. Exec with " + execGC + ", " + coops + ", " + coh);
out = TestCommon.exec(helloJar,
unlockDiagnostic,
execGC,
"-Xmx4m",
coops,
coh,
"-Xlog:cds,aot,aot+heap",
"Hello");
if (out.getExitValue() == 0) {
if (!shouldStream && execWithZ) {
out.shouldContain(coopsErrMsg);
} else {
out.shouldNotContain(generalErrMsg);
}
} else {
out.shouldHaveExitValue(1);
}
out.shouldNotHaveFatalError();
if (shouldStream) {
System.out.println("3. Exec with " + execGC + ", " + coops + ", " + coh + ", " + eagerLoading);
out = TestCommon.exec(helloJar,
unlockDiagnostic,
execGC,
coops,
coh,
eagerLoading,
"-Xlog:cds,aot,aot+heap",
"Hello");
if (!shouldStream && execWithZ) {
// Only when dumping without streaming and executing with ZGC do we expect there
// to be a problem. With -XX:+AOTClassLinking, the problem is worse.
if (out.getExitValue() == 0) {
out.shouldContain(HELLO);
out.shouldContain(generalErrMsg);
} else {
out.shouldHaveExitValue(1);
}
} else {
out.shouldContain(HELLO);
out.shouldNotContain(generalErrMsg);
out.shouldHaveExitValue(0);
}
}
}
}

View File

@ -1,59 +0,0 @@
/*
* Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/**
* @test
* @summary -XX:AOTMode=create should be compatible with ZGC
* @bug 8352775
* @requires vm.cds
* @requires vm.gc.Z
* @library /test/lib
* @build AOTCacheWithZGC
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar app.jar AOTCacheWithZGCApp
* @run driver AOTCacheWithZGC
*/
import jdk.test.lib.cds.SimpleCDSAppTester;
import jdk.test.lib.process.OutputAnalyzer;
public class AOTCacheWithZGC {
public static void main(String... args) throws Exception {
SimpleCDSAppTester.of("AOTCacheWithZGC")
.addVmArgs("-XX:+UseZGC", "-Xlog:cds", "-Xlog:aot")
.classpath("app.jar")
.appCommandLine("AOTCacheWithZGCApp")
.setProductionChecker((OutputAnalyzer out) -> {
// AOT-linked classes required cached Java heap objects, which is not
// yet supported by ZGC.
out.shouldContain("Using AOT-linked classes: false");
})
.runAOTWorkflow();
}
}
class AOTCacheWithZGCApp {
public static void main(String[] args) {
}
}

View File

@ -27,6 +27,8 @@
* @summary Sanity test of AOT Code Cache with compressed oops configurations
* @requires vm.cds.supports.aot.code.caching
* @requires vm.compMode != "Xcomp"
* @requires vm.bits == 64
* @requires vm.opt.final.UseCompressedOops
* @comment The test verifies AOT checks during VM startup and not code generation.
* No need to run it with -Xcomp. It takes a lot of time to complete all
* subtests with this flag.

View File

@ -28,9 +28,11 @@
* @requires vm.cds.write.archived.java.heap
* @library /test/jdk/lib/testlibrary /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @compile --add-exports java.base/jdk.internal.misc=ALL-UNNAMED CheckIntegerCacheApp.java ArchivedIntegerHolder.java
* @build jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar WhiteBox.jar jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar boxCache.jar CheckIntegerCacheApp
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar boxCache-boot.jar ArchivedIntegerHolder
* @run driver ArchivedIntegerCacheTest
* @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:./WhiteBox.jar ArchivedIntegerCacheTest
*/
import java.nio.file.Files;
@ -39,8 +41,10 @@ import java.nio.file.Paths;
import jdk.test.lib.cds.CDSTestUtils;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.helpers.ClassFileInstaller;
import jdk.test.whitebox.WhiteBox;
public class ArchivedIntegerCacheTest {
private static WhiteBox WB = WhiteBox.getWhiteBox();
public static String[] mixArgs(String... args) {
String bootJar = ClassFileInstaller.getJarPath("boxCache-boot.jar");
@ -133,7 +137,9 @@ public class ArchivedIntegerCacheTest {
"-Xlog:cds+heap=info",
"-Xlog:gc+region+cds",
"-Xlog:gc+region=trace"));
TestCommon.checkDump(output,
"Cannot archive the sub-graph referenced from [Ljava.lang.Integer; object");
if (WB.canWriteMappedJavaHeapArchive()) {
// The mapping AOT heap archiving mechanism is unable to cache larger objects.
TestCommon.checkDump(output, "Cannot archive the sub-graph referenced from [Ljava.lang.Integer; object");
}
}
}

View File

@ -37,7 +37,7 @@
* jdk.test.lib.classloader.ClassUnloadCommon$TestFailure
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar hello_custom.jar CustomLoadee
* @run driver jdk.test.lib.helpers.ClassFileInstaller -jar WhiteBox.jar jdk.test.whitebox.WhiteBox
* @run driver PrintSharedArchiveAndExit
* @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:./WhiteBox.jar PrintSharedArchiveAndExit
*/
import jdk.test.lib.process.OutputAnalyzer;
@ -45,6 +45,8 @@ import jdk.test.lib.helpers.ClassFileInstaller;
import jdk.test.whitebox.WhiteBox;
public class PrintSharedArchiveAndExit {
private static WhiteBox WB = WhiteBox.getWhiteBox();
public static void main(String[] args) throws Exception {
run();
}
@ -82,7 +84,11 @@ public class PrintSharedArchiveAndExit {
.shouldContain("Shared Builtin Dictionary")
.shouldContain("Shared Unregistered Dictionary")
.shouldMatch("Number of shared symbols: \\d+")
.shouldMatch("Number of shared strings: \\d+")
.shouldMatch("VM version: .*");
if (WB.canWriteMappedJavaHeapArchive()) {
// With the mapping object dumper, the string table is dumped.
output.shouldMatch("Number of shared strings: \\d+");
}
}
}

View File

@ -43,8 +43,11 @@ import jdk.test.lib.cds.CDSTestUtils;
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.helpers.ClassFileInstaller;
import jdk.test.whitebox.WhiteBox;
public class PrintSharedArchiveAndExit extends DynamicArchiveTestBase {
private static final String ARCHIVE_NAME = CDSTestUtils.getOutputFileName("top.jsa");
private static final WhiteBox WB = WhiteBox.getWhiteBox();
public static void main(String... args) throws Exception {
runTest(PrintSharedArchiveAndExit::testPrtNExit);
@ -92,8 +95,11 @@ public class PrintSharedArchiveAndExit extends DynamicArchiveTestBase {
.shouldContain("Shared Builtin Dictionary")
.shouldContain("Shared Unregistered Dictionary")
.shouldMatch("Number of shared symbols: \\d+")
.shouldMatch("Number of shared strings: \\d+")
.shouldMatch("VM version: .*");
});
if (WB.canWriteMappedJavaHeapArchive()) {
// With the mapping object archiving mechanism, the string table is dumped
output.shouldMatch("Number of shared strings: \\d+");
}
});
}
}

View File

@ -25,8 +25,7 @@
/*
* @test
* @summary Exercise GC with shared strings
* @requires vm.cds.write.archived.java.heap
* @requires vm.gc == null
* @requires vm.cds.write.mapped.java.heap
* @library /test/hotspot/jtreg/runtime/cds/appcds /test/lib
* @build HelloStringGC jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox

View File

@ -25,8 +25,7 @@
/**
* @test
* @summary Test relevant combinations of command line flags with shared strings
* @requires vm.cds.write.archived.java.heap & vm.hasJFR
* @requires vm.gc == null
* @requires vm.cds.write.mapped.java.heap & vm.hasJFR
* @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build HelloString
* @run driver FlagCombo
@ -36,8 +35,7 @@
* @test
* @summary Test relevant combinations of command line flags with shared strings
* @comment A special test excluding the case that requires JFR
* @requires vm.cds.write.archived.java.heap & !vm.hasJFR
* @requires vm.gc == null
* @requires vm.cds.write.mapped.java.heap & !vm.hasJFR
* @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build HelloString
* @run driver FlagCombo noJfr
@ -53,7 +51,7 @@ public class FlagCombo {
SharedStringsUtils.dump(TestCommon.list("HelloString"),
"SharedStringsBasic.txt", "-Xlog:cds,aot+hashtables");
SharedStringsUtils.runWithArchive("HelloString", "-XX:+UseG1GC");
SharedStringsUtils.runWithArchive("HelloString");
if (args.length == 0) {
SharedStringsUtils.runWithArchiveAuto("HelloString",

View File

@ -100,8 +100,12 @@ public class IncompatibleOptions {
// Uncompressed OOPs
testDump(1, "-XX:+UseG1GC", "-XX:-UseCompressedOops", null, false);
testExec(1, "-XX:+UseG1GC", "-XX:-UseCompressedOops", null, false);
// Try with ZGC
if (GC.Z.isSupported()) {
testDump(1, "-XX:+UseZGC", "-XX:-UseCompressedOops", null, false);
testDump(2, "-XX:+UseZGC", "-XX:-UseCompressedOops", null, false);
testExec(2, "-XX:+UseZGC", "-XX:-UseCompressedOops", null, false);
}
// Dump heap objects with Parallel, Serial, Shenandoah GC
@ -112,39 +116,62 @@ public class IncompatibleOptions {
}
// Explicitly archive with compressed oops, run without.
testDump(5, "-XX:+UseG1GC", "-XX:+UseCompressedOops", null, false);
testExec(5, "-XX:+UseG1GC", "-XX:-UseCompressedOops",
COMPRESSED_OOPS_NOT_CONSISTENT, true);
testDump(3, "-XX:+UseG1GC", "-XX:+UseCompressedOops", null, false);
testExec(3, "-XX:+UseG1GC", "-XX:-UseCompressedOops", COMPRESSED_OOPS_NOT_CONSISTENT, true);
// NOTE: No warning is displayed, by design
// Still run, to ensure no crash or exception
testExec(6, "-XX:+UseParallelGC", "", "", false);
testExec(7, "-XX:+UseSerialGC", "", "", false);
testExec(3, "-XX:+UseParallelGC", "", "", false);
testExec(3, "-XX:+UseSerialGC", "", "", false);
// Explicitly archive with object streaming with one GC, run with other GCs.
testDump(4, "-XX:+UseG1GC", "-XX:+AOTStreamableObjects", null, false);
testExec(4, "-XX:+UseParallelGC", "", "", false);
testExec(4, "-XX:+UseSerialGC", "", "", false);
if (GC.Z.isSupported()) {
testExec(4, "-XX:+UseZGC", "", COMPRESSED_OOPS_NOT_CONSISTENT, true);
}
// Explicitly archive with object streaming and COOPs with one GC, run with other GCs.
testDump(4, "-XX:-UseCompressedOops", "-XX:+AOTStreamableObjects", null, false);
testExec(4, "-XX:+UseG1GC", "", COMPRESSED_OOPS_NOT_CONSISTENT, true);
testExec(4, "-XX:+UseParallelGC", "", COMPRESSED_OOPS_NOT_CONSISTENT, true);
testExec(4, "-XX:+UseSerialGC", "", COMPRESSED_OOPS_NOT_CONSISTENT, true);
testExec(4, "-XX:+UseParallelGC", "-XX:-UseCompressedOops", "", false);
testExec(4, "-XX:+UseSerialGC", "-XX:-UseCompressedOops", "", false);
testExec(4, "-XX:+UseG1GC", "-XX:-UseCompressedOops", "", false);
// Test various oops encodings, by varying ObjectAlignmentInBytes and heap sizes
testDump(9, "-XX:+UseG1GC", "-XX:ObjectAlignmentInBytes=8", null, false);
testExec(9, "-XX:+UseG1GC", "-XX:ObjectAlignmentInBytes=16",
OBJ_ALIGNMENT_MISMATCH, true);
testDump(5, "-XX:+UseG1GC", "-XX:ObjectAlignmentInBytes=8", null, false);
testExec(5, "-XX:+UseG1GC", "-XX:ObjectAlignmentInBytes=16", OBJ_ALIGNMENT_MISMATCH, true);
testDump(6, "-XX:+AOTStreamableObjects", "-XX:ObjectAlignmentInBytes=8", null, false);
testExec(6, "-XX:+AOTStreamableObjects", "-XX:ObjectAlignmentInBytes=16", OBJ_ALIGNMENT_MISMATCH, true);
// Implicitly archive with compressed oops, run without.
// Max heap size for compressed oops is around 31G.
// UseCompressedOops is turned on by default when heap
// size is under 31G, but will be turned off when heap
// size is greater than that.
testDump(10, "-XX:+UseG1GC", "-Xmx1g", null, false);
testExec(10, "-XX:+UseG1GC", "-Xmx32g", null, true);
testDump(7, "-XX:+UseG1GC", "-Xmx1g", null, false);
testExec(7, "-XX:+UseG1GC", "-Xmx32g", null, true);
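The ergonomic flip described in the comment above is easy to observe directly. A small sketch (assuming a java launcher on the PATH; the ~31G threshold is approximate and platform-dependent) that prints the final UseCompressedOops value for a small and a large -Xmx:

import java.io.BufferedReader;
import java.io.InputStreamReader;

public class CoopsErgonomicsProbe {
    public static void main(String[] args) throws Exception {
        for (String mx : new String[] {"-Xmx1g", "-Xmx32g"}) {
            Process p = new ProcessBuilder("java", mx, "-XX:+PrintFlagsFinal", "-version")
                    .redirectErrorStream(true)
                    .start();
            try (BufferedReader r = new BufferedReader(new InputStreamReader(p.getInputStream()))) {
                // UseCompressedOops is selected ergonomically: true for the small heap,
                // false once -Xmx exceeds the compressed-oops limit.
                r.lines()
                 .filter(l -> l.contains(" UseCompressedOops "))
                 .forEach(l -> System.out.println(mx + " -> " + l.trim()));
            }
            p.waitFor();
        }
    }
}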
// Explicitly archive without compressed oops and run with.
testDump(11, "-XX:+UseG1GC", "-XX:-UseCompressedOops", null, false);
testExec(11, "-XX:+UseG1GC", "-XX:+UseCompressedOops", null, true);
testDump(8, "-XX:+UseG1GC", "-XX:-UseCompressedOops", null, false);
testExec(8, "-XX:+UseG1GC", "-XX:+UseCompressedOops", null, true);
// Implicitly archive without compressed oops and run with.
testDump(12, "-XX:+UseG1GC", "-Xmx32G", null, false);
testExec(12, "-XX:+UseG1GC", "-Xmx1G", null, true);
testDump(9, "-XX:+UseG1GC", "-Xmx32G", null, false);
testExec(9, "-XX:+UseG1GC", "-Xmx1G", null, true);
// CompactStrings must match between dump time and run time
testDump(13, "-XX:+UseG1GC", "-XX:-CompactStrings", null, false);
testExec(13, "-XX:+UseG1GC", "-XX:+CompactStrings",
testDump(10, "-XX:+UseG1GC", "-XX:-CompactStrings", null, false);
testExec(10, "-XX:+UseG1GC", "-XX:+CompactStrings",
COMPACT_STRING_MISMATCH, true);
testDump(14, "-XX:+UseG1GC", "-XX:+CompactStrings", null, false);
testExec(14, "-XX:+UseG1GC", "-XX:-CompactStrings",
testDump(11, "-XX:+UseG1GC", "-XX:+CompactStrings", null, false);
testExec(11, "-XX:+UseG1GC", "-XX:-CompactStrings",
COMPACT_STRING_MISMATCH, true);
testDump(11, "-XX:+AOTStreamableObjects", "-XX:+CompactStrings", null, false);
testExec(11, "-XX:+AOTStreamableObjects", "-XX:-CompactStrings",
COMPACT_STRING_MISMATCH, true);
}
@ -154,6 +181,7 @@ public class IncompatibleOptions {
System.out.println("Testcase: " + testCaseNr);
OutputAnalyzer output = TestCommon.dump(appJar, TestCommon.list("Hello"),
TestCommon.concat(vmOptionsPrefix,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+UseCompressedOops",
collectorOption,
"-XX:SharedArchiveConfigFile=" + TestCommon.getSourceFile("SharedStringsBasic.txt"),
@ -181,11 +209,13 @@ public class IncompatibleOptions {
if (!extraOption.isEmpty()) {
output = TestCommon.exec(appJar,
TestCommon.concat(vmOptionsPrefix,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+UseCompressedOops",
collectorOption, "-Xlog:cds", extraOption, "HelloString"));
} else {
output = TestCommon.exec(appJar,
TestCommon.concat(vmOptionsPrefix,
"-XX:+UnlockDiagnosticVMOptions",
"-XX:+UseCompressedOops",
collectorOption, "-Xlog:cds", "HelloString"));
}

View File

@ -25,8 +25,8 @@
/*
* @test
* @summary Test shared strings together with string intern operation
* @requires vm.cds.write.archived.java.heap
* @requires vm.gc == null
* @requires vm.cds.write.mapped.java.heap
* @library /test/hotspot/jtreg/runtime/cds/appcds /test/lib
* @compile InternStringTest.java
* @build jdk.test.whitebox.WhiteBox
@ -34,6 +34,10 @@
* @run driver InternSharedString
*/
// This test requires vm.cds.write.mapped.java.heap specifically, as it relies on the
// mechanism that dumps the entire string table, which the streaming solution does not do.
public class InternSharedString {
public static void main(String[] args) throws Exception {
SharedStringsUtils.run(args, InternSharedString::test);

View File

@ -25,8 +25,7 @@
/*
* @test
* @summary Basic shared string test with large pages
* @requires vm.cds.write.archived.java.heap
* @requires vm.gc == null
* @requires vm.cds.write.mapped.java.heap
* @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build HelloString
* @run driver LargePages

View File

@ -25,7 +25,7 @@
/*
* @test
* @summary Basic test for shared strings
* @requires vm.cds.write.archived.java.heap
* @requires vm.cds.write.mapped.java.heap
* @library /test/hotspot/jtreg/runtime/cds/appcds /test/lib
* @build HelloString
* @run driver SharedStringsBasic
@ -33,6 +33,10 @@
import jdk.test.lib.cds.CDSOptions;
import jdk.test.lib.cds.CDSTestUtils;
// This test requires vm.cds.write.mapped.java.heap specifically, as it relies on the
// mechanism that dumps the entire string table, which the streaming solution does not do.
// This test does not use SharedStringsUtils.dumpXXX()
// and SharedStringsUtils.runWithXXX() intentionally:
// - in order to demonstrate the basic use of the functionality

View File

@ -25,14 +25,17 @@
/*
* @test
* @summary Basic plus test for shared strings
* @requires vm.cds.write.archived.java.heap
* @requires vm.gc == null
* @requires vm.cds.write.mapped.java.heap
* @library /test/hotspot/jtreg/runtime/cds/appcds /test/lib
* @build HelloStringPlus jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run driver SharedStringsBasicPlus
*/
// This test requires vm.cds.write.mapped.java.heap specifically, as it relies on the
// mechanism that dumps the entire string table, which the streaming solution does not do.
public class SharedStringsBasicPlus {
public static void main(String[] args) throws Exception {
SharedStringsUtils.run(args, SharedStringsBasicPlus::test);

View File

@ -26,7 +26,7 @@
* @test
* @summary Use a shared string allocated in a humongous G1 region.
* @comment -- the following implies that G1 is used (by command-line or by default)
* @requires vm.cds.write.archived.java.heap
* @requires vm.cds.write.mapped.java.heap
* @requires vm.gc.G1
*
* @library /test/hotspot/jtreg/runtime/cds/appcds /test/lib
@ -35,6 +35,11 @@
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xbootclasspath/a:. SharedStringsHumongous
*/
// The problem with humongous strings, or humongous objects in general, does not
// exist with the streaming heap loader. Therefore, this test requires the mapping mode.
// Furthermore, humongous regions are specific to G1, so G1 is needed.
import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStreamWriter;

View File

@ -25,11 +25,16 @@
/*
* @test
* @summary Write a lots of shared strings.
* @requires vm.cds.write.archived.java.heap
* @requires vm.cds.write.mapped.java.heap
* @library /test/hotspot/jtreg/runtime/cds/appcds /test/lib
* @build HelloString
* @run driver/timeout=2600 SharedStringsStress
*/
// This test requires vm.cds.write.mapped.java.heap specifically, as it relies on the
// mechanism that dumps the entire string table, which the streaming solution does not do.
import java.io.File;
import java.io.FileOutputStream;
import java.io.OutputStreamWriter;

View File

@ -96,7 +96,7 @@ public class SharedStringsUtils {
String appJar = TestCommon.getTestJar(TEST_JAR_NAME_FULL);
String[] args =
TestCommon.concat(extraOptions, "-XX:+UseCompressedOops",
TestCommon.concat(extraOptions,
"-XX:SharedArchiveConfigFile=" +
TestCommon.getSourceFile(sharedDataFile));
args = TestCommon.concat(childVMOptionsPrefix, args);
@ -124,7 +124,7 @@ public class SharedStringsUtils {
String appJar = TestCommon.getTestJar(TEST_JAR_NAME_FULL);
String[] args = TestCommon.concat(extraOptions,
"-cp", appJar, "-XX:+UseCompressedOops", className);
"-cp", appJar, className);
args = TestCommon.concat(childVMOptionsPrefix, args);
OutputAnalyzer output = TestCommon.execAuto(args);
@ -142,8 +142,7 @@ public class SharedStringsUtils {
String className, String... extraOptions) throws Exception {
String appJar = TestCommon.getTestJar(TEST_JAR_NAME_FULL);
String[] args = TestCommon.concat(extraOptions,
"-XX:+UseCompressedOops", className);
String[] args = TestCommon.concat(extraOptions, className);
args = TestCommon.concat(childVMOptionsPrefix, args);
OutputAnalyzer output = TestCommon.exec(appJar, args);

View File

@ -25,14 +25,17 @@
/*
* @test
* @summary White box test for shared strings
* @requires vm.cds.write.archived.java.heap
* @requires vm.gc == null
* @requires vm.cds.write.mapped.java.heap
* @library /test/lib /test/hotspot/jtreg/runtime/cds/appcds
* @build jdk.test.whitebox.WhiteBox SharedStringsWb
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run driver SharedStringsWbTest
*/
// This test requires vm.cds.write.mapped.java.heap specifically, as it relies on the
// mechanism that dumps the entire string table, which the streaming solution does not do.
import java.io.*;
import jdk.test.whitebox.WhiteBox;

Some files were not shown because too many files have changed in this diff.