mirror of
https://github.com/openjdk/jdk.git
synced 2026-05-13 23:19:36 +00:00
8252056: Move DumpRegion/ReadClosure/WriteClosure to archiveUtils.hpp
Reviewed-by: ccheung, minqi
This commit is contained in:
parent
56881d6465
commit
e4eaa2377b
@ -25,6 +25,7 @@
|
||||
#ifndef SHARE_MEMORY_ARCHIVEBUILDER_HPP
|
||||
#define SHARE_MEMORY_ARCHIVEBUILDER_HPP
|
||||
|
||||
#include "memory/archiveUtils.hpp"
|
||||
#include "memory/metaspaceClosure.hpp"
|
||||
#include "utilities/bitMap.hpp"
|
||||
#include "utilities/growableArray.hpp"
|
||||
|
||||
@ -24,11 +24,14 @@
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "memory/archiveUtils.hpp"
|
||||
#include "memory/dynamicArchive.hpp"
|
||||
#include "memory/filemap.hpp"
|
||||
#include "memory/heapShared.inline.hpp"
|
||||
#include "memory/metaspace.hpp"
|
||||
#include "memory/metaspaceShared.hpp"
|
||||
#include "oops/compressedOops.inline.hpp"
|
||||
#include "utilities/bitMap.inline.hpp"
|
||||
|
||||
#if INCLUDE_CDS
|
||||
|
||||
// Bitmap recording which pointer-sized slots in [_ptr_base, _ptr_end)
// hold pointers that must be relocated when the archive is mapped.
CHeapBitMap* ArchivePtrMarker::_ptrmap = NULL;
// Bounds of the address range covered by _ptrmap (see archiveUtils.hpp).
address* ArchivePtrMarker::_ptr_base;
address* ArchivePtrMarker::_ptr_end;
|
||||
@ -133,4 +136,160 @@ void ArchivePtrMarker::compact(size_t max_non_null_offset) {
|
||||
_compacted = true;
|
||||
}
|
||||
|
||||
#endif // INCLUDE_CDS
|
||||
// Grow the region's allocation mark (_top) to newtop, committing the
// underlying memory on demand. Reports a fatal out-of-space error if the
// request would exceed the reserved end of the region.
char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  // Only the main shared space is subject to the MAX_SHARED_DELTA limit:
  // archived pointers are stored as deltas, which must fit in 31 bits.
  if (_rs == MetaspaceShared::shared_rs()) {
    uintx delta;
    if (DynamicDumpSharedSpaces) {
      delta = DynamicArchive::object_delta_uintx(newtop);
    } else {
      delta = MetaspaceShared::object_delta_uintx(newtop);
    }
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not appear in any real world usage. This
      // happens only if you allocate more than 2GB of shared objects and would require
      // millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }
  }

  // Commit memory up to newtop before any caller writes into it.
  MetaspaceShared::commit_to(_rs, _vs, newtop);
  _top = newtop;
  return _top;
}
|
||||
|
||||
char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
|
||||
char* p = (char*)align_up(_top, alignment);
|
||||
char* newtop = p + align_up(num_bytes, alignment);
|
||||
expand_top_to(newtop);
|
||||
memset(p, 0, newtop - p);
|
||||
return p;
|
||||
}
|
||||
|
||||
// Append one word to the region. If need_to_mark is true, the word is a
// pointer and its location is recorded in the ArchivePtrMarker bitmap so
// it can be relocated when the archive is mapped.
void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t *p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop); // commit before the store below
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}
|
||||
|
||||
// Log this region's usage statistics: bytes used, percentage of the whole
// archive, reserved size, and the region's address adjusted by
// final_delta() (i.e., where it will reside in the final archive).
void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(_base + MetaspaceShared::final_delta()));
}
|
||||
|
||||
// Log an error line describing this region's capacity/usage; if this is
// the region that ran out of space, also log how many bytes were needed.
void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}
|
||||
|
||||
// Bind this region to a reserved/virtual space pair. The region initially
// spans the whole reserved space with nothing allocated (_top == _base).
void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_to().
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}
|
||||
|
||||
// Seal this region: round _end up to the reservation alignment and mark
// it packed (no further allocation allowed). If 'next' is given, it is
// initialized to start where this region ends, sharing the same
// reserved/virtual spaces.
void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::reserved_space_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}
|
||||
|
||||
// Serialize an oop: NULL is written as 0; otherwise the compressed
// (narrow) encoding of the oop is appended to the dump region.
void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}
|
||||
|
||||
// Serialize a word-aligned memory region: first a size tag (checked by
// ReadClosure::do_region on load), then the region's words, each marked
// as a potential pointer so it can be relocated.
void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start, true);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}
|
||||
|
||||
// Deserialize a pointer into *p, which must still be NULL. The assert
// catches stream desynchronization: tag words are small negative values,
// so a value in (-100, 0) means we are misaligned with the writer.
void ReadClosure::do_ptr(void** p) {
  assert(*p == NULL, "initializing previous initialized pointer.");
  intptr_t obj = nextPtr();
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}
|
||||
|
||||
// Deserialize a 32-bit value: consume the next archived word and
// truncate it to u4.
void ReadClosure::do_u4(u4* p) {
  *p = (u4)(uintx(nextPtr()));
}
|
||||
|
||||
void ReadClosure::do_bool(bool* p) {
|
||||
intptr_t obj = nextPtr();
|
||||
*p = (bool)(uintx(obj));
|
||||
}
|
||||
|
||||
// Read the next word as a tag and verify it matches the expected tag.
// A mismatch means the read stream is out of sync with what was written.
void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "old tag doesn't match");
  // Also checked in product builds via assert_mark.
  FileMapInfo::assert_mark(tag == old_tag);
}
|
||||
|
||||
// Deserialize an oop from its narrow encoding. Yields NULL if the stored
// value was 0 or the open archive heap region was not mapped; otherwise
// decodes the narrow oop relative to the mapped archive heap.
void ReadClosure::do_oop(oop *p) {
  narrowOop o = (narrowOop)nextPtr();
  if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
    *p = NULL;
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archived heap object is not allowed");
    assert(HeapShared::open_archive_heap_region_mapped(),
           "Open archive heap region is not mapped");
    *p = HeapShared::decode_from_archive(o);
  }
}
|
||||
|
||||
// Deserialize a word-aligned memory region of 'size' bytes into
// [start, start + size), after verifying the size tag recorded by
// WriteClosure::do_region.
void ReadClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  for (size_t remaining = size; remaining > 0; remaining -= sizeof(intptr_t)) {
    *(intptr_t*)start = nextPtr();
    start += sizeof(intptr_t);
  }
}
|
||||
|
||||
@ -26,9 +26,13 @@
|
||||
#define SHARE_MEMORY_ARCHIVEUTILS_HPP
|
||||
|
||||
#include "logging/log.hpp"
|
||||
#include "memory/iterator.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "utilities/bitMap.hpp"
|
||||
|
||||
class ReservedSpace;
|
||||
class VirtualSpace;
|
||||
|
||||
// ArchivePtrMarker is used to mark the location of pointers embedded in a CDS archive. E.g., when an
|
||||
// InstanceKlass k is dumped, we mark the location of the k->_name pointer by effectively calling
|
||||
// mark_pointer(/*ptr_loc=*/&k->_name). It's required that (_ptr_base <= ptr_loc < _ptr_end). _ptr_base is
|
||||
@ -139,5 +143,100 @@ class SharedDataRelocator: public BitMapClosure {
|
||||
inline bool do_bit(size_t offset);
|
||||
};
|
||||
|
||||
// A contiguous sub-region of the CDS archive being dumped. Space is
// reserved up front ([_base, _end)) and committed lazily as _top advances.
class DumpRegion {
private:
  const char* _name;    // region name used in logging ("mc", "rw", ...)
  char* _base;          // start of the region
  char* _top;           // current allocation high-water mark
  char* _end;           // exclusive end of the reserved space
  bool _is_packed;      // true once pack() has sealed the region
  ReservedSpace* _rs;   // reservation shared by consecutive regions
  VirtualSpace* _vs;    // used to commit memory on demand

public:
  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}

  // Grow _top to newtop, committing memory; fatal error on overflow.
  char* expand_top_to(char* newtop);
  // Allocate a zero-filled, aligned chunk of num_bytes from the region.
  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord);

  // Append one word; if need_to_mark, record its location for relocation.
  void append_intptr_t(intptr_t n, bool need_to_mark = false);

  char* base() const { return _base; }
  char* top() const { return _top; }
  char* end() const { return _end; }
  size_t reserved() const { return _end - _base; }
  size_t used() const { return _top - _base; }
  bool is_packed() const { return _is_packed; }
  // Allocation is possible only after init() and before pack().
  bool is_allocatable() const {
    return !is_packed() && _base != NULL;
  }

  // Log usage statistics / out-of-space diagnostics.
  void print(size_t total_bytes) const;
  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes);

  // Bind the region to a reserved/virtual space pair (0 bytes committed).
  void init(ReservedSpace* rs, VirtualSpace* vs);

  // Seal the region; optionally start 'next' where this region ends.
  void pack(DumpRegion* next = NULL);

  // True if p lies within the allocated part of the region.
  bool contains(char* p) {
    return base() <= p && p < top();
  }
};
|
||||
|
||||
// Closure for serializing initialization data out to a data area to be
|
||||
// written to the shared file.
|
||||
|
||||
// Serializes initialization data into a DumpRegion, one word per datum,
// to be written to the shared archive file. Counterpart of ReadClosure.
class WriteClosure : public SerializeClosure {
private:
  DumpRegion* _dump_region;  // destination for serialized words

public:
  WriteClosure(DumpRegion* r) {
    _dump_region = r;
  }

  // Append a pointer word and mark its location for relocation.
  void do_ptr(void** p) {
    _dump_region->append_intptr_t((intptr_t)*p, true);
  }

  // Scalars are widened to a full word; ReadClosure narrows them back.
  void do_u4(u4* p) {
    _dump_region->append_intptr_t((intptr_t)(*p));
  }

  void do_bool(bool *p) {
    _dump_region->append_intptr_t((intptr_t)(*p));
  }

  // Tags are consistency markers verified by ReadClosure::do_tag.
  void do_tag(int tag) {
    _dump_region->append_intptr_t((intptr_t)tag);
  }

  void do_oop(oop* o);
  void do_region(u_char* start, size_t size);
  bool reading() const { return false; }  // this closure writes
};
|
||||
|
||||
// Closure for serializing initialization data in from a data area
|
||||
// (ptr_array) read from the shared file.
|
||||
|
||||
// Deserializes initialization data from a word array (ptr_array) that was
// read from the shared archive file. Counterpart of WriteClosure.
class ReadClosure : public SerializeClosure {
private:
  intptr_t** _ptr_array;  // cursor into the mapped serialized data

  // Consume and return the next word, advancing the shared cursor.
  inline intptr_t nextPtr() {
    return *(*_ptr_array)++;
  }

public:
  ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }

  void do_ptr(void** p);
  void do_u4(u4* p);
  void do_bool(bool *p);
  void do_tag(int tag);
  void do_oop(oop *p);
  void do_region(u_char* start, size_t size);
  bool reading() const { return true; }  // this closure reads
};
|
||||
|
||||
#endif // SHARE_MEMORY_ARCHIVEUTILS_HPP
|
||||
|
||||
@ -114,92 +114,6 @@ bool MetaspaceShared::_use_optimized_module_handling = true;
|
||||
// The s0/s1 and oa0/oa1 regions are populated inside HeapShared::archive_java_heap_objects.
|
||||
// Their layout is independent of the other 4 regions.
|
||||
|
||||
// Grow the region's allocation mark (_top) to newtop, committing memory
// on demand; fatal out-of-space error if the reserved end is exceeded.
char* DumpRegion::expand_top_to(char* newtop) {
  assert(is_allocatable(), "must be initialized and not packed");
  assert(newtop >= _top, "must not grow backwards");
  if (newtop > _end) {
    MetaspaceShared::report_out_of_space(_name, newtop - _top);
    ShouldNotReachHere();
  }

  // Only the main shared space is subject to the 31-bit delta limit.
  if (_rs == MetaspaceShared::shared_rs()) {
    uintx delta;
    if (DynamicDumpSharedSpaces) {
      delta = DynamicArchive::object_delta_uintx(newtop);
    } else {
      delta = MetaspaceShared::object_delta_uintx(newtop);
    }
    if (delta > MAX_SHARED_DELTA) {
      // This is just a sanity check and should not appear in any real world usage. This
      // happens only if you allocate more than 2GB of shared objects and would require
      // millions of shared classes.
      vm_exit_during_initialization("Out of memory in the CDS archive",
                                    "Please reduce the number of shared classes.");
    }
  }

  MetaspaceShared::commit_to(_rs, _vs, newtop);
  _top = newtop;
  return _top;
}

// Carve a zero-filled, aligned chunk of num_bytes out of this region.
char* DumpRegion::allocate(size_t num_bytes, size_t alignment) {
  char* p = (char*)align_up(_top, alignment);
  char* newtop = p + align_up(num_bytes, alignment);
  expand_top_to(newtop);
  memset(p, 0, newtop - p);
  return p;
}

// Append one word; if need_to_mark, record its location in the pointer
// relocation bitmap.
void DumpRegion::append_intptr_t(intptr_t n, bool need_to_mark) {
  assert(is_aligned(_top, sizeof(intptr_t)), "bad alignment");
  intptr_t *p = (intptr_t*)_top;
  char* newtop = _top + sizeof(intptr_t);
  expand_top_to(newtop);
  *p = n;
  if (need_to_mark) {
    ArchivePtrMarker::mark_pointer(p);
  }
}

// Log this region's usage statistics.
void DumpRegion::print(size_t total_bytes) const {
  log_debug(cds)("%-3s space: " SIZE_FORMAT_W(9) " [ %4.1f%% of total] out of " SIZE_FORMAT_W(9) " bytes [%5.1f%% used] at " INTPTR_FORMAT,
                 _name, used(), percent_of(used(), total_bytes), reserved(), percent_of(used(), reserved()),
                 p2i(_base + MetaspaceShared::final_delta()));
}

// Log capacity/usage; add the required byte count for the failing region.
void DumpRegion::print_out_of_space_msg(const char* failing_region, size_t needed_bytes) {
  log_error(cds)("[%-8s] " PTR_FORMAT " - " PTR_FORMAT " capacity =%9d, allocated =%9d",
                 _name, p2i(_base), p2i(_top), int(_end - _base), int(_top - _base));
  if (strcmp(_name, failing_region) == 0) {
    log_error(cds)(" required = %d", int(needed_bytes));
  }
}

// Bind this region to a reserved/virtual space pair (0 bytes committed).
void DumpRegion::init(ReservedSpace* rs, VirtualSpace* vs) {
  _rs = rs;
  _vs = vs;
  // Start with 0 committed bytes. The memory will be committed as needed by
  // MetaspaceShared::commit_to().
  if (!_vs->initialize(*_rs, 0)) {
    fatal("Unable to allocate memory for shared space");
  }
  _base = _top = _rs->base();
  _end = _rs->end();
}

// Seal the region; optionally start 'next' where this region ends.
void DumpRegion::pack(DumpRegion* next) {
  assert(!is_packed(), "sanity");
  _end = (char*)align_up(_top, MetaspaceShared::reserved_space_alignment());
  _is_packed = true;
  if (next != NULL) {
    next->_rs = _rs;
    next->_vs = _vs;
    next->_base = next->_top = this->_end;
    next->_end = _rs->end();
  }
}
|
||||
|
||||
static DumpRegion _mc_region("mc"), _ro_region("ro"), _rw_region("rw"), _symbol_region("symbols");
|
||||
static size_t _total_closed_archive_region_size = 0, _total_open_archive_region_size = 0;
|
||||
|
||||
@ -983,30 +897,6 @@ bool MetaspaceShared::is_valid_shared_method(const Method* m) {
|
||||
return CppVtableCloner<Method>::is_valid_shared_object(m);
|
||||
}
|
||||
|
||||
// Serialize an oop: NULL is written as 0; otherwise the compressed
// (narrow) encoding of the oop is appended to the dump region.
void WriteClosure::do_oop(oop* o) {
  if (*o == NULL) {
    _dump_region->append_intptr_t(0);
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archiving heap object is not allowed");
    _dump_region->append_intptr_t(
      (intptr_t)CompressedOops::encode_not_null(*o));
  }
}

// Serialize a word-aligned memory region: a size tag followed by the
// region's words, each marked as a potential pointer for relocation.
void WriteClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    _dump_region->append_intptr_t(*(intptr_t*)start, true);
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}
|
||||
|
||||
// Populate the shared space.
|
||||
|
||||
class VM_PopulateDumpSharedSpace: public VM_Operation {
|
||||
private:
|
||||
GrowableArray<MemRegion> *_closed_archive_heap_regions;
|
||||
@ -1564,56 +1454,6 @@ void VM_PopulateDumpSharedSpace::dump_archive_heap_oopmaps(GrowableArray<MemRegi
|
||||
}
|
||||
#endif // INCLUDE_CDS_JAVA_HEAP
|
||||
|
||||
// Deserialize a pointer into *p (must still be NULL). The assert catches
// stream desynchronization: tags are small negative values.
void ReadClosure::do_ptr(void** p) {
  assert(*p == NULL, "initializing previous initialized pointer.");
  intptr_t obj = nextPtr();
  assert((intptr_t)obj >= 0 || (intptr_t)obj < -100,
         "hit tag while initializing ptrs.");
  *p = (void*)obj;
}

// Deserialize a 32-bit value from the next archived word.
void ReadClosure::do_u4(u4* p) {
  intptr_t obj = nextPtr();
  *p = (u4)(uintx(obj));
}

// Deserialize a bool from the next archived word.
void ReadClosure::do_bool(bool* p) {
  intptr_t obj = nextPtr();
  *p = (bool)(uintx(obj));
}

// Read the next word as a tag and verify it matches the expected tag.
void ReadClosure::do_tag(int tag) {
  int old_tag;
  old_tag = (int)(intptr_t)nextPtr();
  // do_int(&old_tag);
  assert(tag == old_tag, "old tag doesn't match");
  FileMapInfo::assert_mark(tag == old_tag);
}

// Deserialize an oop from its narrow encoding; NULL if the value was 0
// or the open archive heap region was not mapped.
void ReadClosure::do_oop(oop *p) {
  narrowOop o = (narrowOop)nextPtr();
  if (o == 0 || !HeapShared::open_archive_heap_region_mapped()) {
    *p = NULL;
  } else {
    assert(HeapShared::is_heap_object_archiving_allowed(),
           "Archived heap object is not allowed");
    assert(HeapShared::open_archive_heap_region_mapped(),
           "Open archive heap region is not mapped");
    *p = HeapShared::decode_from_archive(o);
  }
}

// Deserialize a word-aligned memory region, verifying the size tag first.
void ReadClosure::do_region(u_char* start, size_t size) {
  assert((intptr_t)start % sizeof(intptr_t) == 0, "bad alignment");
  assert(size % sizeof(intptr_t) == 0, "bad size");
  do_tag((int)size);
  while (size > 0) {
    *(intptr_t*)start = nextPtr();
    start += sizeof(intptr_t);
    size -= sizeof(intptr_t);
  }
}
|
||||
|
||||
void MetaspaceShared::set_shared_metaspace_range(void* base, void *static_top, void* top) {
|
||||
assert(base <= static_top && static_top <= top, "must be");
|
||||
_shared_metaspace_static_top = static_top;
|
||||
@ -2206,8 +2046,3 @@ void MetaspaceShared::print_on(outputStream* st) {
|
||||
}
|
||||
st->cr();
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@ -36,8 +36,9 @@
|
||||
#define MAX_SHARED_DELTA (0x7FFFFFFF)
|
||||
|
||||
class outputStream;
|
||||
class FileMapInfo;
|
||||
class CHeapBitMap;
|
||||
class FileMapInfo;
|
||||
class DumpRegion;
|
||||
struct ArchiveHeapOopmapInfo;
|
||||
|
||||
enum MapArchiveResult {
|
||||
@ -55,113 +56,6 @@ public:
|
||||
CompactHashtableStats string;
|
||||
};
|
||||
|
||||
#if INCLUDE_CDS
|
||||
// A contiguous sub-region of the CDS archive being dumped. Space is
// reserved up front ([_base, _end)) and committed lazily as _top advances.
class DumpRegion {
private:
  const char* _name;    // region name used in logging
  char* _base;          // start of the region
  char* _top;           // current allocation high-water mark
  char* _end;           // exclusive end of the reserved space
  bool _is_packed;      // true once pack() has sealed the region
  ReservedSpace* _rs;   // reservation shared by consecutive regions
  VirtualSpace* _vs;    // used to commit memory on demand

public:
  DumpRegion(const char* name) : _name(name), _base(NULL), _top(NULL), _end(NULL), _is_packed(false) {}

  // Grow _top to newtop, committing memory; fatal error on overflow.
  char* expand_top_to(char* newtop);
  // Allocate a zero-filled, aligned chunk of num_bytes from the region.
  char* allocate(size_t num_bytes, size_t alignment=BytesPerWord);

  // Append one word; if need_to_mark, record its location for relocation.
  void append_intptr_t(intptr_t n, bool need_to_mark = false);

  char* base() const { return _base; }
  char* top() const { return _top; }
  char* end() const { return _end; }
  size_t reserved() const { return _end - _base; }
  size_t used() const { return _top - _base; }
  bool is_packed() const { return _is_packed; }
  // Allocation is possible only after init() and before pack().
  bool is_allocatable() const {
    return !is_packed() && _base != NULL;
  }

  void print(size_t total_bytes) const;
  void print_out_of_space_msg(const char* failing_region, size_t needed_bytes);

  // Bind the region to a reserved/virtual space pair (0 bytes committed).
  void init(ReservedSpace* rs, VirtualSpace* vs);

  // Seal the region; optionally start 'next' where this region ends.
  void pack(DumpRegion* next = NULL);

  // True if p lies within the allocated part of the region.
  bool contains(char* p) {
    return base() <= p && p < top();
  }
};

// Closure for serializing initialization data out to a data area to be
// written to the shared file.

class WriteClosure : public SerializeClosure {
private:
  DumpRegion* _dump_region;  // destination for serialized words

public:
  WriteClosure(DumpRegion* r) {
    _dump_region = r;
  }

  // Append a pointer word and mark its location for relocation.
  void do_ptr(void** p) {
    _dump_region->append_intptr_t((intptr_t)*p, true);
  }

  // Scalars are widened to a full word; ReadClosure narrows them back.
  void do_u4(u4* p) {
    _dump_region->append_intptr_t((intptr_t)(*p));
  }

  void do_bool(bool *p) {
    _dump_region->append_intptr_t((intptr_t)(*p));
  }

  // Tags are consistency markers verified by ReadClosure::do_tag.
  void do_tag(int tag) {
    _dump_region->append_intptr_t((intptr_t)tag);
  }

  void do_oop(oop* o);

  void do_region(u_char* start, size_t size);

  bool reading() const { return false; }
};

// Closure for serializing initialization data in from a data area
// (ptr_array) read from the shared file.

class ReadClosure : public SerializeClosure {
private:
  intptr_t** _ptr_array;  // cursor into the mapped serialized data

  // Consume and return the next word, advancing the shared cursor.
  inline intptr_t nextPtr() {
    return *(*_ptr_array)++;
  }

public:
  ReadClosure(intptr_t** ptr_array) { _ptr_array = ptr_array; }

  void do_ptr(void** p);

  void do_u4(u4* p);

  void do_bool(bool *p);

  void do_tag(int tag);

  void do_oop(oop *p);

  void do_region(u_char* start, size_t size);

  bool reading() const { return true; }
};
|
||||
|
||||
#endif // INCLUDE_CDS
|
||||
|
||||
// Class Data Sharing Support
|
||||
class MetaspaceShared : AllStatic {
|
||||
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user