8254125: Assertion in cppVtables.cpp during builds on 32bit Windows

Reviewed-by: shade, ccheung
Ioi Lam 2020-10-16 05:14:46 +00:00
parent bdda2058c2
commit 5145bed028
11 changed files with 109 additions and 139 deletions

View File

@@ -113,9 +113,11 @@ void CompactHashtableWriter::allocate_table() {
_compact_entries = MetaspaceShared::new_ro_array<u4>(entries_space);
_stats->bucket_count = _num_buckets;
_stats->bucket_bytes = _compact_buckets->size() * BytesPerWord;
_stats->bucket_bytes = align_up(_compact_buckets->size() * BytesPerWord,
SharedSpaceObjectAlignment);
_stats->hashentry_count = _num_entries_written;
_stats->hashentry_bytes = _compact_entries->size() * BytesPerWord;
_stats->hashentry_bytes = align_up(_compact_entries->size() * BytesPerWord,
SharedSpaceObjectAlignment);
}
// Write the compact table's buckets
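The pattern above repeats throughout this commit: size estimates that used to add up raw BytesPerWord multiples now round each block up to SharedSpaceObjectAlignment, because every block allocated from the shared space is aligned to KlassAlignmentInBytes (see the comment moved into metaspaceShared.hpp below). A minimal standalone sketch of the arithmetic, assuming BytesPerWord == 4 and SharedSpaceObjectAlignment == 8 as on the 32-bit configuration named in the bug title; the table sizes below are invented:

// Not JDK code: illustrates why summing unaligned sizes under-counts on 32-bit.
#include <stdio.h>
#include <stddef.h>

// Same rounding as HotSpot's align_up(); alignment must be a power of two.
static size_t align_up(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t BytesPerWord = 4;                  // 32-bit VM
  const size_t SharedSpaceObjectAlignment = 8;    // == KlassAlignmentInBytes

  const size_t table_words[] = {25, 13, 7, 101};  // hypothetical table sizes
  size_t old_estimate = 0, new_estimate = 0;
  for (size_t words : table_words) {
    size_t bytes = words * BytesPerWord;
    old_estimate += bytes;                                        // pre-fix
    new_estimate += align_up(bytes, SharedSpaceObjectAlignment);  // post-fix
  }
  // The dump regions pad every block to SharedSpaceObjectAlignment, so the
  // space actually consumed matches new_estimate; on 32-bit, old_estimate can
  // fall short by up to 4 bytes per block and trip the size assertions.
  printf("old=%zu new=%zu\n", old_estimate, new_estimate);  // old=584 new=600
  return 0;
}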

View File

@@ -45,6 +45,7 @@
#include "memory/heapShared.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@@ -1860,7 +1861,7 @@ public:
bool do_entry(InstanceKlass* k, DumpTimeSharedClassInfo& info) {
if (!info.is_excluded()) {
size_t byte_size = RunTimeSharedClassInfo::byte_size(info._klass, info.num_verifier_constraints(), info.num_loader_constraints());
_shared_class_info_size += align_up(byte_size, BytesPerWord);
_shared_class_info_size += align_up(byte_size, SharedSpaceObjectAlignment);
}
return true; // keep on iterating
}
@@ -1877,8 +1878,9 @@ size_t SystemDictionaryShared::estimate_size_for_archive() {
CompactHashtableWriter::estimate_size(_dumptime_table->count_of(true)) +
CompactHashtableWriter::estimate_size(_dumptime_table->count_of(false));
if (_dumptime_lambda_proxy_class_dictionary != NULL) {
size_t bytesize = align_up(sizeof(RunTimeLambdaProxyClassInfo), SharedSpaceObjectAlignment);
total_size +=
(sizeof(RunTimeLambdaProxyClassInfo) * _dumptime_lambda_proxy_class_dictionary->_count) +
(bytesize * _dumptime_lambda_proxy_class_dictionary->_count) +
CompactHashtableWriter::estimate_size(_dumptime_lambda_proxy_class_dictionary->_count);
} else {
total_size += CompactHashtableWriter::estimate_size(0);

View File

@@ -154,7 +154,7 @@ ArchiveBuilder::ArchiveBuilder(DumpRegion* mc_region, DumpRegion* rw_region, Dum
_rw_region = rw_region;
_ro_region = ro_region;
_estimated_metsapceobj_bytes = 0;
_estimated_metaspaceobj_bytes = 0;
}
ArchiveBuilder::~ArchiveBuilder() {
@@ -205,7 +205,8 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
_num_type_array_klasses ++;
}
}
_estimated_metsapceobj_bytes += BytesPerWord; // See RunTimeSharedClassInfo::get_for()
// See RunTimeSharedClassInfo::get_for()
_estimated_metaspaceobj_bytes += align_up(BytesPerWord, SharedSpaceObjectAlignment);
} else if (ref->msotype() == MetaspaceObj::SymbolType) {
// Make sure the symbol won't be GC'ed while we are dumping the archive.
Symbol* sym = (Symbol*)ref->obj();
@@ -214,7 +215,7 @@ bool ArchiveBuilder::gather_klass_and_symbol(MetaspaceClosure::Ref* ref, bool re
}
int bytes = ref->size() * BytesPerWord;
_estimated_metsapceobj_bytes += bytes;
_estimated_metaspaceobj_bytes += align_up(bytes, SharedSpaceObjectAlignment);
return true; // recurse
}
@@ -238,8 +239,8 @@ void ArchiveBuilder::gather_klasses_and_symbols() {
if (DumpSharedSpaces) {
// To ensure deterministic contents in the static archive, we need to ensure that
// we iterate the MetsapceObjs in a deterministic order. It doesn't matter where
// the MetsapceObjs are located originally, as they are copied sequentially into
// we iterate the MetaspaceObjs in a deterministic order. It doesn't matter where
// the MetaspaceObjs are located originally, as they are copied sequentially into
// the archive during the iteration.
//
// The only issue here is that the symbol table and the system directories may be
@@ -483,7 +484,7 @@ void ArchiveBuilder::make_shallow_copy(DumpRegion *dump_region, SourceObjInfo* s
memcpy(dest, src, bytes);
intptr_t* archived_vtable = CppVtables::get_archived_cpp_vtable(ref->msotype(), (address)dest);
intptr_t* archived_vtable = CppVtables::get_archived_vtable(ref->msotype(), (address)dest);
if (archived_vtable != NULL) {
*(address*)dest = (address)archived_vtable;
ArchivePtrMarker::mark_pointer((address*)dest);
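For context on the patching step just above: every archived Metadata subtype is a polymorphic C++ object, so word 0 of the object is its compiler-generated vtable pointer (the same fact vtable_of() in cppVtables.cpp relies on). After the shallow memcpy, that word is overwritten with the cloned vtable returned by get_archived_vtable() and the slot is marked for relocation. A self-contained sketch under those assumptions, with a made-up FakeInstanceKlass standing in for a real Metadata type:

// Not JDK code: mimics the vptr patch done by ArchiveBuilder::make_shallow_copy().
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct FakeInstanceKlass {          // stand-in for an archived Metadata subtype
  virtual ~FakeInstanceKlass() {}
  int _payload = 42;
};

int main() {
  FakeInstanceKlass src;
  alignas(FakeInstanceKlass) unsigned char dest[sizeof(FakeInstanceKlass)];
  memcpy(dest, &src, sizeof(src));             // the shallow copy into the region

  intptr_t fake_cloned_vtable[8] = {};         // pretend result of
                                               // CppVtables::get_archived_vtable()
  *(intptr_t**)dest = fake_cloned_vtable;      // overwrite word 0, the C++ vptr
  // Real code then calls ArchivePtrMarker::mark_pointer((address*)dest) so the
  // patched vptr is relocated when the archive maps at a different address.

  printf("payload: %d\n", ((FakeInstanceKlass*)dest)->_payload);  // prints 42
  return 0;
}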

View File

@@ -198,7 +198,7 @@ protected:
virtual void iterate_roots(MetaspaceClosure* it, bool is_relocating_pointers) = 0;
// Conservative estimate for number of bytes needed for:
size_t _estimated_metsapceobj_bytes; // all archived MetsapceObj's.
size_t _estimated_metaspaceobj_bytes; // all archived MetaspaceObj's.
protected:
DumpRegion* _current_dump_space;

View File

@@ -37,10 +37,6 @@ address* ArchivePtrMarker::_ptr_base;
address* ArchivePtrMarker::_ptr_end;
bool ArchivePtrMarker::_compacted;
// Metaspace::allocate() requires that all blocks must be aligned with KlassAlignmentInBytes.
// We enforce the same alignment rule in blocks allocated from the shared space.
const int SharedSpaceObjectAlignment = KlassAlignmentInBytes;
void ArchivePtrMarker::initialize(CHeapBitMap* ptrmap, address* ptr_base, address* ptr_end) {
assert(_ptrmap == NULL, "initialize only once");
_ptr_base = ptr_base;

View File

@@ -49,8 +49,8 @@
// + at run time: we clone the actual contents of the vtables from libjvm.so
// into our own tables.
// Currently, the archive contain ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_PATCH_TYPES_DO(f) \
// Currently, the archive contains ONLY the following types of objects that have C++ vtables.
#define CPP_VTABLE_TYPES_DO(f) \
f(ConstantPool) \
f(InstanceKlass) \
f(InstanceClassLoaderKlass) \
@@ -78,60 +78,36 @@ public:
}
};
static inline intptr_t* vtable_of(Metadata* m) {
static inline intptr_t* vtable_of(const Metadata* m) {
return *((intptr_t**)m);
}
static inline DumpRegion* mc_region() {
return MetaspaceShared::misc_code_dump_space();
}
template <class T> class CppVtableCloner : public T {
static CppVtableInfo* _info;
template <class T> class CppVtableCloner {
static int get_vtable_length(const char* name);
public:
// Allocate and initialize the C++ vtable, starting from top, but do not go past end.
static intptr_t* allocate(const char* name);
// Allocate a clone of the vtable of T from the shared metaspace;
// Initialize the contents of this clone.
static CppVtableInfo* allocate_and_initialize(const char* name);
// Clone the vtable to ...
static intptr_t* clone_vtable(const char* name, CppVtableInfo* info);
static void zero_vtable_clone() {
assert(DumpSharedSpaces, "dump-time only");
_info->zero();
}
static bool is_valid_shared_object(const T* obj) {
intptr_t* vptr = *(intptr_t**)obj;
return vptr == _info->cloned_vtable();
}
// Copy the contents of the vtable of T into info->_cloned_vtable;
static void initialize(const char* name, CppVtableInfo* info);
static void init_orig_cpp_vtptr(int kind);
};
template <class T> CppVtableInfo* CppVtableCloner<T>::_info = NULL;
template <class T>
intptr_t* CppVtableCloner<T>::allocate(const char* name) {
assert(is_aligned(mc_region()->top(), sizeof(intptr_t)), "bad alignment");
CppVtableInfo* CppVtableCloner<T>::allocate_and_initialize(const char* name) {
int n = get_vtable_length(name);
_info = (CppVtableInfo*)mc_region()->allocate(CppVtableInfo::byte_size(n));
_info->set_vtable_size(n);
intptr_t* p = clone_vtable(name, _info);
assert((char*)p == mc_region()->top(), "must be");
return _info->cloned_vtable();
CppVtableInfo* info =
(CppVtableInfo*)MetaspaceShared::misc_code_dump_space()->allocate(CppVtableInfo::byte_size(n));
info->set_vtable_size(n);
initialize(name, info);
return info;
}
template <class T>
intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info) {
if (!DumpSharedSpaces) {
assert(_info == 0, "_info is initialized only at dump time");
_info = info; // Remember it -- it will be used by MetaspaceShared::is_valid_shared_method()
}
void CppVtableCloner<T>::initialize(const char* name, CppVtableInfo* info) {
T tmp; // Allocate temporary dummy metadata object to get to the original vtable.
int n = info->vtable_size();
intptr_t* srcvtable = vtable_of(&tmp);
@@ -141,7 +117,6 @@ intptr_t* CppVtableCloner<T>::clone_vtable(const char* name, CppVtableInfo* info
// safe to do memcpy.
log_debug(cds, vtables)("Copying %3d vtable entries for %s", n, name);
memcpy(dstvtable, srcvtable, sizeof(intptr_t) * n);
return dstvtable + n;
}
// To determine the size of the vtable for each type, we use the following
@@ -195,15 +170,12 @@ int CppVtableCloner<T>::get_vtable_length(const char* name) {
return vtable_len;
}
#define ALLOC_CPP_VTABLE_CLONE(c) \
_cloned_cpp_vtptrs[c##_Kind] = CppVtableCloner<c>::allocate(#c); \
ArchivePtrMarker::mark_pointer(&_cloned_cpp_vtptrs[c##_Kind]);
#define ALLOCATE_AND_INITIALIZE_VTABLE(c) \
_index[c##_Kind] = CppVtableCloner<c>::allocate_and_initialize(#c); \
ArchivePtrMarker::mark_pointer(&_index[c##_Kind]);
#define CLONE_CPP_VTABLE(c) \
p = CppVtableCloner<c>::clone_vtable(#c, (CppVtableInfo*)p);
#define ZERO_CPP_VTABLE(c) \
CppVtableCloner<c>::zero_vtable_clone();
#define INITIALIZE_VTABLE(c) \
CppVtableCloner<c>::initialize(#c, _index[c##_Kind]);
#define INIT_ORIG_CPP_VTPTRS(c) \
CppVtableCloner<c>::init_orig_cpp_vtptr(c##_Kind);
@@ -212,7 +184,7 @@ int CppVtableCloner<T>::get_vtable_length(const char* name) {
enum ClonedVtableKind {
// E.g., ConstantPool_Kind == 0, InstanceKlass_Kind == 1, etc.
CPP_VTABLE_PATCH_TYPES_DO(DECLARE_CLONED_VTABLE_KIND)
CPP_VTABLE_TYPES_DO(DECLARE_CLONED_VTABLE_KIND)
_num_cloned_vtable_kinds
};
@@ -235,23 +207,30 @@ void CppVtableCloner<T>::init_orig_cpp_vtptr(int kind) {
// ConstantPool* cp = ....; // an archived constant pool
// InstanceKlass* ik = ....;// an archived class
// the following holds true:
// _cloned_cpp_vtptrs[ConstantPool_Kind] == ((intptr_t**)cp)[0]
// _cloned_cpp_vtptrs[InstanceKlass_Kind] == ((intptr_t**)ik)[0]
static intptr_t** _cloned_cpp_vtptrs = NULL;
// _index[ConstantPool_Kind]->cloned_vtable() == ((intptr_t**)cp)[0]
// _index[InstanceKlass_Kind]->cloned_vtable() == ((intptr_t**)ik)[0]
CppVtableInfo** CppVtables::_index = NULL;
void CppVtables::allocate_cloned_cpp_vtptrs() {
char* CppVtables::dumptime_init() {
assert(DumpSharedSpaces, "must");
size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(intptr_t*);
_cloned_cpp_vtptrs = (intptr_t**)mc_region()->allocate(vtptrs_bytes);
size_t vtptrs_bytes = _num_cloned_vtable_kinds * sizeof(CppVtableInfo*);
_index = (CppVtableInfo**)MetaspaceShared::misc_code_dump_space()->allocate(vtptrs_bytes);
CPP_VTABLE_TYPES_DO(ALLOCATE_AND_INITIALIZE_VTABLE);
return (char*)_index;
}
void CppVtables::serialize_cloned_cpp_vtptrs(SerializeClosure* soc) {
soc->do_ptr((void**)&_cloned_cpp_vtptrs);
void CppVtables::serialize(SerializeClosure* soc) {
soc->do_ptr((void**)&_index);
if (soc->reading()) {
CPP_VTABLE_TYPES_DO(INITIALIZE_VTABLE);
}
}
intptr_t* CppVtables::get_archived_cpp_vtable(MetaspaceObj::Type msotype, address obj) {
intptr_t* CppVtables::get_archived_vtable(MetaspaceObj::Type msotype, address obj) {
if (!_orig_cpp_vtptrs_inited) {
CPP_VTABLE_PATCH_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
CPP_VTABLE_TYPES_DO(INIT_ORIG_CPP_VTPTRS);
_orig_cpp_vtptrs_inited = true;
}
@@ -283,50 +262,27 @@ intptr_t* CppVtables::get_archived_cpp_vtable(MetaspaceObj::Type msotype, addres
}
if (kind >= _num_cloned_vtable_kinds) {
fatal("Cannot find C++ vtable for " INTPTR_FORMAT " -- you probably added"
" a new subtype of Klass or MetaData without updating CPP_VTABLE_PATCH_TYPES_DO",
" a new subtype of Klass or MetaData without updating CPP_VTABLE_TYPES_DO",
p2i(obj));
}
}
if (kind >= 0) {
assert(kind < _num_cloned_vtable_kinds, "must be");
return _cloned_cpp_vtptrs[kind];
return _index[kind]->cloned_vtable();
} else {
return NULL;
}
}
// This can be called at both dump time and run time:
// - clone the contents of the c++ vtables into the space
// allocated by allocate_cpp_vtable_clones()
void CppVtables::clone_cpp_vtables(intptr_t* p) {
assert(DumpSharedSpaces || UseSharedSpaces, "sanity");
CPP_VTABLE_PATCH_TYPES_DO(CLONE_CPP_VTABLE);
}
void CppVtables::zero_cpp_vtable_clones_for_writing() {
void CppVtables::zero_archived_vtables() {
assert(DumpSharedSpaces, "dump-time only");
CPP_VTABLE_PATCH_TYPES_DO(ZERO_CPP_VTABLE);
}
// Allocate and initialize the C++ vtables, starting from top, but do not go past end.
char* CppVtables::allocate_cpp_vtable_clones() {
char* cloned_vtables = mc_region()->top(); // This is the beginning of all the cloned vtables
assert(DumpSharedSpaces, "dump-time only");
// Layout (each slot is a intptr_t):
// [number of slots in the first vtable = n1]
// [ <n1> slots for the first vtable]
// [number of slots in the first second = n2]
// [ <n2> slots for the second vtable]
// ...
// The order of the vtables is the same as the CPP_VTAB_PATCH_TYPES_DO macro.
CPP_VTABLE_PATCH_TYPES_DO(ALLOC_CPP_VTABLE_CLONE);
return cloned_vtables;
for (int kind = 0; kind < _num_cloned_vtable_kinds; kind ++) {
_index[kind]->zero();
}
}
bool CppVtables::is_valid_shared_method(const Method* m) {
assert(MetaspaceShared::is_in_shared_metaspace(m), "must be");
return CppVtableCloner<Method>::is_valid_shared_object(m);
return vtable_of(m) == _index[Method_Kind]->cloned_vtable();
}
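The data structure behind the new _index is spelled out by the SA comments added in FileMapInfo.java further down: each CppVtableInfo is a length word followed by the cloned vtable slots, and _index holds one CppVtableInfo* per ClonedVtableKind. A compilable sketch of that shape and of what allocate_and_initialize() and is_valid_shared_method() reduce to; FakeMethod and the vtable length of 2 are invented for the example:

// Not JDK code: models CppVtableInfo and the _index lookup described above.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct CppVtableInfo {
  intptr_t  _vtable_size;
  intptr_t  _cloned_vtable[1];                 // really _vtable_size slots follow
  intptr_t* cloned_vtable()      { return &_cloned_vtable[0]; }
  static size_t byte_size(int n) { return sizeof(intptr_t) * (n + 1); }
  void zero() { memset(cloned_vtable(), 0, _vtable_size * sizeof(intptr_t)); }
};

struct FakeMethod {                            // stand-in for Method and friends
  virtual ~FakeMethod() {}
  virtual void print() {}
};

static intptr_t* vtable_of(const void* m) {    // word 0 of a polymorphic object
  return *(intptr_t**)m;
}

int main() {
  // allocate_and_initialize(): reserve length + n slots, then copy the slots
  // out of a dummy object's live vtable.
  const int n = 2;                             // invented vtable length
  CppVtableInfo* info = (CppVtableInfo*)malloc(CppVtableInfo::byte_size(n));
  info->_vtable_size = n;
  FakeMethod tmp;
  memcpy(info->cloned_vtable(), vtable_of(&tmp), n * sizeof(intptr_t));

  // An archived object whose word 0 was patched to the clone (see
  // make_shallow_copy above) is then recognized the way the rewritten
  // is_valid_shared_method() does it: compare vptrs.
  FakeMethod m;
  *(intptr_t**)&m = info->cloned_vtable();
  printf("valid shared object: %d\n", vtable_of(&m) == info->cloned_vtable());

  // zero() is what zero_archived_vtables() calls per kind before writing.
  info->zero();
  free(info);
  return 0;
}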

View File

@@ -31,17 +31,16 @@
class Method;
class SerializeClosure;
class CppVtableInfo;
// Support for C++ vtables in CDS archive.
class CppVtables : AllStatic {
static void patch_cpp_vtable_pointers();
static CppVtableInfo** _index;
public:
static char* allocate_cpp_vtable_clones();
static void allocate_cloned_cpp_vtptrs();
static void clone_cpp_vtables(intptr_t* p);
static void zero_cpp_vtable_clones_for_writing();
static intptr_t* get_archived_cpp_vtable(MetaspaceObj::Type msotype, address obj);
static void serialize_cloned_cpp_vtptrs(SerializeClosure* sc);
static char* dumptime_init();
static void zero_archived_vtables();
static intptr_t* get_archived_vtable(MetaspaceObj::Type msotype, address obj);
static void serialize(SerializeClosure* sc);
static bool is_valid_shared_method(const Method* m) NOT_CDS_RETURN_(false);
};
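The renamed entry points split the work between dump time and run time, matching the call-site changes in metaspaceShared.cpp below: dumptime_init() allocates _index and clones every vtable, zero_archived_vtables() scrubs the process-local addresses before the archive is written, and serialize() both restores _index and, when the closure is reading, re-clones the live vtable contents, which is why the separate clone_cpp_vtables() step disappears from initialize_shared_spaces(). A small mock of that lifecycle with stand-in types (none of the definitions below are JDK code):

// Not JDK code: a toy model of the dump-time / run-time flow described above.
#include <stdio.h>

struct MockSerializeClosure {             // stands in for WriteClosure / ReadClosure
  bool   _reading;
  void** _archive_slot;                   // one pointer-sized slot "in the archive"
  bool reading() const { return _reading; }
  void do_ptr(void** p) { if (_reading) *p = *_archive_slot; else *_archive_slot = *p; }
};

static long  g_cloned_slots[4];           // the cloned vtable contents
static void* g_index = nullptr;           // stands in for CppVtables::_index

static void cppvtables_serialize(MockSerializeClosure* soc) {
  soc->do_ptr(&g_index);                  // CppVtables::serialize(): share _index
  if (soc->reading()) {                   // INITIALIZE_VTABLE on the read side:
    for (int i = 0; i < 4; i++) g_cloned_slots[i] = 100 + i;  // refill from live vtables
  }
}

int main() {
  void* archive_slot = nullptr;

  // Dump time: dumptime_init() builds the index, zero_archived_vtables() scrubs
  // it, then the write closure records the _index pointer into the archive.
  g_index = g_cloned_slots;
  for (int i = 0; i < 4; i++) g_cloned_slots[i] = 0;
  MockSerializeClosure wc{false, &archive_slot};
  cppvtables_serialize(&wc);

  // Run time (new process): the read closure restores _index and the reading()
  // branch re-clones the slots, so no separate clone_cpp_vtables() call is needed.
  g_index = nullptr;
  MockSerializeClosure rc{true, &archive_slot};
  cppvtables_serialize(&rc);
  printf("index restored: %d, first slot: %ld\n",
         g_index == g_cloned_slots, g_cloned_slots[0]);
  return 0;
}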

View File

@@ -201,7 +201,7 @@ public:
dump_ro_region();
relocate_pointers();
verify_estimate_size(_estimated_metsapceobj_bytes, "MetaspaceObjs");
verify_estimate_size(_estimated_metaspaceobj_bytes, "MetaspaceObjs");
char* serialized_data;
{
@@ -250,21 +250,27 @@ public:
size_t DynamicArchiveBuilder::estimate_archive_size() {
// size of the symbol table and two dictionaries, plus the RunTimeSharedClassInfo's
_estimated_hashtable_bytes = 0;
_estimated_hashtable_bytes += SymbolTable::estimate_size_for_archive();
_estimated_hashtable_bytes += SystemDictionaryShared::estimate_size_for_archive();
size_t symbol_table_est = SymbolTable::estimate_size_for_archive();
size_t dictionary_est = SystemDictionaryShared::estimate_size_for_archive();
_estimated_hashtable_bytes = symbol_table_est + dictionary_est;
_estimated_trampoline_bytes = estimate_trampoline_size();
size_t total = 0;
total += _estimated_metsapceobj_bytes;
total += _estimated_metaspaceobj_bytes;
total += _estimated_hashtable_bytes;
total += _estimated_trampoline_bytes;
// allow fragmentation at the end of each dump region
total += _total_dump_regions * reserve_alignment();
log_info(cds, dynamic)("_estimated_hashtable_bytes = " SIZE_FORMAT " + " SIZE_FORMAT " = " SIZE_FORMAT,
symbol_table_est, dictionary_est, _estimated_hashtable_bytes);
log_info(cds, dynamic)("_estimated_metaspaceobj_bytes = " SIZE_FORMAT, _estimated_metaspaceobj_bytes);
log_info(cds, dynamic)("_estimated_trampoline_bytes = " SIZE_FORMAT, _estimated_trampoline_bytes);
log_info(cds, dynamic)("total estimate bytes = " SIZE_FORMAT, total);
return align_up(total, reserve_alignment());
}
@@ -348,7 +354,7 @@ size_t DynamicArchiveBuilder::estimate_trampoline_size() {
// We have nothing to archive, but let's avoid having an empty region.
total = SharedRuntime::trampoline_size();
}
return total;
return align_up(total, SharedSpaceObjectAlignment);
}
void DynamicArchiveBuilder::make_trampolines() {

View File

@@ -501,7 +501,7 @@ void MetaspaceShared::serialize(SerializeClosure* soc) {
SystemDictionaryShared::serialize_well_known_klasses(soc);
soc->do_tag(--tag);
CppVtables::serialize_cloned_cpp_vtptrs(soc);
CppVtables::serialize(soc);
soc->do_tag(--tag);
CDS_JAVA_HEAP_ONLY(ClassLoaderDataShared::serialize(soc));
@@ -732,9 +732,7 @@ void VM_PopulateDumpSharedSpace::doit() {
builder.gather_source_objs();
CppVtables::allocate_cloned_cpp_vtptrs();
char* cloned_vtables = _mc_region.top();
CppVtables::allocate_cpp_vtable_clones();
char* cloned_vtables = CppVtables::dumptime_init();
{
_mc_region.pack(&_rw_region);
@@ -780,7 +778,7 @@ void VM_PopulateDumpSharedSpace::doit() {
// The vtable clones contain addresses of the current process.
// We don't want to write these addresses into the archive. Same for i2i buffer.
CppVtables::zero_cpp_vtable_clones_for_writing();
CppVtables::zero_archived_vtables();
memset(MetaspaceShared::i2i_entry_code_buffers(), 0,
MetaspaceShared::i2i_entry_code_buffers_size());
@@ -1707,12 +1705,10 @@ void MetaspaceShared::initialize_shared_spaces() {
FileMapInfo *static_mapinfo = FileMapInfo::current_info();
_i2i_entry_code_buffers = static_mapinfo->i2i_entry_code_buffers();
_i2i_entry_code_buffers_size = static_mapinfo->i2i_entry_code_buffers_size();
char* buffer = static_mapinfo->cloned_vtables();
CppVtables::clone_cpp_vtables((intptr_t*)buffer);
// Verify various attributes of the archive, plus initialize the
// shared string/symbol tables
buffer = static_mapinfo->serialized_data();
char* buffer = static_mapinfo->serialized_data();
intptr_t* array = (intptr_t*)buffer;
ReadClosure rc(&array);
serialize(&rc);

View File

@@ -35,6 +35,10 @@
#define MAX_SHARED_DELTA (0x7FFFFFFF)
// Metaspace::allocate() requires that all blocks must be aligned with KlassAlignmentInBytes.
// We enforce the same alignment rule in blocks allocated from the shared space.
const int SharedSpaceObjectAlignment = KlassAlignmentInBytes;
class outputStream;
class CHeapBitMap;
class FileMapInfo;
@@ -230,7 +234,7 @@ class MetaspaceShared : AllStatic {
template <typename T>
static size_t ro_array_bytesize(int length) {
size_t byte_size = Array<T>::byte_sizeof(length, sizeof(T));
return align_up(byte_size, BytesPerWord);
return align_up(byte_size, SharedSpaceObjectAlignment);
}
static address i2i_entry_code_buffers(size_t total_size);

View File

@@ -39,7 +39,7 @@ public class FileMapInfo {
// Fields for handling the copied C++ vtables
private static Address mcRegionBaseAddress;
private static Address mcRegionEndAddress;
private static Address vtablesStartAddress;
private static Address vtablesIndex;
// HashMap created by mapping the vTable addresses in the mc region with
// the corresponding metadata type.
@@ -95,10 +95,10 @@ public class FileMapInfo {
// char* mapped_base_address = header->_mapped_base_address
// size_t cloned_vtable_offset = header->_cloned_vtable_offset
// char* vtablesStartAddress = mapped_base_address + cloned_vtable_offset;
// CppVtableInfo** vtablesIndex = mapped_base_address + cloned_vtable_offset;
Address mapped_base_address = get_AddressField(FileMapHeader_type, header, "_mapped_base_address");
long cloned_vtable_offset = get_CIntegerField(FileMapHeader_type, header, "_cloned_vtables_offset");
vtablesStartAddress = mapped_base_address.addOffsetTo(cloned_vtable_offset);
vtablesIndex = mapped_base_address.addOffsetTo(cloned_vtable_offset);
// CDSFileMapRegion* mc_space = &header->_space[mc];
// char* mcRegionBaseAddress = mc_space->_mapped_base;
@@ -163,18 +163,26 @@ public void createVtableTypeMapping() {
public void createVtableTypeMapping() {
vTableTypeMap = new HashMap<Address, Type>();
long metadataVTableSize = 0;
long addressSize = VM.getVM().getAddressSize();
Address copiedVtableAddress = vtablesStartAddress;
for (int i=0; i < metadataTypeArray.length; i++) {
// The first entry denotes the vtable size.
metadataVTableSize = copiedVtableAddress.getAddressAt(0).asLongValue();
vTableTypeMap.put(copiedVtableAddress.addOffsetTo(addressSize), metadataTypeArray[i]);
// vtablesIndex points to this:
// class CppVtableInfo {
// intptr_t _vtable_size;
// intptr_t _cloned_vtable[1];
// ...
// };
// CppVtableInfo** CppVtables::_index;
// This is the index of all the cloned vtables. E.g., for
// ConstantPool* cp = ....; // an archived constant pool
// InstanceKlass* ik = ....;// an archived class
// the following holds true:
// &_index[ConstantPool_Kind]->_cloned_vtable[0] == ((intptr_t**)cp)[0]
// &_index[InstanceKlass_Kind]->_cloned_vtable[0] == ((intptr_t**)ik)[0]
// The '+ 1' below is to skip the entry containing the size of this metadata's vtable.
copiedVtableAddress =
copiedVtableAddress.addOffsetTo((metadataVTableSize + 1) * addressSize);
for (int i=0; i < metadataTypeArray.length; i++) {
Address vtableInfoAddress = vtablesIndex.getAddressAt(i * addressSize); // = _index[i]
Address vtableAddress = vtableInfoAddress.addOffsetTo(addressSize); // = &_index[i]->_cloned_vtable[0]
vTableTypeMap.put(vtableAddress, metadataTypeArray[i]);
}
}
}