8378152: Upstream AOT heap object improvements from Leyden repo

Reviewed-by: jrose, kvn
This commit is contained in:
Ioi Lam 2026-03-17 05:58:33 +00:00
parent 3e231755a0
commit a1e4621b30
9 changed files with 185 additions and 104 deletions

View File

@ -64,6 +64,11 @@ HeapRootSegments AOTMappedHeapWriter::_heap_root_segments;
address AOTMappedHeapWriter::_requested_bottom;
address AOTMappedHeapWriter::_requested_top;
static size_t _num_strings = 0;
static size_t _string_bytes = 0;
static size_t _num_packages = 0;
static size_t _num_protection_domains = 0;
GrowableArrayCHeap<AOTMappedHeapWriter::NativePointerInfo, mtClassShared>* AOTMappedHeapWriter::_native_pointers;
GrowableArrayCHeap<oop, mtClassShared>* AOTMappedHeapWriter::_source_objs;
GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedHeapWriter::_source_objs_order;
@ -71,8 +76,6 @@ GrowableArrayCHeap<AOTMappedHeapWriter::HeapObjOrder, mtClassShared>* AOTMappedH
AOTMappedHeapWriter::BufferOffsetToSourceObjectTable*
AOTMappedHeapWriter::_buffer_offset_to_source_obj_table = nullptr;
DumpedInternedStrings *AOTMappedHeapWriter::_dumped_interned_strings = nullptr;
typedef HashTable<
size_t, // offset of a filler from AOTMappedHeapWriter::buffer_bottom()
size_t, // size of this filler (in bytes)
@ -87,7 +90,6 @@ void AOTMappedHeapWriter::init() {
Universe::heap()->collect(GCCause::_java_lang_system_gc);
_buffer_offset_to_source_obj_table = new (mtClassShared) BufferOffsetToSourceObjectTable(/*size (prime)*/36137, /*max size*/1 * M);
_dumped_interned_strings = new (mtClass)DumpedInternedStrings(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
_fillers = new (mtClassShared) FillersTable();
_requested_bottom = nullptr;
_requested_top = nullptr;
@ -141,9 +143,6 @@ int AOTMappedHeapWriter::narrow_oop_shift() {
// Free the dump-time tables that hold raw oops (_source_objs and
// _dumped_interned_strings). Raw oops are valid only while GC cannot move
// objects, so these tables must be discarded rather than kept past that point.
void AOTMappedHeapWriter::delete_tables_with_raw_oops() {
delete _source_objs;
_source_objs = nullptr;
delete _dumped_interned_strings;
_dumped_interned_strings = nullptr;
}
void AOTMappedHeapWriter::add_source_obj(oop src_obj) {
@ -181,25 +180,6 @@ bool AOTMappedHeapWriter::is_too_large_to_archive(size_t size) {
}
}
// Keep track of the contents of the archived interned string table. This table
// is used only by CDSHeapVerifier.
// Record an interned string in _dumped_interned_strings. On first insertion,
// forbid string deduplication for it so its value array cannot later be
// swapped for one that is not in the archive.
void AOTMappedHeapWriter::add_to_dumped_interned_strings(oop string) {
assert_at_safepoint(); // DumpedInternedStrings uses raw oops
assert(!is_string_too_large_to_archive(string), "must be");
bool created;
_dumped_interned_strings->put_if_absent(string, true, &created);
if (created) {
// Prevent string deduplication from changing the value field to
// something not in the archive.
java_lang_String::set_deduplication_forbidden(string);
// Grow only when a new entry was actually added.
_dumped_interned_strings->maybe_grow();
}
}
// Returns true iff o was previously recorded via add_to_dumped_interned_strings().
bool AOTMappedHeapWriter::is_dumped_interned_string(oop o) {
return _dumped_interned_strings->get(o) != nullptr;
}
// Various lookup functions between source_obj, buffered_obj and requested_obj
bool AOTMappedHeapWriter::is_in_requested_range(oop o) {
assert(_requested_bottom != nullptr, "do not call before _requested_bottom is initialized");
@ -430,6 +410,7 @@ void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtC
assert(info != nullptr, "must be");
size_t buffer_offset = copy_one_source_obj_to_buffer(src_obj);
info->set_buffer_offset(buffer_offset);
assert(buffer_offset <= 0x7fffffff, "sanity");
OopHandle handle(Universe::vm_global(), src_obj);
_buffer_offset_to_source_obj_table->put_when_absent(buffer_offset, handle);
@ -442,6 +423,9 @@ void AOTMappedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtC
log_info(aot)("Size of heap region = %zu bytes, %d objects, %d roots, %d native ptrs",
_buffer_used, _source_objs->length() + 1, roots->length(), _num_native_ptrs);
log_info(aot)(" strings = %8zu (%zu bytes)", _num_strings, _string_bytes);
log_info(aot)(" packages = %8zu", _num_packages);
log_info(aot)(" protection domains = %8zu", _num_protection_domains);
}
size_t AOTMappedHeapWriter::filler_array_byte_size(int length) {
@ -530,7 +514,25 @@ void update_buffered_object_field(address buffered_obj, int field_offset, T valu
*field_addr = value;
}
// Accumulate per-category statistics (strings, packages, protection domains)
// for one source object, for the summary logged by copy_source_objs_to_buffer().
void AOTMappedHeapWriter::update_stats(oop src_obj) {
if (java_lang_String::is_instance(src_obj)) {
_num_strings ++;
// Count both the String object itself and its backing value array.
_string_bytes += src_obj->size() * HeapWordSize;
_string_bytes += java_lang_String::value(src_obj)->size() * HeapWordSize;
} else {
// Non-strings are classified by exact class name.
Klass* k = src_obj->klass();
Symbol* name = k->name();
if (name->equals("java/lang/NamedPackage") || name->equals("java/lang/Package")) {
_num_packages ++;
} else if (name->equals("java/security/ProtectionDomain")) {
_num_protection_domains ++;
}
}
}
size_t AOTMappedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
update_stats(src_obj);
assert(!is_too_large_to_archive(src_obj), "already checked");
size_t byte_size = src_obj->size() * HeapWordSize;
assert(byte_size > 0, "no zero-size objects");

View File

@ -40,20 +40,6 @@
class MemRegion;
#if INCLUDE_CDS_JAVA_HEAP
// Set of interned String oops recorded at dump time (value type bool is a
// dummy). Keys are raw oops hashed by string content
// (HeapShared::string_oop_hash), so the table is only usable while GC cannot
// move objects — see the safepoint assert in
// AOTMappedHeapWriter::add_to_dumped_interned_strings().
class DumpedInternedStrings :
public ResizeableHashTable<oop, bool,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::string_oop_hash>
{
public:
DumpedInternedStrings(unsigned size, unsigned max_size) :
ResizeableHashTable<oop, bool,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::string_oop_hash>(size, max_size) {}
};
class AOTMappedHeapWriter : AllStatic {
friend class HeapShared;
friend class AOTMappedHeapLoader;
@ -131,7 +117,6 @@ private:
static GrowableArrayCHeap<NativePointerInfo, mtClassShared>* _native_pointers;
static GrowableArrayCHeap<oop, mtClassShared>* _source_objs;
static DumpedInternedStrings *_dumped_interned_strings;
// We sort _source_objs_order to minimize the number of bits in ptrmap and oopmap.
// See comments near the body of AOTMappedHeapWriter::compare_objs_by_oop_fields().
@ -190,6 +175,7 @@ private:
static void copy_roots_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
static void copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots);
static size_t copy_one_source_obj_to_buffer(oop src_obj);
static void update_stats(oop src_obj);
static void maybe_fill_gc_region_gap(size_t required_byte_size);
static size_t filler_array_byte_size(int length);
@ -227,8 +213,6 @@ public:
static bool is_too_large_to_archive(size_t size);
static bool is_too_large_to_archive(oop obj);
static bool is_string_too_large_to_archive(oop string);
static bool is_dumped_interned_string(oop o);
static void add_to_dumped_interned_strings(oop string);
static void write(GrowableArrayCHeap<oop, mtClassShared>*, AOTMappedHeapInfo* heap_info);
static address requested_address(); // requested address of the lowest archived heap object
static size_t get_filler_size_at(address buffered_addr);

View File

@ -96,7 +96,7 @@ class KeepAliveObjectsTable : public HashTable<oop, bool,
36137, // prime number
AnyObj::C_HEAP,
mtClassShared,
HeapShared::oop_hash> {};
HeapShared::oop_address_hash> {};
static KeepAliveObjectsTable* _keep_alive_objs_table;
static OopHandle _keep_alive_objs_array;

View File

@ -242,20 +242,6 @@ void AOTStreamedHeapWriter::copy_roots_max_dfs_to_buffer(int roots_length) {
}
}
// Returns true iff obj is a java.lang.String whose exact oop is the canonical
// entry in the StringTable (i.e., it is the interned instance, not merely
// equal to one).
static bool is_interned_string(oop obj) {
if (!java_lang_String::is_instance(obj)) {
return false;
}
ResourceMark rm;
int len;
jchar* name = java_lang_String::as_unicode_string_or_null(obj, len);
if (name == nullptr) {
// as_unicode_string_or_null() returns null only on allocation failure.
fatal("Insufficient memory for dumping");
}
return StringTable::lookup(name, len) == obj;
}
static BitMap::idx_t bit_idx_for_buffer_offset(size_t buffer_offset) {
if (UseCompressedOops) {
return BitMap::idx_t(buffer_offset / sizeof(narrowOop));
@ -264,10 +250,6 @@ static BitMap::idx_t bit_idx_for_buffer_offset(size_t buffer_offset) {
}
}
// An interned string counts as "dumped" only if it is also present in the
// cached-object table (i.e., it was actually selected for archiving).
bool AOTStreamedHeapWriter::is_dumped_interned_string(oop obj) {
return is_interned_string(obj) && HeapShared::get_cached_oop_info(obj) != nullptr;
}
void AOTStreamedHeapWriter::copy_source_objs_to_buffer(GrowableArrayCHeap<oop, mtClassShared>* roots) {
for (int i = 0; i < _source_objs->length(); i++) {
oop src_obj = _source_objs->at(i);
@ -325,7 +307,7 @@ size_t AOTStreamedHeapWriter::copy_one_source_obj_to_buffer(oop src_obj) {
ensure_buffer_space(new_used);
if (is_interned_string(src_obj)) {
if (HeapShared::is_interned_string(src_obj)) {
java_lang_String::hash_code(src_obj); // Sets the hash code field(s)
java_lang_String::set_deduplication_forbidden(src_obj); // Allows faster interning at runtime
assert(java_lang_String::hash_is_set(src_obj), "hash must be set");
@ -402,7 +384,7 @@ void AOTStreamedHeapWriter::update_header_for_buffered_addr(address buffered_add
mw = mw.copy_set_hash(src_hash);
}
if (is_interned_string(src_obj)) {
if (HeapShared::is_interned_string(src_obj)) {
// Mark the mark word of interned string so the loader knows to link these to
// the string table at runtime.
mw = mw.set_marked();

View File

@ -148,8 +148,6 @@ public:
return size_t(buffered_addr) - size_t(buffer_bottom());
}
static bool is_dumped_interned_string(oop obj);
static size_t source_obj_to_buffered_offset(oop src_obj);
static address source_obj_to_buffered_addr(oop src_obj);

View File

@ -53,7 +53,7 @@ class CDSHeapVerifier : public KlassClosure {
15889, // prime number
AnyObj::C_HEAP,
mtClassShared,
HeapShared::oop_hash> _table;
HeapShared::oop_address_hash> _table;
GrowableArray<const char**> _exclusions;
GrowableArray<oop> _shared_secret_accessors;

View File

@ -175,23 +175,39 @@ oop HeapShared::CachedOopInfo::orig_referrer() const {
return _orig_referrer.resolve();
}
unsigned HeapShared::oop_hash(oop const& p) {
// This is a simple hashing of the oop's address. This function is used
// while copying the oops into the AOT heap region. We don't want to
// have any side effects during the copying, so we avoid calling
// p->identity_hash() which can update the object header.
// Hash an oop by its address. Only valid while the object cannot move, hence
// the safepoint / no-safepoint-scope assert. Deliberately avoids
// p->identity_hash(), which would have the side effect of updating the
// object header.
unsigned HeapShared::oop_address_hash(oop const& p) {
assert(SafepointSynchronize::is_at_safepoint() ||
JavaThread::current()->is_in_no_safepoint_scope(), "sanity");
// Do not call p->identity_hash() as that will update the
// object header.
return primitive_hash(cast_from_oop<intptr_t>(p));
}
// Hash an OopHandle by the address of the oop it currently resolves to.
// Inherits the "object must not move" restriction of the underlying hash.
unsigned int HeapShared::oop_handle_hash_raw(const OopHandle& oh) {
return oop_hash(oh.resolve());
}
unsigned int HeapShared::oop_handle_hash(const OopHandle& oh) {
// About the hashcode in the cached objects:
// - If a source object has a hashcode, it must be copied into the cache.
// That's because some cached hashtables are laid out using this hashcode.
// - If a source object doesn't have a hashcode, we avoid computing it while
// copying the objects into the cache. This will allow the hashcode to be
// dynamically and randomly computed in each production run, which is generally
// desirable to make the hashcodes more random between runs.
// Hash function for ArchivedObjectCache keys. Uses address hashing while
// objects are being copied (no header side effects), then switches to
// identity_hash() once _use_identity_hash_for_archived_object_cache is set,
// so entries stay findable after GC may move objects. Returns 0 for a
// handle that resolves to null.
unsigned HeapShared::archived_object_cache_hash(OopHandle const& oh) {
oop o = oh.resolve();
if (o == nullptr) {
return 0;
}
if (!_use_identity_hash_for_archived_object_cache) {
// This is called while we are copying the objects. Don't call o->identity_hash()
// as that will update the object header.
return oop_address_hash(o);
} else {
// This is called after all objects are copied. It's OK to update
// the object's hashcode.
//
// This may be called after we have left the AOT dumping safepoint.
// Objects in archived_object_cache() may be moved by the GC, so we
// can't use the address of o for computing the hash.
return o->identity_hash();
}
}
@ -271,6 +287,12 @@ void HeapShared::prepare_for_archiving(TRAPS) {
HeapShared::ArchivedObjectCache* HeapShared::_archived_object_cache = nullptr;
// Controls the hashing method for the _archived_object_cache.
// Changes from false to true once, after all objects are copied,
// inside make_archived_object_cache_gc_safe().
// See archived_object_cache_hash() for more details.
bool HeapShared::_use_identity_hash_for_archived_object_cache = false;
bool HeapShared::is_archived_heap_in_use() {
if (HeapShared::is_loading()) {
if (HeapShared::is_loading_streaming_mode()) {
@ -384,9 +406,8 @@ void HeapShared::materialize_thread_object() {
}
}
void HeapShared::add_to_dumped_interned_strings(oop string) {
void HeapShared::archive_interned_string(oop string) {
assert(HeapShared::is_writing_mapping_mode(), "Only used by this mode");
AOTMappedHeapWriter::add_to_dumped_interned_strings(string);
bool success = archive_reachable_objects_from(1, _dump_time_special_subgraph, string);
assert(success, "shared strings array must not point to arrays or strings that are too large to archive");
}
@ -404,6 +425,24 @@ void HeapShared::finalize_initialization(FileMapInfo* static_mapinfo) {
}
}
// Rebuild _archived_object_cache keyed by identity hash instead of address
// hash, so lookups keep working after GC is allowed to move the cached
// objects. Called exactly once, after all objects have been copied.
void HeapShared::make_archived_object_cache_gc_safe() {
ArchivedObjectCache* new_cache = new (mtClass)ArchivedObjectCache(INITIAL_TABLE_SIZE, MAX_TABLE_SIZE);
// It's safe to change the behavior of the hash function now, because iterate_all()
// doesn't call the hash function.
// See archived_object_cache_hash() for more details.
assert(_use_identity_hash_for_archived_object_cache == false, "happens only once");
_use_identity_hash_for_archived_object_cache = true;
// Copy all CachedOopInfo into a new table using a different hashing algorithm
archived_object_cache()->iterate_all([&] (OopHandle oh, CachedOopInfo info) {
new_cache->put_when_absent(oh, info);
});
// Drop the old (address-hashed) table and install the identity-hashed one.
destroy_archived_object_cache();
_archived_object_cache = new_cache;
}
HeapShared::CachedOopInfo* HeapShared::get_cached_oop_info(oop obj) {
OopHandle oh(Universe::vm_global(), obj);
CachedOopInfo* result = _archived_object_cache->get(oh);
@ -417,14 +456,53 @@ bool HeapShared::has_been_archived(oop obj) {
}
int HeapShared::append_root(oop obj) {
assert(SafepointSynchronize::is_at_safepoint(), "sanity");
assert(CDSConfig::is_dumping_heap(), "dump-time only");
if (obj != nullptr) {
assert(has_been_archived(obj), "must be");
}
// No GC should happen since we aren't scanning _pending_roots.
assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");
assert(_pending_roots != nullptr, "sanity");
return _pending_roots->append(obj);
if (obj == nullptr) {
assert(_pending_roots->at(0) == nullptr, "root index 0 always maps to null");
return 0;
} else if (CDSConfig::is_dumping_aot_linked_classes()) {
// The AOT compiler may refer the same obj many times, so we
// should use the same index for this oop to avoid excessive entries
// in the roots array.
CachedOopInfo* obj_info = get_cached_oop_info(obj);
assert(obj_info != nullptr, "must be archived");
if (obj_info->root_index() > 0) {
return obj_info->root_index();
} else {
assert(obj_info->root_index() < 0, "must not be zero");
int i = _pending_roots->append(obj);
obj_info->set_root_index(i);
return i;
}
} else {
return _pending_roots->append(obj);
}
}
// Return the heap-root index previously assigned to obj by append_root(),
// or -1 (with a debug log of the reason) if obj is not a cached oop or was
// never appended as a root. Class mirrors are redirected to their scratch
// mirror first, since that is the instance that gets archived.
int HeapShared::get_root_index(oop obj) {
if (java_lang_Class::is_instance(obj)) {
obj = scratch_java_mirror(obj);
}
CachedOopInfo* obj_info = get_cached_oop_info(obj);
const char* error = nullptr;
if (obj_info == nullptr) {
error = "Not a cached oop";
} else if (obj_info->root_index() < 0) {
// root_index() < 0 means the oop was archived but never made a root.
error = "Not a cached oop root";
} else {
return obj_info->root_index();
}
// Failure path: log why the lookup failed before returning -1.
ResourceMark rm;
log_debug(aot, codecache, oops)("%s: " INTPTR_FORMAT " (%s)", error,
cast_from_oop<uintptr_t>(obj),
obj->klass()->external_name());
return -1;
}
oop HeapShared::get_root(int index, bool clear) {
@ -453,6 +531,13 @@ void HeapShared::finish_materialize_objects() {
}
void HeapShared::clear_root(int index) {
if (CDSConfig::is_using_aot_linked_classes()) {
// When AOT linked classes are in use, all roots will be in use all
// the time, there's no benefit for clearing the roots. Also, we
// can't clear the roots as they can be shared.
return;
}
assert(index >= 0, "sanity");
assert(CDSConfig::is_using_archive(), "must be");
if (is_archived_heap_in_use()) {
@ -600,9 +685,10 @@ objArrayOop HeapShared::scratch_resolved_references(ConstantPool* src) {
return (objArrayOop)_scratch_objects_table->get_oop(src);
}
void HeapShared::init_dumping() {
_scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
_pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
void HeapShared::init_dumping() {
_scratch_objects_table = new (mtClass)MetaspaceObjToOopHandleTable();
_pending_roots = new GrowableArrayCHeap<oop, mtClassShared>(500);
_pending_roots->append(nullptr); // root index 0 represents a null oop
}
void HeapShared::init_scratch_objects_for_basic_type_mirrors(TRAPS) {
@ -883,6 +969,11 @@ void HeapShared::write_heap(AOTMappedHeapInfo* mapped_heap_info, AOTStreamedHeap
ArchiveBuilder::OtherROAllocMark mark;
write_subgraph_info_table();
delete _pending_roots;
_pending_roots = nullptr;
make_archived_object_cache_gc_safe();
}
void HeapShared::scan_java_mirror(oop orig_mirror) {
@ -1911,6 +2002,11 @@ void HeapShared::verify_subgraph_from(oop orig_obj) {
void HeapShared::verify_reachable_objects_from(oop obj) {
_num_total_verifications ++;
if (java_lang_Class::is_instance(obj)) {
Klass* k = java_lang_Class::as_Klass(obj);
if (RegeneratedClasses::has_been_regenerated(k)) {
k = RegeneratedClasses::get_regenerated_object(k);
obj = k->java_mirror();
}
obj = scratch_java_mirror(obj);
assert(obj != nullptr, "must be");
}
@ -2264,12 +2360,22 @@ void HeapShared::archive_object_subgraphs(ArchivableStaticFieldInfo fields[],
#endif
}
bool HeapShared::is_dumped_interned_string(oop o) {
if (is_writing_mapping_mode()) {
return AOTMappedHeapWriter::is_dumped_interned_string(o);
} else {
return AOTStreamedHeapWriter::is_dumped_interned_string(o);
// Returns true iff obj is a java.lang.String whose exact oop is the canonical
// entry in the StringTable (i.e., it is the interned instance itself).
bool HeapShared::is_interned_string(oop obj) {
if (!java_lang_String::is_instance(obj)) {
return false;
}
ResourceMark rm;
int len = 0;
jchar* name = java_lang_String::as_unicode_string_or_null(obj, len);
if (name == nullptr) {
// Null is returned only on allocation failure.
fatal("Insufficient memory for dumping");
}
return StringTable::lookup(name, len) == obj;
}
// An interned string counts as "dumped" only if it has also been archived.
bool HeapShared::is_dumped_interned_string(oop o) {
return is_interned_string(o) && has_been_archived(o);
}
// These tables should be used only within the CDS safepoint, so

View File

@ -40,7 +40,6 @@
#include "utilities/hashTable.hpp"
#if INCLUDE_CDS_JAVA_HEAP
class DumpedInternedStrings;
class FileMapInfo;
class KlassSubGraphInfo;
class MetaspaceObjToOopHandleTable;
@ -176,7 +175,7 @@ public:
static void initialize_streaming() NOT_CDS_JAVA_HEAP_RETURN;
static void enable_gc() NOT_CDS_JAVA_HEAP_RETURN;
static void materialize_thread_object() NOT_CDS_JAVA_HEAP_RETURN;
static void add_to_dumped_interned_strings(oop string) NOT_CDS_JAVA_HEAP_RETURN;
static void archive_interned_string(oop string);
static void finalize_initialization(FileMapInfo* static_mapinfo) NOT_CDS_JAVA_HEAP_RETURN;
private:
@ -195,13 +194,8 @@ private:
static void print_stats();
public:
static void debug_trace();
static unsigned oop_hash(oop const& p);
static unsigned oop_handle_hash(OopHandle const& oh);
static unsigned oop_handle_hash_raw(OopHandle const& oh);
static unsigned oop_address_hash(oop const& p);
static bool oop_handle_equals(const OopHandle& a, const OopHandle& b);
static unsigned string_oop_hash(oop const& string) {
return java_lang_String::hash_code(string);
}
class CopyKlassSubGraphInfoToArchive;
@ -217,27 +211,37 @@ public:
// One or more fields in this object are pointing to MetaspaceObj
bool _has_native_pointers;
// >= 0 if this oop has been appended to the list of roots
int _root_index;
public:
CachedOopInfo(OopHandle orig_referrer, bool has_oop_pointers)
: _orig_referrer(orig_referrer),
_buffer_offset(0),
_has_oop_pointers(has_oop_pointers),
_has_native_pointers(false) {}
_has_native_pointers(false),
_root_index(-1) {}
oop orig_referrer() const;
void set_buffer_offset(size_t offset) { _buffer_offset = offset; }
size_t buffer_offset() const { return _buffer_offset; }
bool has_oop_pointers() const { return _has_oop_pointers; }
bool has_native_pointers() const { return _has_native_pointers; }
void set_has_native_pointers() { _has_native_pointers = true; }
int root_index() const { return _root_index; }
void set_root_index(int i) { _root_index = i; }
};
private:
static const int INITIAL_TABLE_SIZE = 15889; // prime number
static const int MAX_TABLE_SIZE = 1000000;
static bool _use_identity_hash_for_archived_object_cache;
static unsigned archived_object_cache_hash(OopHandle const& oh);
typedef ResizeableHashTable<OopHandle, CachedOopInfo,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::oop_handle_hash_raw,
HeapShared::archived_object_cache_hash,
HeapShared::oop_handle_equals> ArchivedObjectCache;
static ArchivedObjectCache* _archived_object_cache;
@ -297,7 +301,7 @@ private:
typedef ResizeableHashTable<oop, bool,
AnyObj::C_HEAP,
mtClassShared,
HeapShared::oop_hash> SeenObjectsTable;
HeapShared::oop_address_hash> SeenObjectsTable;
static SeenObjectsTable *_seen_objects_table;
@ -394,6 +398,7 @@ private:
delete _archived_object_cache;
_archived_object_cache = nullptr;
}
static void make_archived_object_cache_gc_safe();
static ArchivedObjectCache* archived_object_cache() {
return _archived_object_cache;
}
@ -406,6 +411,7 @@ private:
KlassSubGraphInfo* subgraph_info,
oop orig_obj);
static bool is_interned_string(oop obj);
static bool is_dumped_interned_string(oop o);
// Scratch objects for archiving Klass::java_mirror()
@ -437,6 +443,11 @@ private:
// Dump-time only. Returns the index of the root, which can be used at run time to read
// the root using get_root(index, ...).
static int append_root(oop obj);
// AOT-compile time only.
// Returns -1 if obj is not in the heap root set.
static int get_root_index(oop obj) NOT_CDS_JAVA_HEAP_RETURN_(-1);
static GrowableArrayCHeap<oop, mtClassShared>* pending_roots() { return _pending_roots; }
// Dump-time and runtime
@ -445,9 +456,7 @@ private:
// Run-time only
static void clear_root(int index);
static void get_segment_indexes(int index, int& segment_index, int& internal_index);
static void setup_test_class(const char* test_class_name) PRODUCT_RETURN;
#endif // INCLUDE_CDS_JAVA_HEAP

View File

@ -946,7 +946,7 @@ void StringTable::init_shared_table() {
// so we are all good.
// - If there's a reference to it, we will report an error inside HeapShared.cpp and
// dumping will fail.
HeapShared::add_to_dumped_interned_strings(string);
HeapShared::archive_interned_string(string);
}
n++;
return true;