From 8a5ed47f00d74d4eb0d2b8027fb92ff2f5c30163 Mon Sep 17 00:00:00 2001 From: Ioi Lam <iklam@openjdk.org> Date: Tue, 11 Mar 2025 06:49:58 +0000 Subject: [PATCH] 8350148: Native stack overflow when writing Java heap objects into AOT cache Reviewed-by: iveresov, matsaave --- src/hotspot/share/cds/heapShared.cpp | 94 ++++++++++++++++++---------- src/hotspot/share/cds/heapShared.hpp | 28 ++++++++- 2 files changed, 87 insertions(+), 35 deletions(-) diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp index 0dc1d255da7..abb2071813f 100644 --- a/src/hotspot/share/cds/heapShared.cpp +++ b/src/hotspot/share/cds/heapShared.cpp @@ -288,7 +288,7 @@ void HeapShared::clear_root(int index) { } } -bool HeapShared::archive_object(oop obj, KlassSubGraphInfo* subgraph_info) { +bool HeapShared::archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info) { assert(CDSConfig::is_dumping_heap(), "dump-time only"); assert(!obj->is_stackChunk(), "do not archive stack chunks"); @@ -304,7 +304,7 @@ bool HeapShared::archive_object(oop obj, KlassSubGraphInfo* subgraph_info) { } else { count_allocation(obj->size()); ArchiveHeapWriter::add_source_obj(obj); - CachedOopInfo info = make_cached_oop_info(obj); + CachedOopInfo info = make_cached_oop_info(obj, referrer); archived_object_cache()->put_when_absent(obj, info); archived_object_cache()->maybe_grow(); mark_native_pointers(obj); @@ -1350,33 +1350,39 @@ void HeapShared::clear_archived_roots_of(Klass* k) { } } -class WalkOopAndArchiveClosure: public BasicOopIterateClosure { +// Push all oops that are referenced by _referencing_obj onto the _stack. 
+class HeapShared::ReferentPusher: public BasicOopIterateClosure { + PendingOopStack* _stack; + GrowableArray<oop> _found_oop_fields; int _level; bool _record_klasses_only; KlassSubGraphInfo* _subgraph_info; oop _referencing_obj; - - // The following are for maintaining a stack for determining - // CachedOopInfo::_referrer - static WalkOopAndArchiveClosure* _current; - WalkOopAndArchiveClosure* _last; public: - WalkOopAndArchiveClosure(int level, + ReferentPusher(PendingOopStack* stack, + int level, bool record_klasses_only, KlassSubGraphInfo* subgraph_info, oop orig) : + _stack(stack), + _found_oop_fields(), _level(level), _record_klasses_only(record_klasses_only), _subgraph_info(subgraph_info), _referencing_obj(orig) { - _last = _current; - _current = this; } - ~WalkOopAndArchiveClosure() { - _current = _last; + void do_oop(narrowOop *p) { ReferentPusher::do_oop_work(p); } + void do_oop( oop *p) { ReferentPusher::do_oop_work(p); } + + ~ReferentPusher() { + while (_found_oop_fields.length() > 0) { + // This produces the exact same traversal order as the previous version + // of ReferentPusher that recurses on the C stack -- a depth-first search, + // walking the oop fields in _referencing_obj by ascending field offsets. 
+ oop obj = _found_oop_fields.pop(); + _stack->push(PendingOop(obj, _referencing_obj, _level + 1)); + } } - void do_oop(narrowOop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } - void do_oop( oop *p) { WalkOopAndArchiveClosure::do_oop_work(p); } protected: template <class T> void do_oop_work(T *p) { @@ -1396,20 +1402,15 @@ class WalkOopAndArchiveClosure: public BasicOopIterateClosure { } } - bool success = HeapShared::archive_reachable_objects_from( - _level + 1, _subgraph_info, obj); - assert(success, "VM should have exited with unarchivable objects for _level > 1"); + _found_oop_fields.push(obj); } } public: - static WalkOopAndArchiveClosure* current() { return _current; } oop referencing_obj() { return _referencing_obj; } KlassSubGraphInfo* subgraph_info() { return _subgraph_info; } }; -WalkOopAndArchiveClosure* WalkOopAndArchiveClosure::_current = nullptr; - // Checks if an oop has any non-null oop fields class PointsToOopsChecker : public BasicOopIterateClosure { bool _result; @@ -1425,9 +1426,7 @@ public: bool result() { return _result; } }; -HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj) { - WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current(); - oop referrer = (walker == nullptr) ? nullptr : walker->referencing_obj(); +HeapShared::CachedOopInfo HeapShared::make_cached_oop_info(oop obj, oop referrer) { PointsToOopsChecker points_to_oops_checker; obj->oop_iterate(&points_to_oops_checker); return CachedOopInfo(referrer, points_to_oops_checker.result()); @@ -1450,12 +1449,35 @@ void HeapShared::init_box_classes(TRAPS) { // (1) If orig_obj has not been archived yet, archive it. // (2) If orig_obj has not been seen yet (since start_recording_subgraph() was called), // trace all objects that are reachable from it, and make sure these objects are archived. -// (3) Record the klasses of all orig_obj and all reachable objects. 
+// (3) Record the klasses of all objects that are reachable from orig_obj (including those that +// were already archived when this function is called) bool HeapShared::archive_reachable_objects_from(int level, KlassSubGraphInfo* subgraph_info, oop orig_obj) { assert(orig_obj != nullptr, "must be"); + PendingOopStack stack; + stack.push(PendingOop(orig_obj, nullptr, level)); + while (stack.length() > 0) { + PendingOop po = stack.pop(); + _object_being_archived = po; + bool status = walk_one_object(&stack, po.level(), subgraph_info, po.obj(), po.referrer()); + _object_being_archived = PendingOop(); + + if (!status) { + // Don't archive a subgraph root that's too big. For archives static fields, that's OK + // as the Java code will take care of initializing this field dynamically. + assert(level == 1, "VM should have exited with unarchivable objects for _level > 1"); + return false; + } + } + + return true; +} + +bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info, + oop orig_obj, oop referrer) { + assert(orig_obj != nullptr, "must be"); if (!JavaClasses::is_supported_for_archiving(orig_obj)) { // This object has injected fields that cannot be supported easily, so we disallow them for now. // If you get an error here, you probably made a change in the JDK library that has added @@ -1524,7 +1546,7 @@ bool HeapShared::archive_reachable_objects_from(int level, bool record_klasses_only = already_archived; if (!already_archived) { ++_num_new_archived_objs; - if (!archive_object(orig_obj, subgraph_info)) { + if (!archive_object(orig_obj, referrer, subgraph_info)) { // Skip archiving the sub-graph referenced from the current entry field. 
ResourceMark rm; log_error(cds, heap)( @@ -1547,8 +1569,13 @@ bool HeapShared::archive_reachable_objects_from(int level, Klass *orig_k = orig_obj->klass(); subgraph_info->add_subgraph_object_klass(orig_k); - WalkOopAndArchiveClosure walker(level, record_klasses_only, subgraph_info, orig_obj); - orig_obj->oop_iterate(&walker); + { + // Find all the oops that are referenced by orig_obj, push them onto the stack + // so we can work on them next. + ResourceMark rm; + ReferentPusher pusher(stack, level, record_klasses_only, subgraph_info, orig_obj); + orig_obj->oop_iterate(&pusher); + } if (CDSConfig::is_initing_classes_at_dump_time()) { // The enum klasses are archived with aot-initialized mirror. @@ -1573,8 +1600,7 @@ bool HeapShared::archive_reachable_objects_from(int level, // - No java.lang.Class instance (java mirror) can be included inside // an archived sub-graph. Mirror can only be the sub-graph entry object. // -// The Java heap object sub-graph archiving process (see -// WalkOopAndArchiveClosure): +// The Java heap object sub-graph archiving process (see ReferentPusher): // // 1) Java object sub-graph archiving starts from a given static field // within a Class instance (java mirror). 
If the static field is a @@ -1722,6 +1748,7 @@ void HeapShared::check_special_subgraph_classes() { } HeapShared::SeenObjectsTable* HeapShared::_seen_objects_table = nullptr; +HeapShared::PendingOop HeapShared::_object_being_archived; int HeapShared::_num_new_walked_objs; int HeapShared::_num_new_archived_objs; int HeapShared::_num_old_recorded_klasses; @@ -2043,10 +2070,11 @@ bool HeapShared::is_dumped_interned_string(oop o) { void HeapShared::debug_trace() { ResourceMark rm; - WalkOopAndArchiveClosure* walker = WalkOopAndArchiveClosure::current(); - if (walker != nullptr) { + oop referrer = _object_being_archived.referrer(); + if (referrer != nullptr) { LogStream ls(Log(cds, heap)::error()); - CDSHeapVerifier::trace_to_root(&ls, walker->referencing_obj()); + ls.print_cr("Reference trace"); + CDSHeapVerifier::trace_to_root(&ls, referrer); } } diff --git a/src/hotspot/share/cds/heapShared.hpp b/src/hotspot/share/cds/heapShared.hpp index 9ad09882410..04c9ae91381 100644 --- a/src/hotspot/share/cds/heapShared.hpp +++ b/src/hotspot/share/cds/heapShared.hpp @@ -234,7 +234,7 @@ private: static DumpTimeKlassSubGraphInfoTable* _dump_time_subgraph_info_table; static RunTimeKlassSubGraphInfoTable _run_time_subgraph_info_table; - static CachedOopInfo make_cached_oop_info(oop obj); + static CachedOopInfo make_cached_oop_info(oop obj, oop referrer); static ArchivedKlassSubGraphInfoRecord* archive_subgraph_info(KlassSubGraphInfo* info); static void archive_object_subgraphs(ArchivableStaticFieldInfo fields[], bool is_full_module_graph); @@ -314,7 +314,7 @@ private: static bool has_been_seen_during_subgraph_recording(oop obj); static void set_has_been_seen_during_subgraph_recording(oop obj); - static bool archive_object(oop obj, KlassSubGraphInfo* subgraph_info); + static bool archive_object(oop obj, oop referrer, KlassSubGraphInfo* subgraph_info); static void resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]); static void 
resolve_classes_for_subgraph_of(JavaThread* current, Klass* k); @@ -340,6 +340,30 @@ private: static void archive_strings(); static void archive_subgraphs(); + // PendingOop and PendingOopStack are used for recursively discovering all cacheable + // heap objects. The recursion is done using PendingOopStack so we won't overflow the + // C stack with deep reference chains. + class PendingOop { + oop _obj; + oop _referrer; + int _level; + + public: + PendingOop() : _obj(nullptr), _referrer(nullptr), _level(-1) {} + PendingOop(oop obj, oop referrer, int level) : _obj(obj), _referrer(referrer), _level(level) {} + + oop obj() const { return _obj; } + oop referrer() const { return _referrer; } + int level() const { return _level; } + }; + + class ReferentPusher; + using PendingOopStack = GrowableArrayCHeap<PendingOop, mtClassShared>; + + static PendingOop _object_being_archived; + static bool walk_one_object(PendingOopStack* stack, int level, KlassSubGraphInfo* subgraph_info, + oop orig_obj, oop referrer); + public: static void reset_archived_object_states(TRAPS); static void create_archived_object_cache() {