diff --git a/src/hotspot/share/cds/aotArtifactFinder.cpp b/src/hotspot/share/cds/aotArtifactFinder.cpp index 5f346e832a8..f85f1e46520 100644 --- a/src/hotspot/share/cds/aotArtifactFinder.cpp +++ b/src/hotspot/share/cds/aotArtifactFinder.cpp @@ -145,7 +145,7 @@ void AOTArtifactFinder::find_artifacts() { #if INCLUDE_CDS_JAVA_HEAP // Keep scanning until we discover no more classes that need to be AOT-initialized. - if (CDSConfig::is_initing_classes_at_dump_time()) { + if (CDSConfig::is_dumping_aot_linked_classes()) { while (_pending_aot_inited_classes->length() > 0) { InstanceKlass* ik = _pending_aot_inited_classes->pop(); HeapShared::copy_and_rescan_aot_inited_mirror(ik); @@ -188,7 +188,7 @@ void AOTArtifactFinder::end_scanning_for_oops() { } void AOTArtifactFinder::add_aot_inited_class(InstanceKlass* ik) { - if (CDSConfig::is_initing_classes_at_dump_time()) { + if (CDSConfig::is_dumping_aot_linked_classes()) { if (RegeneratedClasses::is_regenerated_object(ik)) { precond(RegeneratedClasses::get_original_object(ik)->is_initialized()); } else { @@ -258,7 +258,7 @@ void AOTArtifactFinder::add_cached_instance_class(InstanceKlass* ik) { return; } scan_oops_in_instance_class(ik); - if (ik->is_hidden() && CDSConfig::is_initing_classes_at_dump_time()) { + if (ik->is_hidden() && CDSConfig::is_dumping_aot_linked_classes()) { bool succeed = AOTClassLinker::try_add_candidate(ik); guarantee(succeed, "All cached hidden classes must be aot-linkable"); add_aot_inited_class(ik); diff --git a/src/hotspot/share/cds/aotClassInitializer.cpp b/src/hotspot/share/cds/aotClassInitializer.cpp index 00db747622f..06fc3af6f30 100644 --- a/src/hotspot/share/cds/aotClassInitializer.cpp +++ b/src/hotspot/share/cds/aotClassInitializer.cpp @@ -40,7 +40,7 @@ DEBUG_ONLY(InstanceKlass* _aot_init_class = nullptr;) bool AOTClassInitializer::can_archive_initialized_mirror(InstanceKlass* ik) { assert(!ArchiveBuilder::is_active() || !ArchiveBuilder::current()->is_in_buffer_space(ik), "must be source klass"); - if (!CDSConfig::is_initing_classes_at_dump_time()) { + if (!CDSConfig::is_dumping_aot_linked_classes()) { return false; } @@ -64,7 +64,7 @@ bool AOTClassInitializer::can_archive_initialized_mirror(InstanceKlass* ik) { // Automatic selection for aot-inited classes // ========================================== // - // When CDSConfig::is_initing_classes_at_dump_time is enabled, + // When CDSConfig::is_dumping_aot_linked_classes is enabled, // AOTArtifactFinder::find_artifacts() finds the classes of all // heap objects that are reachable from HeapShared::_run_time_special_subgraph, // and marks these classes as aot-inited. This preserves the initialized @@ -310,7 +310,7 @@ void AOTClassInitializer::init_test_class(TRAPS) { // // -XX:AOTInitTestClass is NOT a general mechanism for including user-defined objects into // the AOT cache. Therefore, this option is NOT available in product JVM.
- if (AOTInitTestClass != nullptr && CDSConfig::is_initing_classes_at_dump_time()) { + if (AOTInitTestClass != nullptr && CDSConfig::is_dumping_aot_linked_classes()) { log_info(aot)("Debug build only: force initialization of AOTInitTestClass %s", AOTInitTestClass); TempNewSymbol class_name = SymbolTable::new_symbol(AOTInitTestClass); Handle app_loader(THREAD, SystemDictionary::java_system_loader()); diff --git a/src/hotspot/share/cds/aotMetaspace.cpp b/src/hotspot/share/cds/aotMetaspace.cpp index 098d3baed58..3824a2be3e2 100644 --- a/src/hotspot/share/cds/aotMetaspace.cpp +++ b/src/hotspot/share/cds/aotMetaspace.cpp @@ -1141,7 +1141,7 @@ void AOTMetaspace::dump_static_archive_impl(StaticArchiveBuilder& builder, TRAPS AOTReferenceObjSupport::initialize(CHECK); AOTReferenceObjSupport::stabilize_cached_reference_objects(CHECK); - if (CDSConfig::is_initing_classes_at_dump_time()) { + if (CDSConfig::is_dumping_aot_linked_classes()) { // java.lang.Class::reflectionFactory cannot be archived yet. We set this field // to null, and it will be initialized again at runtime. log_debug(aot)("Resetting Class::reflectionFactory"); diff --git a/src/hotspot/share/cds/cdsConfig.cpp b/src/hotspot/share/cds/cdsConfig.cpp index 86533e212d8..5f6b568dd6e 100644 --- a/src/hotspot/share/cds/cdsConfig.cpp +++ b/src/hotspot/share/cds/cdsConfig.cpp @@ -1026,23 +1026,19 @@ void CDSConfig::set_has_aot_linked_classes(bool has_aot_linked_classes) { _has_aot_linked_classes |= has_aot_linked_classes; } -bool CDSConfig::is_initing_classes_at_dump_time() { - return is_dumping_heap() && is_dumping_aot_linked_classes(); -} - bool CDSConfig::is_dumping_invokedynamic() { // Requires is_dumping_aot_linked_classes(). Otherwise the classes of some archived heap // objects used by the archived indy callsites may be replaced at runtime. return AOTInvokeDynamicLinking && is_dumping_aot_linked_classes() && is_dumping_heap(); } -// When we are dumping aot-linked classes and we are able to write archived heap objects, we automatically -// enable the archiving of MethodHandles. This will in turn enable the archiving of MethodTypes and hidden +// When we are dumping aot-linked classes, we automatically enable the archiving of MethodHandles. +// This will in turn enable the archiving of MethodTypes and hidden // classes that are used in the implementation of MethodHandles. // Archived MethodHandles are required for higher-level optimizations such as AOT resolution of invokedynamic // and dynamic proxies.
bool CDSConfig::is_dumping_method_handles() { - return is_initing_classes_at_dump_time(); + return is_dumping_aot_linked_classes(); } #endif // INCLUDE_CDS_JAVA_HEAP diff --git a/src/hotspot/share/cds/cdsConfig.hpp b/src/hotspot/share/cds/cdsConfig.hpp index d199e97eefd..202904e8231 100644 --- a/src/hotspot/share/cds/cdsConfig.hpp +++ b/src/hotspot/share/cds/cdsConfig.hpp @@ -187,7 +187,6 @@ public: static void disable_heap_dumping() { CDS_ONLY(_disable_heap_dumping = true); } static bool is_dumping_heap() NOT_CDS_JAVA_HEAP_RETURN_(false); static bool is_loading_heap() NOT_CDS_JAVA_HEAP_RETURN_(false); - static bool is_initing_classes_at_dump_time() NOT_CDS_JAVA_HEAP_RETURN_(false); static bool is_dumping_invokedynamic() NOT_CDS_JAVA_HEAP_RETURN_(false); static bool is_dumping_method_handles() NOT_CDS_JAVA_HEAP_RETURN_(false); diff --git a/src/hotspot/share/cds/cdsEnumKlass.cpp b/src/hotspot/share/cds/cdsEnumKlass.cpp index 1bf6ba4eba8..177d1d6e3ad 100644 --- a/src/hotspot/share/cds/cdsEnumKlass.cpp +++ b/src/hotspot/share/cds/cdsEnumKlass.cpp @@ -40,7 +40,7 @@ bool CDSEnumKlass::is_enum_obj(oop orig_obj) { } // !!! This is legacy support for enum classes before JEP 483. This file is not used when -// !!! CDSConfig::is_initing_classes_at_dump_time()==true. +// !!! CDSConfig::is_dumping_aot_linked_classes()==true. // // Java Enum classes have synthetic methods that look like this // enum MyEnum {FOO, BAR} @@ -63,7 +63,7 @@ bool CDSEnumKlass::is_enum_obj(oop orig_obj) { void CDSEnumKlass::handle_enum_obj(int level, KlassSubGraphInfo* subgraph_info, oop orig_obj) { - assert(!CDSConfig::is_initing_classes_at_dump_time(), "only for legacy support of enums"); + assert(!CDSConfig::is_dumping_aot_linked_classes(), "only for legacy support of enums"); assert(level > 1, "must never be called at the first (outermost) level"); assert(is_enum_obj(orig_obj), "must be"); diff --git a/src/hotspot/share/cds/cdsEnumKlass.hpp b/src/hotspot/share/cds/cdsEnumKlass.hpp index e6019ff705e..a4829368430 100644 --- a/src/hotspot/share/cds/cdsEnumKlass.hpp +++ b/src/hotspot/share/cds/cdsEnumKlass.hpp @@ -35,7 +35,7 @@ class JavaFieldStream; class KlassSubGraphInfo; // This is legacy support for enum classes before JEP 483. This code is not needed when -// CDSConfig::is_initing_classes_at_dump_time()==true. +// CDSConfig::is_dumping_aot_linked_classes()==true. 
class CDSEnumKlass: AllStatic { public: static bool is_enum_obj(oop orig_obj); diff --git a/src/hotspot/share/cds/cdsHeapVerifier.cpp b/src/hotspot/share/cds/cdsHeapVerifier.cpp index 65063b4b005..3ed0dce1f66 100644 --- a/src/hotspot/share/cds/cdsHeapVerifier.cpp +++ b/src/hotspot/share/cds/cdsHeapVerifier.cpp @@ -156,7 +156,7 @@ CDSHeapVerifier::CDSHeapVerifier() : _archived_objs(0), _problems(0) # undef ADD_EXCL - if (CDSConfig::is_initing_classes_at_dump_time()) { + if (CDSConfig::is_dumping_aot_linked_classes()) { add_shared_secret_accessors(); } ClassLoaderDataGraph::classes_do(this); diff --git a/src/hotspot/share/cds/finalImageRecipes.cpp b/src/hotspot/share/cds/finalImageRecipes.cpp index bf8a760904c..8ba4514dfed 100644 --- a/src/hotspot/share/cds/finalImageRecipes.cpp +++ b/src/hotspot/share/cds/finalImageRecipes.cpp @@ -206,6 +206,8 @@ void FinalImageRecipes::load_all_classes(TRAPS) { if (ik->has_aot_safe_initializer() && (flags & WAS_INITED) != 0) { assert(ik->class_loader() == nullptr, "supported only for boot classes for now"); + ResourceMark rm(THREAD); + log_info(aot, init)("Initializing %s", ik->external_name()); ik->initialize(CHECK); } } diff --git a/src/hotspot/share/cds/heapShared.cpp b/src/hotspot/share/cds/heapShared.cpp index f2382289c7d..fdc335f3799 100644 --- a/src/hotspot/share/cds/heapShared.cpp +++ b/src/hotspot/share/cds/heapShared.cpp @@ -209,8 +209,14 @@ static bool is_subgraph_root_class_of(ArchivableStaticFieldInfo fields[], Instan } bool HeapShared::is_subgraph_root_class(InstanceKlass* ik) { - return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) || - is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik); + assert(CDSConfig::is_dumping_heap(), "dump-time only"); + if (!CDSConfig::is_dumping_aot_linked_classes()) { + // Legacy CDS archive support (to be deprecated) + return is_subgraph_root_class_of(archive_subgraph_entry_fields, ik) || + is_subgraph_root_class_of(fmg_archive_subgraph_entry_fields, ik); + } else { + return false; + } } oop HeapShared::CachedOopInfo::orig_referrer() const { @@ -934,12 +940,16 @@ void HeapShared::scan_java_class(Klass* orig_k) { void HeapShared::archive_subgraphs() { assert(CDSConfig::is_dumping_heap(), "must be"); - archive_object_subgraphs(archive_subgraph_entry_fields, - false /* is_full_module_graph */); + if (!CDSConfig::is_dumping_aot_linked_classes()) { + archive_object_subgraphs(archive_subgraph_entry_fields, + false /* is_full_module_graph */); + if (CDSConfig::is_dumping_full_module_graph()) { + archive_object_subgraphs(fmg_archive_subgraph_entry_fields, + true /* is_full_module_graph */); + } + } if (CDSConfig::is_dumping_full_module_graph()) { - archive_object_subgraphs(fmg_archive_subgraph_entry_fields, - true /* is_full_module_graph */); Modules::verify_archived_modules(); } } @@ -1295,8 +1305,10 @@ void HeapShared::resolve_classes(JavaThread* current) { if (!is_archived_heap_in_use()) { return; // nothing to do } - resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields); - resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields); + if (!CDSConfig::is_using_aot_linked_classes()) { + resolve_classes_for_subgraphs(current, archive_subgraph_entry_fields); + resolve_classes_for_subgraphs(current, fmg_archive_subgraph_entry_fields); + } } void HeapShared::resolve_classes_for_subgraphs(JavaThread* current, ArchivableStaticFieldInfo fields[]) { @@ -1734,13 +1746,13 @@ bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGrap } } - if 
(CDSConfig::is_initing_classes_at_dump_time()) { + if (CDSConfig::is_dumping_aot_linked_classes()) { if (java_lang_Class::is_instance(orig_obj)) { orig_obj = scratch_java_mirror(orig_obj); assert(orig_obj != nullptr, "must be archived"); } } else if (java_lang_Class::is_instance(orig_obj) && subgraph_info != _dump_time_special_subgraph) { - // Without CDSConfig::is_initing_classes_at_dump_time(), we only allow archived objects to + // Without CDSConfig::is_dumping_aot_linked_classes(), we only allow archived objects to // point to the mirrors of (1) j.l.Object, (2) primitive classes, and (3) box classes. These are initialized // very early by HeapShared::init_box_classes(). if (orig_obj == vmClasses::Object_klass()->java_mirror() @@ -1808,9 +1820,9 @@ bool HeapShared::walk_one_object(PendingOopStack* stack, int level, KlassSubGrap orig_obj->oop_iterate(&pusher); } - if (CDSConfig::is_initing_classes_at_dump_time()) { - // The classes of all archived enum instances have been marked as aot-init, - // so there's nothing else to be done in the production run. + if (CDSConfig::is_dumping_aot_linked_classes()) { + // The enum klasses are archived with aot-initialized mirrors. + // See AOTClassInitializer::can_archive_initialized_mirror(). } else { // This is legacy support for enum classes before JEP 483 -- we cannot rerun // the enum's <clinit> in the production run, so special handling is needed. @@ -1949,7 +1961,7 @@ void HeapShared::verify_reachable_objects_from(oop obj) { #endif void HeapShared::check_special_subgraph_classes() { - if (CDSConfig::is_initing_classes_at_dump_time()) { + if (CDSConfig::is_dumping_aot_linked_classes()) { // We can have aot-initialized classes (such as Enums) that can reference objects // of arbitrary types. Currently, we trust the JEP 483 implementation to only // aot-initialize classes that are "safe".
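The heapShared.cpp hunks above and below all apply one gating rule: the legacy subgraph-root machinery (subgraph entry fields, CDSEnumKlass) runs only when aot-linked classes are not being dumped, because JEP 483 aot-initialized mirrors subsume it. Below is a standalone paraphrase of that rule, with the configuration queries reduced to plain booleans; the real checks live in CDSConfig, and the helper name here is invented for illustration only.

    #include <cstdio>

    // Illustrative paraphrase of the gating added across heapShared.cpp.
    // Subgraph roots are registered, archived and resolved only in legacy mode;
    // with aot-linked classes, aot-initialized mirrors make them unnecessary.
    static bool use_legacy_subgraph_roots(bool dumping_heap, bool dumping_aot_linked_classes) {
      return dumping_heap && !dumping_aot_linked_classes;
    }

    int main() {
      std::printf("legacy CDS: %d\n", use_legacy_subgraph_roots(true, false)); // 1: subgraph roots used
      std::printf("JEP 483:    %d\n", use_legacy_subgraph_roots(true, true));  // 0: skipped entirely
      return 0;
    }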
@@ -2136,9 +2148,11 @@ void HeapShared::init_subgraph_entry_fields(ArchivableStaticFieldInfo fields[], void HeapShared::init_subgraph_entry_fields(TRAPS) { assert(CDSConfig::is_dumping_heap(), "must be"); _dump_time_subgraph_info_table = new (mtClass)DumpTimeKlassSubGraphInfoTable(); - init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK); - if (CDSConfig::is_dumping_full_module_graph()) { - init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK); + if (!CDSConfig::is_dumping_aot_linked_classes()) { + init_subgraph_entry_fields(archive_subgraph_entry_fields, CHECK); + if (CDSConfig::is_dumping_full_module_graph()) { + init_subgraph_entry_fields(fmg_archive_subgraph_entry_fields, CHECK); + } } } diff --git a/src/hotspot/share/classfile/vmClassMacros.hpp b/src/hotspot/share/classfile/vmClassMacros.hpp index 04f0aaaaa44..71d6b9f22b2 100644 --- a/src/hotspot/share/classfile/vmClassMacros.hpp +++ b/src/hotspot/share/classfile/vmClassMacros.hpp @@ -190,6 +190,9 @@ /* GC support */ \ do_klass(FillerObject_klass, jdk_internal_vm_FillerObject ) \ \ + /* Scoped Values */ \ + do_klass(ScopedValue_Carrier_klass, java_lang_ScopedValue_Carrier ) \ + \ /*end*/ #endif // SHARE_CLASSFILE_VMCLASSMACROS_HPP diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp index 245a0ab20e9..3cec188eec1 100644 --- a/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp +++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupStat.cpp @@ -94,13 +94,17 @@ static double strdedup_elapsed_param_ms(Tickspan t) { void StringDedup::Stat::log_summary(const Stat* last_stat, const Stat* total_stat) { log_info(stringdedup)( "Concurrent String Deduplication " - "%zu/" STRDEDUP_BYTES_FORMAT_NS " (new), " + "%zu (inspected), " + "%zu/" STRDEDUP_BYTES_FORMAT_NS " (new unknown), " "%zu/" STRDEDUP_BYTES_FORMAT_NS " (deduped), " - "avg " STRDEDUP_PERCENT_FORMAT_NS ", " + "total avg deduped/new unknown bytes " STRDEDUP_PERCENT_FORMAT_NS ", " + STRDEDUP_BYTES_FORMAT_NS " (total deduped), " STRDEDUP_BYTES_FORMAT_NS " (total new unknown), " STRDEDUP_ELAPSED_FORMAT_MS " of " STRDEDUP_ELAPSED_FORMAT_MS, + last_stat->_inspected, last_stat->_new, STRDEDUP_BYTES_PARAM(last_stat->_new_bytes), last_stat->_deduped, STRDEDUP_BYTES_PARAM(last_stat->_deduped_bytes), percent_of(total_stat->_deduped_bytes, total_stat->_new_bytes), + STRDEDUP_BYTES_PARAM(total_stat->_deduped_bytes), STRDEDUP_BYTES_PARAM(total_stat->_new_bytes), strdedup_elapsed_param_ms(last_stat->_process_elapsed), strdedup_elapsed_param_ms(last_stat->_active_elapsed)); } @@ -210,14 +214,14 @@ void StringDedup::Stat::log_statistics() const { double deduped_bytes_percent = percent_of(_deduped_bytes, _new_bytes); double replaced_percent = percent_of(_replaced, _new); double deleted_percent = percent_of(_deleted, _new); - log_debug(stringdedup)(" Inspected: %12zu", _inspected); - log_debug(stringdedup)(" Known: %12zu(%5.1f%%)", _known, known_percent); - log_debug(stringdedup)(" Shared: %12zu(%5.1f%%)", _known_shared, known_shared_percent); - log_debug(stringdedup)(" New: %12zu(%5.1f%%)" STRDEDUP_BYTES_FORMAT, + log_debug(stringdedup)(" Inspected: %12zu", _inspected); + log_debug(stringdedup)(" Known: %12zu(%5.1f%%)", _known, known_percent); + log_debug(stringdedup)(" Shared: %12zu(%5.1f%%)", _known_shared, known_shared_percent); + log_debug(stringdedup)(" New unknown: %12zu(%5.1f%%)" STRDEDUP_BYTES_FORMAT, _new, new_percent, STRDEDUP_BYTES_PARAM(_new_bytes)); -
log_debug(stringdedup)(" Replaced: %12zu(%5.1f%%)", _replaced, replaced_percent); - log_debug(stringdedup)(" Deleted: %12zu(%5.1f%%)", _deleted, deleted_percent); - log_debug(stringdedup)(" Deduplicated: %12zu(%5.1f%%)" STRDEDUP_BYTES_FORMAT "(%5.1f%%)", + log_debug(stringdedup)(" Replaced: %12zu(%5.1f%%)", _replaced, replaced_percent); + log_debug(stringdedup)(" Deleted: %12zu(%5.1f%%)", _deleted, deleted_percent); + log_debug(stringdedup)(" Deduplicated: %12zu(%5.1f%%)" STRDEDUP_BYTES_FORMAT "(%5.1f%%)", _deduped, deduped_percent, STRDEDUP_BYTES_PARAM(_deduped_bytes), deduped_bytes_percent); log_debug(stringdedup)(" Skipped: %zu (dead), %zu (incomplete), %zu (shared)", _skipped_dead, _skipped_incomplete, _skipped_shared); diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp index 1f257560bcb..8bf068df0a8 100644 --- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp +++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahOldHeuristics.cpp @@ -335,7 +335,13 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { size_t garbage = region->garbage(); size_t live_bytes = region->get_live_data_bytes(); - live_data += live_bytes; + if (!region->was_promoted_in_place()) { + // As currently implemented, region->get_live_data_bytes() represents bytes marked by concurrent old marking. + // Objects promoted into the region during concurrent marking lie above TAMS and are therefore not included + // in the live data at the start of old marking. + live_data += live_bytes; + } + // else, regions that were promoted in place had 0 old live data at mark start if (region->is_regular() || region->is_regular_pinned()) { // Only place regular or pinned regions with live data into the candidate set. @@ -374,7 +380,6 @@ void ShenandoahOldHeuristics::prepare_for_old_collections() { } } - // TODO: subtract from live_data bytes promoted during concurrent GC. _old_generation->set_live_bytes_at_last_mark(live_data); // Unlike young, we are more interested in efficiently packing OLD-gen than in reclaiming garbage first. We sort by live-data.
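The accounting change above can be summarized with a small standalone sketch. It is illustrative only: ShenandoahHeapRegion is reduced to the two fields the patch touches, and the function mirrors the new guard in prepare_for_old_collections() rather than any existing helper.

    #include <cstddef>
    #include <vector>

    struct RegionModel {
      std::size_t live_bytes;   // bytes below TAMS, counted by concurrent old marking
      bool promoted_in_place;   // set when the region was promoted during the old cycle
    };

    // Live data recorded at the end of old marking must exclude regions promoted
    // in place: their live bytes sit above TAMS and were never marked as old.
    static std::size_t live_at_last_old_mark(const std::vector<RegionModel>& regions) {
      std::size_t live = 0;
      for (const RegionModel& r : regions) {
        if (!r.promoted_in_place) {
          live += r.live_bytes;
        }
        // else: the region had 0 old live data at mark start, so it contributes nothing
      }
      return live;
    }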
diff --git a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp index de45877994c..c9b956f9c2f 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahGenerationalEvacuationTask.cpp @@ -223,7 +223,6 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion // We do not need to scan above TAMS because restored top equals tams assert(obj_addr == tams, "Expect loop to terminate when obj_addr equals tams"); - { ShenandoahHeapLocker locker(_heap->lock()); @@ -251,6 +250,7 @@ void ShenandoahGenerationalEvacuationTask::promote_in_place(ShenandoahHeapRegion // Transfer this region from young to old, increasing promoted_reserve if available space exceeds plab_min_size() _heap->free_set()->add_promoted_in_place_region_to_old_collector(region); region->set_affiliation(OLD_GENERATION); + region->set_promoted_in_place(); } } @@ -289,6 +289,7 @@ void ShenandoahGenerationalEvacuationTask::promote_humongous(ShenandoahHeapRegio r->index(), p2i(r->bottom()), p2i(r->top())); // We mark the entire humongous object's range as dirty after loop terminates, so no need to dirty the range here r->set_affiliation(OLD_GENERATION); + r->set_promoted_in_place(); } ShenandoahFreeSet* freeset = _heap->free_set(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp index 2ed5614c698..cf0dc5476d0 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp @@ -262,6 +262,7 @@ private: HeapWord* volatile _update_watermark; uint _age; + bool _promoted_in_place; CENSUS_NOISE(uint _youth;) // tracks epochs of retrograde ageing (rejuvenation) ShenandoahSharedFlag _recycling; // Used to indicate that the region is being recycled; see try_recycle*(). @@ -354,6 +355,15 @@ public: inline void save_top_before_promote(); inline HeapWord* get_top_before_promote() const { return _top_before_promoted; } + + inline void set_promoted_in_place() { + _promoted_in_place = true; + } + + // Returns true iff this region was promoted in place subsequent to the most recent start of concurrent old marking. + inline bool was_promoted_in_place() const { + return _promoted_in_place; + } inline void restore_top_before_promote(); inline size_t garbage_before_padded_for_promote() const; @@ -379,7 +389,13 @@ public: inline void increase_live_data_gc_words(size_t s); inline bool has_live() const; + + // Represents the number of live bytes identified by the most recent marking effort. Does not include the bytes + // above TAMS. inline size_t get_live_data_bytes() const; + + // Represents the number of live words identified by the most recent marking effort. Does not include the words + // above TAMS.
inline size_t get_live_data_words() const; inline size_t garbage() const; diff --git a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp index 69673eb7a60..b9304ee9daa 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp @@ -152,6 +152,7 @@ inline void ShenandoahHeapRegion::internal_increase_live_data(size_t s) { inline void ShenandoahHeapRegion::clear_live_data() { AtomicAccess::store(&_live_data, (size_t)0); + _promoted_in_place = false; } inline size_t ShenandoahHeapRegion::get_live_data_words() const { diff --git a/src/hotspot/share/jfr/metadata/metadata.xml b/src/hotspot/share/jfr/metadata/metadata.xml index 18a74454eb6..2b082165005 100644 --- a/src/hotspot/share/jfr/metadata/metadata.xml +++ b/src/hotspot/share/jfr/metadata/metadata.xml @@ -1289,8 +1289,8 @@ - - + + diff --git a/src/hotspot/share/opto/addnode.cpp b/src/hotspot/share/opto/addnode.cpp index 6075317d86e..21770f3a0a4 100644 --- a/src/hotspot/share/opto/addnode.cpp +++ b/src/hotspot/share/opto/addnode.cpp @@ -31,8 +31,8 @@ #include "opto/movenode.hpp" #include "opto/mulnode.hpp" #include "opto/phaseX.hpp" +#include "opto/rangeinference.hpp" #include "opto/subnode.hpp" -#include "opto/utilities/xor.hpp" #include "runtime/stubRoutines.hpp" // Portions of code courtesy of Clifford Click @@ -1011,35 +1011,8 @@ Node* OrINode::Ideal(PhaseGVN* phase, bool can_reshape) { // the logical operations the ring's ADD is really a logical OR function. // This also type-checks the inputs for sanity. Guaranteed never to // be passed a TOP or BOTTOM type, these are filtered out by pre-check. -const Type *OrINode::add_ring( const Type *t0, const Type *t1 ) const { - const TypeInt *r0 = t0->is_int(); // Handy access - const TypeInt *r1 = t1->is_int(); - - // If both args are bool, can figure out better types - if ( r0 == TypeInt::BOOL ) { - if ( r1 == TypeInt::ONE) { - return TypeInt::ONE; - } else if ( r1 == TypeInt::BOOL ) { - return TypeInt::BOOL; - } - } else if ( r0 == TypeInt::ONE ) { - if ( r1 == TypeInt::BOOL ) { - return TypeInt::ONE; - } - } - - // If either input is all ones, the output is all ones. - // x | ~0 == ~0 <==> x | -1 == -1 - if (r0 == TypeInt::MINUS_1 || r1 == TypeInt::MINUS_1) { - return TypeInt::MINUS_1; - } - - // If either input is not a constant, just return all integers. - if( !r0->is_con() || !r1->is_con() ) - return TypeInt::INT; // Any integer, but still no symbols. - - // Otherwise just OR them bits. - return TypeInt::make( r0->get_con() | r1->get_con() ); +const Type* OrINode::add_ring(const Type* t1, const Type* t2) const { + return RangeInference::infer_or(t1->is_int(), t2->is_int()); } //============================================================================= @@ -1087,22 +1060,8 @@ Node* OrLNode::Ideal(PhaseGVN* phase, bool can_reshape) { } //------------------------------add_ring--------------------------------------- -const Type *OrLNode::add_ring( const Type *t0, const Type *t1 ) const { - const TypeLong *r0 = t0->is_long(); // Handy access - const TypeLong *r1 = t1->is_long(); - - // If either input is all ones, the output is all ones. - // x | ~0 == ~0 <==> x | -1 == -1 - if (r0 == TypeLong::MINUS_1 || r1 == TypeLong::MINUS_1) { - return TypeLong::MINUS_1; - } - - // If either input is not a constant, just return all integers. 
- if( !r0->is_con() || !r1->is_con() ) - return TypeLong::LONG; // Any integer, but still no symbols. - - // Otherwise just OR them bits. - return TypeLong::make( r0->get_con() | r1->get_con() ); +const Type* OrLNode::add_ring(const Type* t1, const Type* t2) const { + return RangeInference::infer_or(t1->is_long(), t2->is_long()); } //---------------------------Helper ------------------------------------------- @@ -1189,46 +1148,14 @@ const Type* XorINode::Value(PhaseGVN* phase) const { // the logical operations the ring's ADD is really a logical OR function. // This also type-checks the inputs for sanity. Guaranteed never to // be passed a TOP or BOTTOM type, these are filtered out by pre-check. -const Type *XorINode::add_ring( const Type *t0, const Type *t1 ) const { - const TypeInt *r0 = t0->is_int(); // Handy access - const TypeInt *r1 = t1->is_int(); - - if (r0->is_con() && r1->is_con()) { - // compute constant result - return TypeInt::make(r0->get_con() ^ r1->get_con()); - } - - // At least one of the arguments is not constant - - if (r0->_lo >= 0 && r1->_lo >= 0) { - // Combine [r0->_lo, r0->_hi] ^ [r0->_lo, r1->_hi] -> [0, upper_bound] - jint upper_bound = xor_upper_bound_for_ranges(r0->_hi, r1->_hi); - return TypeInt::make(0, upper_bound, MAX2(r0->_widen, r1->_widen)); - } - - return TypeInt::INT; +const Type* XorINode::add_ring(const Type* t1, const Type* t2) const { + return RangeInference::infer_xor(t1->is_int(), t2->is_int()); } //============================================================================= //------------------------------add_ring--------------------------------------- -const Type *XorLNode::add_ring( const Type *t0, const Type *t1 ) const { - const TypeLong *r0 = t0->is_long(); // Handy access - const TypeLong *r1 = t1->is_long(); - - if (r0->is_con() && r1->is_con()) { - // compute constant result - return TypeLong::make(r0->get_con() ^ r1->get_con()); - } - - // At least one of the arguments is not constant - - if (r0->_lo >= 0 && r1->_lo >= 0) { - // Combine [r0->_lo, r0->_hi] ^ [r0->_lo, r1->_hi] -> [0, upper_bound] - julong upper_bound = xor_upper_bound_for_ranges(r0->_hi, r1->_hi); - return TypeLong::make(0, upper_bound, MAX2(r0->_widen, r1->_widen)); - } - - return TypeLong::LONG; +const Type* XorLNode::add_ring(const Type* t1, const Type* t2) const { + return RangeInference::infer_xor(t1->is_long(), t2->is_long()); } Node* XorLNode::Ideal(PhaseGVN* phase, bool can_reshape) { @@ -1401,6 +1328,10 @@ static ConstAddOperands as_add_with_constant(Node* n) { } Node* MaxNode::IdealI(PhaseGVN* phase, bool can_reshape) { + Node* n = AddNode::Ideal(phase, can_reshape); + if (n != nullptr) { + return n; + } int opcode = Opcode(); assert(opcode == Op_MinI || opcode == Op_MaxI, "Unexpected opcode"); // Try to transform the following pattern, in any of its four possible diff --git a/src/hotspot/share/opto/cfgnode.cpp b/src/hotspot/share/opto/cfgnode.cpp index 776a2d4c90b..ae5c4a682a9 100644 --- a/src/hotspot/share/opto/cfgnode.cpp +++ b/src/hotspot/share/opto/cfgnode.cpp @@ -933,8 +933,8 @@ bool RegionNode::optimize_trichotomy(PhaseIterGVN* igvn) { } // At this point we know that region->in(idx1) and region->in(idx2) map to the same // value and control flow. Now search for ifs that feed into these region inputs.
- ProjNode* proj1 = region->in(idx1)->isa_Proj(); - ProjNode* proj2 = region->in(idx2)->isa_Proj(); + IfProjNode* proj1 = region->in(idx1)->isa_IfProj(); + IfProjNode* proj2 = region->in(idx2)->isa_IfProj(); if (proj1 == nullptr || proj1->outcnt() != 1 || proj2 == nullptr || proj2->outcnt() != 1) { return false; // No projection inputs with region as unique user found @@ -1547,18 +1547,9 @@ Node* PhiNode::Identity(PhaseGVN* phase) { Node* phi_reg = region(); for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) { Node* u = phi_reg->fast_out(i); - if (u->is_Phi() && u->as_Phi()->type() == Type::MEMORY && - u->adr_type() == TypePtr::BOTTOM && u->in(0) == phi_reg && - u->req() == phi_len) { - for (uint j = 1; j < phi_len; j++) { - if (in(j) != u->in(j)) { - u = nullptr; - break; - } - } - if (u != nullptr) { - return u; - } + assert(!u->is_Phi() || u->in(0) == phi_reg, "broken Phi/Region subgraph"); + if (u->is_Phi() && u->req() == phi_len && can_be_replaced_by(u->as_Phi())) { + return u; } } } @@ -2790,6 +2781,25 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) { progress = merge_through_phi(this, phase->is_IterGVN()); } + // PhiNode::Identity replaces a non-bottom memory phi with a bottom memory phi with the same inputs, if it exists. + // If the bottom memory phi's inputs are changed (so it can now replace the non-bottom memory phi) or if it's created + // only after the non-bottom memory phi is processed by igvn, PhiNode::Identity doesn't run and the transformation + // doesn't happen. + // Look for non-bottom Phis that should be transformed and enqueue them for igvn so that PhiNode::Identity executes for + // them. + if (can_reshape && type() == Type::MEMORY && adr_type() == TypePtr::BOTTOM) { + PhaseIterGVN* igvn = phase->is_IterGVN(); + uint phi_len = req(); + Node* phi_reg = region(); + for (DUIterator_Fast imax, i = phi_reg->fast_outs(imax); i < imax; i++) { + Node* u = phi_reg->fast_out(i); + assert(!u->is_Phi() || (u->in(0) == phi_reg && u->req() == phi_len), "broken Phi/Region subgraph"); + if (u->is_Phi() && u->as_Phi()->can_be_replaced_by(this)) { + igvn->_worklist.push(u); + } + } + } + return progress; // Return any progress } @@ -2839,6 +2849,11 @@ const TypeTuple* PhiNode::collect_types(PhaseGVN* phase) const { return TypeTuple::make(types.length(), flds); } +bool PhiNode::can_be_replaced_by(const PhiNode* other) const { + return type() == Type::MEMORY && other->type() == Type::MEMORY && adr_type() != TypePtr::BOTTOM && + other->adr_type() == TypePtr::BOTTOM && has_same_inputs_as(other); +} + Node* PhiNode::clone_through_phi(Node* root_phi, const Type* t, uint c, PhaseIterGVN* igvn) { Node_Stack stack(1); VectorSet visited; diff --git a/src/hotspot/share/opto/cfgnode.hpp b/src/hotspot/share/opto/cfgnode.hpp index 5f7f4790443..ca549af1554 100644 --- a/src/hotspot/share/opto/cfgnode.hpp +++ b/src/hotspot/share/opto/cfgnode.hpp @@ -274,6 +274,7 @@ public: #endif //ASSERT const TypeTuple* collect_types(PhaseGVN* phase) const; + bool can_be_replaced_by(const PhiNode* other) const; }; //------------------------------GotoNode--------------------------------------- @@ -342,15 +343,15 @@ class IfNode : public MultiBranchNode { // Helper methods for fold_compares bool cmpi_folds(PhaseIterGVN* igvn, bool fold_ne = false); bool is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn); - bool has_shared_region(ProjNode* proj, ProjNode*& success, ProjNode*& fail); - bool has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNode*& fail, PhaseIterGVN* igvn); - 
Node* merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn); + bool has_shared_region(IfProjNode* proj, IfProjNode*& success, IfProjNode*& fail) const; + bool has_only_uncommon_traps(IfProjNode* proj, IfProjNode*& success, IfProjNode*& fail, PhaseIterGVN* igvn) const; + Node* merge_uncommon_traps(IfProjNode* proj, IfProjNode* success, IfProjNode* fail, PhaseIterGVN* igvn); static void improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGVN* igvn); - bool is_cmp_with_loadrange(ProjNode* proj); - bool is_null_check(ProjNode* proj, PhaseIterGVN* igvn); - bool is_side_effect_free_test(ProjNode* proj, PhaseIterGVN* igvn); - void reroute_side_effect_free_unc(ProjNode* proj, ProjNode* dom_proj, PhaseIterGVN* igvn); - bool fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn); + bool is_cmp_with_loadrange(IfProjNode* proj) const; + bool is_null_check(IfProjNode* proj, PhaseIterGVN* igvn) const; + bool is_side_effect_free_test(IfProjNode* proj, PhaseIterGVN* igvn) const; + static void reroute_side_effect_free_unc(IfProjNode* proj, IfProjNode* dom_proj, PhaseIterGVN* igvn); + bool fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjNode* fail, PhaseIterGVN* igvn); static bool is_dominator_unc(CallStaticJavaNode* dom_unc, CallStaticJavaNode* unc); protected: @@ -559,6 +560,11 @@ public: IfProjNode(IfNode *ifnode, uint idx) : CProjNode(ifnode,idx) {} virtual Node* Identity(PhaseGVN* phase); + // Return the other IfProj node. + IfProjNode* other_if_proj() const { + return in(0)->as_If()->proj_out(1 - _con)->as_IfProj(); + } + void pin_array_access_nodes(PhaseIterGVN* igvn); protected: diff --git a/src/hotspot/share/opto/divnode.cpp b/src/hotspot/share/opto/divnode.cpp index f3039b12508..db4fedbba3b 100644 --- a/src/hotspot/share/opto/divnode.cpp +++ b/src/hotspot/share/opto/divnode.cpp @@ -504,6 +504,102 @@ Node* unsigned_div_ideal(PhaseGVN* phase, bool can_reshape, Node* div) { return nullptr; } +template <class IntegerType> +static const IntegerType* compute_signed_div_type(const IntegerType* i1, const IntegerType* i2) { + typedef typename IntegerType::NativeType NativeType; + assert(!i2->is_con() || i2->get_con() != 0, "Can't handle zero constant divisor"); + int widen = MAX2(i1->_widen, i2->_widen); + + // Case A: divisor range spans zero (i2->_lo < 0 < i2->_hi) + // We split into two subproblems to avoid division by 0: + // - negative part: [i2->_lo, -1] + // - positive part: [1, i2->_hi] + // Then we union the results by taking the min of all lower bounds and + // the max of all upper bounds from the two halves. + if (i2->_lo < 0 && i2->_hi > 0) { + // Handle negative part of the divisor range + const IntegerType* neg_part = compute_signed_div_type(i1, IntegerType::make(i2->_lo, -1, widen)); + // Handle positive part of the divisor range + const IntegerType* pos_part = compute_signed_div_type(i1, IntegerType::make(1, i2->_hi, widen)); + // Merge results + NativeType new_lo = MIN2(neg_part->_lo, pos_part->_lo); + NativeType new_hi = MAX2(neg_part->_hi, pos_part->_hi); + assert(new_hi >= new_lo, "sanity"); + return IntegerType::make(new_lo, new_hi, widen); + } + + // Case B: divisor range does NOT span zero. + // Here i2 is entirely negative or entirely positive. + // Then i1/i2 is monotonic in i1 and i2 (when i2 keeps the same sign). + // Therefore the extrema occur at the four "corners": + // (i1->_lo, i2->_hi), (i1->_lo, i2->_lo), (i1->_hi, i2->_lo), (i1->_hi, i2->_hi).
+ // We compute all four and take the min and max. + // A special case handles overflow when dividing the most negative value by -1. + + // adjust i2 bounds to not include zero, as zero always throws + NativeType i2_lo = i2->_lo == 0 ? 1 : i2->_lo; + NativeType i2_hi = i2->_hi == 0 ? -1 : i2->_hi; + constexpr NativeType min_val = std::numeric_limits<NativeType>::min(); + static_assert(min_val == min_jint || min_val == min_jlong, "min has to be either min_jint or min_jlong"); + constexpr NativeType max_val = std::numeric_limits<NativeType>::max(); + static_assert(max_val == max_jint || max_val == max_jlong, "max has to be either max_jint or max_jlong"); + + // Special overflow case: min_val / (-1) == min_val (cf. JVMS§6.5 idiv/ldiv) + // We need to be careful that we never run min_val / (-1) in C++ code, as this overflow is UB there + if (i1->_lo == min_val && i2_hi == -1) { + NativeType new_lo = min_val; + NativeType new_hi; + // compute new_hi depending on whether divisor or dividend is non-constant. + // i2 is purely in the negative domain here (as i2_hi is -1) + // which means the maximum value this division can yield is either + if (!i1->is_con()) { + // a) non-constant dividend: i1 could be min_val + 1. + // -> i1 / i2 = (min_val + 1) / -1 = max_val is possible. + new_hi = max_val; + assert((min_val + 1) / -1 == new_hi, "new_hi should be max_val"); + } else if (i2_lo != i2_hi) { + // b) i1 is constant min_val, i2 is non-constant. + // if i2 = -1 -> i1 / i2 = min_val / -1 = min_val + // if i2 < -1 -> i1 / i2 <= min_val / -2 = (max_val / 2) + 1 + new_hi = (max_val / 2) + 1; + assert(min_val / -2 == new_hi, "new_hi should be (max_val / 2) + 1"); + } else { + // c) i1 is constant min_val, i2 is constant -1. + // -> i1 / i2 = min_val / -1 = min_val + new_hi = min_val; + } + +#ifdef ASSERT + // validate new_hi for non-constant divisor + if (i2_lo != i2_hi) { + assert(i2_lo != -1, "Special case not possible here, as i2_lo has to be < i2_hi"); + NativeType result = i1->_lo / i2_lo; + assert(new_hi >= result, "computed wrong value for new_hi"); + } + + // validate new_hi for non-constant dividend + if (!i1->is_con()) { + assert(i1->_hi > min_val, "Special case not possible here, as i1->_hi has to be > min"); + NativeType result1 = i1->_hi / i2_lo; + NativeType result2 = i1->_hi / i2_hi; + assert(new_hi >= result1 && new_hi >= result2, "computed wrong value for new_hi"); + } +#endif + + return IntegerType::make(new_lo, new_hi, widen); + } + assert((i1->_lo != min_val && i1->_hi != min_val) || (i2_hi != -1 && i2_lo != -1), "should have filtered out before"); + + // Special case not possible here, calculate all corners normally + NativeType corner1 = i1->_lo / i2_lo; + NativeType corner2 = i1->_lo / i2_hi; + NativeType corner3 = i1->_hi / i2_lo; + NativeType corner4 = i1->_hi / i2_hi; + + NativeType new_lo = MIN4(corner1, corner2, corner3, corner4); + NativeType new_hi = MAX4(corner1, corner2, corner3, corner4); + return IntegerType::make(new_lo, new_hi, widen); +} //============================================================================= //------------------------------Identity--------------------------------------- @@ -549,65 +645,26 @@ Node *DivINode::Ideal(PhaseGVN *phase, bool can_reshape) { // prevent hoisting the divide above an unsafe test.
const Type* DivINode::Value(PhaseGVN* phase) const { // Either input is TOP ==> the result is TOP - const Type *t1 = phase->type( in(1) ); - const Type *t2 = phase->type( in(2) ); - if( t1 == Type::TOP ) return Type::TOP; - if( t2 == Type::TOP ) return Type::TOP; + const Type* t1 = phase->type(in(1)); + const Type* t2 = phase->type(in(2)); + if (t1 == Type::TOP || t2 == Type::TOP) { + return Type::TOP; + } + + if (t2 == TypeInt::ZERO) { + // this division will always throw an exception + return Type::TOP; + } // x/x == 1 since we always generate the dynamic divisor check for 0. if (in(1) == in(2)) { return TypeInt::ONE; } - // Either input is BOTTOM ==> the result is the local BOTTOM - const Type *bot = bottom_type(); - if( (t1 == bot) || (t2 == bot) || - (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) - return bot; + const TypeInt* i1 = t1->is_int(); + const TypeInt* i2 = t2->is_int(); - // Divide the two numbers. We approximate. - // If divisor is a constant and not zero - const TypeInt *i1 = t1->is_int(); - const TypeInt *i2 = t2->is_int(); - int widen = MAX2(i1->_widen, i2->_widen); - - if( i2->is_con() && i2->get_con() != 0 ) { - int32_t d = i2->get_con(); // Divisor - jint lo, hi; - if( d >= 0 ) { - lo = i1->_lo/d; - hi = i1->_hi/d; - } else { - if( d == -1 && i1->_lo == min_jint ) { - // 'min_jint/-1' throws arithmetic exception during compilation - lo = min_jint; - // do not support holes, 'hi' must go to either min_jint or max_jint: - // [min_jint, -10]/[-1,-1] ==> [min_jint] UNION [10,max_jint] - hi = i1->_hi == min_jint ? min_jint : max_jint; - } else { - lo = i1->_hi/d; - hi = i1->_lo/d; - } - } - return TypeInt::make(lo, hi, widen); - } - - // If the dividend is a constant - if( i1->is_con() ) { - int32_t d = i1->get_con(); - if( d < 0 ) { - if( d == min_jint ) { - // (-min_jint) == min_jint == (min_jint / -1) - return TypeInt::make(min_jint, max_jint/2 + 1, widen); - } else { - return TypeInt::make(d, -d, widen); - } - } - return TypeInt::make(-d, d, widen); - } - - // Otherwise we give up all hope - return TypeInt::INT; + return compute_signed_div_type(i1, i2); } @@ -655,65 +712,26 @@ Node *DivLNode::Ideal( PhaseGVN *phase, bool can_reshape) { // prevent hoisting the divide above an unsafe test. const Type* DivLNode::Value(PhaseGVN* phase) const { // Either input is TOP ==> the result is TOP - const Type *t1 = phase->type( in(1) ); - const Type *t2 = phase->type( in(2) ); - if( t1 == Type::TOP ) return Type::TOP; - if( t2 == Type::TOP ) return Type::TOP; + const Type* t1 = phase->type(in(1)); + const Type* t2 = phase->type(in(2)); + if (t1 == Type::TOP || t2 == Type::TOP) { + return Type::TOP; + } + + if (t2 == TypeLong::ZERO) { + // this division will always throw an exception + return Type::TOP; + } // x/x == 1 since we always generate the dynamic divisor check for 0. if (in(1) == in(2)) { return TypeLong::ONE; } - // Either input is BOTTOM ==> the result is the local BOTTOM - const Type *bot = bottom_type(); - if( (t1 == bot) || (t2 == bot) || - (t1 == Type::BOTTOM) || (t2 == Type::BOTTOM) ) - return bot; + const TypeLong* i1 = t1->is_long(); + const TypeLong* i2 = t2->is_long(); - // Divide the two numbers. We approximate. 
- // If divisor is a constant and not zero - const TypeLong *i1 = t1->is_long(); - const TypeLong *i2 = t2->is_long(); - int widen = MAX2(i1->_widen, i2->_widen); - - if( i2->is_con() && i2->get_con() != 0 ) { - jlong d = i2->get_con(); // Divisor - jlong lo, hi; - if( d >= 0 ) { - lo = i1->_lo/d; - hi = i1->_hi/d; - } else { - if( d == CONST64(-1) && i1->_lo == min_jlong ) { - // 'min_jlong/-1' throws arithmetic exception during compilation - lo = min_jlong; - // do not support holes, 'hi' must go to either min_jlong or max_jlong: - // [min_jlong, -10]/[-1,-1] ==> [min_jlong] UNION [10,max_jlong] - hi = i1->_hi == min_jlong ? min_jlong : max_jlong; - } else { - lo = i1->_hi/d; - hi = i1->_lo/d; - } - } - return TypeLong::make(lo, hi, widen); - } - - // If the dividend is a constant - if( i1->is_con() ) { - jlong d = i1->get_con(); - if( d < 0 ) { - if( d == min_jlong ) { - // (-min_jlong) == min_jlong == (min_jlong / -1) - return TypeLong::make(min_jlong, max_jlong/2 + 1, widen); - } else { - return TypeLong::make(d, -d, widen); - } - } - return TypeLong::make(-d, d, widen); - } - - // Otherwise we give up all hope - return TypeLong::LONG; + return compute_signed_div_type(i1, i2); } diff --git a/src/hotspot/share/opto/ifnode.cpp b/src/hotspot/share/opto/ifnode.cpp index 763888b65b2..cd8017f9fb3 100644 --- a/src/hotspot/share/opto/ifnode.cpp +++ b/src/hotspot/share/opto/ifnode.cpp @@ -771,7 +771,7 @@ bool IfNode::cmpi_folds(PhaseIterGVN* igvn, bool fold_ne) { // Is a dominating control suitable for folding with this if? bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) { return ctrl != nullptr && - ctrl->is_Proj() && + ctrl->is_IfProj() && ctrl->outcnt() == 1 && // No side-effects ctrl->in(0) != nullptr && ctrl->in(0)->Opcode() == Op_If && @@ -784,8 +784,8 @@ bool IfNode::is_ctrl_folds(Node* ctrl, PhaseIterGVN* igvn) { } // Do this If and the dominating If share a region? -bool IfNode::has_shared_region(ProjNode* proj, ProjNode*& success, ProjNode*& fail) { - ProjNode* otherproj = proj->other_if_proj(); +bool IfNode::has_shared_region(IfProjNode* proj, IfProjNode*& success, IfProjNode*& fail) const { + IfProjNode* otherproj = proj->other_if_proj(); Node* otherproj_ctrl_use = otherproj->unique_ctrl_out_or_null(); RegionNode* region = (otherproj_ctrl_use != nullptr && otherproj_ctrl_use->is_Region()) ? 
otherproj_ctrl_use->as_Region() : nullptr; success = nullptr; @@ -793,13 +793,14 @@ bool IfNode::has_shared_region(ProjNode* proj, ProjNode*& success, ProjNode*& fa if (otherproj->outcnt() == 1 && region != nullptr && !region->has_phi()) { for (int i = 0; i < 2; i++) { - ProjNode* proj = proj_out(i); - if (success == nullptr && proj->outcnt() == 1 && proj->unique_out() == region) { - success = proj; + IfProjNode* next_proj = proj_out(i)->as_IfProj(); + if (success == nullptr && next_proj->outcnt() == 1 && next_proj->unique_out() == region) { + success = next_proj; } else if (fail == nullptr) { - fail = proj; + fail = next_proj; } else { - success = fail = nullptr; + success = nullptr; + fail = nullptr; } } } @@ -850,8 +851,8 @@ ProjNode* IfNode::uncommon_trap_proj(CallStaticJavaNode*& call, Deoptimization:: } // Do this If and the dominating If both branch out to an uncommon trap -bool IfNode::has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNode*& fail, PhaseIterGVN* igvn) { - ProjNode* otherproj = proj->other_if_proj(); +bool IfNode::has_only_uncommon_traps(IfProjNode* proj, IfProjNode*& success, IfProjNode*& fail, PhaseIterGVN* igvn) const { + IfProjNode* otherproj = proj->other_if_proj(); CallStaticJavaNode* dom_unc = otherproj->is_uncommon_trap_proj(); if (otherproj->outcnt() == 1 && dom_unc != nullptr) { @@ -888,8 +889,8 @@ bool IfNode::has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNod !igvn->C->too_many_traps(dom_method, dom_bci, Deoptimization::Reason_range_check) && // Return true if c2 manages to reconcile with UnstableIf optimization. See the comments for it. igvn->C->remove_unstable_if_trap(dom_unc, true/*yield*/)) { - success = unc_proj; - fail = unc_proj->other_if_proj(); + success = unc_proj->as_IfProj(); + fail = unc_proj->as_IfProj()->other_if_proj(); return true; } } @@ -898,7 +899,7 @@ bool IfNode::has_only_uncommon_traps(ProjNode* proj, ProjNode*& success, ProjNod } // Check that the 2 CmpI can be folded into as single CmpU and proceed with the folding -bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn) { +bool IfNode::fold_compares_helper(IfProjNode* proj, IfProjNode* success, IfProjNode* fail, PhaseIterGVN* igvn) { Node* this_cmp = in(1)->in(1); BoolNode* this_bool = in(1)->as_Bool(); IfNode* dom_iff = proj->in(0)->as_If(); @@ -906,7 +907,7 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f Node* lo = dom_iff->in(1)->in(1)->in(2); Node* hi = this_cmp->in(2); Node* n = this_cmp->in(1); - ProjNode* otherproj = proj->other_if_proj(); + IfProjNode* otherproj = proj->other_if_proj(); const TypeInt* lo_type = IfNode::filtered_int_type(igvn, n, otherproj); const TypeInt* hi_type = IfNode::filtered_int_type(igvn, n, success); @@ -1108,11 +1109,11 @@ bool IfNode::fold_compares_helper(ProjNode* proj, ProjNode* success, ProjNode* f // Merge the branches that trap for this If and the dominating If into // a single region that branches to the uncommon trap for the // dominating If -Node* IfNode::merge_uncommon_traps(ProjNode* proj, ProjNode* success, ProjNode* fail, PhaseIterGVN* igvn) { +Node* IfNode::merge_uncommon_traps(IfProjNode* proj, IfProjNode* success, IfProjNode* fail, PhaseIterGVN* igvn) { Node* res = this; assert(success->in(0) == this, "bad projection"); - ProjNode* otherproj = proj->other_if_proj(); + IfProjNode* otherproj = proj->other_if_proj(); CallStaticJavaNode* unc = success->is_uncommon_trap_proj(); CallStaticJavaNode* dom_unc = 
otherproj->is_uncommon_trap_proj(); @@ -1239,7 +1240,7 @@ void IfNode::improve_address_types(Node* l, Node* r, ProjNode* fail, PhaseIterGV #endif } -bool IfNode::is_cmp_with_loadrange(ProjNode* proj) { +bool IfNode::is_cmp_with_loadrange(IfProjNode* proj) const { if (in(1) != nullptr && in(1)->in(1) != nullptr && in(1)->in(1)->in(2) != nullptr) { @@ -1258,7 +1259,7 @@ bool IfNode::is_cmp_with_loadrange(ProjNode* proj) { return false; } -bool IfNode::is_null_check(ProjNode* proj, PhaseIterGVN* igvn) { +bool IfNode::is_null_check(IfProjNode* proj, PhaseIterGVN* igvn) const { Node* other = in(1)->in(1)->in(2); if (other->in(MemNode::Address) != nullptr && proj->in(0)->in(1) != nullptr && @@ -1275,7 +1276,7 @@ // Check that the If that is in between the 2 integer comparisons has // no side effect -bool IfNode::is_side_effect_free_test(ProjNode* proj, PhaseIterGVN* igvn) { +bool IfNode::is_side_effect_free_test(IfProjNode* proj, PhaseIterGVN* igvn) const { if (proj == nullptr) { return false; } @@ -1315,9 +1316,9 @@ // won't be guarded by the first CmpI anymore. It can trap in cases // where the first CmpI would have prevented it from executing: on a // trap, we need to restart execution at the state of the first CmpI -void IfNode::reroute_side_effect_free_unc(ProjNode* proj, ProjNode* dom_proj, PhaseIterGVN* igvn) { +void IfNode::reroute_side_effect_free_unc(IfProjNode* proj, IfProjNode* dom_proj, PhaseIterGVN* igvn) { CallStaticJavaNode* dom_unc = dom_proj->is_uncommon_trap_if_pattern(); - ProjNode* otherproj = proj->other_if_proj(); + IfProjNode* otherproj = proj->other_if_proj(); CallStaticJavaNode* unc = proj->is_uncommon_trap_if_pattern(); Node* call_proj = dom_unc->unique_ctrl_out(); Node* halt = call_proj->unique_ctrl_out(); @@ -1348,9 +1349,9 @@ Node* IfNode::fold_compares(PhaseIterGVN* igvn) { if (is_ctrl_folds(ctrl, igvn)) { // An integer comparison immediately dominated by another integer // comparison - ProjNode* success = nullptr; - ProjNode* fail = nullptr; - ProjNode* dom_cmp = ctrl->as_Proj(); + IfProjNode* success = nullptr; + IfProjNode* fail = nullptr; + IfProjNode* dom_cmp = ctrl->as_IfProj(); if (has_shared_region(dom_cmp, success, fail) && // Next call modifies graph so must be last fold_compares_helper(dom_cmp, success, fail, igvn)) { @@ -1364,11 +1365,11 @@ return nullptr; } else if (ctrl->in(0) != nullptr && ctrl->in(0)->in(0) != nullptr) { - ProjNode* success = nullptr; - ProjNode* fail = nullptr; + IfProjNode* success = nullptr; + IfProjNode* fail = nullptr; Node* dom = ctrl->in(0)->in(0); - ProjNode* dom_cmp = dom->isa_Proj(); - ProjNode* other_cmp = ctrl->isa_Proj(); + IfProjNode* dom_cmp = dom->isa_IfProj(); + IfProjNode* other_cmp = ctrl->isa_IfProj(); // Check if it's an integer comparison dominated by another // integer comparison with another test in between diff --git a/src/hotspot/share/opto/library_call.cpp b/src/hotspot/share/opto/library_call.cpp index 2263fa720ce..a057f66a989 100644 --- a/src/hotspot/share/opto/library_call.cpp +++ b/src/hotspot/share/opto/library_call.cpp @@ -6171,7 +6171,7 @@ LibraryCallKit::tightly_coupled_allocation(Node* ptr) { CallStaticJavaNode* LibraryCallKit::get_uncommon_trap_from_success_proj(Node* node) { if (node->is_IfProj()) { - Node* other_proj = node->as_IfProj()->other_if_proj(); + IfProjNode* other_proj =
node->as_IfProj()->other_if_proj(); for (DUIterator_Fast jmax, j = other_proj->fast_outs(jmax); j < jmax; j++) { Node* obs = other_proj->fast_out(j); if (obs->in(0) == other_proj && obs->is_CallStaticJava() && diff --git a/src/hotspot/share/opto/memnode.cpp b/src/hotspot/share/opto/memnode.cpp index 19ff90df5ed..5b76f5b42cf 100644 --- a/src/hotspot/share/opto/memnode.cpp +++ b/src/hotspot/share/opto/memnode.cpp @@ -3103,7 +3103,7 @@ MergePrimitiveStores::CFGStatus MergePrimitiveStores::cfg_status_for_pair(const ctrl_use->in(0)->outcnt() != 2) { return CFGStatus::Failure; // Not RangeCheck. } - ProjNode* other_proj = ctrl_use->as_IfProj()->other_if_proj(); + IfProjNode* other_proj = ctrl_use->as_IfProj()->other_if_proj(); Node* trap = other_proj->is_uncommon_trap_proj(Deoptimization::Reason_range_check); if (trap != merge_mem->unique_out() || ctrl_use->in(0)->in(0) != ctrl_def) { diff --git a/src/hotspot/share/opto/mulnode.cpp b/src/hotspot/share/opto/mulnode.cpp index 280781f686b..aa8d6cfce2e 100644 --- a/src/hotspot/share/opto/mulnode.cpp +++ b/src/hotspot/share/opto/mulnode.cpp @@ -29,6 +29,7 @@ #include "opto/memnode.hpp" #include "opto/mulnode.hpp" #include "opto/phaseX.hpp" +#include "opto/rangeinference.hpp" #include "opto/subnode.hpp" #include "utilities/powerOfTwo.hpp" @@ -620,80 +621,14 @@ const Type* MulHiValue(const Type *t1, const Type *t2, const Type *bot) { return TypeLong::LONG; } -template <class IntegerType> -static const IntegerType* and_value(const IntegerType* r0, const IntegerType* r1) { - typedef typename IntegerType::NativeType NativeType; - static_assert(std::is_signed<NativeType>::value, "Native type of IntegerType must be signed!"); - - int widen = MAX2(r0->_widen, r1->_widen); - - // If both types are constants, we can calculate a constant result. - if (r0->is_con() && r1->is_con()) { - return IntegerType::make(r0->get_con() & r1->get_con()); - } - - // If both ranges are positive, the result will range from 0 up to the hi value of the smaller range. The minimum - // of the two constrains the upper bound because any higher value in the other range will see all zeroes, so it will be masked out. - if (r0->_lo >= 0 && r1->_lo >= 0) { - return IntegerType::make(0, MIN2(r0->_hi, r1->_hi), widen); - } - - // If only one range is positive, the result will range from 0 up to that range's maximum value. - // For the operation 'x & C' where C is a positive constant, the result will be in the range [0..C]. With that observation, - // we can say that for any integer c such that 0 <= c <= C will also be in the range [0..C]. Therefore, 'x & [c..C]' - // where c >= 0 will be in the range [0..C]. - if (r0->_lo >= 0) { - return IntegerType::make(0, r0->_hi, widen); - } - - if (r1->_lo >= 0) { - return IntegerType::make(0, r1->_hi, widen); - } - - // At this point, all positive ranges will have already been handled, so the only remaining cases will be negative ranges - // and constants. - - assert(r0->_lo < 0 && r1->_lo < 0, "positive ranges should already be handled!"); - - // As two's complement means that both numbers will start with leading 1s, the lower bound of both ranges will contain - // the common leading 1s of both minimum values. In order to count them with count_leading_zeros, the bits are inverted. - NativeType sel_val = ~MIN2(r0->_lo, r1->_lo); - - NativeType min; - if (sel_val == 0) { - // Since count_leading_zeros is undefined at 0, we short-circuit the condition where both ranges have a minimum of -1.
- min = -1; - } else { - // To get the number of bits to shift, we count the leading 0-bits and then subtract one, as the sign bit is already set. - int shift_bits = count_leading_zeros(sel_val) - 1; - min = std::numeric_limits<NativeType>::min() >> shift_bits; - } - - NativeType max; - if (r0->_hi < 0 && r1->_hi < 0) { - // If both ranges are negative, then the same optimization as both positive ranges will apply, and the smaller hi - // value will mask off any bits set by higher values. - max = MIN2(r0->_hi, r1->_hi); - } else { - // In the case of ranges that cross zero, negative values can cause the higher order bits to be set, so the maximum - // positive value can be as high as the larger hi value. - max = MAX2(r0->_hi, r1->_hi); - } - - return IntegerType::make(min, max, widen); -} - //============================================================================= //------------------------------mul_ring--------------------------------------- // Supplied function returns the product of the inputs IN THE CURRENT RING. // For the logical operations the ring's MUL is really a logical AND function. // This also type-checks the inputs for sanity. Guaranteed never to // be passed a TOP or BOTTOM type, these are filtered out by pre-check. -const Type *AndINode::mul_ring( const Type *t0, const Type *t1 ) const { - const TypeInt* r0 = t0->is_int(); - const TypeInt* r1 = t1->is_int(); - - return and_value<TypeInt>(r0, r1); +const Type* AndINode::mul_ring(const Type* t1, const Type* t2) const { + return RangeInference::infer_and(t1->is_int(), t2->is_int()); } static bool AndIL_is_zero_element_under_mask(const PhaseGVN* phase, const Node* expr, const Node* mask, BasicType bt); @@ -822,11 +757,8 @@ Node *AndINode::Ideal(PhaseGVN *phase, bool can_reshape) { // For the logical operations the ring's MUL is really a logical AND function. // This also type-checks the inputs for sanity. Guaranteed never to // be passed a TOP or BOTTOM type, these are filtered out by pre-check. -const Type *AndLNode::mul_ring( const Type *t0, const Type *t1 ) const { - const TypeLong* r0 = t0->is_long(); - const TypeLong* r1 = t1->is_long(); - - return and_value<TypeLong>(r0, r1); +const Type* AndLNode::mul_ring(const Type* t1, const Type* t2) const { + return RangeInference::infer_and(t1->is_long(), t2->is_long()); } const Type* AndLNode::Value(PhaseGVN* phase) const { diff --git a/src/hotspot/share/opto/multnode.cpp b/src/hotspot/share/opto/multnode.cpp index 9409a2f6af3..05867a35268 100644 --- a/src/hotspot/share/opto/multnode.cpp +++ b/src/hotspot/share/opto/multnode.cpp @@ -260,12 +260,7 @@ CallStaticJavaNode* ProjNode::is_uncommon_trap_if_pattern(Deoptimization::DeoptR // Not a projection of an If or variation of a dead If node.
return nullptr; } - return other_if_proj()->is_uncommon_trap_proj(reason); -} - -ProjNode* ProjNode::other_if_proj() const { - assert(_con == 0 || _con == 1, "not an if?"); - return in(0)->as_If()->proj_out(1-_con); + return as_IfProj()->other_if_proj()->is_uncommon_trap_proj(reason); } NarrowMemProjNode::NarrowMemProjNode(InitializeNode* src, const TypePtr* adr_type) diff --git a/src/hotspot/share/opto/multnode.hpp b/src/hotspot/share/opto/multnode.hpp index be1351cc5b1..692b69118c9 100644 --- a/src/hotspot/share/opto/multnode.hpp +++ b/src/hotspot/share/opto/multnode.hpp @@ -200,9 +200,6 @@ public: // other_proj->[region->..]call_uct" // null otherwise CallStaticJavaNode* is_uncommon_trap_if_pattern(Deoptimization::DeoptReason reason = Deoptimization::Reason_none) const; - - // Return other proj node when this is a If proj node - ProjNode* other_if_proj() const; }; // A ProjNode variant that captures an adr_type(). Used as a projection of InitializeNode to have the right adr_type() diff --git a/src/hotspot/share/opto/node.cpp b/src/hotspot/share/opto/node.cpp index 2452677caf3..9d68e058103 100644 --- a/src/hotspot/share/opto/node.cpp +++ b/src/hotspot/share/opto/node.cpp @@ -999,18 +999,22 @@ bool Node::has_out_with(int opcode1, int opcode2, int opcode3, int opcode4) { //---------------------------uncast_helper------------------------------------- Node* Node::uncast_helper(const Node* p, bool keep_deps) { #ifdef ASSERT + // If we end up traversing more nodes than we actually have, + // it is definitely an infinite loop. + uint max_depth = Compile::current()->unique(); uint depth_count = 0; const Node* orig_p = p; #endif while (true) { #ifdef ASSERT - if (depth_count >= K) { + if (depth_count++ >= max_depth) { orig_p->dump(4); - if (p != orig_p) + if (p != orig_p) { p->dump(1); + } + fatal("infinite loop in Node::uncast_helper"); } - assert(depth_count++ < K, "infinite loop in Node::uncast_helper"); #endif if (p == nullptr || p->req() != 2) { break; @@ -2875,16 +2879,9 @@ Node* Node::find_similar(int opc) { Node* use = def->fast_out(i); if (use != this && use->Opcode() == opc && - use->req() == req()) { - uint j; - for (j = 0; j < use->req(); j++) { - if (use->in(j) != in(j)) { - break; - } - } - if (j == use->req()) { - return use; - } + use->req() == req() && + has_same_inputs_as(use)) { + return use; } } } @@ -2892,6 +2889,16 @@ Node* Node::find_similar(int opc) { return nullptr; } +bool Node::has_same_inputs_as(const Node* other) const { + assert(req() == other->req(), "should have same number of inputs"); + for (uint j = 0; j < other->req(); j++) { + if (in(j) != other->in(j)) { + return false; + } + } + return true; +} + Node* Node::unique_multiple_edges_out_or_null() const { Node* use = nullptr; for (DUIterator_Fast kmax, k = fast_outs(kmax); k < kmax; k++) { diff --git a/src/hotspot/share/opto/node.hpp b/src/hotspot/share/opto/node.hpp index 2e19d1d247b..0adb2072100 100644 --- a/src/hotspot/share/opto/node.hpp +++ b/src/hotspot/share/opto/node.hpp @@ -1179,6 +1179,7 @@ public: // Return a node with opcode "opc" and same inputs as "this" if one can // be found; Otherwise return null; Node* find_similar(int opc); + bool has_same_inputs_as(const Node* other) const; // Return the unique control out if only one. Null if none or more than one. 
Node* unique_ctrl_out_or_null() const;
diff --git a/src/hotspot/share/opto/predicates.cpp b/src/hotspot/share/opto/predicates.cpp
index 2489ff563a9..89bc4374ca6 100644
--- a/src/hotspot/share/opto/predicates.cpp
+++ b/src/hotspot/share/opto/predicates.cpp
@@ -65,7 +65,7 @@ bool AssertionPredicate::has_assertion_predicate_opaque(const Node* predicate_pr
 
 // Check if the other projection (UCT projection) of `success_proj` has a Halt node as output.
 bool AssertionPredicate::has_halt(const IfTrueNode* success_proj) {
-  ProjNode* other_proj = success_proj->other_if_proj();
+  IfProjNode* other_proj = success_proj->other_if_proj();
   return other_proj->outcnt() == 1 && other_proj->unique_out()->Opcode() == Op_Halt;
 }
 
@@ -396,7 +396,7 @@ bool InitializedAssertionPredicate::is_predicate(const Node* maybe_success_proj)
 
 #ifdef ASSERT
 bool InitializedAssertionPredicate::has_halt(const IfTrueNode* success_proj) {
-  ProjNode* other_proj = success_proj->other_if_proj();
+  IfProjNode* other_proj = success_proj->other_if_proj();
   if (other_proj->outcnt() != 1) {
     return false;
   }
diff --git a/src/hotspot/share/opto/rangeinference.cpp b/src/hotspot/share/opto/rangeinference.cpp
index 40b9da4bde5..cb26e68ef58 100644
--- a/src/hotspot/share/opto/rangeinference.cpp
+++ b/src/hotspot/share/opto/rangeinference.cpp
@@ -25,7 +25,6 @@
 #include "opto/rangeinference.hpp"
 #include "opto/type.hpp"
 #include "utilities/intn_t.hpp"
-#include "utilities/tuple.hpp"
 
 // If the cardinality of a TypeInt is below this threshold, use min widen, see
 // TypeIntPrototype::normalize_widen
@@ -688,6 +687,8 @@ template class TypeIntPrototype<intn_t<1>, uintn_t<1>>;
 template class TypeIntPrototype<intn_t<2>, uintn_t<2>>;
 template class TypeIntPrototype<intn_t<3>, uintn_t<3>>;
 template class TypeIntPrototype<intn_t<4>, uintn_t<4>>;
+template class TypeIntPrototype<intn_t<5>, uintn_t<5>>;
+template class TypeIntPrototype<intn_t<6>, uintn_t<6>>;
 
 // Compute the meet of 2 types. When dual is true, the subset relation in CT is
 // reversed. This means that the result of 2 CTs would be the intersection of
@@ -709,10 +710,7 @@ const Type* TypeIntHelper::int_type_xmeet(const CT* i1, const Type* t2) {
 
   if (!i1->_is_dual) {
     // meet (a.k.a union)
-    return CT::make_or_top(TypeIntPrototype<S, U>{{MIN2(i1->_lo, i2->_lo), MAX2(i1->_hi, i2->_hi)},
-                                                  {MIN2(i1->_ulo, i2->_ulo), MAX2(i1->_uhi, i2->_uhi)},
-                                                  {i1->_bits._zeros & i2->_bits._zeros, i1->_bits._ones & i2->_bits._ones}},
-                           MAX2(i1->_widen, i2->_widen), false);
+    return int_type_union(i1, i2);
   } else {
     // join (a.k.a intersection)
     return CT::make_or_top(TypeIntPrototype<S, U>{{MAX2(i1->_lo, i2->_lo), MIN2(i1->_hi, i2->_hi)},
diff --git a/src/hotspot/share/opto/rangeinference.hpp b/src/hotspot/share/opto/rangeinference.hpp
index 73b8b43bd6e..ebfd98ca4a6 100644
--- a/src/hotspot/share/opto/rangeinference.hpp
+++ b/src/hotspot/share/opto/rangeinference.hpp
@@ -25,6 +25,7 @@
 #ifndef SHARE_OPTO_RANGEINFERENCE_HPP
 #define SHARE_OPTO_RANGEINFERENCE_HPP
 
+#include "cppstdlib/limits.hpp"
 #include "cppstdlib/type_traits.hpp"
 #include "utilities/globalDefinitions.hpp"
 
@@ -92,19 +93,6 @@ public:
   RangeInt<U> _urange;
   KnownBits<U> _bits;
 
-private:
-  friend class TypeInt;
-  friend class TypeLong;
-
-  template <class S1, class U1>
-  friend void test_canonicalize_constraints_exhaustive();
-
-  template <class S1, class U1>
-  friend void test_canonicalize_constraints_simple();
-
-  template <class S1, class U1>
-  friend void test_canonicalize_constraints_random();
-
   // A canonicalized version of a TypeIntPrototype, if the prototype represents
   // an empty type, _present is false, otherwise, _data is canonical
   class CanonicalizedTypeIntPrototype {
@@ -158,21 +146,33 @@ public:
   template <class CT>
   static const Type* int_type_xmeet(const CT* i1, const Type* t2);
 
-  template <class CT>
-  static bool int_type_is_equal(const CT* t1, const CT* t2) {
+  template <class CTP>
+  static CTP int_type_union(CTP t1, CTP t2) {
+    using CT = std::conditional_t<std::is_pointer_v<CTP>, std::remove_pointer_t<CTP>, CTP>;
+    using S = std::remove_const_t<decltype(CT::_lo)>;
+    using U = std::remove_const_t<decltype(CT::_ulo)>;
+    return CT::make(TypeIntPrototype<S, U>{{MIN2(t1->_lo, t2->_lo), MAX2(t1->_hi, t2->_hi)},
+                                           {MIN2(t1->_ulo, t2->_ulo), MAX2(t1->_uhi, t2->_uhi)},
+                                           {t1->_bits._zeros & t2->_bits._zeros, t1->_bits._ones & t2->_bits._ones}},
+                    MAX2(t1->_widen, t2->_widen));
+  }
+
+  template <class CTP>
+  static bool int_type_is_equal(const CTP t1, const CTP t2) {
     return t1->_lo == t2->_lo && t1->_hi == t2->_hi &&
            t1->_ulo == t2->_ulo && t1->_uhi == t2->_uhi &&
            t1->_bits._zeros == t2->_bits._zeros && t1->_bits._ones == t2->_bits._ones;
   }
 
-  template <class CT>
-  static bool int_type_is_subset(const CT* super, const CT* sub) {
+  template <class CTP>
+  static bool int_type_is_subset(const CTP super, const CTP sub) {
+    using U = decltype(super->_ulo);
     return super->_lo <= sub->_lo && super->_hi >= sub->_hi &&
            super->_ulo <= sub->_ulo && super->_uhi >= sub->_uhi &&
            // All bits that are known in super must also be known to be the same
            // value in sub, &~ (and not) is the same as a set subtraction on bit
            // sets
-           (super->_bits._zeros &~ sub->_bits._zeros) == 0 && (super->_bits._ones &~ sub->_bits._ones) == 0;
+           (super->_bits._zeros &~ sub->_bits._zeros) == U(0) && (super->_bits._ones &~ sub->_bits._ones) == U(0);
   }
 
   template <class CT>
@@ -195,4 +195,199 @@ public:
 #endif // PRODUCT
 };
 
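Reviewer note: since `int_type_union` above is the join point used by everything that follows, here is a minimal standalone Java sketch of the same computation. The class and field names are mine, not the JDK's; only the min/max/AND structure mirrors the C++ above.

```java
// Sketch of int_type_union: the smallest descriptor containing both inputs.
// Ranges widen to the enclosing interval; known bits survive only if known in both.
final class IntTypeSketch {
    final int lo, hi;      // signed bounds
    final int ulo, uhi;    // unsigned bounds (compared with compareUnsigned)
    final int zeros, ones; // bits known to be 0 / known to be 1

    IntTypeSketch(int lo, int hi, int ulo, int uhi, int zeros, int ones) {
        this.lo = lo; this.hi = hi; this.ulo = ulo; this.uhi = uhi;
        this.zeros = zeros; this.ones = ones;
    }

    static IntTypeSketch union(IntTypeSketch a, IntTypeSketch b) {
        return new IntTypeSketch(
            Math.min(a.lo, b.lo), Math.max(a.hi, b.hi),
            Integer.compareUnsigned(a.ulo, b.ulo) <= 0 ? a.ulo : b.ulo,
            Integer.compareUnsigned(a.uhi, b.uhi) >= 0 ? a.uhi : b.uhi,
            a.zeros & b.zeros,  // a bit is known 0 only if known 0 in both inputs
            a.ones & b.ones);   // likewise for known-1 bits
    }
}
```

Note the asymmetry: the ranges grow under union while the known-bit sets shrink, which is why `zeros` and `ones` are intersected rather than widened.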
+// A TypeIntMirror is structurally similar to a TypeInt or a TypeLong but it decouples the range
+// inference from the Type infrastructure of the compiler. It also allows more flexibility with the
+// bit width of the integer type. As a result, it is more efficient to use for intermediate steps
+// of inference, as well as more flexible to perform testing on different integer types.
+template <class S, class U>
+class TypeIntMirror {
+public:
+  S _lo;
+  S _hi;
+  U _ulo;
+  U _uhi;
+  KnownBits<U> _bits;
+  int _widen = 0; // dummy field to mimic the same field in TypeInt, useful in testing
+
+  static TypeIntMirror<S, U> make(const TypeIntPrototype<S, U>& t, int widen) {
+    auto canonicalized_t = t.canonicalize_constraints();
+    assert(!canonicalized_t.empty(), "must not be empty");
+    return TypeIntMirror<S, U>{canonicalized_t._data._srange._lo, canonicalized_t._data._srange._hi,
+                               canonicalized_t._data._urange._lo, canonicalized_t._data._urange._hi,
+                               canonicalized_t._data._bits};
+  }
+
+  // These allow TypeIntMirror to mimic the behaviors of TypeInt* and TypeLong*, so they can be
+  // passed into RangeInference methods. These are only used in testing, so they are implemented in
+  // the test file.
+  const TypeIntMirror<S, U>* operator->() const;
+  TypeIntMirror<S, U> meet(const TypeIntMirror<S, U>& o) const;
+  bool contains(U u) const;
+  bool contains(const TypeIntMirror<S, U>& o) const;
+  bool operator==(const TypeIntMirror<S, U>& o) const;
+
+  template <class S1, class U1>
+  TypeIntMirror<S1, U1> cast() const;
+};
+
+// This class contains methods for inferring the Type of the result of several arithmetic
+// operations from those of the corresponding inputs. For example, given a, b such that the Type of
+// a is [0, 1] and the Type of b is [-1, 3], the Type of the sum a + b is [-1, 4].
+// The methods in this class receive one or more template parameters which are often TypeInt* or
+// TypeLong*, or they can be TypeIntMirror, which behaves similarly to TypeInt* and TypeLong* during
+// testing. This allows us to verify the correctness of the implementation without coupling it to
+// the HotSpot compiler allocation infrastructure.
+class RangeInference {
+private:
+  // If CTP is a pointer, get the underlying type. For the test helper classes, using the struct
+  // directly allows straightforward equality comparison.
+  template <class CTP>
+  using CT = std::remove_const_t<std::conditional_t<std::is_pointer_v<CTP>, std::remove_pointer_t<CTP>, CTP>>;
+
+  // The type of CT::_lo, should be jint for TypeInt* and jlong for TypeLong*
+  template <class CTP>
+  using S = std::remove_const_t<decltype(CT<CTP>::_lo)>;
+
+  // The type of CT::_ulo, should be juint for TypeInt* and julong for TypeLong*
+  template <class CTP>
+  using U = std::remove_const_t<decltype(CT<CTP>::_ulo)>;
+
+  // A TypeInt consists of 1 or 2 simple intervals, each of which will lie either in the interval
+  // [0, max_signed] or [min_signed, -1]. It is more precise to analyze each simple interval
+  // separately when doing inference. For example, consider a, b whose Types are both [-2, 2]. By
+  // analyzing the intervals [-2, -1] and [0, 2] separately, we can easily see that the result of
+  // a & b must also be in the interval [-2, 2]. This is much harder if we want to work with the
+  // whole value range at the same time.
+  // This class offers a convenient way to traverse all the simple intervals of a TypeInt.
+  template <class CTP>
+  class SimpleIntervalIterable {
+  private:
+    TypeIntMirror<S<CTP>, U<CTP>> _first_interval;
+    TypeIntMirror<S<CTP>, U<CTP>> _second_interval;
+    int _interval_num;
+
+  public:
+    SimpleIntervalIterable(CTP t) {
+      if (U<CTP>(t->_lo) <= U<CTP>(t->_hi)) {
+        _interval_num = 1;
+        _first_interval = TypeIntMirror<S<CTP>, U<CTP>>{t->_lo, t->_hi, t->_ulo, t->_uhi, t->_bits};
+      } else {
+        _interval_num = 2;
+        _first_interval = TypeIntMirror<S<CTP>, U<CTP>>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{t->_lo, S<CTP>(t->_uhi)}, {U<CTP>(t->_lo), t->_uhi}, t->_bits}, 0);
+        _second_interval = TypeIntMirror<S<CTP>, U<CTP>>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{S<CTP>(t->_ulo), t->_hi}, {t->_ulo, U<CTP>(t->_hi)}, t->_bits}, 0);
+      }
+    }
+
+    class Iterator {
+    private:
+      const SimpleIntervalIterable& _iterable;
+      int _current_interval;
+
+      Iterator(const SimpleIntervalIterable& iterable) : _iterable(iterable), _current_interval(0) {}
+
+      friend class SimpleIntervalIterable;
+    public:
+      const TypeIntMirror<S<CTP>, U<CTP>>& operator*() const {
+        assert(_current_interval < _iterable._interval_num, "out of bounds, %d - %d", _current_interval, _iterable._interval_num);
+        if (_current_interval == 0) {
+          return _iterable._first_interval;
+        } else {
+          return _iterable._second_interval;
+        }
+      }
+
+      Iterator& operator++() {
+        assert(_current_interval < _iterable._interval_num, "out of bounds, %d - %d", _current_interval, _iterable._interval_num);
+        _current_interval++;
+        return *this;
+      }
+
+      bool operator!=(const Iterator& o) const {
+        assert(&_iterable == &o._iterable, "not on the same iterable");
+        return _current_interval != o._current_interval;
+      }
+    };
+
+    Iterator begin() const {
+      return Iterator(*this);
+    }
+
+    Iterator end() const {
+      Iterator res(*this);
+      res._current_interval = _interval_num;
+      return res;
+    }
+  };
+
+  // Infer a result given the input types of a binary operation
+  template <class CTP, class Inference>
+  static CTP infer_binary(CTP t1, CTP t2, Inference infer) {
+    CTP res;
+    bool is_init = false;
+
+    SimpleIntervalIterable<CTP> t1_simple_intervals(t1);
+    SimpleIntervalIterable<CTP> t2_simple_intervals(t2);
+
+    for (auto& st1 : t1_simple_intervals) {
+      for (auto& st2 : t2_simple_intervals) {
+        CTP current = infer(st1, st2);
+
+        if (is_init) {
+          res = res->meet(current)->template cast<S<CTP>, U<CTP>>();
+        } else {
+          is_init = true;
+          res = current;
+        }
+      }
+    }
+
+    assert(is_init, "must be initialized");
+    return res;
+  }
+
+public:
+  template <class CTP>
+  static CTP infer_and(CTP t1, CTP t2) {
+    return infer_binary(t1, t2, [&](const TypeIntMirror<S<CTP>, U<CTP>>& st1, const TypeIntMirror<S<CTP>, U<CTP>>& st2) {
+      S<CTP> lo = std::numeric_limits<S<CTP>>::min();
+      S<CTP> hi = std::numeric_limits<S<CTP>>::max();
+      U<CTP> ulo = std::numeric_limits<U<CTP>>::min();
+      // The unsigned value of the result of 'and' is always not greater than both of its inputs
+      // since there is no position at which the bit is 1 in the result and 0 in either input
+      U<CTP> uhi = MIN2(st1._uhi, st2._uhi);
+      U<CTP> zeros = st1._bits._zeros | st2._bits._zeros;
+      U<CTP> ones = st1._bits._ones & st2._bits._ones;
+      return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
+    });
+  }
+
+  template <class CTP>
+  static CTP infer_or(CTP t1, CTP t2) {
+    return infer_binary(t1, t2, [&](const TypeIntMirror<S<CTP>, U<CTP>>& st1, const TypeIntMirror<S<CTP>, U<CTP>>& st2) {
+      S<CTP> lo = std::numeric_limits<S<CTP>>::min();
+      S<CTP> hi = std::numeric_limits<S<CTP>>::max();
+      // The unsigned value of the result of 'or' is always not less than both of its inputs since
+      // there is no position at which the bit is 0 in the result and 1 in either input
+      U<CTP> ulo = MAX2(st1._ulo, st2._ulo);
+      U<CTP> uhi = std::numeric_limits<U<CTP>>::max();
+      U<CTP> zeros = st1._bits._zeros & st2._bits._zeros;
+      U<CTP> ones = st1._bits._ones | st2._bits._ones;
+      return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
+    });
+  }
+
+  template <class CTP>
+  static CTP infer_xor(CTP t1, CTP t2) {
+    return infer_binary(t1, t2, [&](const TypeIntMirror<S<CTP>, U<CTP>>& st1, const TypeIntMirror<S<CTP>, U<CTP>>& st2) {
+      S<CTP> lo = std::numeric_limits<S<CTP>>::min();
+      S<CTP> hi = std::numeric_limits<S<CTP>>::max();
+      U<CTP> ulo = std::numeric_limits<U<CTP>>::min();
+      U<CTP> uhi = std::numeric_limits<U<CTP>>::max();
+      U<CTP> zeros = (st1._bits._zeros & st2._bits._zeros) | (st1._bits._ones & st2._bits._ones);
+      U<CTP> ones = (st1._bits._zeros & st2._bits._ones) | (st1._bits._ones & st2._bits._zeros);
+      return CT<CTP>::make(TypeIntPrototype<S<CTP>, U<CTP>>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, MAX2(t1->_widen, t2->_widen));
+    });
+  }
+};
+
 #endif // SHARE_OPTO_RANGEINFERENCE_HPP
diff --git a/src/hotspot/share/opto/type.hpp b/src/hotspot/share/opto/type.hpp
index 4666cfbcf2d..73e2ba0045a 100644
--- a/src/hotspot/share/opto/type.hpp
+++ b/src/hotspot/share/opto/type.hpp
@@ -798,6 +798,7 @@ public:
   // must always specify w
   static const TypeInt* make(jint lo, jint hi, int widen);
   static const Type* make_or_top(const TypeIntPrototype<jint, juint>& t, int widen);
+  static const TypeInt* make(const TypeIntPrototype<jint, juint>& t, int widen) { return make_or_top(t, widen)->is_int(); }
 
   // Check for single integer
   bool is_con() const { return _lo == _hi; }
@@ -879,6 +880,7 @@ public:
   // must always specify w
   static const TypeLong* make(jlong lo, jlong hi, int widen);
   static const Type* make_or_top(const TypeIntPrototype<jlong, julong>& t, int widen);
+  static const TypeLong* make(const TypeIntPrototype<jlong, julong>& t, int widen) { return make_or_top(t, widen)->is_long(); }
 
   // Check for single integer
   bool is_con() const { return _lo == _hi; }
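The simple-interval splitting that `RangeInference` relies on above can be sanity-checked in isolation. Below is a self-contained Java sketch of the [-2, 2] example from the comments; all names are illustrative and nothing here is HotSpot code.

```java
// Enumerates a & b over the type [-2, 2] and confirms the result stays in
// [-2, 2], matching the per-interval reasoning: two values from [-2, -1]
// stay in [-2, -1]; any pair with a non-negative input lands in [0, 2].
public final class AndIntervalSketch {
    public static void main(String[] args) {
        int[] values = {-2, -1, 0, 1, 2}; // all values of type [-2, 2]
        int min = Integer.MAX_VALUE, max = Integer.MIN_VALUE;
        for (int a : values) {
            for (int b : values) {
                int r = a & b;
                min = Math.min(min, r);
                max = Math.max(max, r);
            }
        }
        System.out.println(min + " .. " + max); // prints: -2 .. 2
    }
}
```

The union of the per-interval results is exactly the interval claimed in the comment, which is the property `infer_binary` exploits when it meets the per-pair inferences together.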
diff --git a/src/hotspot/share/opto/utilities/xor.hpp b/src/hotspot/share/opto/utilities/xor.hpp
deleted file mode 100644
index 20edaf0d017..00000000000
--- a/src/hotspot/share/opto/utilities/xor.hpp
+++ /dev/null
@@ -1,47 +0,0 @@
-#ifndef SHARE_OPTO_UTILITIES_XOR_HPP
-#define SHARE_OPTO_UTILITIES_XOR_HPP
-
-#include "utilities/powerOfTwo.hpp"
-// Code separated into its own header to allow access from GTEST
-
-// Given 2 non-negative values in the ranges [0, hi_0] and [0, hi_1], respectively. The bitwise
-// xor of these values should also be non-negative. This method calculates an upper bound.
-
-// S and U type parameters correspond to the signed and unsigned
-// variants of an integer to operate on.
-template <typename S, typename U>
-static S xor_upper_bound_for_ranges(const S hi_0, const S hi_1) {
-  static_assert(S(-1) < S(0), "S must be signed");
-  static_assert(U(-1) > U(0), "U must be unsigned");
-
-  assert(hi_0 >= 0, "must be non-negative");
-  assert(hi_1 >= 0, "must be non-negative");
-
-  // x ^ y cannot have any bit set that is higher than both the highest bits set in x and y
-  // x cannot have any bit set that is higher than the highest bit set in hi_0
-  // y cannot have any bit set that is higher than the highest bit set in hi_1
-
-  // We want to find a value that has all 1 bits everywhere up to and including
-  // the highest bits set in hi_0 as well as hi_1. For this, we can take the next
-  // power of 2 strictly greater than both hi values and subtract 1 from it.
-
-  // Example 1:
-  // hi_0 = 5 (0b0101)   hi_1 = 1 (0b0001)
-  // (5|1)+1 = 0b0110
-  // round_up_pow2 = 0b1000
-  // -1 = 0b0111 = max
-
-  // Example 2 - this demonstrates need for the +1:
-  // hi_0 = 4 (0b0100)   hi_1 = 4 (0b0100)
-  // (4|4)+1 = 0b0101
-  // round_up_pow2 = 0b1000
-  // -1 = 0b0111 = max
-  // Without the +1, round_up_pow2 would be 0b0100, resulting in 0b0011 as max
-
-  // Note: cast to unsigned happens before +1 to avoid signed overflow, and
-  // round_up is safe because high bit is unset (0 <= lo <= hi)
-
-  return round_up_power_of_2(U(hi_0 | hi_1) + 1) - 1;
-}
-
-#endif // SHARE_OPTO_UTILITIES_XOR_HPP
diff --git a/src/hotspot/share/opto/vectorization.cpp b/src/hotspot/share/opto/vectorization.cpp
index 15b2df663b6..230ff280f03 100644
--- a/src/hotspot/share/opto/vectorization.cpp
+++ b/src/hotspot/share/opto/vectorization.cpp
@@ -1060,6 +1060,29 @@ bool VPointer::can_make_speculative_aliasing_check_with(const VPointer& other) c
     return false;
   }
 
+  // The speculative check also needs to create the pointer expressions for both
+  // VPointers. We must check that we can do that, i.e. that all variables of the
+  // VPointers are available at the speculative check (and not just pre-loop invariant).
+  if (!this->can_make_pointer_expression_at_speculative_check()) {
+#ifdef ASSERT
+    if (_vloop.is_trace_speculative_aliasing_analysis()) {
+      tty->print_cr("VPointer::can_make_speculative_aliasing_check_with: not all variables of VPointer are available at speculative check!");
+      this->print_on(tty);
+    }
+#endif
+    return false;
+  }
+
+  if (!other.can_make_pointer_expression_at_speculative_check()) {
+#ifdef ASSERT
+    if (_vloop.is_trace_speculative_aliasing_analysis()) {
+      tty->print_cr("VPointer::can_make_speculative_aliasing_check_with: not all variables of VPointer are available at speculative check!");
+      other.print_on(tty);
+    }
+#endif
+    return false;
+  }
+
   return true;
 }
 
@@ -1147,6 +1170,8 @@ BoolNode* VPointer::make_speculative_aliasing_check_with(const VPointer& other,
   Node* main_init = new ConvL2INode(main_initL);
   phase->register_new_node_with_ctrl_of(main_init, pre_init);
 
+  assert(vp1.can_make_pointer_expression_at_speculative_check(), "variables must be available early enough to avoid cycles");
+  assert(vp2.can_make_pointer_expression_at_speculative_check(), "variables must be available early enough to avoid cycles");
   Node* p1_init = vp1.make_pointer_expression(main_init, ctrl);
   Node* p2_init = vp2.make_pointer_expression(main_init, ctrl);
   Node* size1 = igvn.longcon(vp1.size());
diff --git a/src/hotspot/share/opto/vectorization.hpp b/src/hotspot/share/opto/vectorization.hpp
index aacd406f798..9308712f78a 100644
--- a/src/hotspot/share/opto/vectorization.hpp
+++ b/src/hotspot/share/opto/vectorization.hpp
@@ -1188,6 +1188,22 @@ private:
     return true;
   }
 
+  // We already know that all non-iv summands are pre-loop invariant, see
+  // init_are_non_iv_summands_pre_loop_invariant.
+  // That is good enough for alignment computations in the pre-loop limit. But it is not
+  // sufficient if we want to use the variables of the VPointer at the speculative check,
+  // which is further up before the pre-loop.
+ bool can_make_pointer_expression_at_speculative_check() const { + bool success = true; + mem_pointer().for_each_non_empty_summand([&] (const MemPointerSummand& s) { + Node* variable = s.variable(); + if (variable != _vloop.iv() && !_vloop.is_available_for_speculative_check(variable)) { + success = false; + } + }); + return success; + } + // In the pointer analysis, and especially the AlignVector analysis, we assume that // stride and scale are not too large. For example, we multiply "iv_scale * iv_stride", // and assume that this does not overflow the int range. We also take "abs(iv_scale)" diff --git a/src/hotspot/share/prims/jvm.cpp b/src/hotspot/share/prims/jvm.cpp index 48d89235c98..ef5aca96a57 100644 --- a/src/hotspot/share/prims/jvm.cpp +++ b/src/hotspot/share/prims/jvm.cpp @@ -1211,22 +1211,11 @@ JVM_ENTRY(jboolean, JVM_IsHiddenClass(JNIEnv *env, jclass cls)) JVM_END -class ScopedValueBindingsResolver { -public: - InstanceKlass* Carrier_klass; - ScopedValueBindingsResolver(JavaThread* THREAD) { - Klass *k = SystemDictionary::resolve_or_fail(vmSymbols::java_lang_ScopedValue_Carrier(), true, THREAD); - Carrier_klass = InstanceKlass::cast(k); - } -}; - JVM_ENTRY(jobject, JVM_FindScopedValueBindings(JNIEnv *env, jclass cls)) ResourceMark rm(THREAD); GrowableArray* local_array = new GrowableArray(12); JvmtiVMObjectAllocEventCollector oam; - static ScopedValueBindingsResolver resolver(THREAD); - // Iterate through Java frames vframeStream vfst(thread); for(; !vfst.at_end(); vfst.next()) { @@ -1239,7 +1228,7 @@ JVM_ENTRY(jobject, JVM_FindScopedValueBindings(JNIEnv *env, jclass cls)) InstanceKlass* holder = method->method_holder(); if (name == vmSymbols::runWith_method_name()) { if (holder == vmClasses::Thread_klass() - || holder == resolver.Carrier_klass) { + || holder == vmClasses::ScopedValue_Carrier_klass()) { loc = 1; } } diff --git a/src/hotspot/share/prims/jvmtiTagMap.cpp b/src/hotspot/share/prims/jvmtiTagMap.cpp index 90a3461f321..04cb70863cd 100644 --- a/src/hotspot/share/prims/jvmtiTagMap.cpp +++ b/src/hotspot/share/prims/jvmtiTagMap.cpp @@ -1204,8 +1204,10 @@ void JvmtiTagMap::flush_object_free_events() { assert_not_at_safepoint(); if (env()->is_enabled(JVMTI_EVENT_OBJECT_FREE)) { { + // The other thread can block for safepoints during event callbacks, so ensure we + // are safepoint-safe while waiting. 
+ ThreadBlockInVM tbivm(JavaThread::current()); MonitorLocker ml(lock(), Mutex::_no_safepoint_check_flag); - // If another thread is posting events, let it finish while (_posting_events) { ml.wait(); } diff --git a/src/hotspot/share/runtime/abstract_vm_version.cpp b/src/hotspot/share/runtime/abstract_vm_version.cpp index 54c8c917fb3..4051ba3f9d6 100644 --- a/src/hotspot/share/runtime/abstract_vm_version.cpp +++ b/src/hotspot/share/runtime/abstract_vm_version.cpp @@ -283,6 +283,8 @@ const char* Abstract_VM_Version::internal_vm_info_string() { #define HOTSPOT_BUILD_COMPILER "MS VC++ 17.13 (VS2022)" #elif _MSC_VER == 1944 #define HOTSPOT_BUILD_COMPILER "MS VC++ 17.14 (VS2022)" + #elif _MSC_VER == 1950 + #define HOTSPOT_BUILD_COMPILER "MS VC++ 18.0 (VS2026)" #else #define HOTSPOT_BUILD_COMPILER "unknown MS VC++:" XSTR(_MSC_VER) #endif diff --git a/src/hotspot/share/utilities/intn_t.hpp b/src/hotspot/share/utilities/intn_t.hpp index 6f43f5c2556..594e62a1694 100644 --- a/src/hotspot/share/utilities/intn_t.hpp +++ b/src/hotspot/share/utilities/intn_t.hpp @@ -79,6 +79,7 @@ public: static_assert(min < max, ""); constexpr bool operator==(intn_t o) const { return (_v & _mask) == (o._v & _mask); } + constexpr bool operator!=(intn_t o) const { return !(*this == o); } constexpr bool operator<(intn_t o) const { return int(*this) < int(o); } constexpr bool operator>(intn_t o) const { return int(*this) > int(o); } constexpr bool operator<=(intn_t o) const { return int(*this) <= int(o); } diff --git a/src/java.base/share/classes/java/lang/Byte.java b/src/java.base/share/classes/java/lang/Byte.java index d9913e354a4..0f3f7f40d05 100644 --- a/src/java.base/share/classes/java/lang/Byte.java +++ b/src/java.base/share/classes/java/lang/Byte.java @@ -26,6 +26,7 @@ package java.lang; import jdk.internal.misc.CDS; +import jdk.internal.vm.annotation.AOTSafeClassInitializer; import jdk.internal.vm.annotation.IntrinsicCandidate; import jdk.internal.vm.annotation.Stable; @@ -103,6 +104,7 @@ public final class Byte extends Number implements Comparable, Constable { return Optional.of(DynamicConstantDesc.ofNamed(BSM_EXPLICIT_CAST, DEFAULT_NAME, CD_byte, intValue())); } + @AOTSafeClassInitializer private static final class ByteCache { private ByteCache() {} diff --git a/src/java.base/share/classes/java/lang/Character.java b/src/java.base/share/classes/java/lang/Character.java index b71849eaee7..ffda729a45a 100644 --- a/src/java.base/share/classes/java/lang/Character.java +++ b/src/java.base/share/classes/java/lang/Character.java @@ -26,6 +26,7 @@ package java.lang; import jdk.internal.misc.CDS; +import jdk.internal.vm.annotation.AOTSafeClassInitializer; import jdk.internal.vm.annotation.IntrinsicCandidate; import jdk.internal.vm.annotation.Stable; @@ -9379,6 +9380,7 @@ class Character implements java.io.Serializable, Comparable, Constabl this.value = value; } + @AOTSafeClassInitializer private static final class CharacterCache { private CharacterCache(){} diff --git a/src/java.base/share/classes/java/lang/Integer.java b/src/java.base/share/classes/java/lang/Integer.java index 2742ec40abf..a9da1c32490 100644 --- a/src/java.base/share/classes/java/lang/Integer.java +++ b/src/java.base/share/classes/java/lang/Integer.java @@ -28,6 +28,8 @@ package java.lang; import jdk.internal.misc.CDS; import jdk.internal.misc.VM; import jdk.internal.util.DecimalDigits; +import jdk.internal.vm.annotation.AOTRuntimeSetup; +import jdk.internal.vm.annotation.AOTSafeClassInitializer; import jdk.internal.vm.annotation.ForceInline; import 
jdk.internal.vm.annotation.IntrinsicCandidate;
 import jdk.internal.vm.annotation.Stable;
 
@@ -891,15 +893,20 @@ public final class Integer extends Number
      * with new Integer object(s) after initialization.
      */
 
+    @AOTSafeClassInitializer
     private static final class IntegerCache {
         static final int low = -128;
-        static final int high;
+        @Stable static int high;
 
-        @Stable
-        static final Integer[] cache;
+        @Stable static Integer[] cache;
         static Integer[] archivedCache;
 
         static {
+            runtimeSetup();
+        }
+
+        @AOTRuntimeSetup
+        private static void runtimeSetup() {
             // high value may be configured by property
             int h = 127;
             String integerCacheHighPropValue =
@@ -915,34 +922,50 @@ public final class Integer extends Number
             }
             high = h;
 
-            // Load IntegerCache.archivedCache from archive, if possible
-            CDS.initializeFromArchive(IntegerCache.class);
-            int size = (high - low) + 1;
-
-            // Use the archived cache if it exists and is large enough
-            if (archivedCache == null || size > archivedCache.length) {
-                Integer[] c = new Integer[size];
-                int j = low;
-                // If archive has Integer cache, we must use all instances from it.
-                // Otherwise, the identity checks between archived Integers and
-                // runtime-cached Integers would fail.
-                int archivedSize = (archivedCache == null) ? 0 : archivedCache.length;
-                for (int i = 0; i < archivedSize; i++) {
-                    c[i] = archivedCache[i];
-                    assert j == archivedCache[i];
-                    j++;
-                }
-                // Fill the rest of the cache.
-                for (int i = archivedSize; i < size; i++) {
-                    c[i] = new Integer(j++);
-                }
-                archivedCache = c;
+            Integer[] precomputed = null;
+            if (cache != null) {
+                // IntegerCache has been AOT-initialized.
+                precomputed = cache;
+            } else {
+                // Legacy CDS archive support (to be deprecated):
+                // Load IntegerCache.archivedCache from archive, if possible
+                CDS.initializeFromArchive(IntegerCache.class);
+                precomputed = archivedCache;
             }
-            cache = archivedCache;
+
+            cache = loadOrInitializeCache(precomputed);
+            archivedCache = cache; // Legacy CDS archive support (to be deprecated)
 
             // range [-128, 127] must be interned (JLS7 5.1.7)
             assert IntegerCache.high >= 127;
         }
 
+        private static Integer[] loadOrInitializeCache(Integer[] precomputed) {
+            int size = (high - low) + 1;
+
+            // Use the precomputed cache if it exists and is large enough
+            if (precomputed != null && size <= precomputed.length) {
+                return precomputed;
+            }
+
+            Integer[] c = new Integer[size];
+            int j = low;
+            // If we are loading a precomputed cache (from the AOT cache or a CDS archive),
+            // we must use all instances from it.
+            // Otherwise, the Integers from the AOT cache (or CDS archive) will not
+            // have the same object identity as items in IntegerCache.cache[].
+            int precomputedSize = (precomputed == null) ? 0 : precomputed.length;
+            for (int i = 0; i < precomputedSize; i++) {
+                c[i] = precomputed[i];
+                assert j == precomputed[i];
+                j++;
+            }
+            // Fill the rest of the cache.
+ for (int i = precomputedSize; i < size; i++) { + c[i] = new Integer(j++); + } + return c; + } + private IntegerCache() {} } diff --git a/src/java.base/share/classes/java/lang/Long.java b/src/java.base/share/classes/java/lang/Long.java index 3077e7c0a38..c5cd9650f2d 100644 --- a/src/java.base/share/classes/java/lang/Long.java +++ b/src/java.base/share/classes/java/lang/Long.java @@ -35,6 +35,7 @@ import java.util.Optional; import jdk.internal.misc.CDS; import jdk.internal.util.DecimalDigits; +import jdk.internal.vm.annotation.AOTSafeClassInitializer; import jdk.internal.vm.annotation.ForceInline; import jdk.internal.vm.annotation.IntrinsicCandidate; import jdk.internal.vm.annotation.Stable; @@ -911,6 +912,7 @@ public final class Long extends Number return Long.valueOf(parseLong(s, 10)); } + @AOTSafeClassInitializer private static final class LongCache { private LongCache() {} diff --git a/src/java.base/share/classes/java/lang/Module.java b/src/java.base/share/classes/java/lang/Module.java index cd2b8095ee4..bd04345554b 100644 --- a/src/java.base/share/classes/java/lang/Module.java +++ b/src/java.base/share/classes/java/lang/Module.java @@ -69,6 +69,7 @@ import jdk.internal.module.ServicesCatalog; import jdk.internal.module.Resources; import jdk.internal.reflect.CallerSensitive; import jdk.internal.reflect.Reflection; +import jdk.internal.vm.annotation.AOTSafeClassInitializer; import jdk.internal.vm.annotation.Stable; /** @@ -391,6 +392,7 @@ public final class Module implements AnnotatedElement { private static final Module EVERYONE_MODULE; private static final Set EVERYONE_SET; + @AOTSafeClassInitializer private static class ArchivedData { private static ArchivedData archivedData; private final Module allUnnamedModule; diff --git a/src/java.base/share/classes/java/lang/ModuleLayer.java b/src/java.base/share/classes/java/lang/ModuleLayer.java index 9d922f787a6..a073de6b14a 100644 --- a/src/java.base/share/classes/java/lang/ModuleLayer.java +++ b/src/java.base/share/classes/java/lang/ModuleLayer.java @@ -53,6 +53,7 @@ import jdk.internal.module.ServicesCatalog; import jdk.internal.misc.CDS; import jdk.internal.reflect.CallerSensitive; import jdk.internal.reflect.Reflection; +import jdk.internal.vm.annotation.AOTSafeClassInitializer; import jdk.internal.vm.annotation.Stable; /** @@ -145,6 +146,7 @@ import jdk.internal.vm.annotation.Stable; * @see Module#getLayer() */ +@AOTSafeClassInitializer public final class ModuleLayer { // the empty layer (may be initialized from the CDS archive) diff --git a/src/java.base/share/classes/java/lang/Short.java b/src/java.base/share/classes/java/lang/Short.java index 4c64427b6df..920500a7fa3 100644 --- a/src/java.base/share/classes/java/lang/Short.java +++ b/src/java.base/share/classes/java/lang/Short.java @@ -26,6 +26,7 @@ package java.lang; import jdk.internal.misc.CDS; +import jdk.internal.vm.annotation.AOTSafeClassInitializer; import jdk.internal.vm.annotation.IntrinsicCandidate; import jdk.internal.vm.annotation.Stable; @@ -230,6 +231,7 @@ public final class Short extends Number implements Comparable, Constable return Optional.of(DynamicConstantDesc.ofNamed(BSM_EXPLICIT_CAST, DEFAULT_NAME, CD_short, intValue())); } + @AOTSafeClassInitializer private static final class ShortCache { private ShortCache() {} diff --git a/src/java.base/share/classes/java/lang/module/Configuration.java b/src/java.base/share/classes/java/lang/module/Configuration.java index a76a32cfb28..40eeddc3f0b 100644 --- a/src/java.base/share/classes/java/lang/module/Configuration.java 
+++ b/src/java.base/share/classes/java/lang/module/Configuration.java
@@ -44,6 +44,7 @@ import java.util.stream.Stream;
 import jdk.internal.misc.CDS;
 import jdk.internal.module.ModuleReferenceImpl;
 import jdk.internal.module.ModuleTarget;
+import jdk.internal.vm.annotation.AOTSafeClassInitializer;
 import jdk.internal.vm.annotation.Stable;
 
 /**
@@ -155,6 +156,7 @@ import jdk.internal.vm.annotation.Stable;
  * @since 9
  * @see java.lang.ModuleLayer
  */
+@AOTSafeClassInitializer
 public final class Configuration {
 
     // @see Configuration#empty()
diff --git a/src/java.base/share/classes/java/util/ImmutableCollections.java b/src/java.base/share/classes/java/util/ImmutableCollections.java
index abc48ff5ed9..e7fe22490da 100644
--- a/src/java.base/share/classes/java/util/ImmutableCollections.java
+++ b/src/java.base/share/classes/java/util/ImmutableCollections.java
@@ -42,6 +42,9 @@ import java.util.function.UnaryOperator;
 import jdk.internal.access.JavaUtilCollectionAccess;
 import jdk.internal.access.SharedSecrets;
 import jdk.internal.misc.CDS;
+import jdk.internal.vm.annotation.AOTRuntimeSetup;
+import jdk.internal.vm.annotation.AOTSafeClassInitializer;
+import jdk.internal.vm.annotation.ForceInline;
 import jdk.internal.vm.annotation.Stable;
 
 /**
@@ -52,6 +55,7 @@
  * classes use a serial proxy and thus have no need to declare serialVersionUID.
  */
 @SuppressWarnings("serial")
+@AOTSafeClassInitializer
 class ImmutableCollections {
     /**
      * A "salt" value used for randomizing iteration order. This is initialized once
@@ -59,14 +63,20 @@ class ImmutableCollections {
      * it needs to vary sufficiently from one run to the next so that iteration order
      * will vary between JVM runs.
      */
-    private static final long SALT32L;
+    @Stable private static long SALT32L;
 
     /**
     * For set and map iteration, we will iterate in "reverse" stochastically,
     * decided at bootstrap time.
     */
-    private static final boolean REVERSE;
+    @Stable private static boolean REVERSE;
+
+    static {
+        runtimeSetup();
+    }
 
-    static {
+    @AOTRuntimeSetup
+    private static void runtimeSetup() {
        // to generate a reasonably random and well-mixed SALT, use an arbitrary
        // value (a slice of pi), multiply with a random seed, then pick
        // the mid 32-bits from the product. By picking a SALT value in the
@@ -102,6 +112,7 @@ class ImmutableCollections {
     static final MapN<?,?> EMPTY_MAP;
 
     static {
+        // Legacy CDS archive support (to be deprecated)
         CDS.initializeFromArchive(ImmutableCollections.class);
         if (archivedObjects == null) {
             EMPTY = new Object();
diff --git a/src/java.base/share/classes/java/util/jar/Attributes.java b/src/java.base/share/classes/java/util/jar/Attributes.java
index 9322bb9acac..20ff81676c9 100644
--- a/src/java.base/share/classes/java/util/jar/Attributes.java
+++ b/src/java.base/share/classes/java/util/jar/Attributes.java
@@ -36,6 +36,7 @@ import java.util.Objects;
 import java.util.Set;
 
 import jdk.internal.misc.CDS;
+import jdk.internal.vm.annotation.AOTSafeClassInitializer;
 import jdk.internal.vm.annotation.Stable;
 
 import sun.nio.cs.UTF_8;
@@ -60,6 +61,7 @@
  * @see Manifest
  * @since 1.2
  */
+@AOTSafeClassInitializer
 public class Attributes implements Map<Object,Object>, Cloneable {
     /**
      * The attribute name-value mappings.
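The same shape recurs across all of the Java changes above and below: mark the holder class `@AOTSafeClassInitializer`, demote environment-dependent `final` fields to `@Stable`, and move their initialization into an `@AOTRuntimeSetup` method. If I read the pattern correctly, a minimal sketch looks like this; the cache class, field, and property here are hypothetical, and the annotations are the JDK-internal ones this patch imports, so this only compiles inside java.base:

```java
import jdk.internal.vm.annotation.AOTRuntimeSetup;
import jdk.internal.vm.annotation.AOTSafeClassInitializer;
import jdk.internal.vm.annotation.Stable;

@AOTSafeClassInitializer          // class may be initialized at AOT cache assembly time
final class ExampleCache {
    @Stable static String[] cache; // @Stable instead of final: assigned during runtime setup

    static {
        runtimeSetup();
    }

    @AOTRuntimeSetup               // re-executed in the production run, not baked in at dump time
    private static void runtimeSetup() {
        if (cache == null) {       // stays null unless AOT initialization already filled it
            // Environment-dependent state must be recomputed here rather than
            // captured at archive dump time.
            cache = new String[] { System.getProperty("user.dir") };
        }
    }
}
```

The `cache != null` check mirrors `IntegerCache.runtimeSetup()` above: a non-null field signals that the class was AOT-initialized and the precomputed state should be reused.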
@@ -450,6 +452,7 @@ public class Attributes implements Map, Cloneable { * * @spec jar/jar.html JAR File Specification */ + @AOTSafeClassInitializer public static class Name { private final String name; private final int hashCode; @@ -669,6 +672,7 @@ public class Attributes implements Map, Cloneable { static { + // Legacy CDS archive support (to be deprecated) CDS.initializeFromArchive(Attributes.Name.class); if (KNOWN_NAMES == null) { diff --git a/src/java.base/share/classes/jdk/internal/loader/ArchivedClassLoaders.java b/src/java.base/share/classes/jdk/internal/loader/ArchivedClassLoaders.java index be3425590fc..439772a8789 100644 --- a/src/java.base/share/classes/jdk/internal/loader/ArchivedClassLoaders.java +++ b/src/java.base/share/classes/jdk/internal/loader/ArchivedClassLoaders.java @@ -27,11 +27,13 @@ package jdk.internal.loader; import java.util.Map; import jdk.internal.misc.CDS; import jdk.internal.module.ServicesCatalog; +import jdk.internal.vm.annotation.AOTSafeClassInitializer; /** * Used to archive the built-in class loaders, their services catalogs, and the * package-to-module map used by the built-in class loaders. */ +@AOTSafeClassInitializer class ArchivedClassLoaders { private static ArchivedClassLoaders archivedClassLoaders; diff --git a/src/java.base/share/classes/jdk/internal/math/FDBigInteger.java b/src/java.base/share/classes/jdk/internal/math/FDBigInteger.java index 1ef9dee3a8a..5413226c112 100644 --- a/src/java.base/share/classes/jdk/internal/math/FDBigInteger.java +++ b/src/java.base/share/classes/jdk/internal/math/FDBigInteger.java @@ -26,6 +26,7 @@ package jdk.internal.math; import jdk.internal.misc.CDS; +import jdk.internal.vm.annotation.AOTSafeClassInitializer; import jdk.internal.vm.annotation.Stable; import java.util.Arrays; @@ -33,6 +34,7 @@ import java.util.Arrays; /** * A simple big integer class specifically for floating point base conversion. */ +@AOTSafeClassInitializer final class FDBigInteger { @Stable @@ -53,6 +55,7 @@ final class FDBigInteger { // Initialize FDBigInteger cache of powers of 5. static { + // Legacy CDS archive support (to be deprecated) CDS.initializeFromArchive(FDBigInteger.class); Object[] caches = archivedCaches; if (caches == null) { diff --git a/src/java.base/share/classes/jdk/internal/module/ArchivedBootLayer.java b/src/java.base/share/classes/jdk/internal/module/ArchivedBootLayer.java index 5c806f81dcd..425238dd521 100644 --- a/src/java.base/share/classes/jdk/internal/module/ArchivedBootLayer.java +++ b/src/java.base/share/classes/jdk/internal/module/ArchivedBootLayer.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,10 +25,12 @@ package jdk.internal.module; import jdk.internal.misc.CDS; +import jdk.internal.vm.annotation.AOTSafeClassInitializer; /** * Used by ModuleBootstrap for archiving the boot layer. 
 */
+@AOTSafeClassInitializer
 class ArchivedBootLayer {
     private static ArchivedBootLayer archivedBootLayer;
 
diff --git a/src/java.base/share/classes/jdk/internal/module/ArchivedModuleGraph.java b/src/java.base/share/classes/jdk/internal/module/ArchivedModuleGraph.java
index 4f9223d0171..deb280c878d 100644
--- a/src/java.base/share/classes/jdk/internal/module/ArchivedModuleGraph.java
+++ b/src/java.base/share/classes/jdk/internal/module/ArchivedModuleGraph.java
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2025, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,11 +30,13 @@ import java.util.function.Function;
 import java.lang.module.Configuration;
 import java.lang.module.ModuleFinder;
 import jdk.internal.misc.CDS;
+import jdk.internal.vm.annotation.AOTSafeClassInitializer;
 
 /**
  * Used by ModuleBootstrap for archiving the configuration for the boot layer,
  * and the system module finder.
  */
+@AOTSafeClassInitializer
 class ArchivedModuleGraph {
     private static ArchivedModuleGraph archivedModuleGraph;
 
@@ -126,6 +128,7 @@ class ArchivedModuleGraph {
     }
 
     static {
+        // Legacy CDS archive support (to be deprecated)
         CDS.initializeFromArchive(ArchivedModuleGraph.class);
     }
 }
diff --git a/src/java.base/share/classes/sun/util/locale/BaseLocale.java b/src/java.base/share/classes/sun/util/locale/BaseLocale.java
index 58ec6d76aa5..31078720ddc 100644
--- a/src/java.base/share/classes/sun/util/locale/BaseLocale.java
+++ b/src/java.base/share/classes/sun/util/locale/BaseLocale.java
@@ -34,11 +34,14 @@ package sun.util.locale;
 
 import jdk.internal.misc.CDS;
 import jdk.internal.util.ReferencedKeySet;
+import jdk.internal.vm.annotation.AOTRuntimeSetup;
+import jdk.internal.vm.annotation.AOTSafeClassInitializer;
 import jdk.internal.vm.annotation.Stable;
 
 import java.util.StringJoiner;
 import java.util.function.Supplier;
 
+@AOTSafeClassInitializer
 public final class BaseLocale {
 
     public static @Stable BaseLocale[] constantBaseLocales;
@@ -63,6 +66,7 @@ public final class BaseLocale {
         CANADA_FRENCH = 18,
         NUM_CONSTANTS = 19;
     static {
+        // Legacy CDS archive support (to be deprecated)
         CDS.initializeFromArchive(BaseLocale.class);
         BaseLocale[] baseLocales = constantBaseLocales;
         if (baseLocales == null) {
@@ -91,13 +95,21 @@ public final class BaseLocale {
     }
 
     // Interned BaseLocale cache
-    private static final LazyConstant<ReferencedKeySet<BaseLocale>> CACHE =
+    @Stable private static LazyConstant<ReferencedKeySet<BaseLocale>> CACHE;
+    static {
+        runtimeSetup();
+    }
+
+    @AOTRuntimeSetup
+    private static void runtimeSetup() {
+        CACHE =
             LazyConstant.of(new Supplier<>() {
                 @Override
                 public ReferencedKeySet<BaseLocale> get() {
                     return ReferencedKeySet.create(true, ReferencedKeySet.concurrentHashMapSupplier());
                 }
             });
+    }
 
     public static final String SEP = "_";
 
diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/quic/ConnectionTerminatorImpl.java b/src/java.net.http/share/classes/jdk/internal/net/http/quic/ConnectionTerminatorImpl.java
index 7870f4f1d8e..5e2384dce27 100644
--- a/src/java.net.http/share/classes/jdk/internal/net/http/quic/ConnectionTerminatorImpl.java
+++ b/src/java.net.http/share/classes/jdk/internal/net/http/quic/ConnectionTerminatorImpl.java
@@ -344,10 +344,6 @@ final class ConnectionTerminatorImpl implements ConnectionTerminator {
             }
         }
         failHandshakeCFs();
-        // remap the connection to a draining connection
-        final QuicEndpoint endpoint =
this.connection.endpoint(); - assert endpoint != null : "QUIC endpoint is null"; - endpoint.draining(connection); discardConnectionState(); connection.streams.terminate(terminationCause); if (Log.quic()) { @@ -439,7 +435,7 @@ final class ConnectionTerminatorImpl implements ConnectionTerminator { final ProtectionRecord protectionRecord = ProtectionRecord.single(packet, connection::allocateDatagramForEncryption); // while sending the packet containing the CONNECTION_CLOSE frame, the pushDatagram will - // remap (or remove) the QuicConnectionImpl in QuicEndpoint. + // remap the QuicConnectionImpl in QuicEndpoint. connection.pushDatagram(protectionRecord); } diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicConnectionImpl.java b/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicConnectionImpl.java index c07df1c6eb2..580bbe23d31 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicConnectionImpl.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicConnectionImpl.java @@ -2811,7 +2811,7 @@ public class QuicConnectionImpl extends QuicConnection implements QuicPacketRece // a CONNECTION_CLOSE frame is being sent to the peer when the local // connection state is in DRAINING. This implies that the local endpoint // is responding to an incoming CONNECTION_CLOSE frame from the peer. - // we remove the connection from the endpoint for such cases. + // we switch this connection to one that does not respond to incoming packets. endpoint.pushClosedDatagram(this, peerAddress(), datagram); } else if (stateHandle.isMarked(QuicConnectionState.CLOSING)) { // a CONNECTION_CLOSE frame is being sent to the peer when the local diff --git a/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicEndpoint.java b/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicEndpoint.java index 8bdba21594c..91b4a678a23 100644 --- a/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicEndpoint.java +++ b/src/java.net.http/share/classes/jdk/internal/net/http/quic/QuicEndpoint.java @@ -1509,7 +1509,8 @@ public abstract sealed class QuicEndpoint implements AutoCloseable /** * Called to schedule sending of a datagram that contains a single {@code ConnectionCloseFrame} * sent in response to a {@code ConnectionClose} frame. - * This will completely remove the connection from the connection map. + * This will replace the {@link QuicConnectionImpl} with a {@link DrainingConnection} that + * will discard all incoming packets. * @param connection the connection being closed * @param destination the peer address * @param datagram the datagram @@ -1518,7 +1519,7 @@ public abstract sealed class QuicEndpoint implements AutoCloseable InetSocketAddress destination, ByteBuffer datagram) { if (debug.on()) debug.log("Pushing closed datagram for " + connection.logTag()); - removeConnection(connection); + draining(connection); pushDatagram(connection, destination, datagram); } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/BsdCDebugger.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/BsdCDebugger.java index c119e374d66..01a71c8f44c 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/BsdCDebugger.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/BsdCDebugger.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. 
All rights reserved. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * @@ -86,18 +86,22 @@ class BsdCDebugger implements CDebugger { String cpu = dbg.getCPU(); if (cpu.equals("amd64") || cpu.equals("x86_64")) { AMD64ThreadContext context = (AMD64ThreadContext) thread.getContext(); + Address rsp = context.getRegisterAsAddress(AMD64ThreadContext.RSP); + if (rsp == null) return null; Address rbp = context.getRegisterAsAddress(AMD64ThreadContext.RBP); if (rbp == null) return null; Address pc = context.getRegisterAsAddress(AMD64ThreadContext.RIP); if (pc == null) return null; - return new BsdAMD64CFrame(dbg, rbp, pc); + return new BsdAMD64CFrame(dbg, rsp, rbp, pc); } else if (cpu.equals("aarch64")) { AARCH64ThreadContext context = (AARCH64ThreadContext) thread.getContext(); + Address sp = context.getRegisterAsAddress(AARCH64ThreadContext.SP); + if (sp == null) return null; Address fp = context.getRegisterAsAddress(AARCH64ThreadContext.FP); if (fp == null) return null; Address pc = context.getRegisterAsAddress(AARCH64ThreadContext.PC); if (pc == null) return null; - return new BsdAARCH64CFrame(dbg, fp, pc); + return new BsdAARCH64CFrame(dbg, sp, fp, pc); } else { throw new DebuggerException(cpu + " is not yet supported"); } diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/aarch64/BsdAARCH64CFrame.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/aarch64/BsdAARCH64CFrame.java index ebc5c7d716e..185148b39b4 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/aarch64/BsdAARCH64CFrame.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/aarch64/BsdAARCH64CFrame.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2015, Red Hat Inc. * Copyright (c) 2021, Azul Systems, Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -26,15 +26,19 @@ package sun.jvm.hotspot.debugger.bsd.aarch64; +import sun.jvm.hotspot.code.*; import sun.jvm.hotspot.debugger.*; import sun.jvm.hotspot.debugger.aarch64.*; import sun.jvm.hotspot.debugger.bsd.*; import sun.jvm.hotspot.debugger.cdbg.*; import sun.jvm.hotspot.debugger.cdbg.basic.*; +import sun.jvm.hotspot.runtime.*; +import sun.jvm.hotspot.runtime.aarch64.*; public final class BsdAARCH64CFrame extends BasicCFrame { - public BsdAARCH64CFrame(BsdDebugger dbg, Address fp, Address pc) { + public BsdAARCH64CFrame(BsdDebugger dbg, Address sp, Address fp, Address pc) { super(dbg.getCDebugger()); + this.sp = sp; this.fp = fp; this.pc = pc; this.dbg = dbg; @@ -54,28 +58,65 @@ public final class BsdAARCH64CFrame extends BasicCFrame { return fp; } + @Override public CFrame sender(ThreadProxy thread) { - AARCH64ThreadContext context = (AARCH64ThreadContext) thread.getContext(); - Address rsp = context.getRegisterAsAddress(AARCH64ThreadContext.SP); + return sender(thread, null, null, null); + } - if ((fp == null) || fp.lessThan(rsp)) { + @Override + public CFrame sender(ThreadProxy thread, Address nextSP, Address nextFP, Address nextPC) { + // Check fp + // Skip if both nextFP and nextPC are given - do not need to load from fp. 
+ if (nextFP == null && nextPC == null) { + if (fp == null) { + return null; + } + + // Check alignment of fp + if (dbg.getAddressValue(fp) % (2 * ADDRESS_SIZE) != 0) { + return null; + } + } + + if (nextFP == null) { + nextFP = fp.getAddressAt(0); + } + if (nextFP == null) { return null; } - // Check alignment of fp - if (dbg.getAddressValue(fp) % (2 * ADDRESS_SIZE) != 0) { - return null; + if (nextPC == null) { + nextPC = fp.getAddressAt(ADDRESS_SIZE); } - - Address nextFP = fp.getAddressAt(0 * ADDRESS_SIZE); - if (nextFP == null || nextFP.lessThanOrEqual(fp)) { - return null; - } - Address nextPC = fp.getAddressAt(1 * ADDRESS_SIZE); if (nextPC == null) { return null; } - return new BsdAARCH64CFrame(dbg, nextFP, nextPC); + + if (nextSP == null) { + CodeCache cc = VM.getVM().getCodeCache(); + CodeBlob currentBlob = cc.findBlobUnsafe(pc()); + + // This case is different from HotSpot. See JDK-8371194 for details. + if (currentBlob != null && (currentBlob.isContinuationStub() || currentBlob.isNativeMethod())) { + // Use FP since it should always be valid for these cases. + // TODO: These should be walked as Frames not CFrames. + nextSP = fp.addOffsetTo(2 * ADDRESS_SIZE); + } else { + CodeBlob codeBlob = cc.findBlobUnsafe(nextPC); + boolean useCodeBlob = codeBlob != null && codeBlob.getFrameSize() > 0; + nextSP = useCodeBlob ? nextFP.addOffsetTo((2 * ADDRESS_SIZE) - codeBlob.getFrameSize()) : nextFP; + } + } + if (nextSP == null) { + return null; + } + + return new BsdAARCH64CFrame(dbg, nextSP, nextFP, nextPC); + } + + @Override + public Frame toFrame() { + return new AARCH64Frame(sp, fp, pc); } // package/class internals only diff --git a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/amd64/BsdAMD64CFrame.java b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/amd64/BsdAMD64CFrame.java index 8d0d94b11b7..9077e0007a5 100644 --- a/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/amd64/BsdAMD64CFrame.java +++ b/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/debugger/bsd/amd64/BsdAMD64CFrame.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003, 2021, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -29,10 +29,13 @@ import sun.jvm.hotspot.debugger.amd64.*; import sun.jvm.hotspot.debugger.bsd.*; import sun.jvm.hotspot.debugger.cdbg.*; import sun.jvm.hotspot.debugger.cdbg.basic.*; +import sun.jvm.hotspot.runtime.*; +import sun.jvm.hotspot.runtime.amd64.*; public final class BsdAMD64CFrame extends BasicCFrame { - public BsdAMD64CFrame(BsdDebugger dbg, Address rbp, Address rip) { + public BsdAMD64CFrame(BsdDebugger dbg, Address rsp, Address rbp, Address rip) { super(dbg.getCDebugger()); + this.rsp = rsp; this.rbp = rbp; this.rip = rip; this.dbg = dbg; @@ -52,32 +55,49 @@ public final class BsdAMD64CFrame extends BasicCFrame { return rbp; } + @Override public CFrame sender(ThreadProxy thread) { - AMD64ThreadContext context = (AMD64ThreadContext) thread.getContext(); - Address rsp = context.getRegisterAsAddress(AMD64ThreadContext.RSP); + return sender(thread, null, null, null); + } - if ( (rbp == null) || rbp.lessThan(rsp) ) { - return null; + @Override + public CFrame sender(ThreadProxy thread, Address sp, Address fp, Address pc) { + // Check fp + // Skip if both fp and pc are given - do not need to load from rbp. 
+        if (fp == null && pc == null) {
+            if (rbp == null) {
+                return null;
+            }
+
+            // Check alignment of rbp
+            if (dbg.getAddressValue(rbp) % ADDRESS_SIZE != 0) {
+                return null;
+            }
         }
 
-      // Check alignment of rbp
-      if (dbg.getAddressValue(rbp) % ADDRESS_SIZE != 0) {
+        Address nextRSP = sp != null ? sp : rbp.addOffsetTo(2 * ADDRESS_SIZE);
+        if (nextRSP == null) {
             return null;
         }
-
-      Address nextRBP = rbp.getAddressAt( 0 * ADDRESS_SIZE);
-      if (nextRBP == null || nextRBP.lessThanOrEqual(rbp)) {
+        Address nextRBP = fp != null ? fp : rbp.getAddressAt(0);
+        if (nextRBP == null) {
             return null;
         }
-      Address nextPC  = rbp.getAddressAt( 1 * ADDRESS_SIZE);
+        Address nextPC = pc != null ? pc : rbp.getAddressAt(ADDRESS_SIZE);
         if (nextPC == null) {
             return null;
         }
-      return new BsdAMD64CFrame(dbg, nextRBP, nextPC);
+        return new BsdAMD64CFrame(dbg, nextRSP, nextRBP, nextPC);
+    }
+
+    @Override
+    public Frame toFrame() {
+        return new AMD64Frame(rsp, rbp, rip);
     }
 
     // package/class internals only
     private static final int ADDRESS_SIZE = 8;
+    private Address rsp;
     private Address rip;
     private Address rbp;
     private BsdDebugger dbg;
diff --git a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Float16.java b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Float16.java
index 335cbef2331..dfdd2a304c6 100644
--- a/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Float16.java
+++ b/src/jdk.incubator.vector/share/classes/jdk/incubator/vector/Float16.java
@@ -260,6 +260,9 @@ public final class Float16
      * Float#toString(float)} in the handling of special values
      * (signed zeros, infinities, and NaN) and the generation of a
      * decimal string that will convert back to the argument value.
+     * However, the range for plain notation is defined to be the interval
+     * [10<sup>-3</sup>, 10<sup>3</sup>) rather than the interval used
+     * for {@code float} and {@code double}.
      *
      * @param f16 the {@code Float16} to be converted.
     * @return a string representation of the argument.
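To make the documented difference concrete, here is a hypothetical usage sketch. The method shapes `Float16.valueOf(double)` and `Float16.toString(Float16)` and the exact output strings are my assumptions based on the new bound, not verified output:

```java
import jdk.incubator.vector.Float16;

public class Float16ToStringDemo {
    public static void main(String[] args) {
        // 2048 is exactly representable in both formats.
        // float uses plain notation in [1e-3, 1e7), so this stays plain:
        System.out.println(Float.toString(2048.0f));                    // "2048.0"
        // Float16 uses plain notation only in [1e-3, 1e3), so the same
        // value should render in computerized scientific notation:
        System.out.println(Float16.toString(Float16.valueOf(2048.0)));  // assumed "2.048E3"
    }
}
```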
@@ -2106,7 +2109,7 @@ public final class Float16
         int h = (int) (f * 107_375L >>> 30);
         int l = f - 10_000 * h;
 
-        if (0 < e && e <= 7) {
+        if (0 < e && e <= 3) {
             return toChars1(h, l, e);
         }
         if (-3 < e && e <= 0) {
diff --git a/test/docs/ProblemList.txt b/test/docs/ProblemList.txt
index 4df8bbcc53c..83693eacbd1 100644
--- a/test/docs/ProblemList.txt
+++ b/test/docs/ProblemList.txt
@@ -41,5 +41,3 @@
 #############################################################################
 
 jdk/javadoc/doccheck/checks/jdkCheckLinks.java 8370249 generic-all
-jdk/javadoc/doccheck/checks/jdkCheckHtml.java 8370970 generic-all
-jdk/javadoc/doccheck/checks/jdkDoctypeBadcharsCheck.java 8370970 generic-all
diff --git a/test/hotspot/gtest/opto/test_rangeinference.cpp b/test/hotspot/gtest/opto/test_rangeinference.cpp
index 1a62941a486..61a9ff7fb70 100644
--- a/test/hotspot/gtest/opto/test_rangeinference.cpp
+++ b/test/hotspot/gtest/opto/test_rangeinference.cpp
@@ -25,15 +25,16 @@
 #include "opto/rangeinference.hpp"
 #include "opto/type.hpp"
 #include "runtime/os.hpp"
-#include "utilities/intn_t.hpp"
 #include "unittest.hpp"
+#include "utilities/intn_t.hpp"
+#include "utilities/rbTree.hpp"
 
+#include <array>
+#include <limits>
+#include <type_traits>
 
 template <class U>
-static U uniform_random();
-
-template <>
-juint uniform_random<juint>() {
-  return os::random();
+static U uniform_random() {
+  return U(juint(os::random()));
 }
 
 template <>
@@ -201,7 +202,7 @@ static void test_canonicalize_constraints_random() {
   }
 }
 
-TEST_VM(opto, canonicalize_constraints) {
+TEST(opto, canonicalize_constraints) {
   test_canonicalize_constraints_trivial();
   test_canonicalize_constraints_exhaustive<intn_t<1>, uintn_t<1>>();
   test_canonicalize_constraints_exhaustive<intn_t<2>, uintn_t<2>>();
@@ -212,3 +213,413 @@ TEST(opto, canonicalize_constraints) {
   test_canonicalize_constraints_random<jint, juint>();
   test_canonicalize_constraints_random<jlong, julong>();
 }
+
+// Implementations of TypeIntMirror methods for testing purposes
+template <class S, class U>
+const TypeIntMirror<S, U>* TypeIntMirror<S, U>::operator->() const {
+  return this;
+}
+
+template <class S, class U>
+TypeIntMirror<S, U> TypeIntMirror<S, U>::meet(const TypeIntMirror<S, U>& o) const {
+  return TypeIntHelper::int_type_union(*this, o);
+}
+
+template <class S, class U>
+bool TypeIntMirror<S, U>::contains(U u) const {
+  S s = S(u);
+  return s >= _lo && s <= _hi && u >= _ulo && u <= _uhi && _bits.is_satisfied_by(u);
+}
+
+template <class S, class U>
+bool TypeIntMirror<S, U>::contains(const TypeIntMirror<S, U>& o) const {
+  return TypeIntHelper::int_type_is_subset(*this, o);
+}
+
+template <class S, class U>
+bool TypeIntMirror<S, U>::operator==(const TypeIntMirror<S, U>& o) const {
+  return TypeIntHelper::int_type_is_equal(*this, o);
+}
+
+template <class S, class U>
+template <class S1, class U1>
+TypeIntMirror<S1, U1> TypeIntMirror<S, U>::cast() const {
+  static_assert(std::is_same_v<TypeIntMirror<S1, U1>, TypeIntMirror<S, U>>);
+  return *this;
+}
+
+// The number of TypeIntMirror instances for integral types with a few bits. These values are
+// calculated once and written down for usage in constexpr contexts.
+template <class CTP>
+static constexpr size_t all_instances_size() {
+  using U = decltype(CTP::_ulo);
+  constexpr juint max_unsigned = juint(std::numeric_limits<U>::max());
+  if constexpr (max_unsigned == 1U) {
+    // 1 bit
+    return 3;
+  } else if constexpr (max_unsigned == 3U) {
+    // 2 bits
+    return 15;
+  } else if constexpr (max_unsigned == 7U) {
+    // 3 bits
+    return 134;
+  } else {
+    // 4 bits
+    static_assert(max_unsigned == 15U);
+    // For more than 4 bits, the number of instances is too large and it is not realistic to
+    // compute all of them.
+
+// The number of TypeIntMirror instances for integral types with a small number of bits. These
+// values are calculated once and written down here so they can be used in constexpr contexts.
+template <class CTP>
+static constexpr size_t all_instances_size() {
+  using U = decltype(CTP::_ulo);
+  constexpr juint max_unsigned = juint(std::numeric_limits<U>::max());
+  if constexpr (max_unsigned == 1U) {
+    // 1 bit
+    return 3;
+  } else if constexpr (max_unsigned == 3U) {
+    // 2 bits
+    return 15;
+  } else if constexpr (max_unsigned == 7U) {
+    // 3 bits
+    return 134;
+  } else {
+    // 4 bits
+    static_assert(max_unsigned == 15U);
+    // For more than 4 bits, the number of instances is too large and it is not realistic to
+    // compute all of them.
+    return 1732;
+  }
+}
+
+template <class CTP>
+static std::array<CTP, all_instances_size<CTP>()> compute_all_instances() {
+  using S = decltype(CTP::_lo);
+  using U = decltype(CTP::_ulo);
+
+  class CTPComparator {
+  public:
+    static RBTreeOrdering cmp(const CTP& x, const RBNode<CTP, int>* node) {
+      // Quick helper for the tediousness below
+      auto f = [](auto x, auto y) {
+        assert(x != y, "we only handle lt and gt cases");
+        return x < y ? RBTreeOrdering::LT : RBTreeOrdering::GT;
+      };
+
+      const CTP& y = node->key();
+      if (x._lo != y._lo) {
+        return f(x._lo, y._lo);
+      } else if (x._hi != y._hi) {
+        return f(x._hi, y._hi);
+      } else if (x._ulo != y._ulo) {
+        return f(x._ulo, y._ulo);
+      } else if (x._uhi != y._uhi) {
+        return f(x._uhi, y._uhi);
+      } else if (x._bits._zeros != y._bits._zeros) {
+        return f(x._bits._zeros, y._bits._zeros);
+      } else if (x._bits._ones != y._bits._ones) {
+        return f(x._bits._ones, y._bits._ones);
+      } else {
+        return RBTreeOrdering::EQ;
+      }
+    }
+  };
+
+  RBTreeCHeap<CTP, int, CTPComparator, mtTest> collector;
+  for (jint lo = jint(std::numeric_limits<S>::min()); lo <= jint(std::numeric_limits<S>::max()); lo++) {
+    for (jint hi = lo; hi <= jint(std::numeric_limits<S>::max()); hi++) {
+      for (juint ulo = 0; ulo <= juint(std::numeric_limits<U>::max()); ulo++) {
+        for (juint uhi = ulo; uhi <= juint(std::numeric_limits<U>::max()); uhi++) {
+          for (juint zeros = 0; zeros <= juint(std::numeric_limits<U>::max()); zeros++) {
+            for (juint ones = 0; ones <= juint(std::numeric_limits<U>::max()); ones++) {
+              TypeIntPrototype<S, U> t{{S(lo), S(hi)}, {U(ulo), U(uhi)}, {U(zeros), U(ones)}};
+              auto canonicalized_t = t.canonicalize_constraints();
+              if (canonicalized_t.empty()) {
+                continue;
+              }
+
+              TypeIntPrototype<S, U> ct = canonicalized_t._data;
+              collector.upsert(CTP{ct._srange._lo, ct._srange._hi, ct._urange._lo, ct._urange._hi, ct._bits}, 0);
+            }
+          }
+        }
+      }
+    }
+  }
+
+  assert(collector.size() == all_instances_size<CTP>(), "unexpected size of all_instances, expected %d, actual %d", jint(all_instances_size<CTP>()), jint(collector.size()));
+  std::array<CTP, all_instances_size<CTP>()> res;
+  size_t idx = 0;
+  collector.visit_in_order([&](RBNode<CTP, int>* node) {
+    res[idx] = node->key();
+    idx++;
+    return true;
+  });
+  return res;
+}
+
+template <class CTP>
+static const std::array<CTP, all_instances_size<CTP>()>& all_instances() {
+  static std::array<CTP, all_instances_size<CTP>()> res = compute_all_instances<CTP>();
+  static_assert(std::is_trivially_destructible_v<CTP>);
+  return res;
+}
+
+// Check the correctness, that is, if v1 is an element of input1, v2 is an element of input2, then
+// op(v1, v2) must be an element of infer(input1, input2). This version does the check exhaustively
+// on all elements of input1 and input2.
+template <class InputType, class Operation, class Inference>
+static void test_binary_instance_correctness_exhaustive(Operation op, Inference infer, const InputType& input1, const InputType& input2) {
+  using S = std::remove_const_t<decltype(input1->_lo)>;
+  using U = std::remove_const_t<decltype(input1->_ulo)>;
+  InputType result = infer(input1, input2);
+
+  for (juint v1 = juint(std::numeric_limits<U>::min()); v1 <= juint(std::numeric_limits<U>::max()); v1++) {
+    if (!input1.contains(U(v1))) {
+      continue;
+    }
+
+    for (juint v2 = juint(std::numeric_limits<U>::min()); v2 <= juint(std::numeric_limits<U>::max()); v2++) {
+      if (!input2.contains(U(v2))) {
+        continue;
+      }
+
+      U r = op(U(v1), U(v2));
+      ASSERT_TRUE(result.contains(r));
+    }
+  }
+}
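
The quantifier in the correctness comment above is easiest to see on a toy domain. The following standalone sketch (hypothetical, not HotSpot code) uses a plain unsigned interval as the "type" and interval addition that gives up on overflow as the inference; the nested loops are the same check the driver above performs:

// Toy demonstration of the correctness property: for every v1 in input1 and
// v2 in input2, op(v1, v2) must fall inside infer(input1, input2).
#include <cassert>
#include <cstdint>

struct Interval {
  uint8_t lo, hi;
  bool contains(uint8_t v) const { return lo <= v && v <= hi; }
};

// Sound inference for 8-bit wrapping addition: if the upper bounds may wrap,
// fall back to the full range instead of returning a wrapped (unsound) interval.
static Interval infer_add(Interval a, Interval b) {
  if (unsigned(a.hi) + unsigned(b.hi) > 0xFF) {
    return {0x00, 0xFF};
  }
  return {uint8_t(a.lo + b.lo), uint8_t(a.hi + b.hi)};
}

int main() {
  Interval a{10, 20}, b{5, 250};
  Interval r = infer_add(a, b);
  for (unsigned v1 = a.lo; v1 <= a.hi; v1++) {
    for (unsigned v2 = b.lo; v2 <= b.hi; v2++) {
      assert(r.contains(uint8_t(v1 + v2)));  // every concrete result stays inside
    }
  }
  return 0;
}
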
+
+// Check the correctness, that is, if v1 is an element of input1, v2 is an element of input2, then
+// op(v1, v2) must be an element of infer(input1, input2). This version does the check randomly on
+// a number of elements in input1 and input2.
+template <class InputType, class Operation, class Inference>
+static void test_binary_instance_correctness_samples(Operation op, Inference infer, const InputType& input1, const InputType& input2) {
+  using U = std::remove_const_t<decltype(input1->_ulo)>;
+  auto result = infer(input1, input2);
+
+  constexpr size_t sample_count = 6;
+  U input1_samples[sample_count] {U(input1._lo), U(input1._hi), input1._ulo, input1._uhi, input1._ulo, input1._ulo};
+  U input2_samples[sample_count] {U(input2._lo), U(input2._hi), input2._ulo, input2._uhi, input2._ulo, input2._ulo};
+
+  auto random_sample = [](U* samples, const InputType& input) {
+    constexpr size_t max_tries = 100;
+    constexpr size_t start_random_idx = 4;
+    for (size_t tries = 0, idx = start_random_idx; tries < max_tries && idx < sample_count; tries++) {
+      U n = uniform_random<U>();
+      if (input.contains(n)) {
+        samples[idx] = n;
+        idx++;
+      }
+    }
+  };
+  random_sample(input1_samples, input1);
+  random_sample(input2_samples, input2);
+
+  for (size_t i = 0; i < sample_count; i++) {
+    for (size_t j = 0; j < sample_count; j++) {
+      U r = op(input1_samples[i], input2_samples[j]);
+      ASSERT_TRUE(result.contains(r));
+    }
+  }
+}
+
+// Check the monotonicity, that is, if input1 is a subset of super1, input2 is a subset of super2,
+// then infer(input1, input2) must be a subset of infer(super1, super2). This version does the
+// check exhaustively on all supersets of input1 and input2.
+template <class InputType, class Inference>
+static void test_binary_instance_monotonicity_exhaustive(Inference infer, const InputType& input1, const InputType& input2) {
+  InputType result = infer(input1, input2);
+
+  for (const InputType& super1 : all_instances<InputType>()) {
+    if (!super1.contains(input1) || super1 == input1) {
+      continue;
+    }
+
+    for (const InputType& super2 : all_instances<InputType>()) {
+      if (!super2.contains(input2) || super2 == input2) {
+        continue;
+      }
+
+      ASSERT_TRUE(infer(input1, super2).contains(result));
+      ASSERT_TRUE(infer(super1, input2).contains(result));
+      ASSERT_TRUE(infer(super1, super2).contains(result));
+    }
+  }
+}
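
Monotonicity is a separate obligation from correctness: widening an input may only widen the inferred output. A standalone sketch of the same property on just the known-bits component (hypothetical names, not HotSpot code), using the standard bit inference for AND:

// Toy demonstration of monotonicity on known bits. A result bit of AND is
// known-0 if it is 0 in either input, and known-1 only if it is 1 in both.
#include <cassert>
#include <cstdint>

struct KnownBits {
  uint8_t zeros;  // bits known to be 0
  uint8_t ones;   // bits known to be 1
  bool contains(uint8_t v) const { return (v & zeros) == 0 && (v & ones) == ones; }
  // this is a subset of o if every constraint of o also holds here
  bool subset_of(const KnownBits& o) const {
    return (zeros & o.zeros) == o.zeros && (ones & o.ones) == o.ones;
  }
};

static KnownBits infer_and(KnownBits a, KnownBits b) {
  return {uint8_t(a.zeros | b.zeros), uint8_t(a.ones & b.ones)};
}

int main() {
  KnownBits a{0x0F, 0x30};       // low nibble 0, bits 4-5 set
  KnownBits superA{0x0F, 0x00};  // drops the ones constraint, so a is a subset
  KnownBits b{0x01, 0x80};
  assert(a.subset_of(superA));
  // Correctness spot check on concrete members of a and b
  assert(infer_and(a, b).contains(uint8_t(0x30 & 0x80)));
  // Monotonicity: widening an input can only widen the inference result
  assert(infer_and(a, b).subset_of(infer_and(superA, b)));
  return 0;
}

Dropping a constraint from an input drops (never adds) constraints in the result, which is exactly the subset relation the drivers below assert.
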
+
+// Check the monotonicity, that is, if input1 is a subset of super1, input2 is a subset of super2,
+// then infer(input1, input2) must be a subset of infer(super1, super2). This version does the
+// check randomly on a number of supersets of input1 and input2.
+template <class InputType, class Inference>
+static void test_binary_instance_monotonicity_samples(Inference infer, const InputType& input1, const InputType& input2) {
+  using S = std::remove_const_t<decltype(input1->_lo)>;
+  using U = std::remove_const_t<decltype(input1->_ulo)>;
+  auto result = infer(input1, input2);
+
+  // The set that is a superset of all other sets
+  InputType universe = InputType{std::numeric_limits<S>::min(), std::numeric_limits<S>::max(), U(0), U(-1), {U(0), U(0)}};
+  ASSERT_TRUE(infer(universe, input2).contains(result));
+  ASSERT_TRUE(infer(input1, universe).contains(result));
+  ASSERT_TRUE(infer(universe, universe).contains(result));
+
+  auto random_superset = [](const InputType& input) {
+    S lo = MIN2(input->_lo, S(uniform_random<U>()));
+    S hi = MAX2(input->_hi, S(uniform_random<U>()));
+    U ulo = MIN2(input->_ulo, uniform_random<U>());
+    U uhi = MAX2(input->_uhi, uniform_random<U>());
+    U zeros = input->_bits._zeros & uniform_random<U>();
+    U ones = input->_bits._ones & uniform_random<U>();
+    InputType super = InputType::make(TypeIntPrototype<S, U>{{lo, hi}, {ulo, uhi}, {zeros, ones}}, 0);
+    assert(super.contains(input), "impossible");
+    return super;
+  };
+
+  InputType super1 = random_superset(input1);
+  InputType super2 = random_superset(input2);
+  ASSERT_TRUE(infer(super1, input2).contains(result));
+  ASSERT_TRUE(infer(input1, super2).contains(result));
+  ASSERT_TRUE(infer(super1, super2).contains(result));
+}
+
+// Verify the correctness and monotonicity of an inference function by exhaustively analyzing all
+// instances of InputType
+template <class InputType, class Operation, class Inference>
+static void test_binary_exhaustive(Operation op, Inference infer) {
+  for (const InputType& input1 : all_instances<InputType>()) {
+    for (const InputType& input2 : all_instances<InputType>()) {
+      test_binary_instance_correctness_exhaustive(op, infer, input1, input2);
+      if (all_instances<InputType>().size() < 100) {
+        // This effectively covers the cases up to uintn_t<2>
+        test_binary_instance_monotonicity_exhaustive(infer, input1, input2);
+      } else {
+        // This effectively covers the cases of uintn_t<3>
+        test_binary_instance_monotonicity_samples(infer, input1, input2);
+      }
+    }
+  }
+}
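
For context, a hypothetical caller of these drivers might look like the sketch below. It is illustrative only and not part of the patch: the test name, the AND operation, and the deliberately pessimistic inference are invented. A constant universe-type inference is trivially correct (it contains every result) and trivially monotonic (it never shrinks), so it should pass both checks; the real tests would presumably plug in the actual C2 inference being verified.

// Hypothetical wiring of the drivers above; not from the patch.
TEST(opto, test_binary_sketch) {
  using S = intn_t<2>;
  using U = uintn_t<2>;
  using CTP = TypeIntMirror<S, U>;
  // Operation under test: bitwise AND, routed through juint since that
  // conversion is the one the surrounding code demonstrably uses.
  auto op = [](U a, U b) { return U(juint(a) & juint(b)); };
  // Placeholder inference: always return the universe type.
  auto infer = [](const CTP& t1, const CTP& t2) {
    return CTP::make(TypeIntPrototype<S, U>{{std::numeric_limits<S>::min(), std::numeric_limits<S>::max()},
                                            {U(0), U(-1)}, {U(0), U(0)}}, 0);
  };
  test_binary_exhaustive<CTP>(op, infer);
}
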
+
+// Verify the correctness and monotonicity of an inference function by randomly sampling instances
+// of InputType
+template <class InputType, class Operation, class Inference>
+static void test_binary_random(Operation op, Inference infer) {
+  using S = std::remove_const_t<decltype(InputType::_lo)>;
+  using U = std::remove_const_t<decltype(InputType::_ulo)>;
+
+  constexpr size_t sample_count = 100;
+  InputType samples[sample_count];
+
+  // Fill with {0}
+  for (size_t i = 0; i < sample_count; i++) {
+    samples[i] = InputType::make(TypeIntPrototype<S, U>{{S(0), S(0)}, {U(0), U(0)}, {U(0), U(0)}}, 0);
+  }
+
+  // {1}
+  samples[1] = InputType::make(TypeIntPrototype<S, U>{{S(1), S(1)}, {U(1), U(1)}, {U(0), U(0)}}, 0);
+  // {-1}
+  samples[2] = InputType::make(TypeIntPrototype<S, U>{{S(-1), S(-1)}, {U(-1), U(-1)}, {U(0), U(0)}}, 0);
+  // {0, 1}
+  samples[3] = InputType::make(TypeIntPrototype<S, U>{{S(0), S(1)}, {U(0), U(1)}, {U(0), U(0)}}, 0);
+  // {-1, 0, 1}
+  samples[4] = InputType::make(TypeIntPrototype<S, U>{{S(-1), S(1)}, {U(0), U(-1)}, {U(0), U(0)}}, 0);
+  // {-1, 1}
+  samples[5] = InputType::make(TypeIntPrototype<S, U>{{S(-1), S(1)}, {U(1), U(-1)}, {U(0), U(0)}}, 0);
+  // {0, 1, 2}
+  samples[6] = InputType::make(TypeIntPrototype<S, U>{{S(0), S(2)}, {U(0), U(2)}, {U(0), U(0)}}, 0);
+  // {0, 2}
+  samples[7] = InputType::make(TypeIntPrototype<S, U>{{S(0), S(2)}, {U(0), U(2)}, {U(1), U(0)}}, 0);
+  // [min_signed, max_signed]
+  samples[8] = InputType::make(TypeIntPrototype<S, U>{{std::numeric_limits<S>::min(), std::numeric_limits<S>::max()}, {U(0), U(-1)}, {U(0), U(0)}}, 0);
+  // [0, max_signed]
+  samples[9] = InputType::make(TypeIntPrototype<S, U>{{S(0), std::numeric_limits<S>::max()}, {U(0), U(-1)}, {U(0), U(0)}}, 0);
+  // [min_signed, 0)
+  samples[10] = InputType::make(TypeIntPrototype<S, U>{{std::numeric_limits<S>::min(), S(-1)}, {U(0), U(-1)}, {U(0), U(0)}}, 0);
+
+  constexpr size_t max_tries = 1000;
+  constexpr size_t start_random_idx = 11;
+  for (size_t tries = 0, idx = start_random_idx; tries < max_tries && idx < sample_count; tries++) {
+    // Try to have lo < hi
+    S signed_bound1 = S(uniform_random<U>());
+    S signed_bound2 = S(uniform_random<U>());
+    S lo = MIN2(signed_bound1, signed_bound2);
+    S hi = MAX2(signed_bound1, signed_bound2);
+
+    // Try to have ulo < uhi
+    U unsigned_bound1 = uniform_random<U>();
+    U unsigned_bound2 = uniform_random<U>();
+    U ulo = MIN2(unsigned_bound1, unsigned_bound2);
+    U uhi = MAX2(unsigned_bound1, unsigned_bound2);
+
+    // Try to have (zeros & ones) == 0
+    U zeros = uniform_random<U>();
+    U ones = uniform_random<U>();
+    U common = zeros & ones;
+    zeros = zeros ^ common;
+    ones = ones ^ common;
+
+    TypeIntPrototype<S, U> t{{lo, hi}, {ulo, uhi}, {zeros, ones}};
+    auto canonicalized_t = t.canonicalize_constraints();
+    if (canonicalized_t.empty()) {
+      continue;
+    }
+
+    samples[idx] = TypeIntMirror<S, U>{canonicalized_t._data._srange._lo, canonicalized_t._data._srange._hi,
+                                       canonicalized_t._data._urange._lo, canonicalized_t._data._urange._hi,
+                                       canonicalized_t._data._bits};
+    idx++;
+  }
+
+  for (size_t i = 0; i < sample_count; i++) {
+    for (size_t j = 0; j < sample_count; j++) {
+      test_binary_instance_correctness_samples(op, infer, samples[i], samples[j]);
+      test_binary_instance_monotonicity_samples(infer, samples[i], samples[j]);
+    }
+  }
+}
+
+template