*
- * will produce the following HTML for a docs build configured for Java SE 12.
+ * will produce the following HTML, depending on the file containing
+ * the tag.
*
*
All unit tests for a class from foo/bar/baz.cpp
-should be placed foo/bar/test_baz.cpp in
-hotspot/test/native/ directory. Having all tests for a
-class in one file is a common practice for unit tests, it helps to see
-all existing tests at once, share functions and/or resources without
-losing encapsulation.
+should be placed foo/bar/test_baz.cpp in the
+test/hotspot/gtest/ directory. Having all tests for a class
+in one file is a common practice for unit tests, it helps to see all
+existing tests at once, share functions and/or resources without losing
+encapsulation.
For tests which test more than one class, directory hierarchy
should be the same as product hierarchy, and file name should reflect
the name of the tested subsystem/functionality. For example, if a
@@ -319,7 +319,7 @@ placed in gc/g1 directory.
Please note that framework prepends directory name to a test group
name. For example, if TEST(foo, check_this) and
TEST(bar, check_that) are defined in
-hotspot/test/native/gc/shared/test_foo.cpp file, they will
+test/hotspot/gtest/gc/shared/test_foo.cpp file, they will
be reported as gc/shared/foo::check_this and
gc/shared/bar::check_that.
Test names
diff --git a/doc/hotspot-unit-tests.md b/doc/hotspot-unit-tests.md
index 69a95307109..f59e6084910 100644
--- a/doc/hotspot-unit-tests.md
+++ b/doc/hotspot-unit-tests.md
@@ -241,7 +241,7 @@ recognize your tests.
Test file location should reflect a location of the tested part of the product.
* All unit tests for a class from `foo/bar/baz.cpp` should be placed
-`foo/bar/test_baz.cpp` in `hotspot/test/native/` directory. Having all
+`foo/bar/test_baz.cpp` in the `test/hotspot/gtest/` directory. Having all
tests for a class in one file is a common practice for unit tests, it
helps to see all existing tests at once, share functions and/or
resources without losing encapsulation.
@@ -254,7 +254,7 @@ sub-system under tests belongs to `gc/g1`, tests should be placed in
Please note that framework prepends directory name to a test group
name. For example, if `TEST(foo, check_this)` and `TEST(bar, check_that)`
-are defined in `hotspot/test/native/gc/shared/test_foo.cpp` file, they
+are defined in `test/hotspot/gtest/gc/shared/test_foo.cpp` file, they
will be reported as `gc/shared/foo::check_this` and
`gc/shared/bar::check_that`.
From 10220ed06ea452083693406113107484fce40275 Mon Sep 17 00:00:00 2001
From: Kim Barrett
Date: Thu, 13 Nov 2025 08:43:59 +0000
Subject: [PATCH 029/418] 8367013: Add Atomic to package/replace idiom of
volatile var plus AtomicAccess:: operations
Reviewed-by: stefank, aboldtch, jsjolen
---
.../shared/stringdedup/stringDedupTable.cpp | 34 +-
.../shared/stringdedup/stringDedupTable.hpp | 9 +-
src/hotspot/share/runtime/atomic.hpp | 546 +++++++++++++++
.../share/utilities/globalDefinitions.hpp | 8 +
.../utilities/singleWriterSynchronizer.cpp | 22 +-
.../utilities/singleWriterSynchronizer.hpp | 16 +-
test/hotspot/gtest/runtime/test_atomic.cpp | 640 ++++++++++++++++++
7 files changed, 1236 insertions(+), 39 deletions(-)
create mode 100644 src/hotspot/share/runtime/atomic.hpp
create mode 100644 test/hotspot/gtest/runtime/test_atomic.cpp
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
index cfa276c0de0..6682993766d 100644
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.cpp
@@ -245,20 +245,20 @@ void StringDedup::Table::num_dead_callback(size_t num_dead) {
// Lock while modifying dead count and state.
MonitorLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag);
- switch (AtomicAccess::load(&_dead_state)) {
+ switch (_dead_state.load_relaxed()) {
case DeadState::good:
- AtomicAccess::store(&_dead_count, num_dead);
+ _dead_count.store_relaxed(num_dead);
break;
case DeadState::wait1:
// Set count first, so dedup thread gets this or a later value if it
// sees the good state.
- AtomicAccess::store(&_dead_count, num_dead);
- AtomicAccess::release_store(&_dead_state, DeadState::good);
+ _dead_count.store_relaxed(num_dead);
+ _dead_state.release_store(DeadState::good);
break;
case DeadState::wait2:
- AtomicAccess::release_store(&_dead_state, DeadState::wait1);
+ _dead_state.release_store(DeadState::wait1);
break;
case DeadState::cleaning:
@@ -423,8 +423,10 @@ size_t StringDedup::Table::_number_of_entries = 0;
size_t StringDedup::Table::_grow_threshold;
StringDedup::Table::CleanupState* StringDedup::Table::_cleanup_state = nullptr;
bool StringDedup::Table::_need_bucket_shrinking = false;
-volatile size_t StringDedup::Table::_dead_count = 0;
-volatile StringDedup::Table::DeadState StringDedup::Table::_dead_state = DeadState::good;
+Atomic<size_t> StringDedup::Table::_dead_count{};
+
+Atomic<StringDedup::Table::DeadState>
+StringDedup::Table::_dead_state{DeadState::good};
void StringDedup::Table::initialize_storage() {
assert(_table_storage == nullptr, "storage already created");
@@ -477,19 +479,19 @@ void StringDedup::Table::add(TableValue tv, uint hash_code) {
}
bool StringDedup::Table::is_dead_count_good_acquire() {
- return AtomicAccess::load_acquire(&_dead_state) == DeadState::good;
+ return _dead_state.load_acquire() == DeadState::good;
}
// Should be consistent with cleanup_start_if_needed.
bool StringDedup::Table::is_grow_needed() {
return is_dead_count_good_acquire() &&
- ((_number_of_entries - AtomicAccess::load(&_dead_count)) > _grow_threshold);
+ ((_number_of_entries - _dead_count.load_relaxed()) > _grow_threshold);
}
// Should be consistent with cleanup_start_if_needed.
bool StringDedup::Table::is_dead_entry_removal_needed() {
return is_dead_count_good_acquire() &&
- Config::should_cleanup_table(_number_of_entries, AtomicAccess::load(&_dead_count));
+ Config::should_cleanup_table(_number_of_entries, _dead_count.load_relaxed());
}
StringDedup::Table::TableValue
@@ -651,7 +653,7 @@ bool StringDedup::Table::cleanup_start_if_needed(bool grow_only, bool force) {
// If dead count is good then we can read it once and use it below
// without needing any locking. The recorded count could increase
// after the read, but that's okay.
- size_t dead_count = AtomicAccess::load(&_dead_count);
+ size_t dead_count = _dead_count.load_relaxed();
// This assertion depends on dead state tracking. Otherwise, concurrent
// reference processing could detect some, but a cleanup operation could
// remove them before they are reported.
@@ -675,8 +677,8 @@ bool StringDedup::Table::cleanup_start_if_needed(bool grow_only, bool force) {
void StringDedup::Table::set_dead_state_cleaning() {
MutexLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag);
- AtomicAccess::store(&_dead_count, size_t(0));
- AtomicAccess::store(&_dead_state, DeadState::cleaning);
+ _dead_count.store_relaxed(0);
+ _dead_state.store_relaxed(DeadState::cleaning);
}
bool StringDedup::Table::start_resizer(bool grow_only, size_t number_of_entries) {
@@ -710,7 +712,7 @@ void StringDedup::Table::cleanup_end() {
delete _cleanup_state;
_cleanup_state = nullptr;
MutexLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag);
- AtomicAccess::store(&_dead_state, DeadState::wait2);
+ _dead_state.store_relaxed(DeadState::wait2);
}
void StringDedup::Table::verify() {
@@ -732,8 +734,8 @@ void StringDedup::Table::log_statistics() {
int dead_state;
{
MutexLocker ml(StringDedup_lock, Mutex::_no_safepoint_check_flag);
-    dead_count = _dead_count;
-    dead_state = static_cast<int>(_dead_state);
+    dead_count = _dead_count.load_relaxed();
+    dead_state = static_cast<int>(_dead_state.load_relaxed());
}
log_debug(stringdedup)("Table: %zu values in %zu buckets, %zu dead (%d)",
_number_of_entries, _number_of_buckets,
diff --git a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.hpp b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.hpp
index a163319e84c..4c57339f2d5 100644
--- a/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.hpp
+++ b/src/hotspot/share/gc/shared/stringdedup/stringDedupTable.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -30,6 +30,7 @@
#include "memory/allStatic.hpp"
#include "oops/typeArrayOop.hpp"
#include "oops/weakHandle.hpp"
+#include "runtime/atomic.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@@ -86,9 +87,9 @@ private:
static CleanupState* _cleanup_state;
static bool _need_bucket_shrinking;
// These are always written while holding StringDedup_lock, but may be
- // read by the dedup thread without holding the lock lock.
- static volatile size_t _dead_count;
- static volatile DeadState _dead_state;
+ // read by the dedup thread without holding the lock.
+  static Atomic<size_t> _dead_count;
+  static Atomic<DeadState> _dead_state;
static uint compute_hash(typeArrayOop obj);
static size_t hash_to_index(uint hash_code);
diff --git a/src/hotspot/share/runtime/atomic.hpp b/src/hotspot/share/runtime/atomic.hpp
new file mode 100644
index 00000000000..5b4d7d8659f
--- /dev/null
+++ b/src/hotspot/share/runtime/atomic.hpp
@@ -0,0 +1,546 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_RUNTIME_ATOMIC_HPP
+#define SHARE_RUNTIME_ATOMIC_HPP
+
+#include "cppstdlib/type_traits.hpp"
+#include "metaprogramming/enableIf.hpp"
+#include "metaprogramming/primitiveConversions.hpp"
+#include "runtime/atomicAccess.hpp"
+#include "utilities/globalDefinitions.hpp"
+
+// Atomic<T> is used to declare a variable of type T with atomic access.
+//
+// The following value types T are supported:
+//
+// (1) Integers with sizeof the same as sizeof int32_t or int64_t. These are
+// referred to as atomic integers below.
+//
+// (2) Integers with sizeof 1, including bool. These are referred to as atomic
+// bytes below.
+//
+// (3) Pointers. These are referred to as atomic pointers below.
+//
+// (4) Types with a PrimitiveConversions::Translate definition. These are referred
+// to as atomic translated types below. The atomic value for the associated
+// decayed type is referred to as the atomic decayed type.
+//
+// The interface provided by an Atomic<T> depends on the value type.
+//
+// If T is the value type, v is an Atomic<T>, x and y are instances of T, i is
+// an integer, and o is an atomic_memory_order, then:
+//
+// (1) All Atomic types provide
+//
+// nested types:
+// ValueType -> T
+//
+// special functions:
+// explicit constructor(T)
+// noncopyable
+// destructor
+//
+// static member functions:
+// value_offset_in_bytes() -> int // constexpr
+// value_size_in_bytes() -> int // constexpr
+// These provide the compiler and the like with direct access to the
+// value field. They shouldn't be used directly to bypass normal access.
+//
+// member functions:
+// v.load_relaxed() -> T
+// v.load_acquire() -> T
+// v.store_relaxed(x) -> void
+// v.release_store(x) -> void
+// v.release_store_fence(x) -> void
+// v.compare_exchange(x, y [, o]) -> T
+//
+// (2) All atomic types are default constructible.
+//
+// Default construction of an atomic integer or atomic byte initializes the
+// value to zero. Default construction of an atomic pointer initializes the
+// value to null.
+//
+// If the value type of an atomic translated type is default constructible,
+// then default construction of the atomic translated type will initialize the
+// value to a default constructed object of the value type. Otherwise, the
+// value will be initialized as if by translating the value that would be
+// provided by default constructing an atomic type for the value type's
+// decayed type.
+
+// (3) Atomic pointers and atomic integers additionally provide
+//
+// member functions:
+// v.exchange(x [, o]) -> T
+// v.add_then_fetch(i [, o]) -> T
+// v.sub_then_fetch(i [, o]) -> T
+// v.fetch_then_add(i [, o]) -> T
+// v.fetch_then_sub(i [, o]) -> T
+//
+// sizeof(i) must not exceed sizeof(T). For atomic integers, both T and the
+// type of i must be signed, or both must be unsigned. Atomic pointers perform
+// element arithmetic.
+//
+// (4) An atomic translated type additionally provides the exchange
+// function if its associated atomic decayed type provides that function.
+//
+// (5) Atomic integers additionally provide
+//
+// member functions:
+// v.and_then_fetch(x [, o]) -> T
+// v.or_then_fetch(x [, o]) -> T
+// v.xor_then_fetch(x [, o]) -> T
+// v.fetch_then_and(x [, o]) -> T
+// v.fetch_then_or(x [, o]) -> T
+// v.fetch_then_xor(x [, o]) -> T
+//
+// (6) Atomic pointers additionally provide
+//
+// nested types:
+//   ElementType -> std::remove_pointer_t<T>
+//
+// Some of the function names provided by (some variants of) Atomic differ
+// from the corresponding functions provided by the AtomicAccess class. In
+// some cases this is done for regularity; there are some inconsistencies in
+// the AtomicAccess names. Some of the naming choices are also to make them
+// stand out a little more when used in surrounding non-atomic code. Without
+// the "AtomicAccess::" qualifier, some of those names are easily overlooked.
+//
+// Atomic bytes don't provide exchange(). This is because that operation
+// hasn't been implemented for 1 byte values. That could be changed if needed.
+//
+// Atomic for 2 byte integers is not supported. This is because atomic
+// operations of that size have not been implemented. There haven't been
+// required use-cases. Many platforms don't provide hardware support.
+//
+// Atomic translated types don't provide the full interface of the associated
+// atomic decayed type. They could do so, perhaps under the control of an
+// associated type trait.
+//
+// Atomic is not intended to be anything approaching a drop-in replacement
+// for std::atomic. Rather, it's wrapping up a long-established HotSpot
+// idiom in a tidier and more rigorous package. Some of the differences from
+// std::atomic include
+//
+// * Atomic supports a much more limited set of value types.
+//
+// * All supported Atomic types are "lock free", so the standard mechanisms
+// for testing for that are not provided. (There might have been some types on
+// some platforms that used a lock long-ago, but that's no longer the case.)
+//
+// * Rather than load and store operations with a memory order parameter,
+// Atomic provides load_relaxed(), load_acquire(), release_store(),
+// store_relaxed(), and release_store_fence() operations.
+//
+// * Atomic doesn't provide operator overloads that perform various
+// operations with sequentially consistent ordering semantics. The rationale
+// for not providing these is similar to that for having different (often
+// longer) names for some operations than the corresponding AtomicAccess
+// functions.
+
+// Implementation support for Atomic.
+class AtomicImpl {
+ enum class Category {
+ Integer,
+ Byte,
+ Pointer,
+ Translated
+ };
+
+#if defined(__GNUC__) && !defined(__clang__)
+ // Workaround for gcc bug. Make category() public, else we get this error
+ // error: 'static constexpr AtomicImpl::Category AtomicImpl::category()
+ // [with T = unsigned int]' is private within this context
+ // The only reference is the default template parameter value in the Atomic
+ // class a couple lines below, in this same class!
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=122098
+public:
+#endif
+ // Selection of Atomic category, based on T.
+  template<typename T>
+  static constexpr Category category();
+private:
+
+ // Helper base classes, providing various parts of the APIs.
+  template<typename T> class CommonCore;
+  template<typename T> class SupportsExchange;
+  template<typename T> class SupportsArithmetic;
+
+  // Support conditional exchange() for atomic translated types.
+  template<typename T> class HasExchange;
+  template<typename T> class DecayedHasExchange;
+  template<typename Derived, typename T, bool = DecayedHasExchange<T>::value>
+  class TranslatedExchange;
+
+public:
+  template<typename T, Category = category<T>()>
+  class Atomic;
+};
+
+// The Atomic type.
+template<typename T>
+using Atomic = AtomicImpl::Atomic<T>;
+
+template<typename T>
+constexpr auto AtomicImpl::category() -> Category {
+  static_assert(std::is_same_v<T, std::remove_cv_t<T>>,
+                "Value type must not be cv-qualified");
+  if constexpr (std::is_integral_v<T>) {
+    if constexpr ((sizeof(T) == sizeof(int32_t)) || (sizeof(T) == sizeof(int64_t))) {
+      return Category::Integer;
+    } else if constexpr (sizeof(T) == 1) {
+      return Category::Byte;
+    } else {
+      static_assert(DependentAlwaysFalse<T>, "Invalid atomic integer type");
+    }
+  } else if constexpr (std::is_pointer_v<T>) {
+    return Category::Pointer;
+  } else if constexpr (PrimitiveConversions::Translate<T>::value) {
+    return Category::Translated;
+  } else {
+    static_assert(DependentAlwaysFalse<T>, "Invalid atomic value type");
+  }
+}
+
+// Atomic implementation classes.
+
+template<typename T>
+class AtomicImpl::CommonCore {
+ T volatile _value;
+
+protected:
+ explicit CommonCore(T value) : _value(value) {}
+ ~CommonCore() = default;
+
+ T volatile* value_ptr() { return &_value; }
+ T const volatile* value_ptr() const { return &_value; }
+
+ // Support for value_offset_in_bytes.
+  template<typename Derived>
+  static constexpr int value_offset_in_bytes_impl() {
+ return offsetof(Derived, _value);
+ }
+
+public:
+ NONCOPYABLE(CommonCore);
+
+ static constexpr int value_size_in_bytes() {
+ return sizeof(_value);
+ }
+
+ // Common core Atomic operations.
+
+ T load_relaxed() const {
+ return AtomicAccess::load(value_ptr());
+ }
+
+ T load_acquire() const {
+ return AtomicAccess::load_acquire(value_ptr());
+ }
+
+ void store_relaxed(T value) {
+ AtomicAccess::store(value_ptr(), value);
+ }
+
+ void release_store(T value) {
+ AtomicAccess::release_store(value_ptr(), value);
+ }
+
+ void release_store_fence(T value) {
+ AtomicAccess::release_store_fence(value_ptr(), value);
+ }
+
+ T compare_exchange(T compare_value, T new_value,
+ atomic_memory_order order = memory_order_conservative) {
+ return AtomicAccess::cmpxchg(value_ptr(), compare_value, new_value, order);
+ }
+};
+
+template<typename T>
+class AtomicImpl::SupportsExchange : public CommonCore<T> {
+protected:
+  explicit SupportsExchange(T value) : CommonCore<T>(value) {}
+ ~SupportsExchange() = default;
+
+public:
+ T exchange(T new_value,
+ atomic_memory_order order = memory_order_conservative) {
+ return AtomicAccess::xchg(this->value_ptr(), new_value, order);
+ }
+};
+
+template<typename T>
+class AtomicImpl::SupportsArithmetic : public SupportsExchange<T> {
+ // Guarding the AtomicAccess calls with constexpr checking of Offset produces
+ // better compile-time error messages.
+  template<typename Offset>
+  static constexpr bool check_offset_type() {
+    static_assert(std::is_integral_v<Offset>, "offset must be integral");
+    static_assert(sizeof(Offset) <= sizeof(T), "offset size exceeds value size");
+    if constexpr (!std::is_integral_v<T>) {
+      static_assert(std::is_pointer_v<T>, "must be");
+    } else if constexpr (std::is_signed_v<T>) {
+      static_assert(std::is_signed_v<Offset>,
+                    "value is signed but offset is unsigned");
+    } else {
+      static_assert(std::is_unsigned_v<Offset>,
+                    "value is unsigned but offset is signed");
+    }
+    return true;
+  }
+
+protected:
+  explicit SupportsArithmetic(T value) : SupportsExchange<T>(value) {}
+ ~SupportsArithmetic() = default;
+
+public:
+  template<typename Offset>
+  T add_then_fetch(Offset add_value,
+                   atomic_memory_order order = memory_order_conservative) {
+    if constexpr (check_offset_type<Offset>()) {
+ return AtomicAccess::add(this->value_ptr(), add_value, order);
+ }
+ }
+
+  template<typename Offset>
+  T fetch_then_add(Offset add_value,
+                   atomic_memory_order order = memory_order_conservative) {
+    if constexpr (check_offset_type<Offset>()) {
+ return AtomicAccess::fetch_then_add(this->value_ptr(), add_value, order);
+ }
+ }
+
+  template<typename Offset>
+  T sub_then_fetch(Offset sub_value,
+                   atomic_memory_order order = memory_order_conservative) {
+    if constexpr (check_offset_type<Offset>()) {
+ return AtomicAccess::sub(this->value_ptr(), sub_value, order);
+ }
+ }
+
+  template<typename Offset>
+  T fetch_then_sub(Offset sub_value,
+                   atomic_memory_order order = memory_order_conservative) {
+    if constexpr (check_offset_type<Offset>()) {
+ // AtomicAccess doesn't currently provide fetch_then_sub.
+ return sub_then_fetch(sub_value, order) + sub_value;
+ }
+ }
+};
+
+template<typename T>
+class AtomicImpl::Atomic<T, AtomicImpl::Category::Integer>
+  : public SupportsArithmetic<T>
+{
+public:
+  explicit Atomic(T value = 0) : SupportsArithmetic<T>(value) {}
+
+ NONCOPYABLE(Atomic);
+
+ using ValueType = T;
+
+  static constexpr int value_offset_in_bytes() {
+    return CommonCore<T>::template value_offset_in_bytes_impl<Atomic>();
+  }
+
+ T fetch_then_and(T bits, atomic_memory_order order = memory_order_conservative) {
+ return AtomicAccess::fetch_then_and(this->value_ptr(), bits, order);
+ }
+
+ T fetch_then_or(T bits, atomic_memory_order order = memory_order_conservative) {
+ return AtomicAccess::fetch_then_or(this->value_ptr(), bits, order);
+ }
+
+ T fetch_then_xor(T bits, atomic_memory_order order = memory_order_conservative) {
+ return AtomicAccess::fetch_then_xor(this->value_ptr(), bits, order);
+ }
+
+ T and_then_fetch(T bits, atomic_memory_order order = memory_order_conservative) {
+ return AtomicAccess::and_then_fetch(this->value_ptr(), bits, order);
+ }
+
+ T or_then_fetch(T bits, atomic_memory_order order = memory_order_conservative) {
+ return AtomicAccess::or_then_fetch(this->value_ptr(), bits, order);
+ }
+
+ T xor_then_fetch(T bits, atomic_memory_order order = memory_order_conservative) {
+ return AtomicAccess::xor_then_fetch(this->value_ptr(), bits, order);
+ }
+};
+
+template<typename T>
+class AtomicImpl::Atomic<T, AtomicImpl::Category::Byte>
+  : public CommonCore<T>
+{
+public:
+  explicit Atomic(T value = 0) : CommonCore<T>(value) {}
+
+ NONCOPYABLE(Atomic);
+
+ using ValueType = T;
+
+  static constexpr int value_offset_in_bytes() {
+    return CommonCore<T>::template value_offset_in_bytes_impl<Atomic>();
+  }
+};
+
+template<typename T>
+class AtomicImpl::Atomic<T, AtomicImpl::Category::Pointer>
+  : public SupportsArithmetic<T>
+{
+public:
+  explicit Atomic(T value = nullptr) : SupportsArithmetic<T>(value) {}
+
+ NONCOPYABLE(Atomic);
+
+ using ValueType = T;
+  using ElementType = std::remove_pointer_t<T>;
+
+  static constexpr int value_offset_in_bytes() {
+    return CommonCore<T>::template value_offset_in_bytes_impl<Atomic>();
+  }
+};
+
+// Atomic translated type
+
+// Test whether Atomic has exchange().
+template<typename T>
+class AtomicImpl::HasExchange {
+  template<typename Check> static void* test(decltype(&Check::exchange));
+  template<typename Check> static int test(...);
+  using test_type = decltype(test<Atomic<T>>(nullptr));
+public:
+  static constexpr bool value = std::is_pointer_v<test_type>;
+};
+
+// Test whether the atomic decayed type associated with T has exchange().
+template<typename T>
+class AtomicImpl::DecayedHasExchange {
+  using Translator = PrimitiveConversions::Translate<T>;
+  using Decayed = typename Translator::Decayed;
+
+  // "Unit test" HasExchange<>.
+  static_assert(HasExchange<int>::value);
+  static_assert(HasExchange<int*>::value);
+  static_assert(!HasExchange<char>::value);
+
+public:
+  static constexpr bool value = HasExchange<Decayed>::value;
+};
+
+// Base class for atomic translated type if atomic decayed type doesn't have
+// exchange().
+template<typename Derived, typename T>
+class AtomicImpl::TranslatedExchange<Derived, T, false> {};
+
+// Base class for atomic translated type if atomic decayed type does have
+// exchange().
+template<typename Derived, typename T>
+class AtomicImpl::TranslatedExchange<Derived, T, true> {
+public:
+  T exchange(T new_value,
+             atomic_memory_order order = memory_order_conservative) {
+    return static_cast<Derived*>(this)->exchange_impl(new_value, order);
+  }
+};
+
+template<typename T>
+class AtomicImpl::Atomic<T, AtomicImpl::Category::Translated>
+  : public TranslatedExchange<Atomic<T>, T>
+{
+  // Give TranslatedExchange<> access to exchange_impl() if needed.
+  friend class TranslatedExchange<Atomic<T>, T>;
+
+  using Translator = PrimitiveConversions::Translate<T>;
+  using Decayed = typename Translator::Decayed;
+
+  Atomic<Decayed> _value;
+
+ static Decayed decay(T x) { return Translator::decay(x); }
+ static T recover(Decayed x) { return Translator::recover(x); }
+
+ // Support for default construction via the default construction of _value.
+ struct UseDecayedCtor {};
+ explicit Atomic(UseDecayedCtor) : _value() {}
+  using DefaultCtorSelect =
+    std::conditional_t<std::is_default_constructible_v<T>, T, UseDecayedCtor>;
+
+public:
+ using ValueType = T;
+
+ // If T is default constructible, construct from a default constructed T.
+ // Otherwise, default construct the underlying Atomic.
+ Atomic() : Atomic(DefaultCtorSelect()) {}
+
+ explicit Atomic(T value) : _value(decay(value)) {}
+
+ NONCOPYABLE(Atomic);
+
+  static constexpr int value_offset_in_bytes() {
+    return (offsetof(Atomic, _value) +
+            Atomic<Decayed>::value_offset_in_bytes());
+  }
+
+  static constexpr int value_size_in_bytes() {
+    return Atomic<Decayed>::value_size_in_bytes();
+  }
+
+ T load_relaxed() const {
+ return recover(_value.load_relaxed());
+ }
+
+ T load_acquire() const {
+ return recover(_value.load_acquire());
+ }
+
+ void store_relaxed(T value) {
+ _value.store_relaxed(decay(value));
+ }
+
+ void release_store(T value) {
+ _value.release_store(decay(value));
+ }
+
+ void release_store_fence(T value) {
+ _value.release_store_fence(decay(value));
+ }
+
+ T compare_exchange(T compare_value, T new_value,
+ atomic_memory_order order = memory_order_conservative) {
+ return recover(_value.compare_exchange(decay(compare_value),
+ decay(new_value),
+ order));
+ }
+
+private:
+ // Implementation of exchange() if needed.
+ // Exclude when not needed, to prevent reference to non-existent function
+ // of atomic decayed type if someone explicitly instantiates Atomic.
+  template<ENABLE_IF(DecayedHasExchange<T>::value)>
+ T exchange_impl(T new_value, atomic_memory_order order) {
+ return recover(_value.exchange(decay(new_value), order));
+ }
+};
+
+#endif // SHARE_RUNTIME_ATOMIC_HPP
diff --git a/src/hotspot/share/utilities/globalDefinitions.hpp b/src/hotspot/share/utilities/globalDefinitions.hpp
index 12a69043013..1910759b434 100644
--- a/src/hotspot/share/utilities/globalDefinitions.hpp
+++ b/src/hotspot/share/utilities/globalDefinitions.hpp
@@ -1374,6 +1374,14 @@ template<typename K> int primitive_compare(const K& k0, const K& k1) {
template<typename T>
std::add_rvalue_reference_t<T> declval() noexcept;
+// This provides a workaround for static_assert(false) in discarded or
+// otherwise uninstantiated places. Instead use
+// static_assert(DependentAlwaysFalse, "...")
+// See http://wg21.link/p2593r1. Some, but not all, compiler versions we're
+// using have implemented that change as a DR:
+// https://cplusplus.github.io/CWG/issues/2518.html
+template<typename T> inline constexpr bool DependentAlwaysFalse = false;
+
// Quickly test to make sure IEEE-754 subnormal numbers are correctly
// handled.
bool IEEE_subnormal_handling_OK();
diff --git a/src/hotspot/share/utilities/singleWriterSynchronizer.cpp b/src/hotspot/share/utilities/singleWriterSynchronizer.cpp
index 5e4c9777468..932cd5c9093 100644
--- a/src/hotspot/share/utilities/singleWriterSynchronizer.cpp
+++ b/src/hotspot/share/utilities/singleWriterSynchronizer.cpp
@@ -43,17 +43,17 @@ SingleWriterSynchronizer::SingleWriterSynchronizer() :
// synchronization have exited that critical section.
void SingleWriterSynchronizer::synchronize() {
// Side-effect in assert balanced by debug-only dec at end.
- assert(AtomicAccess::add(&_writers, 1u) == 1u, "multiple writers");
+ assert(_writers.fetch_then_add(1u) == 0u, "multiple writers");
// We don't know anything about the muxing between this invocation
// and invocations in other threads. We must start with the latest
// _enter polarity, else we could clobber the wrong _exit value on
// the first iteration. So fence to ensure everything here follows
// whatever muxing was used.
OrderAccess::fence();
- uint value = _enter;
+ uint value = _enter.load_relaxed();
// (1) Determine the old and new exit counters, based on the
// polarity (bit0 value) of the on-entry enter counter.
- volatile uint* new_ptr = &_exit[(value + 1) & 1];
+  Atomic<uint>& new_exit = _exit[(value + 1) & 1];
// (2) Change the in-use exit counter to the new counter, by adding
// 1 to the enter counter (flipping the polarity), meanwhile
// "simultaneously" initializing the new exit counter to that enter
@@ -62,29 +62,29 @@ void SingleWriterSynchronizer::synchronize() {
uint old;
do {
old = value;
- *new_ptr = ++value;
- value = AtomicAccess::cmpxchg(&_enter, old, value);
+ new_exit.store_relaxed(++value);
+ value = _enter.compare_exchange(old, value);
} while (old != value);
// Critical sections entered before we changed the polarity will use
// the old exit counter. Critical sections entered after the change
// will use the new exit counter.
- volatile uint* old_ptr = &_exit[old & 1];
- assert(old_ptr != new_ptr, "invariant");
+  Atomic<uint>& old_exit = _exit[old & 1];
+ assert(&new_exit != &old_exit, "invariant");
// (3) Inform threads in in-progress critical sections that there is
// a pending synchronize waiting. The thread that completes the
// request (_exit value == old) will signal the _wakeup semaphore to
// allow us to proceed.
- _waiting_for = old;
+ _waiting_for.store_relaxed(old);
// Write of _waiting_for must precede read of _exit and associated
// conditional semaphore wait. If they were re-ordered then a
// critical section exit could miss the wakeup request, failing to
// signal us while we're waiting.
OrderAccess::fence();
// (4) Wait for all the critical sections started before the change
- // to complete, e.g. for the value of old_ptr to catch up with old.
+ // to complete, e.g. for the value of old_exit to catch up with old.
// Loop because there could be pending wakeups unrelated to this
// synchronize request.
- while (old != AtomicAccess::load_acquire(old_ptr)) {
+ while (old != old_exit.load_acquire()) {
_wakeup.wait();
}
// (5) Drain any pending wakeups. A critical section exit may have
@@ -95,5 +95,5 @@ void SingleWriterSynchronizer::synchronize() {
// lead to semaphore overflow. This doesn't guarantee no unrelated
// wakeups for the next wait, but prevents unbounded accumulation.
while (_wakeup.trywait()) {}
- DEBUG_ONLY(AtomicAccess::dec(&_writers);)
+ assert(_writers.sub_then_fetch(1u) == 0u, "invariant");
}
diff --git a/src/hotspot/share/utilities/singleWriterSynchronizer.hpp b/src/hotspot/share/utilities/singleWriterSynchronizer.hpp
index 737d5c6d4ac..450c7e89233 100644
--- a/src/hotspot/share/utilities/singleWriterSynchronizer.hpp
+++ b/src/hotspot/share/utilities/singleWriterSynchronizer.hpp
@@ -26,7 +26,7 @@
#define SHARE_UTILITIES_SINGLEWRITERSYNCHRONIZER_HPP
#include "memory/allocation.hpp"
-#include "runtime/atomicAccess.hpp"
+#include "runtime/atomic.hpp"
#include "runtime/semaphore.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
@@ -48,12 +48,12 @@
// the single writer at a time restriction. Use this only in
// situations where GlobalCounter won't work for some reason.
class SingleWriterSynchronizer {
- volatile uint _enter;
- volatile uint _exit[2];
- volatile uint _waiting_for;
+  Atomic<uint> _enter;
+  Atomic<uint> _exit[2];
+  Atomic<uint> _waiting_for;
Semaphore _wakeup;
- DEBUG_ONLY(volatile uint _writers;)
+  DEBUG_ONLY(Atomic<uint> _writers;)
NONCOPYABLE(SingleWriterSynchronizer);
@@ -87,15 +87,15 @@ public:
};
inline uint SingleWriterSynchronizer::enter() {
- return AtomicAccess::add(&_enter, 2u);
+ return _enter.add_then_fetch(2u);
}
inline void SingleWriterSynchronizer::exit(uint enter_value) {
- uint exit_value = AtomicAccess::add(&_exit[enter_value & 1], 2u);
+ uint exit_value = _exit[enter_value & 1].add_then_fetch(2u);
// If this exit completes a synchronize request, wakeup possibly
// waiting synchronizer. Read of _waiting_for must follow the _exit
// update.
- if (exit_value == _waiting_for) {
+ if (exit_value == _waiting_for.load_relaxed()) {
_wakeup.signal();
}
}
diff --git a/test/hotspot/gtest/runtime/test_atomic.cpp b/test/hotspot/gtest/runtime/test_atomic.cpp
new file mode 100644
index 00000000000..dc492e523d1
--- /dev/null
+++ b/test/hotspot/gtest/runtime/test_atomic.cpp
@@ -0,0 +1,640 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "cppstdlib/type_traits.hpp"
+#include "metaprogramming/primitiveConversions.hpp"
+#include "runtime/atomic.hpp"
+
+#include "unittest.hpp"
+
+// These tests of Atomic only verify functionality. They don't verify
+// atomicity.
+
+template<typename T>
+struct AtomicIntegerArithmeticTestSupport {
+  Atomic<T> _test_value;
+
+  static constexpr T _old_value = static_cast<T>(UCONST64(0x2000000020000));
+  static constexpr T _change_value = static_cast<T>(UCONST64(  0x100000001));
+
+ AtomicIntegerArithmeticTestSupport() : _test_value(0) {}
+
+ void fetch_then_add() {
+ _test_value.store_relaxed(_old_value);
+ T expected = _old_value + _change_value;
+ T result = _test_value.fetch_then_add(_change_value);
+ EXPECT_EQ(_old_value, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void fetch_then_sub() {
+ _test_value.store_relaxed(_old_value);
+ T expected = _old_value - _change_value;
+ T result = _test_value.fetch_then_sub(_change_value);
+ EXPECT_EQ(_old_value, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void add_then_fetch() {
+ _test_value.store_relaxed(_old_value);
+ T expected = _old_value + _change_value;
+ T result = _test_value.add_then_fetch(_change_value);
+ EXPECT_EQ(expected, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void sub_then_fetch() {
+ _test_value.store_relaxed(_old_value);
+ T expected = _old_value - _change_value;
+ T result = _test_value.sub_then_fetch(_change_value);
+ EXPECT_EQ(expected, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+#define TEST_ARITHMETIC(name) { SCOPED_TRACE(XSTR(name)); name(); }
+
+ void operator()() {
+ TEST_ARITHMETIC(fetch_then_add)
+ TEST_ARITHMETIC(fetch_then_sub)
+ TEST_ARITHMETIC(add_then_fetch)
+ TEST_ARITHMETIC(sub_then_fetch)
+ }
+
+#undef TEST_ARITHMETIC
+};
+
+TEST_VM(AtomicIntegerTest, arith_int32) {
+  AtomicIntegerArithmeticTestSupport<int32_t>()();
+}
+
+TEST_VM(AtomicIntegerTest, arith_uint32) {
+  AtomicIntegerArithmeticTestSupport<uint32_t>()();
+}
+
+TEST_VM(AtomicIntegerTest, arith_int64) {
+  AtomicIntegerArithmeticTestSupport<int64_t>()();
+}
+
+TEST_VM(AtomicIntegerTest, arith_uint64) {
+  AtomicIntegerArithmeticTestSupport<uint64_t>()();
+}
+
+template<typename T>
+struct AtomicIntegerXchgTestSupport {
+  Atomic<T> _test_value;
+
+ AtomicIntegerXchgTestSupport() : _test_value{} {}
+
+ void test() {
+ T zero = 0;
+ T five = 5;
+ _test_value.store_relaxed(zero);
+ T res = _test_value.exchange(five);
+ EXPECT_EQ(zero, res);
+ EXPECT_EQ(five, _test_value.load_relaxed());
+ }
+};
+
+TEST_VM(AtomicIntegerTest, xchg_int32) {
+  using Support = AtomicIntegerXchgTestSupport<int32_t>;
+  Support().test();
+}
+
+TEST_VM(AtomicIntegerTest, xchg_int64) {
+  using Support = AtomicIntegerXchgTestSupport<int64_t>;
+  Support().test();
+}
+
+template<typename T>
+struct AtomicIntegerCmpxchgTestSupport {
+  Atomic<T> _test_value;
+
+ AtomicIntegerCmpxchgTestSupport() : _test_value{} {}
+
+ void test() {
+ T zero = 0;
+ T five = 5;
+ T ten = 10;
+ _test_value.store_relaxed(zero);
+ T res = _test_value.compare_exchange(five, ten);
+ EXPECT_EQ(zero, res);
+ EXPECT_EQ(zero, _test_value.load_relaxed());
+ res = _test_value.compare_exchange(zero, ten);
+ EXPECT_EQ(zero, res);
+ EXPECT_EQ(ten, _test_value.load_relaxed());
+ }
+};
+
+TEST_VM(AtomicIntegerTest, cmpxchg_int32) {
+  using Support = AtomicIntegerCmpxchgTestSupport<int32_t>;
+  Support().test();
+}
+
+TEST_VM(AtomicIntegerTest, cmpxchg_int64) {
+  // Check if 64-bit atomics are available on the machine.
+  if (!VM_Version::supports_cx8()) return;
+
+  using Support = AtomicIntegerCmpxchgTestSupport<int64_t>;
+  Support().test();
+}
+
+struct AtomicCmpxchg1ByteStressSupport {
+ char _default_val;
+ int _base;
+  Atomic<char> _array[7+32+7];
+
+ AtomicCmpxchg1ByteStressSupport() : _default_val(0x7a), _base(7) {}
+
+ void validate(char val, char val2, int index) {
+ for (int i = 0; i < 7; i++) {
+ EXPECT_EQ(_array[i].load_relaxed(), _default_val);
+ }
+ for (int i = 7; i < (7+32); i++) {
+ if (i == index) {
+ EXPECT_EQ(_array[i].load_relaxed(), val2);
+ } else {
+ EXPECT_EQ(_array[i].load_relaxed(), val);
+ }
+ }
+ for (int i = 0; i < 7; i++) {
+ EXPECT_EQ(_array[i].load_relaxed(), _default_val);
+ }
+ }
+
+ void test_index(int index) {
+ char one = 1;
+ _array[index].compare_exchange(_default_val, one);
+ validate(_default_val, one, index);
+
+ _array[index].compare_exchange(one, _default_val);
+ validate(_default_val, _default_val, index);
+ }
+
+ void test() {
+ for (size_t i = 0; i < ARRAY_SIZE(_array); ++i) {
+ _array[i].store_relaxed(_default_val);
+ }
+ for (int i = _base; i < (_base+32); i++) {
+ test_index(i);
+ }
+ }
+};
+
+TEST_VM(AtomicCmpxchg1Byte, stress) {
+ AtomicCmpxchg1ByteStressSupport support;
+ support.test();
+}
+
+template<typename T>
+struct AtomicEnumTestSupport {
+  Atomic<T> _test_value;
+
+ AtomicEnumTestSupport() : _test_value{} {}
+
+ void test_store_load(T value) {
+ EXPECT_NE(value, _test_value.load_relaxed());
+ _test_value.store_relaxed(value);
+ EXPECT_EQ(value, _test_value.load_relaxed());
+ }
+
+ void test_cmpxchg(T value1, T value2) {
+ EXPECT_NE(value1, _test_value.load_relaxed());
+ _test_value.store_relaxed(value1);
+ EXPECT_EQ(value1, _test_value.compare_exchange(value2, value2));
+ EXPECT_EQ(value1, _test_value.load_relaxed());
+ EXPECT_EQ(value1, _test_value.compare_exchange(value1, value2));
+ EXPECT_EQ(value2, _test_value.load_relaxed());
+ }
+
+ void test_xchg(T value1, T value2) {
+ EXPECT_NE(value1, _test_value.load_relaxed());
+ _test_value.store_relaxed(value1);
+ EXPECT_EQ(value1, _test_value.exchange(value2));
+ EXPECT_EQ(value2, _test_value.load_relaxed());
+ }
+};
+
+namespace AtomicEnumTestUnscoped { // Scope the enumerators.
+ enum TestEnum { A, B, C };
+}
+
+TEST_VM(AtomicEnumTest, unscoped_enum) {
+ using namespace AtomicEnumTestUnscoped;
+  using Support = AtomicEnumTestSupport<TestEnum>;
+
+ Support().test_store_load(B);
+ Support().test_cmpxchg(B, C);
+ Support().test_xchg(B, C);
+}
+
+enum class AtomicEnumTestScoped { A, B, C };
+
+TEST_VM(AtomicEnumTest, scoped_enum) {
+ const AtomicEnumTestScoped B = AtomicEnumTestScoped::B;
+ const AtomicEnumTestScoped C = AtomicEnumTestScoped::C;
+  using Support = AtomicEnumTestSupport<AtomicEnumTestScoped>;
+
+ Support().test_store_load(B);
+ Support().test_cmpxchg(B, C);
+ Support().test_xchg(B, C);
+}
+
+template<typename T>
+struct AtomicBitopsTestSupport {
+  Atomic<T> _test_value;
+
+  // At least one byte differs between _old_value and _old_value op _change_value.
+  static constexpr T _old_value = static_cast<T>(UCONST64(0x7f5300007f530044));
+  static constexpr T _change_value = static_cast<T>(UCONST64(0x3800530038005322));
+
+ AtomicBitopsTestSupport() : _test_value(0) {}
+
+ void fetch_then_and() {
+ _test_value.store_relaxed(_old_value);
+ T expected = _old_value & _change_value;
+ EXPECT_NE(_old_value, expected);
+ T result = _test_value.fetch_then_and(_change_value);
+ EXPECT_EQ(_old_value, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void fetch_then_or() {
+ _test_value.store_relaxed(_old_value);
+ T expected = _old_value | _change_value;
+ EXPECT_NE(_old_value, expected);
+ T result = _test_value.fetch_then_or(_change_value);
+ EXPECT_EQ(_old_value, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void fetch_then_xor() {
+ _test_value.store_relaxed(_old_value);
+ T expected = _old_value ^ _change_value;
+ EXPECT_NE(_old_value, expected);
+ T result = _test_value.fetch_then_xor(_change_value);
+ EXPECT_EQ(_old_value, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void and_then_fetch() {
+ _test_value.store_relaxed(_old_value);
+ T expected = _old_value & _change_value;
+ EXPECT_NE(_old_value, expected);
+ T result = _test_value.and_then_fetch(_change_value);
+ EXPECT_EQ(expected, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void or_then_fetch() {
+ _test_value.store_relaxed(_old_value);
+ T expected = _old_value | _change_value;
+ EXPECT_NE(_old_value, expected);
+ T result = _test_value.or_then_fetch(_change_value);
+ EXPECT_EQ(expected, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void xor_then_fetch() {
+ _test_value.store_relaxed(_old_value);
+ T expected = _old_value ^ _change_value;
+ EXPECT_NE(_old_value, expected);
+ T result = _test_value.xor_then_fetch(_change_value);
+ EXPECT_EQ(expected, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+#define TEST_BITOP(name) { SCOPED_TRACE(XSTR(name)); name(); }
+
+ void operator()() {
+ TEST_BITOP(fetch_then_and)
+ TEST_BITOP(fetch_then_or)
+ TEST_BITOP(fetch_then_xor)
+ TEST_BITOP(and_then_fetch)
+ TEST_BITOP(or_then_fetch)
+ TEST_BITOP(xor_then_fetch)
+ }
+
+#undef TEST_BITOP
+};
+
+TEST_VM(AtomicBitopsTest, int32) {
+  AtomicBitopsTestSupport<int32_t>()();
+}
+
+TEST_VM(AtomicBitopsTest, uint32) {
+  AtomicBitopsTestSupport<uint32_t>()();
+}
+
+TEST_VM(AtomicBitopsTest, int64) {
+  AtomicBitopsTestSupport<int64_t>()();
+}
+
+TEST_VM(AtomicBitopsTest, uint64) {
+  AtomicBitopsTestSupport<uint64_t>()();
+}
+
+template<typename T>
+struct AtomicPointerTestSupport {
+  static T _test_values[10];
+  static T* _initial_ptr;
+
+  Atomic<T*> _test_value;
+
+ AtomicPointerTestSupport() : _test_value(nullptr) {}
+
+ void fetch_then_add() {
+ _test_value.store_relaxed(_initial_ptr);
+ T* expected = _initial_ptr + 2;
+ T* result = _test_value.fetch_then_add(2);
+ EXPECT_EQ(_initial_ptr, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void fetch_then_sub() {
+ _test_value.store_relaxed(_initial_ptr);
+ T* expected = _initial_ptr - 2;
+ T* result = _test_value.fetch_then_sub(2);
+ EXPECT_EQ(_initial_ptr, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void add_then_fetch() {
+ _test_value.store_relaxed(_initial_ptr);
+ T* expected = _initial_ptr + 2;
+ T* result = _test_value.add_then_fetch(2);
+ EXPECT_EQ(expected, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void sub_then_fetch() {
+ _test_value.store_relaxed(_initial_ptr);
+ T* expected = _initial_ptr - 2;
+ T* result = _test_value.sub_then_fetch(2);
+ EXPECT_EQ(expected, result);
+ EXPECT_EQ(expected, _test_value.load_relaxed());
+ }
+
+ void exchange() {
+ _test_value.store_relaxed(_initial_ptr);
+ T* replace = _initial_ptr + 3;
+ T* result = _test_value.exchange(replace);
+ EXPECT_EQ(_initial_ptr, result);
+ EXPECT_EQ(replace, _test_value.load_relaxed());
+ }
+
+ void compare_exchange() {
+ _test_value.store_relaxed(_initial_ptr);
+ T* not_initial_ptr = _initial_ptr - 1;
+ T* replace = _initial_ptr + 3;
+
+ T* result = _test_value.compare_exchange(not_initial_ptr, replace);
+ EXPECT_EQ(_initial_ptr, result);
+ EXPECT_EQ(_initial_ptr, _test_value.load_relaxed());
+
+ result = _test_value.compare_exchange(_initial_ptr, replace);
+ EXPECT_EQ(_initial_ptr, result);
+ EXPECT_EQ(replace, _test_value.load_relaxed());
+ }
+
+#define TEST_OP(name) { SCOPED_TRACE(XSTR(name)); name(); }
+
+ void operator()() {
+ TEST_OP(fetch_then_add)
+ TEST_OP(fetch_then_sub)
+ TEST_OP(add_then_fetch)
+ TEST_OP(sub_then_fetch)
+ TEST_OP(exchange)
+ TEST_OP(compare_exchange)
+ }
+
+#undef TEST_OP
+};
+
+template<typename T>
+T AtomicPointerTestSupport<T>::_test_values[10] = {};
+
+template<typename T>
+T* AtomicPointerTestSupport<T>::_initial_ptr = &_test_values[5];
+
+TEST_VM(AtomicPointerTest, ptr_to_char) {
+  AtomicPointerTestSupport<char>()();
+}
+
+TEST_VM(AtomicPointerTest, ptr_to_int32) {
+  AtomicPointerTestSupport<int32_t>()();
+}
+
+TEST_VM(AtomicPointerTest, ptr_to_int64) {
+  AtomicPointerTestSupport<int64_t>()();
+}
+
+// Test translation, including chaining.
+
+struct TranslatedAtomicTestObject1 {
+ int _value;
+
+ // NOT default constructible.
+
+ explicit TranslatedAtomicTestObject1(int value)
+ : _value(value) {}
+};
+
+template<>
+struct PrimitiveConversions::Translate<TranslatedAtomicTestObject1>
+ : public std::true_type
+{
+ using Value = TranslatedAtomicTestObject1;
+ using Decayed = int;
+
+ static Decayed decay(Value x) { return x._value; }
+ static Value recover(Decayed x) { return Value(x); }
+};
+
+struct TranslatedAtomicTestObject2 {
+ TranslatedAtomicTestObject1 _value;
+
+ static constexpr int DefaultObject1Value = 3;
+
+ TranslatedAtomicTestObject2()
+ : TranslatedAtomicTestObject2(TranslatedAtomicTestObject1(DefaultObject1Value))
+ {}
+
+ explicit TranslatedAtomicTestObject2(TranslatedAtomicTestObject1 value)
+ : _value(value) {}
+};
+
+template<>
+struct PrimitiveConversions::Translate<TranslatedAtomicTestObject2>
+ : public std::true_type
+{
+ using Value = TranslatedAtomicTestObject2;
+ using Decayed = TranslatedAtomicTestObject1;
+
+ static Decayed decay(Value x) { return x._value; }
+ static Value recover(Decayed x) { return Value(x); }
+};
+
+struct TranslatedAtomicByteObject {
+ uint8_t _value;
+
+ // NOT default constructible.
+
+ explicit TranslatedAtomicByteObject(uint8_t value = 0) : _value(value) {}
+};
+
+template<>
+struct PrimitiveConversions::Translate<TranslatedAtomicByteObject>
+ : public std::true_type
+{
+ using Value = TranslatedAtomicByteObject;
+ using Decayed = uint8_t;
+
+ static Decayed decay(Value x) { return x._value; }
+ static Value recover(Decayed x) { return Value(x); }
+};
+
+// Test whether Atomic has exchange().
+// Note: This is intentionally a different implementation from what is used
+// by the atomic translated type to decide whether to provide exchange().
+// The intent is to make related testing non-tautological.
+// The two implementations must agree; it's a bug if they don't.
+template<typename T>
+class AtomicTypeHasExchange {
+  template<typename AT = Atomic<T>,
+           typename = decltype(declval<AT>().exchange(declval<T>()))>
+  static char* test(int);
+
+  template<typename> static char test(...);
+
+  using test_type = decltype(test(0));
+
+public:
+  static constexpr bool value = std::is_pointer_v<test_type>;
+};
+
+// Unit tests for AtomicTypeHasExchange.
+static_assert(AtomicTypeHasExchange::value);
+static_assert(AtomicTypeHasExchange::value);
+static_assert(AtomicTypeHasExchange::value);
+static_assert(AtomicTypeHasExchange::value);
+static_assert(!AtomicTypeHasExchange::value);
+
+// Verify translated byte type *doesn't* have exchange.
+static_assert(!AtomicTypeHasExchange<TranslatedAtomicByteObject>::value);
+
+// Verify that explicit instantiation doesn't attempt to reference the
+// non-existent exchange of the atomic decayed type.
+template class AtomicImpl::Atomic;
+
+template<typename T>
+static void test_atomic_translated_type() {
+  // This works even if T is not default constructible.
+  Atomic<T> _test_value{};
+
+  using Translated = PrimitiveConversions::Translate<T>;
+
+ EXPECT_EQ(0, Translated::decay(_test_value.load_relaxed()));
+ _test_value.store_relaxed(Translated::recover(5));
+ EXPECT_EQ(5, Translated::decay(_test_value.load_relaxed()));
+ EXPECT_EQ(5, Translated::decay(_test_value.compare_exchange(Translated::recover(5),
+ Translated::recover(10))));
+ EXPECT_EQ(10, Translated::decay(_test_value.load_relaxed()));
+
+ if constexpr (AtomicTypeHasExchange::value) {
+ EXPECT_EQ(10, Translated::decay(_test_value.exchange(Translated::recover(20))));
+ EXPECT_EQ(20, Translated::decay(_test_value.load_relaxed()));
+ }
+}
+
+TEST_VM(AtomicTranslatedTypeTest, int_test) {
+  test_atomic_translated_type<TranslatedAtomicTestObject1>();
+}
+
+TEST_VM(AtomicTranslatedTypeTest, byte_test) {
+  test_atomic_translated_type<TranslatedAtomicByteObject>();
+}
+
+TEST_VM(AtomicTranslatedTypeTest, chain) {
+  Atomic<TranslatedAtomicTestObject2> _test_value{};
+
+  using Translated1 = PrimitiveConversions::Translate<TranslatedAtomicTestObject1>;
+  using Translated2 = PrimitiveConversions::Translate<TranslatedAtomicTestObject2>;
+
+ auto resolve = [&](TranslatedAtomicTestObject2 x) {
+ return Translated1::decay(Translated2::decay(x));
+ };
+
+ auto construct = [&](int x) {
+ return Translated2::recover(Translated1::recover(x));
+ };
+
+ EXPECT_EQ(TranslatedAtomicTestObject2::DefaultObject1Value,
+ resolve(_test_value.load_relaxed()));
+ _test_value.store_relaxed(construct(5));
+ EXPECT_EQ(5, resolve(_test_value.load_relaxed()));
+ EXPECT_EQ(5, resolve(_test_value.compare_exchange(construct(5), construct(10))));
+ EXPECT_EQ(10, resolve(_test_value.load_relaxed()));
+ EXPECT_EQ(10, resolve(_test_value.exchange(construct(20))));
+ EXPECT_EQ(20, resolve(_test_value.load_relaxed()));
+};
+
+template<typename T>
+static void test_value_access() {
+  using AT = Atomic<T>;
+ // In addition to verifying values are as expected, also verify the
+ // operations are constexpr.
+ static_assert(sizeof(T) == AT::value_size_in_bytes(), "value size differs");
+ static_assert(0 == AT::value_offset_in_bytes(), "unexpected offset");
+ // Also verify no unexpected increase in size for Atomic wrapper.
+ static_assert(sizeof(T) == sizeof(AT), "unexpected size difference");
+};
+
+TEST_VM(AtomicValueAccessTest, access_char) {
+  test_value_access<char>();
+}
+
+TEST_VM(AtomicValueAccessTest, access_bool) {
+  test_value_access<bool>();
+}
+
+TEST_VM(AtomicValueAccessTest, access_int32) {
+  test_value_access<int32_t>();
+}
+
+TEST_VM(AtomicValueAccessTest, access_int64) {
+  test_value_access<int64_t>();
+}
+
+TEST_VM(AtomicValueAccessTest, access_ptr) {
+  test_value_access<void*>();
+}
+
+TEST_VM(AtomicValueAccessTest, access_trans1) {
+  test_value_access<TranslatedAtomicTestObject1>();
+}
+
+TEST_VM(AtomicValueAccessTest, access_trans2) {
+  test_value_access<TranslatedAtomicTestObject2>();
+}
From 9d6a61fda6f43577ee8f19483e5b47100ff8eec0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Beno=C3=AEt=20Maillard?=
Date: Thu, 13 Nov 2025 09:24:51 +0000
Subject: [PATCH 030/418] 8371558: C2: Missing optimization opportunity in
AbsNode::Ideal
Reviewed-by: thartmann, rcastanedalo, chagedorn
---
src/hotspot/share/opto/phaseX.cpp | 10 +++
.../c2/TestMissingOptAbsZeroMinusX.java | 85 +++++++++++++++++++
2 files changed, 95 insertions(+)
create mode 100644 test/hotspot/jtreg/compiler/c2/TestMissingOptAbsZeroMinusX.java
diff --git a/src/hotspot/share/opto/phaseX.cpp b/src/hotspot/share/opto/phaseX.cpp
index 8f192cf069c..1fe911aa7ac 100644
--- a/src/hotspot/share/opto/phaseX.cpp
+++ b/src/hotspot/share/opto/phaseX.cpp
@@ -2596,6 +2596,16 @@ void PhaseIterGVN::add_users_of_use_to_worklist(Node* n, Node* use, Unique_Node_
}
}
}
+ // Check for "abs(0-x)" into "abs(x)" conversion
+ if (use->is_Sub()) {
+ for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
+ Node* u = use->fast_out(i2);
+ if (u->Opcode() == Op_AbsD || u->Opcode() == Op_AbsF ||
+ u->Opcode() == Op_AbsL || u->Opcode() == Op_AbsI) {
+ worklist.push(u);
+ }
+ }
+ }
auto enqueue_init_mem_projs = [&](ProjNode* proj) {
add_users_to_worklist0(proj, worklist);
};
diff --git a/test/hotspot/jtreg/compiler/c2/TestMissingOptAbsZeroMinusX.java b/test/hotspot/jtreg/compiler/c2/TestMissingOptAbsZeroMinusX.java
new file mode 100644
index 00000000000..1422aef7b78
--- /dev/null
+++ b/test/hotspot/jtreg/compiler/c2/TestMissingOptAbsZeroMinusX.java
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8371558
+ * @summary An expression of the form "abs(0-x)" should be transformed to "abs(x)".
+ * This test ensures that updates to the Sub node’s inputs propagate as
+ * expected and that the optimization is not missed.
+ * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:-TieredCompilation -Xcomp
+ * -XX:CompileCommand=compileonly,compiler.c2.TestMissingOptAbsZeroMinusX::test*
+ * -XX:VerifyIterativeGVN=1110 compiler.c2.TestMissingOptAbsZeroMinusX
+ * @run main compiler.c2.TestMissingOptAbsZeroMinusX
+ *
+ */
+
+package compiler.c2;
+
+public class TestMissingOptAbsZeroMinusX {
+ static int a;
+ static boolean b;
+
+ public static void main(String[] strArr) {
+ // no known reproducer for AbsL
+ testAbsI();
+ testAbsF();
+ testAbsD();
+ }
+
+ static void testAbsI() {
+ int d = 4;
+ for (int i = 8; i < 133; i += 3) {
+ d -= a;
+ b = (d != Math.abs(d));
+ for (long f = 3; f < 127; f++) {
+ for (int e = 1; e < 3; e++) {}
+ }
+ d = 0;
+ }
+ }
+
+ static void testAbsF() {
+ float d = 12.3f;
+ for (int i = 8; i < 133; i += 3) {
+ d -= a;
+ b = (d != Math.abs(d));
+ for (long f = 3; f < 127; f++) {
+ for (int e = 1; e < 3; e++) {}
+ }
+ d = 0.0f;
+ }
+ }
+
+ static void testAbsD() {
+ double d = 12.3;
+ for (int i = 8; i < 133; i += 3) {
+ d -= a;
+ b = (d != Math.abs(d));
+ for (long f = 3; f < 127; f++) {
+ for (int e = 1; e < 3; e++) {}
+ }
+ d = 0.0;
+ }
+ }
+}
\ No newline at end of file
From 48c59faf58a4d7b7ec9d6824a5cbc9a55888ce72 Mon Sep 17 00:00:00 2001
From: Daniel Fuchs
Date: Thu, 13 Nov 2025 10:46:00 +0000
Subject: [PATCH 031/418] 8371722:
java/net/httpclient/BufferSizePropertyClampTest.java should use Locale.ROOT
Reviewed-by: djelinski, jpai, vyazici
---
.../httpclient/BufferSizePropertyClampTest.java | 16 +++++++++++-----
1 file changed, 11 insertions(+), 5 deletions(-)
diff --git a/test/jdk/java/net/httpclient/BufferSizePropertyClampTest.java b/test/jdk/java/net/httpclient/BufferSizePropertyClampTest.java
index caef0a58a6d..d9695dce3cb 100644
--- a/test/jdk/java/net/httpclient/BufferSizePropertyClampTest.java
+++ b/test/jdk/java/net/httpclient/BufferSizePropertyClampTest.java
@@ -29,6 +29,7 @@ import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.Locale;
import java.util.logging.Handler;
import java.util.logging.LogRecord;
import java.util.logging.Logger;
@@ -68,13 +69,20 @@ class BufferSizePropertyClampTest {
private static final List<String> CLIENT_LOGGER_MESSAGES =
Collections.synchronizedList(new ArrayList<>());
+ private static final String EXPECTED_MSG =
+ "ERROR: Property value for jdk.httpclient.bufsize={0} not in [1..16,384]: using default=16,384";
+
+ private static String format(String pattern, Object... args) {
+ return new MessageFormat(pattern, Locale.ROOT).format(args);
+ }
+
@BeforeAll
static void registerLoggerHandler() {
CLIENT_LOGGER.addHandler(new Handler() {
@Override
public void publish(LogRecord record) {
- var message = MessageFormat.format(record.getMessage(), record.getParameters());
+ var message = format(record.getMessage(), record.getParameters());
CLIENT_LOGGER_MESSAGES.add(message);
}
@@ -97,10 +105,8 @@ class BufferSizePropertyClampTest {
assertEquals(
1, CLIENT_LOGGER_MESSAGES.size(),
"Unexpected number of logger messages: " + CLIENT_LOGGER_MESSAGES);
- var expectedMessage = "ERROR: Property value for jdk.httpclient.bufsize=" +
- System.getProperty("jdk.httpclient.bufsize") +
- " not in [1..16384]: using default=16384";
- assertEquals(expectedMessage, CLIENT_LOGGER_MESSAGES.getFirst().replaceAll(",", ""));
+ var expectedMessage = format(EXPECTED_MSG, Integer.getInteger("jdk.httpclient.bufsize"));
+ assertEquals(expectedMessage, CLIENT_LOGGER_MESSAGES.getFirst());
}
}
From 6b6fdf1d9222eb03cd013cbe792fa77fd78c1acb Mon Sep 17 00:00:00 2001
From: Ramesh Bhagavatam Gangadhar
Date: Thu, 13 Nov 2025 12:57:16 +0000
Subject: [PATCH 032/418] 8357874: UNLIMTED_CRYPTO typo in class description of
JceSecurity.java.template
Reviewed-by: wetmore
---
.../share/classes/javax/crypto/JceSecurity.java.template | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/java.base/share/classes/javax/crypto/JceSecurity.java.template b/src/java.base/share/classes/javax/crypto/JceSecurity.java.template
index 01282394b57..a6010945660 100644
--- a/src/java.base/share/classes/javax/crypto/JceSecurity.java.template
+++ b/src/java.base/share/classes/javax/crypto/JceSecurity.java.template
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1997, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -31,7 +31,7 @@
*
* In the current jdk builds, this file is first preprocessed to replace
* @@JCE_DEFAULT_POLICY@ [sic] with "limited" or "unlimited" which is
- * determined by the $(UNLIMTED_CRYPTO) make variable. This variable is
+ * determined by the $(UNLIMITED_CRYPTO) make variable. This variable is
* set by top-level configure script, using either
* --disable-unlimited-crypto or --enable-unlimited-crypto [default].
*
From bbc0f9ef30c467c8da8b873813bde50a7e9ff697 Mon Sep 17 00:00:00 2001
From: Thomas Schatzl
Date: Thu, 13 Nov 2025 13:53:09 +0000
Subject: [PATCH 033/418] 8371788: Fix documentation for
CollectedHeap::collect(GCCause)
Reviewed-by: ayang, iwalulya
---
src/hotspot/share/gc/g1/g1CollectedHeap.hpp | 3 ---
src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp | 1 -
src/hotspot/share/gc/serial/serialHeap.hpp | 3 ---
src/hotspot/share/gc/shared/collectedHeap.hpp | 4 +---
4 files changed, 1 insertion(+), 10 deletions(-)
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index e160cb9e428..7a0edfacd0f 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -1040,9 +1040,6 @@ public:
// old GC alloc region.
bool is_old_gc_alloc_region(G1HeapRegion* hr);
- // Perform a collection of the heap; intended for use in implementing
- // "System.gc". This probably implies as full a collection as the
- // "CollectedHeap" supports.
void collect(GCCause::Cause cause) override;
// Try to perform a collection of the heap with the given cause to allocate allocation_word_size
diff --git a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
index de1e3ce851c..f9161afc28f 100644
--- a/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
+++ b/src/hotspot/share/gc/parallel/parallelScavengeHeap.hpp
@@ -206,7 +206,6 @@ public:
HeapWord* satisfy_failed_allocation(size_t size, bool is_tlab);
- // Support for System.gc()
void collect(GCCause::Cause cause) override;
void collect_at_safepoint(bool full);
diff --git a/src/hotspot/share/gc/serial/serialHeap.hpp b/src/hotspot/share/gc/serial/serialHeap.hpp
index 432cd73f616..ee016173c2a 100644
--- a/src/hotspot/share/gc/serial/serialHeap.hpp
+++ b/src/hotspot/share/gc/serial/serialHeap.hpp
@@ -139,9 +139,6 @@ public:
// Callback from VM_SerialGCCollect.
void collect_at_safepoint(bool full);
- // Perform a full collection of the heap; intended for use in implementing
- // "System.gc". This implies as full a collection as the CollectedHeap
- // supports. Caller does not hold the Heap_lock on entry.
void collect(GCCause::Cause cause) override;
// Returns "TRUE" iff "p" points into the committed areas of the heap.
diff --git a/src/hotspot/share/gc/shared/collectedHeap.hpp b/src/hotspot/share/gc/shared/collectedHeap.hpp
index 659106f9c19..6be0057480d 100644
--- a/src/hotspot/share/gc/shared/collectedHeap.hpp
+++ b/src/hotspot/share/gc/shared/collectedHeap.hpp
@@ -353,9 +353,7 @@ protected:
// collection or expansion activity.
virtual size_t unsafe_max_tlab_alloc() const = 0;
- // Perform a collection of the heap; intended for use in implementing
- // "System.gc". This probably implies as full a collection as the
- // "CollectedHeap" supports.
+ // Perform a collection of the heap of a type depending on the given cause.
virtual void collect(GCCause::Cause cause) = 0;
// Perform a full collection
From 7d78818ae609461ab830c32c222f15f1cab0d2d4 Mon Sep 17 00:00:00 2001
From: Thomas Schatzl
Date: Thu, 13 Nov 2025 13:55:25 +0000
Subject: [PATCH 034/418] 8274178: G1: Occupancy value in IHOP logging and JFR
event is inaccurate 8371635: G1: Young gen allocations should never be
considered when comparing against IHOP threshold
Reviewed-by: ayang, iwalulya
---
src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 17 +++++++++
src/hotspot/share/gc/g1/g1CollectedHeap.hpp | 11 ++++--
.../share/gc/g1/g1HeapSizingPolicy.cpp | 36 ++++++++----------
src/hotspot/share/gc/g1/g1IHOPControl.cpp | 37 +++++++++++--------
src/hotspot/share/gc/g1/g1IHOPControl.hpp | 16 +++++---
src/hotspot/share/gc/g1/g1Policy.cpp | 34 +++++------------
src/hotspot/share/gc/g1/g1Policy.hpp | 6 +--
src/hotspot/share/gc/g1/g1Trace.cpp | 8 ++--
src/hotspot/share/gc/g1/g1Trace.hpp | 4 +-
9 files changed, 91 insertions(+), 78 deletions(-)
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 9a5e390f6f1..f04658a1415 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -353,6 +353,14 @@ size_t G1CollectedHeap::humongous_obj_size_in_regions(size_t word_size) {
return align_up(word_size, G1HeapRegion::GrainWords) / G1HeapRegion::GrainWords;
}
+size_t G1CollectedHeap::allocation_used_bytes(size_t allocation_word_size) {
+ if (is_humongous(allocation_word_size)) {
+ return humongous_obj_size_in_regions(allocation_word_size) * G1HeapRegion::GrainBytes;
+ } else {
+ return allocation_word_size * HeapWordSize;
+ }
+}
+
// If could fit into free regions w/o expansion, try.
// Otherwise, if can expand, do so.
// Otherwise, if using ex regions might help, try with ex given back.
@@ -2955,6 +2963,15 @@ void G1CollectedHeap::abandon_collection_set() {
collection_set()->abandon();
}
+size_t G1CollectedHeap::non_young_occupancy_after_allocation(size_t allocation_word_size) {
+ // For simplicity, just count whole regions.
+ const size_t cur_occupancy = (old_regions_count() + humongous_regions_count()) * G1HeapRegion::GrainBytes;
+ // Humongous allocations will always be assigned to non-young heap, so consider
+ // that allocation in the result as well. Otherwise the allocation will always
+ // be in young gen, so there is no need to account it here.
+ return cur_occupancy + (is_humongous(allocation_word_size) ? allocation_used_bytes(allocation_word_size) : 0);
+}
+
bool G1CollectedHeap::is_old_gc_alloc_region(G1HeapRegion* hr) {
return _allocator->is_retained_old_region(hr);
}
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index 7a0edfacd0f..5dccf41e909 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -1032,9 +1032,10 @@ public:
inline void old_set_add(G1HeapRegion* hr);
inline void old_set_remove(G1HeapRegion* hr);
- size_t non_young_capacity_bytes() {
- return (old_regions_count() + humongous_regions_count()) * G1HeapRegion::GrainBytes;
- }
+ // Returns how much memory there is assigned to non-young heap that can not be
+ // allocated into any more without garbage collection after a hypothetical
+ // allocation of allocation_word_size.
+ size_t non_young_occupancy_after_allocation(size_t allocation_word_size);
// Determine whether the given region is one that we are using as an
// old GC alloc region.
@@ -1226,6 +1227,10 @@ public:
// requires.
static size_t humongous_obj_size_in_regions(size_t word_size);
+ // Returns how much space in bytes an allocation of word_size will use up in the
+ // heap.
+ static size_t allocation_used_bytes(size_t word_size);
+
// Print the maximum heap capacity.
size_t max_capacity() const override;
size_t min_capacity() const;
diff --git a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
index 03a1444f8e6..4dd0a509bcd 100644
--- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
+++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp
@@ -366,28 +366,24 @@ static size_t target_heap_capacity(size_t used_bytes, uintx free_ratio) {
}
size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand, size_t allocation_word_size) {
- // If the full collection was triggered by an allocation failure, we should account
- // for the bytes required for this allocation under used_after_gc. This prevents
- // unnecessary shrinking that would be followed by an expand call to satisfy the
- // allocation.
- size_t allocation_bytes = allocation_word_size * HeapWordSize;
- if (_g1h->is_humongous(allocation_word_size)) {
- // Humongous objects are allocated in entire regions, we must calculate
- // required space in terms of full regions, not just the object size.
- allocation_bytes = G1HeapRegion::align_up_to_region_byte_size(allocation_bytes);
- }
-
+ const size_t capacity_after_gc = _g1h->capacity();
// Capacity, free and used after the GC counted as full regions to
// include the waste in the following calculations.
- const size_t capacity_after_gc = _g1h->capacity();
- const size_t used_after_gc = capacity_after_gc + allocation_bytes -
- _g1h->unused_committed_regions_in_bytes() -
- // Discount space used by current Eden to establish a
- // situation during Remark similar to at the end of full
- // GC where eden is empty. During Remark there can be an
- // arbitrary number of eden regions which would skew the
- // results.
- _g1h->eden_regions_count() * G1HeapRegion::GrainBytes;
+ const size_t current_used_after_gc = capacity_after_gc -
+ _g1h->unused_committed_regions_in_bytes() -
+ // Discount space used by current Eden to establish a
+ // situation during Remark similar to at the end of full
+ // GC where eden is empty. During Remark there can be an
+ // arbitrary number of eden regions which would skew the
+ // results.
+ _g1h->eden_regions_count() * G1HeapRegion::GrainBytes;
+
+ // Add pending allocation;
+ const size_t used_after_gc = current_used_after_gc +
+ // If the full collection was triggered by an allocation failure,
+ // account that allocation too. Otherwise we could shrink and then
+ // expand immediately to satisfy the allocation.
+ _g1h->allocation_used_bytes(allocation_word_size);
size_t minimum_desired_capacity = target_heap_capacity(used_after_gc, MinHeapFreeRatio);
size_t maximum_desired_capacity = target_heap_capacity(used_after_gc, MaxHeapFreeRatio);
diff --git a/src/hotspot/share/gc/g1/g1IHOPControl.cpp b/src/hotspot/share/gc/g1/g1IHOPControl.cpp
index 5c05169c29d..34c8cd0366b 100644
--- a/src/hotspot/share/gc/g1/g1IHOPControl.cpp
+++ b/src/hotspot/share/gc/g1/g1IHOPControl.cpp
@@ -44,32 +44,37 @@ void G1IHOPControl::update_target_occupancy(size_t new_target_occupancy) {
_target_occupancy = new_target_occupancy;
}
+void G1IHOPControl::report_statistics(G1NewTracer* new_tracer, size_t non_young_occupancy) {
+ print_log(non_young_occupancy);
+ send_trace_event(new_tracer, non_young_occupancy);
+}
+
void G1IHOPControl::update_allocation_info(double allocation_time_s, size_t additional_buffer_size) {
assert(allocation_time_s >= 0.0, "Allocation time must be positive but is %.3f", allocation_time_s);
_last_allocation_time_s = allocation_time_s;
}
-void G1IHOPControl::print() {
+void G1IHOPControl::print_log(size_t non_young_occupancy) {
assert(_target_occupancy > 0, "Target occupancy still not updated yet.");
size_t cur_conc_mark_start_threshold = get_conc_mark_start_threshold();
- log_debug(gc, ihop)("Basic information (value update), threshold: %zuB (%1.2f), target occupancy: %zuB, current occupancy: %zuB, "
+ log_debug(gc, ihop)("Basic information (value update), threshold: %zuB (%1.2f), target occupancy: %zuB, non-young occupancy: %zuB, "
"recent allocation size: %zuB, recent allocation duration: %1.2fms, recent old gen allocation rate: %1.2fB/s, recent marking phase length: %1.2fms",
cur_conc_mark_start_threshold,
percent_of(cur_conc_mark_start_threshold, _target_occupancy),
_target_occupancy,
- G1CollectedHeap::heap()->used(),
+ non_young_occupancy,
_old_gen_alloc_tracker->last_period_old_gen_bytes(),
_last_allocation_time_s * 1000.0,
_last_allocation_time_s > 0.0 ? _old_gen_alloc_tracker->last_period_old_gen_bytes() / _last_allocation_time_s : 0.0,
last_marking_length_s() * 1000.0);
}
-void G1IHOPControl::send_trace_event(G1NewTracer* tracer) {
+void G1IHOPControl::send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy) {
assert(_target_occupancy > 0, "Target occupancy still not updated yet.");
tracer->report_basic_ihop_statistics(get_conc_mark_start_threshold(),
_target_occupancy,
- G1CollectedHeap::heap()->used(),
+ non_young_occupancy,
_old_gen_alloc_tracker->last_period_old_gen_bytes(),
_last_allocation_time_s,
last_marking_length_s());
@@ -165,27 +170,27 @@ void G1AdaptiveIHOPControl::update_marking_length(double marking_length_s) {
_marking_times_s.add(marking_length_s);
}
-void G1AdaptiveIHOPControl::print() {
- G1IHOPControl::print();
- size_t actual_target = actual_target_threshold();
- log_debug(gc, ihop)("Adaptive IHOP information (value update), threshold: %zuB (%1.2f), internal target occupancy: %zuB, "
- "occupancy: %zuB, additional buffer size: %zuB, predicted old gen allocation rate: %1.2fB/s, "
+void G1AdaptiveIHOPControl::print_log(size_t non_young_occupancy) {
+ G1IHOPControl::print_log(non_young_occupancy);
+ size_t actual_threshold = actual_target_threshold();
+ log_debug(gc, ihop)("Adaptive IHOP information (value update), threshold: %zuB (%1.2f), internal target threshold: %zuB, "
+ "non-young occupancy: %zuB, additional buffer size: %zuB, predicted old gen allocation rate: %1.2fB/s, "
"predicted marking phase length: %1.2fms, prediction active: %s",
get_conc_mark_start_threshold(),
- percent_of(get_conc_mark_start_threshold(), actual_target),
- actual_target,
- G1CollectedHeap::heap()->used(),
+ percent_of(get_conc_mark_start_threshold(), actual_threshold),
+ actual_threshold,
+ non_young_occupancy,
_last_unrestrained_young_size,
predict(&_allocation_rate_s),
predict(&_marking_times_s) * 1000.0,
have_enough_data_for_prediction() ? "true" : "false");
}
-void G1AdaptiveIHOPControl::send_trace_event(G1NewTracer* tracer) {
- G1IHOPControl::send_trace_event(tracer);
+void G1AdaptiveIHOPControl::send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy) {
+ G1IHOPControl::send_trace_event(tracer, non_young_occupancy);
tracer->report_adaptive_ihop_statistics(get_conc_mark_start_threshold(),
actual_target_threshold(),
- G1CollectedHeap::heap()->used(),
+ non_young_occupancy,
_last_unrestrained_young_size,
predict(&_allocation_rate_s),
predict(&_marking_times_s),
diff --git a/src/hotspot/share/gc/g1/g1IHOPControl.hpp b/src/hotspot/share/gc/g1/g1IHOPControl.hpp
index 507fbb269d1..392a12a785a 100644
--- a/src/hotspot/share/gc/g1/g1IHOPControl.hpp
+++ b/src/hotspot/share/gc/g1/g1IHOPControl.hpp
@@ -55,7 +55,11 @@ class G1IHOPControl : public CHeapObj {
// Most recent time from the end of the concurrent start to the start of the first
// mixed gc.
virtual double last_marking_length_s() const = 0;
- public:
+
+ virtual void print_log(size_t non_young_occupancy);
+ virtual void send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy);
+
+public:
virtual ~G1IHOPControl() { }
// Get the current non-young occupancy at which concurrent marking should start.
@@ -76,8 +80,7 @@ class G1IHOPControl : public CHeapObj {
// the first mixed gc.
virtual void update_marking_length(double marking_length_s) = 0;
- virtual void print();
- virtual void send_trace_event(G1NewTracer* tracer);
+ void report_statistics(G1NewTracer* tracer, size_t non_young_occupancy);
};
// The returned concurrent mark starting occupancy threshold is a fixed value
@@ -139,6 +142,10 @@ class G1AdaptiveIHOPControl : public G1IHOPControl {
double last_mutator_period_old_allocation_rate() const;
protected:
virtual double last_marking_length_s() const { return _marking_times_s.last(); }
+
+ virtual void print_log(size_t non_young_occupancy);
+ virtual void send_trace_event(G1NewTracer* tracer, size_t non_young_occupancy);
+
public:
G1AdaptiveIHOPControl(double ihop_percent,
G1OldGenAllocationTracker const* old_gen_alloc_tracker,
@@ -150,9 +157,6 @@ class G1AdaptiveIHOPControl : public G1IHOPControl {
virtual void update_allocation_info(double allocation_time_s, size_t additional_buffer_size);
virtual void update_marking_length(double marking_length_s);
-
- virtual void print();
- virtual void send_trace_event(G1NewTracer* tracer);
};
#endif // SHARE_GC_G1_G1IHOPCONTROL_HPP
diff --git a/src/hotspot/share/gc/g1/g1Policy.cpp b/src/hotspot/share/gc/g1/g1Policy.cpp
index ddcdfb0c3a4..19573e11cd7 100644
--- a/src/hotspot/share/gc/g1/g1Policy.cpp
+++ b/src/hotspot/share/gc/g1/g1Policy.cpp
@@ -749,22 +749,14 @@ bool G1Policy::need_to_start_conc_mark(const char* source, size_t allocation_wor
}
size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
-
- size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
- size_t allocation_byte_size = allocation_word_size * HeapWordSize;
- // For humongous allocations, we need to consider that we actually use full regions
- // for allocations. So compare the threshold to this size.
- if (_g1h->is_humongous(allocation_word_size)) {
- allocation_byte_size = G1HeapRegion::align_up_to_region_byte_size(allocation_byte_size);
- }
- size_t marking_request_bytes = cur_used_bytes + allocation_byte_size;
+ size_t non_young_occupancy = _g1h->non_young_occupancy_after_allocation(allocation_word_size);
bool result = false;
- if (marking_request_bytes > marking_initiating_used_threshold) {
+ if (non_young_occupancy > marking_initiating_used_threshold) {
result = collector_state()->in_young_only_phase();
- log_debug(gc, ergo, ihop)("%s occupancy: %zuB allocation request: %zuB threshold: %zuB (%1.2f) source: %s",
+ log_debug(gc, ergo, ihop)("%s non-young occupancy: %zuB allocation request: %zuB threshold: %zuB (%1.2f) source: %s",
result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
- cur_used_bytes, allocation_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
+ non_young_occupancy, allocation_word_size * HeapWordSize, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
}
return result;
}
@@ -995,10 +987,10 @@ void G1Policy::record_young_collection_end(bool concurrent_operation_is_full_mar
update_young_length_bounds();
_old_gen_alloc_tracker.reset_after_gc(_g1h->humongous_regions_count() * G1HeapRegion::GrainBytes);
- update_ihop_prediction(app_time_ms / 1000.0,
- G1GCPauseTypeHelper::is_young_only_pause(this_pause));
-
- _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
+ if (update_ihop_prediction(app_time_ms / 1000.0,
+ G1GCPauseTypeHelper::is_young_only_pause(this_pause))) {
+ _ihop_control->report_statistics(_g1h->gc_tracer_stw(), _g1h->non_young_occupancy_after_allocation(allocation_word_size));
+ }
} else {
// Any garbage collection triggered as periodic collection resets the time-to-mixed
// measurement. Periodic collection typically means that the application is "inactive", i.e.
@@ -1045,7 +1037,7 @@ G1IHOPControl* G1Policy::create_ihop_control(const G1OldGenAllocationTracker* ol
}
}
-void G1Policy::update_ihop_prediction(double mutator_time_s,
+bool G1Policy::update_ihop_prediction(double mutator_time_s,
bool this_gc_was_young_only) {
// Always try to update IHOP prediction. Even evacuation failures give information
// about e.g. whether to start IHOP earlier next time.
@@ -1082,13 +1074,7 @@ void G1Policy::update_ihop_prediction(double mutator_time_s,
report = true;
}
- if (report) {
- report_ihop_statistics();
- }
-}
-
-void G1Policy::report_ihop_statistics() {
- _ihop_control->print();
+ return report;
}
void G1Policy::record_young_gc_pause_end(bool evacuation_failed) {
diff --git a/src/hotspot/share/gc/g1/g1Policy.hpp b/src/hotspot/share/gc/g1/g1Policy.hpp
index 93724657235..72fdc6deb5b 100644
--- a/src/hotspot/share/gc/g1/g1Policy.hpp
+++ b/src/hotspot/share/gc/g1/g1Policy.hpp
@@ -60,10 +60,10 @@ class G1Policy: public CHeapObj {
static G1IHOPControl* create_ihop_control(const G1OldGenAllocationTracker* old_gen_alloc_tracker,
const G1Predictions* predictor);
- // Update the IHOP control with necessary statistics.
- void update_ihop_prediction(double mutator_time_s,
+ // Update the IHOP control with the necessary statistics. Returns true if there
+ // has been a significant update to the prediction.
+ bool update_ihop_prediction(double mutator_time_s,
bool this_gc_was_young_only);
- void report_ihop_statistics();
G1Predictions _predictor;
G1Analytics* _analytics;
diff --git a/src/hotspot/share/gc/g1/g1Trace.cpp b/src/hotspot/share/gc/g1/g1Trace.cpp
index 6c9a87e4d98..ed6a91f41ed 100644
--- a/src/hotspot/share/gc/g1/g1Trace.cpp
+++ b/src/hotspot/share/gc/g1/g1Trace.cpp
@@ -98,13 +98,13 @@ void G1NewTracer::report_evacuation_statistics(const G1EvacSummary& young_summar
void G1NewTracer::report_basic_ihop_statistics(size_t threshold,
size_t target_ccupancy,
- size_t current_occupancy,
+ size_t non_young_occupancy,
size_t last_allocation_size,
double last_allocation_duration,
double last_marking_length) {
send_basic_ihop_statistics(threshold,
target_ccupancy,
- current_occupancy,
+ non_young_occupancy,
last_allocation_size,
last_allocation_duration,
last_marking_length);
@@ -206,7 +206,7 @@ void G1NewTracer::send_old_evacuation_statistics(const G1EvacSummary& summary) c
void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
size_t target_occupancy,
- size_t current_occupancy,
+ size_t non_young_occupancy,
size_t last_allocation_size,
double last_allocation_duration,
double last_marking_length) {
@@ -216,7 +216,7 @@ void G1NewTracer::send_basic_ihop_statistics(size_t threshold,
evt.set_threshold(threshold);
evt.set_targetOccupancy(target_occupancy);
evt.set_thresholdPercentage(target_occupancy > 0 ? ((double)threshold / target_occupancy) : 0.0);
- evt.set_currentOccupancy(current_occupancy);
+ evt.set_currentOccupancy(non_young_occupancy);
evt.set_recentMutatorAllocationSize(last_allocation_size);
evt.set_recentMutatorDuration(last_allocation_duration * MILLIUNITS);
evt.set_recentAllocationRate(last_allocation_duration != 0.0 ? last_allocation_size / last_allocation_duration : 0.0);
diff --git a/src/hotspot/share/gc/g1/g1Trace.hpp b/src/hotspot/share/gc/g1/g1Trace.hpp
index 025a1c3fe95..1415828f376 100644
--- a/src/hotspot/share/gc/g1/g1Trace.hpp
+++ b/src/hotspot/share/gc/g1/g1Trace.hpp
@@ -73,13 +73,13 @@ private:
void send_basic_ihop_statistics(size_t threshold,
size_t target_occupancy,
- size_t current_occupancy,
+ size_t non_young_occupancy,
size_t last_allocation_size,
double last_allocation_duration,
double last_marking_length);
void send_adaptive_ihop_statistics(size_t threshold,
size_t internal_target_occupancy,
- size_t current_occupancy,
+ size_t non_young_occupancy,
size_t additional_buffer_size,
double predicted_allocation_rate,
double predicted_marking_length,
From 8102f436f5586253302cd8cef49bfe2b4af41693 Mon Sep 17 00:00:00 2001
From: Vicente Romero
Date: Thu, 13 Nov 2025 15:28:08 +0000
Subject: [PATCH 035/418] 8371480: VerifyError after JDK-8369654
Reviewed-by: mcimadamore
---
.../com/sun/tools/javac/code/Types.java | 2 +-
.../classes/com/sun/tools/javac/jvm/Code.java | 19 +--
.../VerifierErrorWrongSuperTypeTest.java | 130 ++++++++++++++++++
3 files changed, 142 insertions(+), 9 deletions(-)
create mode 100644 test/langtools/tools/javac/switchexpr/VerifierErrorWrongSuperTypeTest.java
diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Types.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Types.java
index ffd304c18a2..d59505555f2 100644
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Types.java
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/code/Types.java
@@ -3971,7 +3971,7 @@ public class Types {
* Return the minimum types of a closure, suitable for computing
* compoundMin or glb.
*/
- private List closureMin(List cl) {
+ public List closureMin(List cl) {
ListBuffer classes = new ListBuffer<>();
ListBuffer interfaces = new ListBuffer<>();
Set toSkip = new HashSet<>();
diff --git a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Code.java b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Code.java
index 7899b8335b6..227143c3148 100644
--- a/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Code.java
+++ b/src/jdk.compiler/share/classes/com/sun/tools/javac/jvm/Code.java
@@ -1854,14 +1854,17 @@ public class Code {
} else {
t1 = types.skipTypeVars(t1, false);
t2 = types.skipTypeVars(t2, false);
- List intersection = types.intersect(
- t1.hasTag(ARRAY) ?
- List.of(syms.serializableType, syms.cloneableType, syms.objectType) :
- types.erasedSupertypes(t1),
- t2.hasTag(ARRAY) ?
- List.of(syms.serializableType, syms.cloneableType, syms.objectType) :
- types.erasedSupertypes(t2));
- return intersection.head;
+ List result = types.closureMin(
+ types.intersect(
+ t1.hasTag(ARRAY) ?
+ List.of(syms.serializableType, syms.cloneableType, syms.objectType) :
+ types.erasedSupertypes(t1),
+ t2.hasTag(ARRAY) ?
+ List.of(syms.serializableType, syms.cloneableType, syms.objectType) :
+ types.erasedSupertypes(t2)
+ )
+ );
+ return result.head;
}
}
diff --git a/test/langtools/tools/javac/switchexpr/VerifierErrorWrongSuperTypeTest.java b/test/langtools/tools/javac/switchexpr/VerifierErrorWrongSuperTypeTest.java
new file mode 100644
index 00000000000..115bf8980b0
--- /dev/null
+++ b/test/langtools/tools/javac/switchexpr/VerifierErrorWrongSuperTypeTest.java
@@ -0,0 +1,130 @@
+/*
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @summary VerifyError after JDK-8369654, incorrect supertype
+ * @library /tools/lib
+ * @modules jdk.compiler/com.sun.tools.javac.api
+ * jdk.compiler/com.sun.tools.javac.main
+ * jdk.compiler/com.sun.tools.javac.util
+ * jdk.compiler/com.sun.tools.javac.code
+ * @build toolbox.ToolBox toolbox.JavacTask
+ * @run main VerifierErrorWrongSuperTypeTest
+ */
+
+import java.util.*;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+
+import toolbox.TestRunner;
+import toolbox.ToolBox;
+import toolbox.JavaTask;
+import toolbox.JavacTask;
+import toolbox.Task;
+import toolbox.Task.OutputKind;
+
+public class VerifierErrorWrongSuperTypeTest extends TestRunner {
+ ToolBox tb;
+
+ VerifierErrorWrongSuperTypeTest() {
+ super(System.err);
+ tb = new ToolBox();
+ }
+
+ protected void runTests() throws Exception {
+ runTests(m -> new Object[]{Paths.get(m.getName())});
+ }
+
+ Path[] findJavaFiles(Path... paths) throws IOException {
+ return tb.findJavaFiles(paths);
+ }
+
+ public static void main(String... args) throws Exception {
+ VerifierErrorWrongSuperTypeTest t = new VerifierErrorWrongSuperTypeTest();
+ t.runTests();
+ }
+
+ @Test
+ public void testCompatibilityAfterMakingSuperclassSealed(Path base) throws Exception {
+ Path src = base.resolve("src");
+ Path pkg = src.resolve("p");
+ Path v = pkg.resolve("V");
+ tb.writeJavaFiles(v,
+ """
+ package p;
+ public abstract class V {}
+ """
+ );
+ Path d = pkg.resolve("D");
+ tb.writeJavaFiles(d,
+ """
+ package p;
+ public abstract class D extends V implements Cloneable {}
+ """
+ );
+ Path a = pkg.resolve("A");
+ tb.writeJavaFiles(a,
+ """
+ package p;
+ public class A extends V implements Cloneable {}
+ """
+ );
+ Path t = src.resolve("T");
+ tb.writeJavaFiles(t,
+ """
+ import p.A;
+ import p.D;
+ import p.V;
+ class T {
+ public static void main(String[] args) {
+ new T().foo(false, null);
+ }
+ void foo(boolean b, D d) {
+ V u = b ? d : new A();
+ g(u);
+ }
+ void g(V u) {}
+ }
+ """
+ );
+ Path out = base.resolve("out");
+ Files.createDirectories(out);
+ new JavacTask(tb)
+ .outdir(out)
+ .files(findJavaFiles(src))
+ .run();
+
+ try {
+ new JavaTask(tb)
+ .classpath(out.toString())
+ .classArgs("T")
+ .run();
+ } catch (Throwable error) {
+ throw new AssertionError("execution failed");
+ }
+ }
+}
From bfc048aba6391d52c07d9a5146466b47d2f6fed8 Mon Sep 17 00:00:00 2001
From: Matthias Baesken
Date: Thu, 13 Nov 2025 16:26:17 +0000
Subject: [PATCH 036/418] 8371608: Jtreg test
jdk/internal/vm/Continuation/Fuzz.java sometimes fails with (fast)debug
binaries
Reviewed-by: mdoerr, rrich
---
test/jdk/jdk/internal/vm/Continuation/Fuzz.java | 3 +++
1 file changed, 3 insertions(+)
diff --git a/test/jdk/jdk/internal/vm/Continuation/Fuzz.java b/test/jdk/jdk/internal/vm/Continuation/Fuzz.java
index 8d522cc83e1..49d86099dc7 100644
--- a/test/jdk/jdk/internal/vm/Continuation/Fuzz.java
+++ b/test/jdk/jdk/internal/vm/Continuation/Fuzz.java
@@ -96,6 +96,9 @@ public class Fuzz implements Runnable {
if (Platform.isPPC()) {
COMPILATION_TIMEOUT = COMPILATION_TIMEOUT * 2;
}
+ if (Platform.isDebugBuild()) {
+ COMPILATION_TIMEOUT = COMPILATION_TIMEOUT * 2;
+ }
warmup();
for (int compileLevel : new int[]{4}) {
for (boolean compileRun : new boolean[]{true}) {
From 2199b5fef4540ae8da77c5c4feafc8822a3d9d3d Mon Sep 17 00:00:00 2001
From: Rui Li
Date: Thu, 13 Nov 2025 18:01:58 +0000
Subject: [PATCH 037/418] 8371381: [Shenandoah] Setting ergo flags should use
FLAG_SET_ERGO
Reviewed-by: xpeng, wkemper, ysr, cslucas
---
.../share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp | 6 +++---
.../share/gc/shenandoah/mode/shenandoahPassiveMode.cpp | 6 +++++-
2 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
index 6e3062d158f..3cd2cb1d171 100644
--- a/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
+++ b/src/hotspot/share/gc/shenandoah/heuristics/shenandoahHeuristics.hpp
@@ -36,7 +36,7 @@
do { \
if (FLAG_IS_DEFAULT(name) && (name)) { \
log_info(gc)("Heuristics ergonomically sets -XX:-" #name); \
- FLAG_SET_DEFAULT(name, false); \
+ FLAG_SET_ERGO(name, false); \
} \
} while (0)
@@ -44,7 +44,7 @@
do { \
if (FLAG_IS_DEFAULT(name) && !(name)) { \
log_info(gc)("Heuristics ergonomically sets -XX:+" #name); \
- FLAG_SET_DEFAULT(name, true); \
+ FLAG_SET_ERGO(name, true); \
} \
} while (0)
@@ -52,7 +52,7 @@
do { \
if (FLAG_IS_DEFAULT(name)) { \
log_info(gc)("Heuristics ergonomically sets -XX:" #name "=" #value); \
- FLAG_SET_DEFAULT(name, value); \
+ FLAG_SET_ERGO(name, value); \
} \
} while (0)
diff --git a/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp b/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp
index 4c0bc209d78..41b2703730b 100644
--- a/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp
+++ b/src/hotspot/share/gc/shenandoah/mode/shenandoahPassiveMode.cpp
@@ -29,6 +29,7 @@
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "logging/log.hpp"
#include "logging/logTag.hpp"
+#include "runtime/globals_extension.hpp"
#include "runtime/java.hpp"
void ShenandoahPassiveMode::initialize_flags() const {
@@ -38,7 +39,10 @@ void ShenandoahPassiveMode::initialize_flags() const {
// No need for evacuation reserve with Full GC, only for Degenerated GC.
if (!ShenandoahDegeneratedGC) {
- SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahEvacReserve, 0);
+ if (FLAG_IS_DEFAULT(ShenandoahEvacReserve)) {
+ log_info(gc)("Heuristics sets -XX:ShenandoahEvacReserve=0");
+ FLAG_SET_DEFAULT(ShenandoahEvacReserve, 0);
+ }
}
// Disable known barriers by default.
From d09a8cb81b70a6c51ef5599bee04f1445a48e63f Mon Sep 17 00:00:00 2001
From: Sergey Bylokhov
Date: Thu, 13 Nov 2025 18:39:49 +0000
Subject: [PATCH 038/418] 8371746: Some imports in Integer.java and Long.java
became unused after JDK-8370503
Reviewed-by: liach, darcy, iris
---
src/java.base/share/classes/java/lang/Integer.java | 2 --
src/java.base/share/classes/java/lang/Long.java | 2 --
2 files changed, 4 deletions(-)
diff --git a/src/java.base/share/classes/java/lang/Integer.java b/src/java.base/share/classes/java/lang/Integer.java
index 20d1edb6d5f..2742ec40abf 100644
--- a/src/java.base/share/classes/java/lang/Integer.java
+++ b/src/java.base/share/classes/java/lang/Integer.java
@@ -41,8 +41,6 @@ import java.util.Optional;
import static java.lang.Character.digit;
import static java.lang.String.COMPACT_STRINGS;
-import static java.lang.String.LATIN1;
-import static java.lang.String.UTF16;
/**
* The {@code Integer} class is the {@linkplain
diff --git a/src/java.base/share/classes/java/lang/Long.java b/src/java.base/share/classes/java/lang/Long.java
index b0477fdab6d..3077e7c0a38 100644
--- a/src/java.base/share/classes/java/lang/Long.java
+++ b/src/java.base/share/classes/java/lang/Long.java
@@ -41,8 +41,6 @@ import jdk.internal.vm.annotation.Stable;
import static java.lang.Character.digit;
import static java.lang.String.COMPACT_STRINGS;
-import static java.lang.String.LATIN1;
-import static java.lang.String.UTF16;
/**
* The {@code Long} class is the {@linkplain
From db3a8386d482c161c45fae1689826bd53709f11f Mon Sep 17 00:00:00 2001
From: Brian Burkhalter
Date: Thu, 13 Nov 2025 18:59:34 +0000
Subject: [PATCH 039/418] 8371436: (fs) java/nio/file/FileStore/Basic.java
fails on macOS platform due to assertTrue(!store.equals(prev));
Reviewed-by: alanb
---
.../macosx/native/libnio/fs/BsdNativeDispatcher.c | 12 +++++++++++-
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/src/java.base/macosx/native/libnio/fs/BsdNativeDispatcher.c b/src/java.base/macosx/native/libnio/fs/BsdNativeDispatcher.c
index 8776411be07..7a9e92d6fa9 100644
--- a/src/java.base/macosx/native/libnio/fs/BsdNativeDispatcher.c
+++ b/src/java.base/macosx/native/libnio/fs/BsdNativeDispatcher.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -50,6 +50,7 @@ static jfieldID entry_name;
static jfieldID entry_dir;
static jfieldID entry_fstype;
static jfieldID entry_options;
+static jfieldID entry_dev;
struct fsstat_iter {
struct statfs *buf;
@@ -85,6 +86,8 @@ Java_sun_nio_fs_BsdNativeDispatcher_initIDs(JNIEnv* env, jclass this)
CHECK_NULL(entry_fstype);
entry_options = (*env)->GetFieldID(env, clazz, "opts", "[B");
CHECK_NULL(entry_options);
+ entry_dev = (*env)->GetFieldID(env, clazz, "dev", "J");
+ CHECK_NULL(entry_dev);
}
JNIEXPORT jlong JNICALL
@@ -151,6 +154,8 @@ Java_sun_nio_fs_BsdNativeDispatcher_fsstatEntry(JNIEnv* env, jclass this,
char* dir;
char* fstype;
char* options;
+ int32_t fsid_val[2];
+ long dev;
if (iter == NULL || iter->pos >= iter->nentries)
return -1;
@@ -162,6 +167,8 @@ Java_sun_nio_fs_BsdNativeDispatcher_fsstatEntry(JNIEnv* env, jclass this,
options="ro";
else
options="";
+ fsid_val[0] = iter->buf[iter->pos].f_fsid.val[0];
+ fsid_val[1] = iter->buf[iter->pos].f_fsid.val[1];
iter->pos++;
@@ -193,6 +200,9 @@ Java_sun_nio_fs_BsdNativeDispatcher_fsstatEntry(JNIEnv* env, jclass this,
(*env)->SetByteArrayRegion(env, bytes, 0, len, (jbyte*)options);
(*env)->SetObjectField(env, entry, entry_options, bytes);
+ dev = (((long)fsid_val[1]) << 32) | (long)fsid_val[0];
+ (*env)->SetLongField(env, entry, entry_dev, long_to_jlong(dev));
+
return 0;
}
From 6322aaba63b235cb6c73d23a932210af318404ec Mon Sep 17 00:00:00 2001
From: Roger Riggs
Date: Thu, 13 Nov 2025 19:08:35 +0000
Subject: [PATCH 040/418] 8371821: Duplicate export of jdk.internal.util to
java.net.http
Reviewed-by: naoto, alanb
---
src/java.base/share/classes/module-info.java | 1 -
1 file changed, 1 deletion(-)
diff --git a/src/java.base/share/classes/module-info.java b/src/java.base/share/classes/module-info.java
index 3ae84fdf198..70a79390828 100644
--- a/src/java.base/share/classes/module-info.java
+++ b/src/java.base/share/classes/module-info.java
@@ -262,7 +262,6 @@ module java.base {
jdk.jfr;
exports jdk.internal.util to
java.desktop,
- java.net.http,
java.prefs,
java.security.jgss,
java.smartcardio,
From 155d7df555fcebc318db89408ef0fffbd95414a0 Mon Sep 17 00:00:00 2001
From: Leonid Mesnik
Date: Thu, 13 Nov 2025 23:54:07 +0000
Subject: [PATCH 041/418] 8371749: New test
serviceability/jvmti/events/VMDeath/AllocatingInVMDeath/TestAllocatingInVMDeath.java
fails with -Xcheck:jni
Reviewed-by: sspitsyn, amenkov, cjplummer
---
.../AllocatingInVMDeath/libTestAllocatingInVMDeath.cpp | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/test/hotspot/jtreg/serviceability/jvmti/events/VMDeath/AllocatingInVMDeath/libTestAllocatingInVMDeath.cpp b/test/hotspot/jtreg/serviceability/jvmti/events/VMDeath/AllocatingInVMDeath/libTestAllocatingInVMDeath.cpp
index 5f46d69afa1..93d66bc38bc 100644
--- a/test/hotspot/jtreg/serviceability/jvmti/events/VMDeath/AllocatingInVMDeath/libTestAllocatingInVMDeath.cpp
+++ b/test/hotspot/jtreg/serviceability/jvmti/events/VMDeath/AllocatingInVMDeath/libTestAllocatingInVMDeath.cpp
@@ -37,6 +37,10 @@ cbVMDeath(jvmtiEnv* jvmti, JNIEnv* jni) {
fatal(jni, "Can't find upCall method.");
}
jni->CallStaticObjectMethod(clz, mid);
+ if (jni->ExceptionOccurred()) {
+ jni->ExceptionDescribe();
+ fatal(jni, "cbVMDeath: unexpected exception occurred in Java upcall method.");
+ }
}
JNIEXPORT jint JNICALL
From 0d8b5188bb4315be3c63898a2ce4e68dd2bd4481 Mon Sep 17 00:00:00 2001
From: Alexey Semenyuk
Date: Fri, 14 Nov 2025 01:07:05 +0000
Subject: [PATCH 042/418] 8364560: The default value of --linux-menu-group
option is invalid 8356574: Test --linux-menu-group option
Reviewed-by: almatvee
---
.../internal/LinuxPackageBuilder.java | 10 ++++----
.../internal/model/LinuxPackageMixin.java | 15 +++++++++++-
.../resources/LinuxResources.properties | 1 -
.../jdk/jpackage/test/LinuxHelper.java | 3 ++-
.../jpackage/linux/ShortcutHintTest.java | 24 +++++++++++++++----
5 files changed, 41 insertions(+), 12 deletions(-)
diff --git a/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/LinuxPackageBuilder.java b/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/LinuxPackageBuilder.java
index cc00d7816f5..4cfb8a26c8f 100644
--- a/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/LinuxPackageBuilder.java
+++ b/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/LinuxPackageBuilder.java
@@ -84,9 +84,9 @@ final class LinuxPackageBuilder {
private LinuxPackage create(Package pkg) throws ConfigException {
return LinuxPackage.create(pkg, new LinuxPackageMixin.Stub(
Optional.ofNullable(menuGroupName).orElseGet(DEFAULTS::menuGroupName),
- Optional.ofNullable(category),
+ category(),
Optional.ofNullable(additionalDependencies),
- Optional.ofNullable(release),
+ release(),
pkg.asStandardPackageType().map(LinuxPackageArch::getValue).orElseThrow()));
}
@@ -192,7 +192,7 @@ final class LinuxPackageBuilder {
private final PackageBuilder pkgBuilder;
- private static final Defaults DEFAULTS = new Defaults(I18N.getString(
- "param.menu-group.default"));
-
+ // Should be one of https://specifications.freedesktop.org/menu/latest/category-registry.html#main-category-registry
+ // The category is an ID, not a localizable string
+ private static final Defaults DEFAULTS = new Defaults("Utility");
}
diff --git a/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/model/LinuxPackageMixin.java b/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/model/LinuxPackageMixin.java
index 5bcf57194f6..056e3b89527 100644
--- a/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/model/LinuxPackageMixin.java
+++ b/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/model/LinuxPackageMixin.java
@@ -34,6 +34,12 @@ public interface LinuxPackageMixin {
/**
* Gets the name of the start menu group where to create shortcuts for
* application launchers of this package.
+ *
+ * It sets the value of the "Categories" property in .desktop files of the
+ * package.
+ *
+ * Should be one of the values from https://specifications.freedesktop.org/menu/latest/category-registry.html
*
* @return the name of the start menu group where to create shortcuts for
* application launchers of this package
@@ -44,6 +50,13 @@ public interface LinuxPackageMixin {
/**
* Gets the category of this package.
+ *
+ * For RPM packages this is the value of the optional "Group" property.
+ *
+ * For DEB packages this is the value of the mandatory "Section" property.
+ * The
+ * present list of recognized values.
*
* @return the category of this package
*/
@@ -62,7 +75,7 @@ public interface LinuxPackageMixin {
* Gets the release of this package. Returns an empty {@link Optional} instance
* if this package doesn't have a release.
*
- * For RPM packages, this is the value of a "Release" property in spec file. RPM
+ * For RPM packages, this is the value of the "Release" property in spec file. RPM
* packages always have a release.
*
* For DEB packages, this is an optional {@code debian_revision} component of a
diff --git a/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/resources/LinuxResources.properties b/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/resources/LinuxResources.properties
index fbc83ba2e10..a732d02c7d1 100644
--- a/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/resources/LinuxResources.properties
+++ b/src/jdk.jpackage/linux/classes/jdk/jpackage/internal/resources/LinuxResources.properties
@@ -28,7 +28,6 @@ deb.bundler.name=DEB Bundle
rpm.bundler.name=RPM Bundle
param.license-type.default=Unknown
-param.menu-group.default=Unknown
resource.deb-control-file=DEB control file
resource.deb-preinstall-script=DEB preinstall script
diff --git a/test/jdk/tools/jpackage/helpers/jdk/jpackage/test/LinuxHelper.java b/test/jdk/tools/jpackage/helpers/jdk/jpackage/test/LinuxHelper.java
index 25358e8ecdc..e795f7c9760 100644
--- a/test/jdk/tools/jpackage/helpers/jdk/jpackage/test/LinuxHelper.java
+++ b/test/jdk/tools/jpackage/helpers/jdk/jpackage/test/LinuxHelper.java
@@ -564,7 +564,8 @@ public final class LinuxHelper {
for (var e : List.of(
Map.entry("Type", "Application"),
Map.entry("Terminal", "false"),
- Map.entry("Comment", launcherDescription)
+ Map.entry("Comment", launcherDescription),
+ Map.entry("Categories", Optional.ofNullable(cmd.getArgumentValue("--linux-menu-group")).orElse("Utility"))
)) {
String key = e.getKey();
TKit.assertEquals(e.getValue(), data.find(key).orElseThrow(), String.format(
diff --git a/test/jdk/tools/jpackage/linux/ShortcutHintTest.java b/test/jdk/tools/jpackage/linux/ShortcutHintTest.java
index 8d373cb2b86..2591d1d393a 100644
--- a/test/jdk/tools/jpackage/linux/ShortcutHintTest.java
+++ b/test/jdk/tools/jpackage/linux/ShortcutHintTest.java
@@ -26,13 +26,15 @@ import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import jdk.jpackage.test.AdditionalLauncher;
+import jdk.jpackage.test.Annotations.Parameter;
+import jdk.jpackage.test.Annotations.Test;
import jdk.jpackage.test.FileAssociations;
-import jdk.jpackage.test.PackageType;
-import jdk.jpackage.test.PackageTest;
-import jdk.jpackage.test.TKit;
import jdk.jpackage.test.JPackageCommand;
import jdk.jpackage.test.LinuxHelper;
-import jdk.jpackage.test.Annotations.Test;
+import jdk.jpackage.test.PackageTest;
+import jdk.jpackage.test.PackageType;
+import jdk.jpackage.test.RunnablePackageTest.Action;
+import jdk.jpackage.test.TKit;
/**
* Test --linux-shortcut parameter. Output of the test should be
@@ -179,4 +181,18 @@ public class ShortcutHintTest {
.apply(Files.readAllLines(desktopFile));
}).run();
}
+
+ /**
+ * Test "--linux-menu-group" option.
+ *
+ * @param menuGroup value of "--linux-menu-group" option
+ */
+ @Test
+ // Values from https://specifications.freedesktop.org/menu/latest/category-registry.html#main-category-registry
+ @Parameter("Development")
+ public static void testMenuGroup(String menuGroup) {
+ createTest().addInitializer(JPackageCommand::setFakeRuntime).addInitializer(cmd -> {
+ cmd.addArgument("--linux-shortcut").setArgumentValue("--linux-menu-group", menuGroup);
+ }).run(Action.CREATE_AND_UNPACK);
+ }
}
From eaddefb475c6431821c2d62baf550ba2c5f357bf Mon Sep 17 00:00:00 2001
From: Fei Yang
Date: Fri, 14 Nov 2025 01:10:11 +0000
Subject: [PATCH 043/418] 8371753: compiler/c2/cr7200264/TestIntVect.java fails
IR verification
Reviewed-by: chagedorn, fjiang
---
.../compiler/c2/cr7200264/TestIntVect.java | 37 ++++++++++---------
1 file changed, 19 insertions(+), 18 deletions(-)
diff --git a/test/hotspot/jtreg/compiler/c2/cr7200264/TestIntVect.java b/test/hotspot/jtreg/compiler/c2/cr7200264/TestIntVect.java
index 76c33ec1b07..3df6f956b0f 100644
--- a/test/hotspot/jtreg/compiler/c2/cr7200264/TestIntVect.java
+++ b/test/hotspot/jtreg/compiler/c2/cr7200264/TestIntVect.java
@@ -413,7 +413,8 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.LOAD_VECTOR_I, "> 0",
IRNode.ADD_REDUCTION_VI, "> 0",
- IRNode.ADD_VI, "> 0" })
+ IRNode.ADD_VI, "> 0" },
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
// The reduction is moved outside the loop, and we use a
// element-wise accumulator inside the loop.
int test_sum(int[] a1) {
@@ -426,7 +427,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.ADD_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_addc(int[] a0, int[] a1) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]+VALUE);
@@ -435,7 +436,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.ADD_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_addv(int[] a0, int[] a1, int b) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]+b);
@@ -444,7 +445,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.ADD_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_adda(int[] a0, int[] a1, int[] a2) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]+a2[i]);
@@ -453,7 +454,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.ADD_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_subc(int[] a0, int[] a1) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]-VALUE);
@@ -462,7 +463,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.SUB_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_subv(int[] a0, int[] a1, int b) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]-b);
@@ -471,7 +472,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.SUB_VI, "> 0", },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_suba(int[] a0, int[] a1, int[] a2) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]-a2[i]);
@@ -498,7 +499,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.MUL_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse4.1", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse4.1", "true", "asimd", "true", "rvv", "true"})
void test_mulv(int[] a0, int[] a1, int b) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]*b);
@@ -507,7 +508,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.MUL_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse4.1", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse4.1", "true", "asimd", "true", "rvv", "true"})
void test_mula(int[] a0, int[] a1, int[] a2) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]*a2[i]);
@@ -580,7 +581,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.AND_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_andc(int[] a0, int[] a1) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]&BIT_MASK);
@@ -589,7 +590,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.AND_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_andv(int[] a0, int[] a1, int b) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]&b);
@@ -598,7 +599,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.AND_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_anda(int[] a0, int[] a1, int[] a2) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]&a2[i]);
@@ -607,7 +608,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.OR_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_orc(int[] a0, int[] a1) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]|BIT_MASK);
@@ -616,7 +617,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.OR_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_orv(int[] a0, int[] a1, int b) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]|b);
@@ -625,7 +626,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.OR_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_ora(int[] a0, int[] a1, int[] a2) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]|a2[i]);
@@ -634,7 +635,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.XOR_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_xorc(int[] a0, int[] a1) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]^BIT_MASK);
@@ -643,7 +644,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.XOR_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_xorv(int[] a0, int[] a1, int b) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]^b);
@@ -652,7 +653,7 @@ public class TestIntVect {
@Test
@IR(counts = { IRNode.XOR_VI, "> 0" },
- applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true"})
+ applyIfCPUFeatureOr = {"sse2", "true", "asimd", "true", "rvv", "true"})
void test_xora(int[] a0, int[] a1, int[] a2) {
for (int i = 0; i < a0.length; i+=1) {
a0[i] = (int)(a1[i]^a2[i]);
From 7733632f90a17ec848c4c9259c1aa58fded8c15a Mon Sep 17 00:00:00 2001
From: Alexey Semenyuk
Date: Fri, 14 Nov 2025 02:08:45 +0000
Subject: [PATCH 044/418] 8369206: jpackage should not set R/O permission on
app launchers
Reviewed-by: almatvee
---
.../internal/ExecutableRebrander.java | 69 +++++++++----------
1 file changed, 32 insertions(+), 37 deletions(-)
diff --git a/src/jdk.jpackage/windows/classes/jdk/jpackage/internal/ExecutableRebrander.java b/src/jdk.jpackage/windows/classes/jdk/jpackage/internal/ExecutableRebrander.java
index 913a44dbe18..05b87e6f449 100644
--- a/src/jdk.jpackage/windows/classes/jdk/jpackage/internal/ExecutableRebrander.java
+++ b/src/jdk.jpackage/windows/classes/jdk/jpackage/internal/ExecutableRebrander.java
@@ -120,46 +120,41 @@ final class ExecutableRebrander {
List actions) throws IOException {
Objects.requireNonNull(actions);
actions.forEach(Objects::requireNonNull);
+
+ String tempDirectory = env.buildRoot().toAbsolutePath().toString();
+ if (WindowsDefender.isThereAPotentialWindowsDefenderIssue(tempDirectory)) {
+ Log.verbose(I18N.format("message.potential.windows.defender.issue", tempDirectory));
+ }
+
+ var shortTargetPath = ShortPathUtils.toShortPath(target);
+ long resourceLock = lockResource(shortTargetPath.orElse(target).toString());
+ if (resourceLock == 0) {
+ throw I18N.buildException().message("error.lock-resource", shortTargetPath.orElse(target)).create(RuntimeException::new);
+ }
+
+ final boolean resourceUnlockedSuccess;
try {
- String tempDirectory = env.buildRoot().toAbsolutePath().toString();
- if (WindowsDefender.isThereAPotentialWindowsDefenderIssue(tempDirectory)) {
- Log.verbose(I18N.format("message.potential.windows.defender.issue", tempDirectory));
- }
-
- target.toFile().setWritable(true, true);
-
- var shortTargetPath = ShortPathUtils.toShortPath(target);
- long resourceLock = lockResource(shortTargetPath.orElse(target).toString());
- if (resourceLock == 0) {
- throw I18N.buildException().message("error.lock-resource", shortTargetPath.orElse(target)).create(RuntimeException::new);
- }
-
- final boolean resourceUnlockedSuccess;
- try {
- for (var action : actions) {
- action.editResource(resourceLock);
- }
- } finally {
- if (resourceLock == 0) {
- resourceUnlockedSuccess = true;
- } else {
- resourceUnlockedSuccess = unlockResource(resourceLock);
- if (shortTargetPath.isPresent()) {
- // Windows will rename the excuatble in the unlock operation.
- // Should restore executable's name.
- var tmpPath = target.getParent().resolve(
- target.getFileName().toString() + ".restore");
- Files.move(shortTargetPath.get(), tmpPath);
- Files.move(tmpPath, target);
- }
- }
- }
-
- if (!resourceUnlockedSuccess) {
- throw I18N.buildException().message("error.unlock-resource", target).create(RuntimeException::new);
+ for (var action : actions) {
+ action.editResource(resourceLock);
}
} finally {
- target.toFile().setReadOnly();
+ if (resourceLock == 0) {
+ resourceUnlockedSuccess = true;
+ } else {
+ resourceUnlockedSuccess = unlockResource(resourceLock);
+ if (shortTargetPath.isPresent()) {
+ // Windows will rename the executable in the unlock operation.
+ // Should restore executable's name.
+ var tmpPath = target.getParent().resolve(
+ target.getFileName().toString() + ".restore");
+ Files.move(shortTargetPath.get(), tmpPath);
+ Files.move(tmpPath, target);
+ }
+ }
+ }
+
+ if (!resourceUnlockedSuccess) {
+ throw I18N.buildException().message("error.unlock-resource", target).create(RuntimeException::new);
}
}
From 1baf5164d6a9077e0c440b7b78be6424a052f8a9 Mon Sep 17 00:00:00 2001
From: Daniel Skantz
Date: Fri, 14 Nov 2025 07:09:05 +0000
Subject: [PATCH 045/418] 8371628: C2: add a test case for the arraycopy
changes in JDK-8297933
Reviewed-by: rcastanedalo, shade
---
.../compiler/arraycopy/TestACSameSrcDst.java | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/test/hotspot/jtreg/compiler/arraycopy/TestACSameSrcDst.java b/test/hotspot/jtreg/compiler/arraycopy/TestACSameSrcDst.java
index f85bcf27d74..93b9fead584 100644
--- a/test/hotspot/jtreg/compiler/arraycopy/TestACSameSrcDst.java
+++ b/test/hotspot/jtreg/compiler/arraycopy/TestACSameSrcDst.java
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2017, Red Hat, Inc. All rights reserved.
+ * Copyright (c) 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +24,7 @@
/**
* @test
- * @bug 8179678
+ * @bug 8179678 8297933 8371628
* @summary ArrayCopy with same src and dst can cause incorrect execution or compiler crash
*
* @run main/othervm -XX:CompileCommand=compileonly,TestACSameSrcDst::test* TestACSameSrcDst
@@ -76,6 +77,17 @@ public class TestACSameSrcDst {
return 0;
}
+ static void test6(int x) {
+ int[] src = new int[10];
+ int l = 0;
+ while (l < 1) { l++; } // Delay folding.
+ // The bug relies on idealizations of ArrayCopy source and destination offsets but this would be limited if carried out before IGVN.
+
+ System.arraycopy(src, x + 1, src, x + 1, l);
+ // source and destination offsets are a shared AddNode that would go away during LShiftL idealization
+ // -- causing a crash if not for a hook node retaining its liveness.
+ }
+
public static void main(String[] args) {
int[] array = new int[15];
for (int i = 0; i < 20000; i++) {
@@ -101,6 +113,7 @@ public class TestACSameSrcDst {
if (res != 0) {
throw new RuntimeException("bad result: " + res + " != " + 0);
}
+ test6(0);
}
}
}
From 0829c6acde496833300efb38b4b900bf94b99dc0 Mon Sep 17 00:00:00 2001
From: Anton Seoane Ampudia
Date: Fri, 14 Nov 2025 07:25:44 +0000
Subject: [PATCH 046/418] 8356761: IGV: dump escape analysis information
Reviewed-by: rcastanedalo, chagedorn
---
src/hotspot/share/opto/escape.cpp | 41 +++++++++++++--
src/hotspot/share/opto/escape.hpp | 6 ++-
src/hotspot/share/opto/idealGraphPrinter.cpp | 38 ++++++++++++++
src/hotspot/share/opto/idealGraphPrinter.hpp | 3 ++
src/hotspot/share/opto/phasetype.hpp | 17 +++++++
.../filters/colorEscapeAnalysis.filter | 51 +++++++++++++++++++
.../showConnectionGraphNodesOnly.filter | 6 +++
.../filters/showConnectionInfo.filter | 16 ++++++
.../sun/hotspot/igv/servercompiler/layer.xml | 14 ++++-
.../lib/ir_framework/CompilePhase.java | 17 +++++++
10 files changed, 204 insertions(+), 5 deletions(-)
create mode 100644 src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/colorEscapeAnalysis.filter
create mode 100644 src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/showConnectionGraphNodesOnly.filter
create mode 100644 src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/showConnectionInfo.filter
diff --git a/src/hotspot/share/opto/escape.cpp b/src/hotspot/share/opto/escape.cpp
index 61aa009361f..fb3b5dba42c 100644
--- a/src/hotspot/share/opto/escape.cpp
+++ b/src/hotspot/share/opto/escape.cpp
@@ -114,11 +114,13 @@ void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
invocation = C->congraph()->_invocation + 1;
}
ConnectionGraph* congraph = new(C->comp_arena()) ConnectionGraph(C, igvn, invocation);
+ NOT_PRODUCT(if (C->should_print_igv(/* Any level */ 1)) C->igv_printer()->set_congraph(congraph);)
// Perform escape analysis
if (congraph->compute_escape()) {
// There are non escaping objects.
C->set_congraph(congraph);
}
+ NOT_PRODUCT(if (C->should_print_igv(/* Any level */ 1)) C->igv_printer()->set_congraph(nullptr);)
// Cleanup.
if (oop_null->outcnt() == 0) {
igvn->hash_delete(oop_null);
@@ -126,6 +128,8 @@ void ConnectionGraph::do_analysis(Compile *C, PhaseIterGVN *igvn) {
if (noop_null->outcnt() == 0) {
igvn->hash_delete(noop_null);
}
+
+ C->print_method(PHASE_AFTER_EA, 2);
}
bool ConnectionGraph::compute_escape() {
@@ -281,6 +285,8 @@ bool ConnectionGraph::compute_escape() {
return false;
}
+ _compile->print_method(PHASE_EA_AFTER_INITIAL_CONGRAPH, 4);
+
// 2. Finish Graph construction by propagating references to all
// java objects through graph.
if (!complete_connection_graph(ptnodes_worklist, non_escaped_allocs_worklist,
@@ -291,6 +297,8 @@ bool ConnectionGraph::compute_escape() {
return false;
}
+ _compile->print_method(PHASE_EA_AFTER_COMPLETE_CONGRAPH, 4);
+
// 3. Adjust scalar_replaceable state of nonescaping objects and push
// scalar replaceable allocations on alloc_worklist for processing
// in split_unique_types().
@@ -312,6 +320,7 @@ bool ConnectionGraph::compute_escape() {
found_nsr_alloc = true;
}
}
+ _compile->print_method(PHASE_EA_ADJUST_SCALAR_REPLACEABLE_ITER, 6, n);
}
// Propagate NSR (Not Scalar Replaceable) state.
@@ -350,6 +359,7 @@ bool ConnectionGraph::compute_escape() {
_collecting = false;
+ _compile->print_method(PHASE_EA_AFTER_PROPAGATE_NSR, 4);
} // TracePhase t3("connectionGraph")
// 4. Optimize ideal graph based on EA information.
@@ -387,6 +397,8 @@ bool ConnectionGraph::compute_escape() {
}
#endif
+ _compile->print_method(PHASE_EA_AFTER_GRAPH_OPTIMIZATION, 4);
+
// 5. Separate memory graph for scalar replaceable allcations.
bool has_scalar_replaceable_candidates = (alloc_worklist.length() > 0);
if (has_scalar_replaceable_candidates && EliminateAllocations) {
@@ -398,7 +410,6 @@ bool ConnectionGraph::compute_escape() {
NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
return false;
}
- C->print_method(PHASE_AFTER_EA, 2);
#ifdef ASSERT
} else if (Verbose && (PrintEscapeAnalysis || PrintEliminateAllocations)) {
@@ -413,6 +424,8 @@ bool ConnectionGraph::compute_escape() {
#endif
}
+ _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES, 4);
+
// 6. Reduce allocation merges used as debug information. This is done after
// split_unique_types because the methods used to create SafePointScalarObject
// need to traverse the memory graph to find values for object fields. We also
@@ -454,6 +467,8 @@ bool ConnectionGraph::compute_escape() {
}
}
+ _compile->print_method(PHASE_EA_AFTER_REDUCE_PHI_ON_SAFEPOINTS, 4);
+
NOT_PRODUCT(escape_state_statistics(java_objects_worklist);)
return has_non_escaping_obj;
}
@@ -1302,11 +1317,14 @@ void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray &alloc_work
}
}
+ _compile->print_method(PHASE_EA_BEFORE_PHI_REDUCTION, 5, ophi);
+
// CastPPs need to be processed before Cmps because during the process of
// splitting CastPPs we make reference to the inputs of the Cmp that is used
// by the If controlling the CastPP.
for (uint i = 0; i < castpps.size(); i++) {
reduce_phi_on_castpp_field_load(castpps.at(i), alloc_worklist);
+ _compile->print_method(PHASE_EA_AFTER_PHI_CASTPP_REDUCTION, 6, castpps.at(i));
}
for (uint i = 0; i < others.size(); i++) {
@@ -1314,8 +1332,10 @@ void ConnectionGraph::reduce_phi(PhiNode* ophi, GrowableArray &alloc_work
if (use->is_AddP()) {
reduce_phi_on_field_access(use, alloc_worklist);
+ _compile->print_method(PHASE_EA_AFTER_PHI_ADDP_REDUCTION, 6, use);
} else if(use->is_Cmp()) {
reduce_phi_on_cmp(use);
+ _compile->print_method(PHASE_EA_AFTER_PHI_CMP_REDUCTION, 6, use);
}
}
@@ -2417,6 +2437,7 @@ bool ConnectionGraph::complete_connection_graph(
timeout = true;
break;
}
+ _compile->print_method(PHASE_EA_COMPLETE_CONNECTION_GRAPH_ITER, 5);
}
if ((iterations < GRAPH_BUILD_ITER_LIMIT) && !timeout) {
time.start();
@@ -2490,7 +2511,8 @@ bool ConnectionGraph::complete_connection_graph(
// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool ConnectionGraph::find_non_escaped_objects(GrowableArray& ptnodes_worklist,
- GrowableArray& non_escaped_allocs_worklist) {
+ GrowableArray& non_escaped_allocs_worklist,
+ bool print_method) {
GrowableArray escape_worklist;
// First, put all nodes with GlobalEscape and ArgEscape states on worklist.
int ptnodes_length = ptnodes_worklist.length();
@@ -2550,6 +2572,9 @@ bool ConnectionGraph::find_non_escaped_objects(GrowableArray& ptn
escape_worklist.push(e);
}
}
+ if (print_method) {
+ _compile->print_method(PHASE_EA_CONNECTION_GRAPH_PROPAGATE_ITER, 6, e->ideal_node());
+ }
}
}
// Remove escaped objects from non_escaped list.
@@ -3137,6 +3162,7 @@ void ConnectionGraph::find_scalar_replaceable_allocs(GrowableArrayprint_method(PHASE_EA_PROPAGATE_NSR_ITER, 5, jobj->ideal_node());
}
}
}
@@ -3159,7 +3185,7 @@ void ConnectionGraph::verify_connection_graph(
assert(new_edges == 0, "graph was not complete");
// Verify that escape state is final.
int length = non_escaped_allocs_worklist.length();
- find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist);
+ find_non_escaped_objects(ptnodes_worklist, non_escaped_allocs_worklist, /*print_method=*/ false);
assert((non_escaped_length == non_escaped_allocs_worklist.length()) &&
(non_escaped_length == length) &&
(_worklist.length() == 0), "escape state was not final");
@@ -4720,6 +4746,8 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist,
// New alias types were created in split_AddP().
uint new_index_end = (uint) _compile->num_alias_types();
+ _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_1, 5);
+
// Phase 2: Process MemNode's from memnode_worklist. compute new address type and
// compute new values for Memory inputs (the Memory inputs are not
// actually updated until phase 4.)
@@ -4920,6 +4948,8 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist,
record_for_optimizer(nmm);
}
+ _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_3, 5);
+
// Phase 4: Update the inputs of non-instance memory Phis and
// the Memory input of memnodes
// First update the inputs of any non-instance Phi's from
@@ -4988,6 +5018,7 @@ void ConnectionGraph::split_unique_types(GrowableArray &alloc_worklist,
assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
}
#endif
+ _compile->print_method(PHASE_EA_AFTER_SPLIT_UNIQUE_TYPES_4, 5);
}
#ifndef PRODUCT
@@ -5010,6 +5041,10 @@ static const char *esc_names[] = {
"GlobalEscape"
};
+const char* PointsToNode::esc_name() const {
+ return esc_names[(int)escape_state()];
+}
+
void PointsToNode::dump_header(bool print_state, outputStream* out) const {
NodeType nt = node_type();
out->print("%s(%d) ", node_type_names[(int) nt], _pidx);
diff --git a/src/hotspot/share/opto/escape.hpp b/src/hotspot/share/opto/escape.hpp
index 77d14525383..eea6403acdd 100644
--- a/src/hotspot/share/opto/escape.hpp
+++ b/src/hotspot/share/opto/escape.hpp
@@ -26,6 +26,7 @@
#define SHARE_OPTO_ESCAPE_HPP
#include "opto/addnode.hpp"
+#include "opto/idealGraphPrinter.hpp"
#include "opto/node.hpp"
#include "utilities/growableArray.hpp"
@@ -235,6 +236,7 @@ public:
NodeType node_type() const { return (NodeType)_type;}
void dump(bool print_state=true, outputStream* out=tty, bool newline=true) const;
void dump_header(bool print_state=true, outputStream* out=tty) const;
+ const char* esc_name() const;
#endif
};
@@ -321,6 +323,7 @@ public:
class ConnectionGraph: public ArenaObj {
friend class PointsToNode; // to access _compile
friend class FieldNode;
+ friend class IdealGraphPrinter;
private:
GrowableArray _nodes; // Map from ideal nodes to
// ConnectionGraph nodes.
@@ -467,7 +470,8 @@ private:
// Propagate GlobalEscape and ArgEscape escape states to all nodes
// and check that we still have non-escaping java objects.
bool find_non_escaped_objects(GrowableArray& ptnodes_worklist,
- GrowableArray& non_escaped_worklist);
+ GrowableArray& non_escaped_worklist,
+ bool print_method = true);
// Adjust scalar_replaceable state after Connection Graph is built.
void adjust_scalar_replaceable_state(JavaObjectNode* jobj, Unique_Node_List &reducible_merges);
diff --git a/src/hotspot/share/opto/idealGraphPrinter.cpp b/src/hotspot/share/opto/idealGraphPrinter.cpp
index 2873c1ef9d7..6a738878a1b 100644
--- a/src/hotspot/share/opto/idealGraphPrinter.cpp
+++ b/src/hotspot/share/opto/idealGraphPrinter.cpp
@@ -24,6 +24,7 @@
#include "memory/resourceArea.hpp"
#include "opto/chaitin.hpp"
+#include "opto/escape.hpp"
#include "opto/idealGraphPrinter.hpp"
#include "opto/machnode.hpp"
#include "opto/parse.hpp"
@@ -161,6 +162,7 @@ void IdealGraphPrinter::init(const char* file_name, bool use_multiple_files, boo
_current_method = nullptr;
_network_stream = nullptr;
_append = append;
+ _congraph = nullptr;
_parse = nullptr;
if (file_name != nullptr) {
@@ -637,6 +639,29 @@ void IdealGraphPrinter::visit_node(Node* n, bool edges) {
print_prop("is_block_start", "true");
}
+ // Dump escape analysis state for relevant nodes.
+ if (node->is_Allocate()) {
+ AllocateNode* alloc = node->as_Allocate();
+ if (alloc->_is_scalar_replaceable) {
+ print_prop("is_scalar_replaceable", "true");
+ }
+ if (alloc->_is_non_escaping) {
+ print_prop("is_non_escaping", "true");
+ }
+ if (alloc->does_not_escape_thread()) {
+ print_prop("does_not_escape_thread", "true");
+ }
+ }
+ if (node->is_SafePoint() && node->as_SafePoint()->has_ea_local_in_scope()) {
+ print_prop("has_ea_local_in_scope", "true");
+ }
+ if (node->is_CallJava() && node->as_CallJava()->arg_escape()) {
+ print_prop("arg_escape", "true");
+ }
+ if (node->is_Initialize() && node->as_Initialize()->does_not_escape()) {
+ print_prop("does_not_escape", "true");
+ }
+
const char *short_name = "short_name";
if (strcmp(node->Name(), "Parm") == 0 && node->as_Proj()->_con >= TypeFunc::Parms) {
int index = node->as_Proj()->_con - TypeFunc::Parms;
@@ -731,6 +756,19 @@ void IdealGraphPrinter::visit_node(Node* n, bool edges) {
print_prop("lrg", lrg_id);
}
+ if (_congraph != nullptr && node->_idx < _congraph->nodes_size()) {
+ PointsToNode* ptn = _congraph->ptnode_adr(node->_idx);
+ if (ptn != nullptr) {
+ stringStream node_head;
+ ptn->dump_header(false, &node_head);
+ print_prop("ea_node", node_head.freeze());
+ print_prop("escape_state", ptn->esc_name());
+ if (ptn->scalar_replaceable()) {
+ print_prop("scalar_replaceable", "true");
+ }
+ }
+ }
+
if (node->is_MachSafePoint()) {
const OopMap* oopmap = node->as_MachSafePoint()->oop_map();
if (oopmap != nullptr) {
diff --git a/src/hotspot/share/opto/idealGraphPrinter.hpp b/src/hotspot/share/opto/idealGraphPrinter.hpp
index df1c6b254d5..2159779ddfa 100644
--- a/src/hotspot/share/opto/idealGraphPrinter.hpp
+++ b/src/hotspot/share/opto/idealGraphPrinter.hpp
@@ -42,6 +42,7 @@ class Node;
class InlineTree;
class ciMethod;
class JVMState;
+class ConnectionGraph;
class Parse;
class IdealGraphPrinter : public CHeapObj {
@@ -116,6 +117,7 @@ class IdealGraphPrinter : public CHeapObj {
Compile *C;
double _max_freq;
bool _append;
+ ConnectionGraph* _congraph;
const Parse* _parse;
// Walk the native stack and print relevant C2 frames as IGV properties (if
@@ -165,6 +167,7 @@ class IdealGraphPrinter : public CHeapObj {
void end_method();
void print_graph(const char* name, const frame* fr = nullptr);
void print(const char* name, Node* root, GrowableArray& hidden_nodes, const frame* fr = nullptr);
+ void set_congraph(ConnectionGraph* congraph) { _congraph = congraph; }
void set_compile(Compile* compile) {C = compile; }
void update_compiled_method(ciMethod* current_method);
};
diff --git a/src/hotspot/share/opto/phasetype.hpp b/src/hotspot/share/opto/phasetype.hpp
index f24938b51c1..f388dc6cdc6 100644
--- a/src/hotspot/share/opto/phasetype.hpp
+++ b/src/hotspot/share/opto/phasetype.hpp
@@ -50,6 +50,23 @@
flags(ITER_GVN_AFTER_VECTOR, "Iter GVN after Vector Box Elimination") \
flags(BEFORE_LOOP_OPTS, "Before Loop Optimizations") \
flags(PHASEIDEAL_BEFORE_EA, "PhaseIdealLoop before EA") \
+ flags(EA_AFTER_INITIAL_CONGRAPH, "EA: 1. Intial Connection Graph") \
+ flags(EA_CONNECTION_GRAPH_PROPAGATE_ITER, "EA: 2. Connection Graph Propagate Iter") \
+ flags(EA_COMPLETE_CONNECTION_GRAPH_ITER, "EA: 2. Complete Connection Graph Iter") \
+ flags(EA_AFTER_COMPLETE_CONGRAPH, "EA: 2. Complete Connection Graph") \
+ flags(EA_ADJUST_SCALAR_REPLACEABLE_ITER, "EA: 3. Adjust scalar_replaceable State Iter") \
+ flags(EA_PROPAGATE_NSR_ITER, "EA: 3. Propagate NSR Iter") \
+ flags(EA_AFTER_PROPAGATE_NSR, "EA: 3. Propagate NSR") \
+ flags(EA_AFTER_GRAPH_OPTIMIZATION, "EA: 4. After Graph Optimization") \
+ flags(EA_AFTER_SPLIT_UNIQUE_TYPES_1, "EA: 5. After split_unique_types Phase 1") \
+ flags(EA_AFTER_SPLIT_UNIQUE_TYPES_3, "EA: 5. After split_unique_types Phase 3") \
+ flags(EA_AFTER_SPLIT_UNIQUE_TYPES_4, "EA: 5. After split_unique_types Phase 4") \
+ flags(EA_AFTER_SPLIT_UNIQUE_TYPES, "EA: 5. After split_unique_types") \
+ flags(EA_AFTER_REDUCE_PHI_ON_SAFEPOINTS, "EA: 6. After reduce_phi_on_safepoints") \
+ flags(EA_BEFORE_PHI_REDUCTION, "EA: 5. Before Phi Reduction") \
+ flags(EA_AFTER_PHI_CASTPP_REDUCTION, "EA: 5. Phi -> CastPP Reduction") \
+ flags(EA_AFTER_PHI_ADDP_REDUCTION, "EA: 5. Phi -> AddP Reduction") \
+ flags(EA_AFTER_PHI_CMP_REDUCTION, "EA: 5. Phi -> Cmp Reduction") \
flags(AFTER_EA, "After Escape Analysis") \
flags(ITER_GVN_AFTER_EA, "Iter GVN after EA") \
flags(BEFORE_BEAUTIFY_LOOPS, "Before Beautify Loops") \
diff --git a/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/colorEscapeAnalysis.filter b/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/colorEscapeAnalysis.filter
new file mode 100644
index 00000000000..2ef14f65110
--- /dev/null
+++ b/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/colorEscapeAnalysis.filter
@@ -0,0 +1,51 @@
+// Color those nodes present in the escape analysis connection graph
+// to indicate the result of escape analysis.
+// This filter is relevant between the first EA phase and "After Macro
+// Expansion".
+
+var bestColor = java.awt.Color.decode("#6aa84f"); // Green.
+var betterColor = java.awt.Color.decode("#f1c232"); // Yellow.
+var worseColor = java.awt.Color.decode("#e69138"); // Orange.
+var worstColor = java.awt.Color.decode("#cc0000"); // Red.
+
+// Apply first colors based on persistent node attributes
+
+// Object does not escape compilation unit and is scalar replaceable.
+colorize(and([hasProperty("is_non_escaping"),
+ hasProperty("is_scalar_replaceable")]),
+ bestColor);
+
+// Object does not escape compilation unit but is not scalar replaceable,
+// due to scalar replacement limitations. We can at least elide locks.
+colorize(and([hasProperty("is_non_escaping"),
+ not(hasProperty("is_scalar_replaceable"))]),
+ betterColor);
+
+// Object may escape compilation unit but does not escape thread.
+// We can at least elide locks.
+colorize(and([hasProperty("does_not_escape_thread"),
+ not(hasProperty("is_non_escaping"))]),
+ worseColor);
+
+// Object may escape compilation unit and thread. Nothing to do.
+colorize(and([matches("name", "Allocate"),
+ not(hasProperty("is_non_escaping")),
+ not(hasProperty("does_not_escape_thread"))]),
+ worstColor);
+
+// Apply colors again based on connection graph-derived attributes
+
+colorize(and([matches("escape_state", "NoEscape"),
+ hasProperty("scalar_replaceable")]),
+ bestColor);
+
+colorize(and([matches("escape_state", "NoEscape"),
+ not(hasProperty("scalar_replaceable"))]),
+ betterColor);
+
+colorize(and([matches("escape_state", "ArgEscape"),
+ not(hasProperty("scalar_replaceable"))]),
+ worseColor);
+
+colorize(matches("escape_state", "GlobalEscape"),
+ worstColor);
diff --git a/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/showConnectionGraphNodesOnly.filter b/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/showConnectionGraphNodesOnly.filter
new file mode 100644
index 00000000000..b0986db221a
--- /dev/null
+++ b/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/showConnectionGraphNodesOnly.filter
@@ -0,0 +1,6 @@
+// This filter shows only the nodes that are present in the escape analysis
+// connection graph. This can be used to approximate the connection graph inside
+// IGV.
+// This filter is only relevant during the escape analysis phases.
+
+remove(not(hasProperty("ea_node")));
diff --git a/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/showConnectionInfo.filter b/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/showConnectionInfo.filter
new file mode 100644
index 00000000000..ca3509687f5
--- /dev/null
+++ b/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/filters/showConnectionInfo.filter
@@ -0,0 +1,16 @@
+// This filter appends escape analysis connection graph node information to the
+// (possibly already existing) extra-label line.
+// This is only carried out for those nodes that are relevant to escape
+// analysis (and therefore represented in the connection graph).
+
+// Merge a possibly existing extra label with the escape analysis node type into a
+// new, single extra label.
+function mergeAndAppendTypeInfo(extra_label, ea_node) {
+ new_extra_label = extra_label == null ? "" : (extra_label + " ");
+ return new_extra_label + ea_node;
+}
+
+editProperty(hasProperty("ea_node"),
+ ["extra_label", "ea_node"],
+ "extra_label",
+ function(propertyValues) {return mergeAndAppendTypeInfo(propertyValues[0], propertyValues[1]);});
diff --git a/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/layer.xml b/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/layer.xml
index db4682f1cab..38dfc911a44 100644
--- a/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/layer.xml
+++ b/src/utils/IdealGraphVisualizer/ServerCompiler/src/main/resources/com/sun/hotspot/igv/servercompiler/layer.xml
@@ -85,9 +85,21 @@
+
+
+
+
+
+
+
+
+
+
+
+
-
+
diff --git a/test/hotspot/jtreg/compiler/lib/ir_framework/CompilePhase.java b/test/hotspot/jtreg/compiler/lib/ir_framework/CompilePhase.java
index 06b2afa8a67..a536808d269 100644
--- a/test/hotspot/jtreg/compiler/lib/ir_framework/CompilePhase.java
+++ b/test/hotspot/jtreg/compiler/lib/ir_framework/CompilePhase.java
@@ -60,6 +60,23 @@ public enum CompilePhase {
ITER_GVN_AFTER_VECTOR( "Iter GVN after Vector Box Elimination"),
BEFORE_LOOP_OPTS( "Before Loop Optimizations"),
PHASEIDEAL_BEFORE_EA( "PhaseIdealLoop before EA"),
+ EA_AFTER_INITIAL_CONGRAPH( "EA: 1. Intial Connection Graph"),
+ EA_CONNECTION_GRAPH_PROPAGATE_ITER("EA: 2. Connection Graph Propagate Iter"),
+ EA_COMPLETE_CONNECTION_GRAPH_ITER( "EA: 2. Complete Connection Graph Iter"),
+ EA_AFTER_COMPLETE_CONGRAPH( "EA: 2. Complete Connection Graph"),
+ EA_ADJUST_SCALAR_REPLACEABLE_ITER( "EA: 3. Adjust scalar_replaceable State Iter"),
+ EA_PROPAGATE_NSR_ITER( "EA: 3. Propagate NSR Iter"),
+ EA_AFTER_PROPAGATE_NSR( "EA: 3. Propagate NSR"),
+ EA_AFTER_GRAPH_OPTIMIZATION( "EA: 4. After Graph Optimization"),
+ EA_AFTER_SPLIT_UNIQUE_TYPES_1( "EA: 5. After split_unique_types Phase 1"),
+ EA_AFTER_SPLIT_UNIQUE_TYPES_3( "EA: 5. After split_unique_types Phase 3"),
+ EA_AFTER_SPLIT_UNIQUE_TYPES_4( "EA: 5. After split_unique_types Phase 4"),
+ EA_AFTER_SPLIT_UNIQUE_TYPES( "EA: 5. After split_unique_types"),
+ EA_AFTER_REDUCE_PHI_ON_SAFEPOINTS( "EA: 6. After reduce_phi_on_safepoints"),
+ EA_BEFORE_PHI_REDUCTION( "EA: 5. Before Phi Reduction"),
+ EA_AFTER_PHI_CASTPP_REDUCTION( "EA: 5. Phi -> CastPP Reduction"),
+ EA_AFTER_PHI_ADDP_REDUCTION( "EA: 5. Phi -> AddP Reduction"),
+ EA_AFTER_PHI_CMP_REDUCTION( "EA: 5. Phi -> Cmp Reduction"),
AFTER_EA( "After Escape Analysis"),
ITER_GVN_AFTER_EA( "Iter GVN after EA"),
BEFORE_BEAUTIFY_LOOPS( "Before Beautify Loops"),
From f4305923fb6203089fd13cf3387c81e127ae5fe2 Mon Sep 17 00:00:00 2001
From: Anton Seoane Ampudia
Date: Fri, 14 Nov 2025 07:26:03 +0000
Subject: [PATCH 047/418] 8369002: Extract the
loop->is_member(get_loop(get_ctrl(node))) pattern in a new function
Reviewed-by: bmaillard, rcastanedalo
---
src/hotspot/share/opto/loopTransform.cpp | 12 +++++-----
src/hotspot/share/opto/loopnode.cpp | 10 ++++-----
src/hotspot/share/opto/loopnode.hpp | 12 +++++++---
src/hotspot/share/opto/loopopts.cpp | 28 ++++++++++--------------
src/hotspot/share/opto/predicates.cpp | 2 +-
src/hotspot/share/opto/superword.cpp | 3 +--
6 files changed, 33 insertions(+), 34 deletions(-)
diff --git a/src/hotspot/share/opto/loopTransform.cpp b/src/hotspot/share/opto/loopTransform.cpp
index 9a21c7f5dda..31d1cbe0443 100644
--- a/src/hotspot/share/opto/loopTransform.cpp
+++ b/src/hotspot/share/opto/loopTransform.cpp
@@ -107,7 +107,7 @@ void IdealLoopTree::compute_trip_count(PhaseIdealLoop* phase, BasicType loop_bt)
cl->set_nonexact_trip_count();
// Loop's test should be part of loop.
- if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue))))
+ if (!phase->ctrl_is_member(this, cl->loopexit()->in(CountedLoopEndNode::TestValue)))
return; // Infinite loop
#ifdef ASSERT
@@ -611,7 +611,7 @@ void PhaseIdealLoop::peeled_dom_test_elim(IdealLoopTree* loop, Node_List& old_ne
if (test_cond != nullptr && // Test?
!test_cond->is_Con() && // And not already obvious?
// And condition is not a member of this loop?
- !loop->is_member(get_loop(get_ctrl(test_cond)))) {
+ !ctrl_is_member(loop, test_cond)) {
// Walk loop body looking for instances of this test
for (uint i = 0; i < loop->_body.size(); i++) {
Node* n = loop->_body.at(i);
@@ -1682,7 +1682,7 @@ Node* PhaseIdealLoop::find_last_store_in_outer_loop(Node* store, const IdealLoop
for (DUIterator_Fast imax, l = last->fast_outs(imax); l < imax; l++) {
Node* use = last->fast_out(l);
if (use->is_Store() && use->in(MemNode::Memory) == last) {
- if (is_member(outer_loop, get_ctrl(use))) {
+ if (ctrl_is_member(outer_loop, use)) {
assert(unique_next == last, "memory node should only have one usage in the loop body");
unique_next = use;
}
@@ -1795,7 +1795,7 @@ Node *PhaseIdealLoop::insert_post_loop(IdealLoopTree* loop, Node_List& old_new,
// as this is when we would normally expect a Phi as input. If the memory input
// is in the loop body as well, then we can safely assume it is still correct as the entire
// body was cloned as a unit
- if (!is_member(outer_loop, get_ctrl(store->in(MemNode::Memory)))) {
+ if (!ctrl_is_member(outer_loop, store->in(MemNode::Memory))) {
Node* mem_out = find_last_store_in_outer_loop(store, outer_loop);
Node* store_new = old_new[store->_idx];
store_new->set_req(MemNode::Memory, mem_out);
@@ -3285,7 +3285,7 @@ bool IdealLoopTree::empty_loop_candidate(PhaseIdealLoop* phase) const {
if (!cl->is_valid_counted_loop(T_INT)) {
return false; // Malformed loop
}
- if (!phase->is_member(this, phase->get_ctrl(cl->loopexit()->in(CountedLoopEndNode::TestValue)))) {
+ if (!phase->ctrl_is_member(this, cl->loopexit()->in(CountedLoopEndNode::TestValue))) {
return false; // Infinite loop
}
return true;
@@ -3376,7 +3376,7 @@ bool IdealLoopTree::empty_loop_with_extra_nodes_candidate(PhaseIdealLoop* phase)
return false;
}
- if (phase->is_member(this, phase->get_ctrl(cl->limit()))) {
+ if (phase->ctrl_is_member(this, cl->limit())) {
return false;
}
return true;
diff --git a/src/hotspot/share/opto/loopnode.cpp b/src/hotspot/share/opto/loopnode.cpp
index 3b398b053a3..dfff7ef96a5 100644
--- a/src/hotspot/share/opto/loopnode.cpp
+++ b/src/hotspot/share/opto/loopnode.cpp
@@ -463,16 +463,16 @@ Node* PhaseIdealLoop::loop_exit_test(Node* back_control, IdealLoopTree* loop, No
// need 'loop()' test to tell if limit is loop invariant
// ---------
- if (!is_member(loop, get_ctrl(incr))) { // Swapped trip counter and limit?
+ if (!ctrl_is_member(loop, incr)) { // Swapped trip counter and limit?
Node* tmp = incr; // Then reverse order into the CmpI
incr = limit;
limit = tmp;
bt = BoolTest(bt).commute(); // And commute the exit test
}
- if (is_member(loop, get_ctrl(limit))) { // Limit must be loop-invariant
+ if (ctrl_is_member(loop, limit)) { // Limit must be loop-invariant
return nullptr;
}
- if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant
+ if (!ctrl_is_member(loop, incr)) { // Trip counter must be loop-variant
return nullptr;
}
return cmp;
@@ -485,7 +485,7 @@ Node* PhaseIdealLoop::loop_iv_incr(Node* incr, Node* x, IdealLoopTree* loop, Nod
}
phi_incr = incr;
incr = phi_incr->in(LoopNode::LoopBackControl); // Assume incr is on backedge of Phi
- if (!is_member(loop, get_ctrl(incr))) { // Trip counter must be loop-variant
+ if (!ctrl_is_member(loop, incr)) { // Trip counter must be loop-variant
return nullptr;
}
}
@@ -1795,7 +1795,7 @@ bool PhaseIdealLoop::convert_to_long_loop(Node* cmp, Node* phi, IdealLoopTree* l
if (in == nullptr) {
continue;
}
- if (loop->is_member(get_loop(get_ctrl(in)))) {
+ if (ctrl_is_member(loop, in)) {
iv_nodes.push(in);
}
}
diff --git a/src/hotspot/share/opto/loopnode.hpp b/src/hotspot/share/opto/loopnode.hpp
index 7ea66ea5486..1e34331f213 100644
--- a/src/hotspot/share/opto/loopnode.hpp
+++ b/src/hotspot/share/opto/loopnode.hpp
@@ -1376,7 +1376,7 @@ public:
Node* exact_limit( IdealLoopTree *loop );
// Return a post-walked LoopNode
- IdealLoopTree *get_loop( Node *n ) const {
+ IdealLoopTree* get_loop(const Node* n) const {
// Dead nodes have no loop, so return the top level loop instead
if (!has_node(n)) return _ltree_root;
assert(!has_ctrl(n), "");
@@ -1386,8 +1386,14 @@ public:
IdealLoopTree* ltree_root() const { return _ltree_root; }
// Is 'n' a (nested) member of 'loop'?
- int is_member( const IdealLoopTree *loop, Node *n ) const {
- return loop->is_member(get_loop(n)); }
+ bool is_member(const IdealLoopTree* loop, const Node* n) const {
+ return loop->is_member(get_loop(n));
+ }
+
+ // Is the control for 'n' a (nested) member of 'loop'?
+ bool ctrl_is_member(const IdealLoopTree* loop, const Node* n) {
+ return is_member(loop, get_ctrl(n));
+ }
// This is the basic building block of the loop optimizations. It clones an
// entire loop body. It makes an old_new loop body mapping; with this
diff --git a/src/hotspot/share/opto/loopopts.cpp b/src/hotspot/share/opto/loopopts.cpp
index 50b1ae0de8d..3ef6a085b1c 100644
--- a/src/hotspot/share/opto/loopopts.cpp
+++ b/src/hotspot/share/opto/loopopts.cpp
@@ -637,11 +637,11 @@ Node* PhaseIdealLoop::remix_address_expressions(Node* n) {
if (n->in(3)->Opcode() == Op_AddX) {
Node* V = n->in(3)->in(1);
Node* I = n->in(3)->in(2);
- if (is_member(n_loop,get_ctrl(V))) {
+ if (ctrl_is_member(n_loop, V)) {
} else {
Node *tmp = V; V = I; I = tmp;
}
- if (!is_member(n_loop,get_ctrl(I))) {
+ if (!ctrl_is_member(n_loop, I)) {
Node* add1 = new AddPNode(n->in(1), n->in(2), I);
// Stuff new AddP in the loop preheader
register_new_node(add1, n_loop->_head->as_Loop()->skip_strip_mined(1)->in(LoopNode::EntryControl));
@@ -937,8 +937,6 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
Node* address = n->in(MemNode::Address);
Node* value = n->in(MemNode::ValueIn);
Node* mem = n->in(MemNode::Memory);
- IdealLoopTree* address_loop = get_loop(get_ctrl(address));
- IdealLoopTree* value_loop = get_loop(get_ctrl(value));
// - address and value must be loop invariant
// - memory must be a memory Phi for the loop
@@ -957,8 +955,8 @@ Node* PhaseIdealLoop::try_move_store_before_loop(Node* n, Node *n_ctrl) {
// memory Phi but sometimes is a bottom memory Phi that takes the
// store as input).
- if (!n_loop->is_member(address_loop) &&
- !n_loop->is_member(value_loop) &&
+ if (!ctrl_is_member(n_loop, address) &&
+ !ctrl_is_member(n_loop, value) &&
mem->is_Phi() && mem->in(0) == n_loop->_head &&
mem->outcnt() == 1 &&
mem->in(LoopNode::LoopBackControl) == n) {
@@ -1021,17 +1019,15 @@ void PhaseIdealLoop::try_move_store_after_loop(Node* n) {
if (n_loop != _ltree_root && !n_loop->_irreducible) {
Node* address = n->in(MemNode::Address);
Node* value = n->in(MemNode::ValueIn);
- IdealLoopTree* address_loop = get_loop(get_ctrl(address));
// address must be loop invariant
- if (!n_loop->is_member(address_loop)) {
+ if (!ctrl_is_member(n_loop, address)) {
// Store must be last on this memory slice in the loop and
// nothing in the loop must observe it
Node* phi = nullptr;
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
Node* u = n->fast_out(i);
if (has_ctrl(u)) { // control use?
- IdealLoopTree *u_loop = get_loop(get_ctrl(u));
- if (!n_loop->is_member(u_loop)) {
+ if (!ctrl_is_member(n_loop, u)) {
continue;
}
if (u->is_Phi() && u->in(0) == n_loop->_head) {
@@ -1838,7 +1834,7 @@ void PhaseIdealLoop::try_sink_out_of_loop(Node* n) {
Node* cast = nullptr;
for (uint k = 0; k < x->req(); k++) {
Node* in = x->in(k);
- if (in != nullptr && n_loop->is_member(get_loop(get_ctrl(in)))) {
+ if (in != nullptr && ctrl_is_member(n_loop, in)) {
const Type* in_t = _igvn.type(in);
cast = ConstraintCastNode::make_cast_for_type(x_ctrl, in, in_t,
ConstraintCastNode::UnconditionalDependency, nullptr);
@@ -2366,11 +2362,9 @@ static void collect_nodes_in_outer_loop_not_reachable_from_sfpt(Node* n, const I
Node* u = n->fast_out(j);
assert(check_old_new || old_new[u->_idx] == nullptr, "shouldn't have been cloned");
if (!u->is_CFG() && (!check_old_new || old_new[u->_idx] == nullptr)) {
- Node* c = phase->get_ctrl(u);
- IdealLoopTree* u_loop = phase->get_loop(c);
- assert(!loop->is_member(u_loop) || !loop->_body.contains(u), "can be in outer loop or out of both loops only");
- if (!loop->is_member(u_loop)) {
- if (outer_loop->is_member(u_loop)) {
+ assert(!phase->ctrl_is_member(loop, u) || !loop->_body.contains(u), "can be in outer loop or out of both loops only");
+ if (!phase->ctrl_is_member(loop, u)) {
+ if (phase->ctrl_is_member(outer_loop, u)) {
wq.push(u);
} else {
// nodes pinned with control in the outer loop but not referenced from the safepoint must be moved out of
@@ -2849,7 +2843,7 @@ int PhaseIdealLoop::stride_of_possible_iv(Node* iff) {
return 0;
}
// Must have an invariant operand
- if (is_member(get_loop(iff), get_ctrl(cmp->in(2)))) {
+ if (ctrl_is_member(get_loop(iff), cmp->in(2))) {
return 0;
}
Node* add2 = nullptr;
diff --git a/src/hotspot/share/opto/predicates.cpp b/src/hotspot/share/opto/predicates.cpp
index da9f704ee8d..208bd6583c5 100644
--- a/src/hotspot/share/opto/predicates.cpp
+++ b/src/hotspot/share/opto/predicates.cpp
@@ -1001,7 +1001,7 @@ InitializedAssertionPredicate CreateAssertionPredicatesVisitor::initialize_from_
}
bool NodeInSingleLoopBody::check_node_in_loop_body(Node* node) const {
- return _phase->is_member(_ilt, _phase->get_ctrl(node));
+ return _phase->ctrl_is_member(_ilt, node);
}
// Clone the provided Template Assertion Predicate and set '_init' as new input for the OpaqueLoopInitNode.
diff --git a/src/hotspot/share/opto/superword.cpp b/src/hotspot/share/opto/superword.cpp
index 7a2e6bc7fbd..35b46d22732 100644
--- a/src/hotspot/share/opto/superword.cpp
+++ b/src/hotspot/share/opto/superword.cpp
@@ -73,8 +73,7 @@ public:
void set_ignored(Node* n) {
// Only consider nodes in the loop.
- Node* ctrl = _vloop.phase()->get_ctrl(n);
- if (_vloop.lpt()->is_member(_vloop.phase()->get_loop(ctrl))) {
+ if (_vloop.phase()->ctrl_is_member(_vloop.lpt(), n)) {
// Find the index in the loop.
for (uint j = 0; j < _body.size(); j++) {
if (n == _body.at(j)) {
From 81e0c87f28934cb0d66ad2500352b2728f44a1b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Paul=20H=C3=BCbner?=
Date: Fri, 14 Nov 2025 08:29:57 +0000
Subject: [PATCH 048/418] 8371320:
runtime/ErrorHandling/PrintVMInfoAtExitTest.java fails with unexpected amount
for Java Heap reserved memory
Reviewed-by: azafari, jsikstro
---
.../ErrorHandling/PrintVMInfoAtExitTest.java | 22 +++++++++----------
1 file changed, 10 insertions(+), 12 deletions(-)
diff --git a/test/hotspot/jtreg/runtime/ErrorHandling/PrintVMInfoAtExitTest.java b/test/hotspot/jtreg/runtime/ErrorHandling/PrintVMInfoAtExitTest.java
index 5e535bab626..80c209a9ec4 100644
--- a/test/hotspot/jtreg/runtime/ErrorHandling/PrintVMInfoAtExitTest.java
+++ b/test/hotspot/jtreg/runtime/ErrorHandling/PrintVMInfoAtExitTest.java
@@ -27,17 +27,13 @@
* @test
* @summary Test PrintVMInfoAtExit
* @library /test/lib
- * @build jdk.test.whitebox.WhiteBox
- * @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
- * @modules java.base/jdk.internal.misc
* @requires vm.flagless
* @requires vm.bits == "64"
- * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI PrintVMInfoAtExitTest
+ * @run driver PrintVMInfoAtExitTest
*/
import jdk.test.lib.process.OutputAnalyzer;
import jdk.test.lib.process.ProcessTools;
-import jdk.test.whitebox.WhiteBox;
public class PrintVMInfoAtExitTest {
@@ -52,18 +48,20 @@ public class PrintVMInfoAtExitTest {
"-XX:CompressedClassSpaceSize=256m",
"-version");
+ // How many kb of committed memory we expect in the NMT summary.
+ int committed_kb = 65536;
OutputAnalyzer output_detail = new OutputAnalyzer(pb.start());
output_detail.shouldContain("# JRE version:");
output_detail.shouldContain("-- S U M M A R Y --");
output_detail.shouldContain("Command Line: -Xmx64M -Xms64M -XX:-CreateCoredumpOnCrash -XX:+UnlockDiagnosticVMOptions -XX:+PrintVMInfoAtExit -XX:NativeMemoryTracking=summary -XX:CompressedClassSpaceSize=256m");
output_detail.shouldContain("Native Memory Tracking:");
- WhiteBox wb = WhiteBox.getWhiteBox();
- if (wb.isAsanEnabled()) {
- // the reserved value can be influenced by asan
- output_detail.shouldContain("Java Heap (reserved=");
- output_detail.shouldContain(", committed=65536KB)");
- } else {
- output_detail.shouldContain("Java Heap (reserved=65536KB, committed=65536KB)");
+ // Make sure the heap summary is present.
+ output_detail.shouldMatch("Java Heap \\(reserved=[0-9]+KB, committed=" + committed_kb + "KB\\)");
+ // Check reserved >= committed.
+ String reserved_kb_string = output_detail.firstMatch("Java Heap \\(reserved=([0-9]+)KB, committed=" + committed_kb + "KB\\)", 1);
+ int reserved_kb = Integer.parseInt(reserved_kb_string);
+ if (reserved_kb < committed_kb) {
+ throw new RuntimeException("committed more memory than reserved");
}
}
}
From 9eaa364a5221cba960467ffbaea14ea790809c6a Mon Sep 17 00:00:00 2001
From: Afshin Zafari
Date: Fri, 14 Nov 2025 09:03:11 +0000
Subject: [PATCH 049/418] 8361487: [ubsan] test_committed_virtualmemory.cpp
check_covered_pages shows overflow
Reviewed-by: jsjolen, phubner
---
.../gtest/runtime/test_committed_virtualmemory.cpp | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/test/hotspot/gtest/runtime/test_committed_virtualmemory.cpp b/test/hotspot/gtest/runtime/test_committed_virtualmemory.cpp
index 4d3fc1ff82e..5b78a66a3ae 100644
--- a/test/hotspot/gtest/runtime/test_committed_virtualmemory.cpp
+++ b/test/hotspot/gtest/runtime/test_committed_virtualmemory.cpp
@@ -83,14 +83,21 @@ public:
ASSERT_TRUE(found_stack_top);
}
+ static const int PAGE_CONTAINED_IN_RANGE_TAG = -1;
+ static bool is_page_in_committed_region(int a) { return (a == PAGE_CONTAINED_IN_RANGE_TAG); }
+ static void set_page_as_contained_in_committed_region(int &a) { a = PAGE_CONTAINED_IN_RANGE_TAG; }
+
static void check_covered_pages(address addr, size_t size, address base, size_t touch_pages, int* page_num) {
const size_t page_sz = os::vm_page_size();
size_t index;
for (index = 0; index < touch_pages; index ++) {
+ if (is_page_in_committed_region(page_num[index])) { // Already tagged?
+ continue;
+ }
address page_addr = base + page_num[index] * page_sz;
// The range covers this page, marks the page
if (page_addr >= addr && page_addr < addr + size) {
- page_num[index] = -1;
+ set_page_as_contained_in_committed_region(page_num[index]);
}
}
}
@@ -135,7 +142,7 @@ public:
if (precise_tracking_supported) {
// All touched pages should be committed
for (size_t index = 0; index < touch_pages; index ++) {
- ASSERT_EQ(page_num[index], -1);
+ ASSERT_TRUE(is_page_in_committed_region(page_num[index]));
}
}
From 8a7af77e991511e144914abc129a9d4d40c0b76b Mon Sep 17 00:00:00 2001
From: Daniel Fuchs
Date: Fri, 14 Nov 2025 10:10:03 +0000
Subject: [PATCH 050/418] 8371366:
java/net/httpclient/whitebox/RawChannelTestDriver.java fails intermittently
in jtreg timeout
Reviewed-by: djelinski, vyazici
---
.../whitebox/RawChannelTestDriver.java | 9 +-
.../jdk/internal/net/http/RawChannelTest.java | 82 +++++++++++++++----
2 files changed, 75 insertions(+), 16 deletions(-)
diff --git a/test/jdk/java/net/httpclient/whitebox/RawChannelTestDriver.java b/test/jdk/java/net/httpclient/whitebox/RawChannelTestDriver.java
index ac577069b70..ab44fba5ecd 100644
--- a/test/jdk/java/net/httpclient/whitebox/RawChannelTestDriver.java
+++ b/test/jdk/java/net/httpclient/whitebox/RawChannelTestDriver.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, 2018, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2016, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,6 +25,11 @@
* @test
* @bug 8151299 8164704
* @modules java.net.http/jdk.internal.net.http
- * @run testng java.net.http/jdk.internal.net.http.RawChannelTest
+ * @run testng/othervm java.net.http/jdk.internal.net.http.RawChannelTest
*/
+// use
+// @run testng/othervm -Dseed=6434511950803022575
+// java.net.http/jdk.internal.net.http.RawChannelTest
+// to reproduce a failure with a particular seed (e.g. 6434511950803022575)
+// if this test is observed failing with that seed
//-Djdk.internal.httpclient.websocket.debug=true
diff --git a/test/jdk/java/net/httpclient/whitebox/java.net.http/jdk/internal/net/http/RawChannelTest.java b/test/jdk/java/net/httpclient/whitebox/java.net.http/jdk/internal/net/http/RawChannelTest.java
index 9b5764735b2..f6bcdcb4d33 100644
--- a/test/jdk/java/net/httpclient/whitebox/java.net.http/jdk/internal/net/http/RawChannelTest.java
+++ b/test/jdk/java/net/httpclient/whitebox/java.net.http/jdk/internal/net/http/RawChannelTest.java
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017, 2024, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,7 @@
package jdk.internal.net.http;
+import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
@@ -43,8 +44,9 @@ import java.util.concurrent.atomic.AtomicLong;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
+import java.util.concurrent.atomic.AtomicReference;
+
import jdk.internal.net.http.websocket.RawChannel;
-import jdk.internal.net.http.websocket.WebSocketRequest;
import org.testng.annotations.Test;
import static java.net.http.HttpResponse.BodyHandlers.discarding;
import static java.util.concurrent.TimeUnit.SECONDS;
@@ -57,6 +59,20 @@ import static org.testng.Assert.assertEquals;
*/
public class RawChannelTest {
+ // can't use jdk.test.lib when injected in java.net.httpclient
+ // Seed can be specified on the @run line with -Dseed=