diff --git a/src/hotspot/cpu/ppc/atomicAccess_ppc.hpp b/src/hotspot/cpu/ppc/atomicAccess_ppc.hpp
index a0ff19e6171..c4529b0eb1a 100644
--- a/src/hotspot/cpu/ppc/atomicAccess_ppc.hpp
+++ b/src/hotspot/cpu/ppc/atomicAccess_ppc.hpp
@@ -157,6 +157,9 @@ inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_va
   return result;
 }
 
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
+
 template<>
 template<typename T>
 inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
diff --git a/src/hotspot/os_cpu/bsd_aarch64/atomicAccess_bsd_aarch64.hpp b/src/hotspot/os_cpu/bsd_aarch64/atomicAccess_bsd_aarch64.hpp
index 3d2c632ace8..67701775f94 100644
--- a/src/hotspot/os_cpu/bsd_aarch64/atomicAccess_bsd_aarch64.hpp
+++ b/src/hotspot/os_cpu/bsd_aarch64/atomicAccess_bsd_aarch64.hpp
@@ -52,12 +52,16 @@ struct AtomicAccess::PlatformAdd {
   }
 };
 
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
+
 template<size_t byte_size>
 template<typename T>
 inline T AtomicAccess::PlatformXchg<byte_size>::operator()(T volatile* dest,
                                                            T exchange_value,
                                                            atomic_memory_order order) const {
   STATIC_ASSERT(byte_size == sizeof(T));
+  STATIC_ASSERT(byte_size == 4 || byte_size == 8);
   T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
   FULL_MEM_BARRIER;
   return res;
diff --git a/src/hotspot/os_cpu/bsd_x86/atomicAccess_bsd_x86.hpp b/src/hotspot/os_cpu/bsd_x86/atomicAccess_bsd_x86.hpp
index 1024c6b1418..29471300f3d 100644
--- a/src/hotspot/os_cpu/bsd_x86/atomicAccess_bsd_x86.hpp
+++ b/src/hotspot/os_cpu/bsd_x86/atomicAccess_bsd_x86.hpp
@@ -52,6 +52,9 @@ inline D AtomicAccess::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_va
   return old_value;
 }
 
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
+
 template<>
 template<typename T>
 inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
diff --git a/src/hotspot/os_cpu/bsd_zero/atomicAccess_bsd_zero.hpp b/src/hotspot/os_cpu/bsd_zero/atomicAccess_bsd_zero.hpp
index 6a720dac54e..6c8684718fc 100644
--- a/src/hotspot/os_cpu/bsd_zero/atomicAccess_bsd_zero.hpp
+++ b/src/hotspot/os_cpu/bsd_zero/atomicAccess_bsd_zero.hpp
@@ -66,6 +66,9 @@ inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_va
   return res;
 }
 
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
+
 template<>
 template<typename T>
 inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
diff --git a/src/hotspot/os_cpu/linux_aarch64/atomicAccess_linux_aarch64.hpp b/src/hotspot/os_cpu/linux_aarch64/atomicAccess_linux_aarch64.hpp
index 6e5f53edfa3..4ddb2b758b4 100644
--- a/src/hotspot/os_cpu/linux_aarch64/atomicAccess_linux_aarch64.hpp
+++ b/src/hotspot/os_cpu/linux_aarch64/atomicAccess_linux_aarch64.hpp
@@ -113,6 +113,9 @@ inline D AtomicAccess::PlatformAdd<8>::fetch_then_add(D volatile* dest, I add_va
   return atomic_fastcall(stub, dest, add_value);
 }
 
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
+
 template<>
 template<typename T>
 inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
diff --git a/src/hotspot/os_cpu/linux_arm/atomicAccess_linux_arm.hpp b/src/hotspot/os_cpu/linux_arm/atomicAccess_linux_arm.hpp
index 5b5f9da51a6..390207f9e5e 100644
--- a/src/hotspot/os_cpu/linux_arm/atomicAccess_linux_arm.hpp
+++ b/src/hotspot/os_cpu/linux_arm/atomicAccess_linux_arm.hpp
@@ -118,6 +118,8 @@ inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_va
   return add_using_helper(ARMAtomicFuncs::_add_func,
                           dest, add_value);
 }
 
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
 template<>
 template<typename T>
diff --git a/src/hotspot/os_cpu/linux_riscv/atomicAccess_linux_riscv.hpp b/src/hotspot/os_cpu/linux_riscv/atomicAccess_linux_riscv.hpp
index 6d57ea55a83..bdbc0b8ac7f 100644
--- a/src/hotspot/os_cpu/linux_riscv/atomicAccess_linux_riscv.hpp
+++ b/src/hotspot/os_cpu/linux_riscv/atomicAccess_linux_riscv.hpp
@@ -152,6 +152,9 @@ inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest __attribu
 }
 #endif
 
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
+
 template<size_t byte_size>
 template<typename T>
 inline T AtomicAccess::PlatformXchg<byte_size>::operator()(T volatile* dest,
@@ -164,6 +167,7 @@ inline T AtomicAccess::PlatformXchg<byte_size>::operator()(T volatile* dest,
 #endif
 
   STATIC_ASSERT(byte_size == sizeof(T));
+  STATIC_ASSERT(byte_size == 4 || byte_size == 8);
 
   if (order != memory_order_relaxed) {
     FULL_MEM_BARRIER;
diff --git a/src/hotspot/os_cpu/linux_s390/atomicAccess_linux_s390.hpp b/src/hotspot/os_cpu/linux_s390/atomicAccess_linux_s390.hpp
index 5849d69ae2f..f3c1e8f1a2c 100644
--- a/src/hotspot/os_cpu/linux_s390/atomicAccess_linux_s390.hpp
+++ b/src/hotspot/os_cpu/linux_s390/atomicAccess_linux_s390.hpp
@@ -209,6 +209,9 @@ inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I inc,
 //
 // The return value is the (unchanged) value from memory as it was when the
 // replacement succeeded.
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
+
 template<>
 template<typename T>
 inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
diff --git a/src/hotspot/os_cpu/linux_x86/atomicAccess_linux_x86.hpp b/src/hotspot/os_cpu/linux_x86/atomicAccess_linux_x86.hpp
index dd91444d0a3..6b43b5e8e09 100644
--- a/src/hotspot/os_cpu/linux_x86/atomicAccess_linux_x86.hpp
+++ b/src/hotspot/os_cpu/linux_x86/atomicAccess_linux_x86.hpp
@@ -52,6 +52,9 @@ inline D AtomicAccess::PlatformAdd<4>::fetch_then_add(D volatile* dest, I add_va
   return old_value;
 }
 
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
+
 template<>
 template<typename T>
 inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
diff --git a/src/hotspot/os_cpu/linux_zero/atomicAccess_linux_zero.hpp b/src/hotspot/os_cpu/linux_zero/atomicAccess_linux_zero.hpp
index 376ef7a9dc9..96c46c6f59a 100644
--- a/src/hotspot/os_cpu/linux_zero/atomicAccess_linux_zero.hpp
+++ b/src/hotspot/os_cpu/linux_zero/atomicAccess_linux_zero.hpp
@@ -65,6 +65,9 @@ inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_va
   return res;
 }
 
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
+
 template<>
 template<typename T>
 inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
diff --git a/src/hotspot/os_cpu/windows_aarch64/atomicAccess_windows_aarch64.hpp b/src/hotspot/os_cpu/windows_aarch64/atomicAccess_windows_aarch64.hpp
index 62b6e3f87ec..f8119654c50 100644
--- a/src/hotspot/os_cpu/windows_aarch64/atomicAccess_windows_aarch64.hpp
+++ b/src/hotspot/os_cpu/windows_aarch64/atomicAccess_windows_aarch64.hpp
@@ -68,6 +68,9 @@ DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64)
 
 #undef DEFINE_INTRINSIC_ADD
 
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
+
 #define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType) \
   template<>                                                \
   template<typename T>                                      \
@@ -75,6 +78,8 @@ DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64)
                                            T exchange_value,                  \
                                            atomic_memory_order order) const { \
     STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T));                        \
+    STATIC_ASSERT(sizeof(IntrinsicType) == 4 ||                               \
+                  sizeof(IntrinsicType) == 8);                                \
     return PrimitiveConversions::cast<T>(                                     \
       IntrinsicName(reinterpret_cast<IntrinsicType volatile*>(dest),          \
                     PrimitiveConversions::cast<IntrinsicType>(exchange_value))); \
diff --git a/src/hotspot/os_cpu/windows_x86/atomicAccess_windows_x86.hpp b/src/hotspot/os_cpu/windows_x86/atomicAccess_windows_x86.hpp
index a95da151688..aa78a401235 100644
--- a/src/hotspot/os_cpu/windows_x86/atomicAccess_windows_x86.hpp
+++ b/src/hotspot/os_cpu/windows_x86/atomicAccess_windows_x86.hpp
@@ -70,6 +70,9 @@ DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64)
 
 #undef DEFINE_INTRINSIC_ADD
 
+template<>
+struct AtomicAccess::PlatformXchg<1> : AtomicAccess::XchgUsingCmpxchg<1> {};
+
 #define DEFINE_INTRINSIC_XCHG(IntrinsicName, IntrinsicType) \
   template<>                                                \
   template<typename T>                                      \
@@ -77,6 +80,8 @@ DEFINE_INTRINSIC_ADD(InterlockedAdd64, __int64)
                                            T exchange_value,                  \
                                            atomic_memory_order order) const { \
     STATIC_ASSERT(sizeof(IntrinsicType) == sizeof(T));                        \
+    STATIC_ASSERT(sizeof(IntrinsicType) == 4 ||                               \
+                  sizeof(IntrinsicType) == 8);                                \
     return PrimitiveConversions::cast<T>(                                     \
       IntrinsicName(reinterpret_cast<IntrinsicType volatile*>(dest),          \
                     PrimitiveConversions::cast<IntrinsicType>(exchange_value))); \
diff --git a/src/hotspot/share/runtime/atomic.hpp b/src/hotspot/share/runtime/atomic.hpp
index 5b4d7d8659f..b8960fd796b 100644
--- a/src/hotspot/share/runtime/atomic.hpp
+++ b/src/hotspot/share/runtime/atomic.hpp
@@ -75,6 +75,7 @@
 //   v.release_store(x) -> void
 //   v.release_store_fence(x) -> void
 //   v.compare_exchange(x, y [, o]) -> T
+//   v.exchange(x [, o]) -> T
 //
 // (2) All atomic types are default constructible.
 //
@@ -92,7 +93,6 @@
 // (3) Atomic pointers and atomic integers additionally provide
 //
 // member functions:
-//   v.exchange(x [, o]) -> T
 //   v.add_then_fetch(i [, o]) -> T
 //   v.sub_then_fetch(i [, o]) -> T
 //   v.fetch_then_add(i [, o]) -> T
@@ -102,10 +102,7 @@
 // type of i must be signed, or both must be unsigned. Atomic pointers perform
 // element arithmetic.
 //
-// (4) An atomic translated type additionally provides the exchange
-// function if its associated atomic decayed type provides that function.
-//
-// (5) Atomic integers additionally provide
+// (4) Atomic integers additionally provide
 //
 // member functions:
 //   v.and_then_fetch(x [, o]) -> T
@@ -115,7 +112,7 @@
 //   v.fetch_then_or(x [, o]) -> T
 //   v.fetch_then_xor(x [, o]) -> T
 //
-// (6) Atomic pointers additionally provide
+// (5) Atomic pointers additionally provide
 //
 // nested types:
 //   ElementType -> std::remove_pointer_t<T>
@@ -127,9 +124,6 @@
 // stand out a little more when used in surrounding non-atomic code. Without
 // the "AtomicAccess::" qualifier, some of those names are easily overlooked.
 //
-// Atomic bytes don't provide exchange(). This is because that operation
-// hasn't been implemented for 1 byte values. That could be changed if needed.
-//
 // Atomic for 2 byte integers is not supported. This is because atomic
 // operations of that size have not been implemented. There haven't been
 // required use-cases. Many platforms don't provide hardware support.
@@ -184,15 +178,8 @@ private:
   // Helper base classes, providing various parts of the APIs.
   template<typename T> class CommonCore;
-  template<typename T> class SupportsExchange;
   template<typename T> class SupportsArithmetic;
 
-  // Support conditional exchange() for atomic translated types.
-  template class HasExchange;
-  template class DecayedHasExchange;
-  template::value>
-  class TranslatedExchange;
-
 public:
   template()> class Atomic;
@@ -275,15 +262,7 @@ public:
                      atomic_memory_order order = memory_order_conservative) {
     return AtomicAccess::cmpxchg(value_ptr(), compare_value, new_value, order);
   }
-};
-template<typename T>
-class AtomicImpl::SupportsExchange : public CommonCore<T> {
-protected:
-  explicit SupportsExchange(T value) : CommonCore<T>(value) {}
-  ~SupportsExchange() = default;
-
-public:
   T exchange(T new_value,
              atomic_memory_order order = memory_order_conservative) {
     return AtomicAccess::xchg(this->value_ptr(), new_value, order);
@@ -291,7 +270,7 @@ public:
 };
 
 template<typename T>
-class AtomicImpl::SupportsArithmetic : public SupportsExchange<T> {
+class AtomicImpl::SupportsArithmetic : public CommonCore<T> {
   // Guarding the AtomicAccess calls with constexpr checking of Offset produces
   // better compile-time error messages.
   template
@@ -311,7 +290,7 @@ class AtomicImpl::SupportsArithmetic : public SupportsExchange<T> {
   }
 
 protected:
-  explicit SupportsArithmetic(T value) : SupportsExchange<T>(value) {}
+  explicit SupportsArithmetic(T value) : CommonCore<T>(value) {}
   ~SupportsArithmetic() = default;
 
 public:
@@ -424,54 +403,8 @@ public:
 
 // Atomic translated type
 
-// Test whether Atomic has exchange().
 template
-class AtomicImpl::HasExchange {
-  template static void* test(decltype(&Check::exchange));
-  template static int test(...);
-  using test_type = decltype(test>(nullptr));
-public:
-  static constexpr bool value = std::is_pointer_v;
-};
-
-// Test whether the atomic decayed type associated with T has exchange().
-template
-class AtomicImpl::DecayedHasExchange {
-  using Translator = PrimitiveConversions::Translate<T>;
-  using Decayed = typename Translator::Decayed;
-
-  // "Unit test" HasExchange<>.
-  static_assert(HasExchange::value);
-  static_assert(HasExchange::value);
-  static_assert(!HasExchange::value);
-
-public:
-  static constexpr bool value = HasExchange::value;
-};
-
-// Base class for atomic translated type if atomic decayed type doesn't have
-// exchange().
-template
-class AtomicImpl::TranslatedExchange {};
-
-// Base class for atomic translated type if atomic decayed type does have
-// exchange().
-template
-class AtomicImpl::TranslatedExchange {
-public:
-  T exchange(T new_value,
-             atomic_memory_order order = memory_order_conservative) {
-    return static_cast(this)->exchange_impl(new_value, order);
-  }
-};
-
-template
-class AtomicImpl::Atomic
-  : public TranslatedExchange, T>
-{
-  // Give TranslatedExchange<> access to exchange_impl() if needed.
-  friend class TranslatedExchange, T>;
-
+
+class AtomicImpl::Atomic {
   using Translator = PrimitiveConversions::Translate<T>;
   using Decayed = typename Translator::Decayed;
@@ -533,12 +466,7 @@ public:
                              order));
   }
 
-private:
-  // Implementation of exchange() if needed.
-  // Exclude when not needed, to prevent reference to non-existent function
-  // of atomic decayed type if someone explicitly instantiates Atomic.
-  template::value)>
-  T exchange_impl(T new_value, atomic_memory_order order) {
+  T exchange(T new_value, atomic_memory_order order = memory_order_conservative) {
     return recover(_value.exchange(decay(new_value), order));
   }
 };
diff --git a/src/hotspot/share/runtime/atomicAccess.hpp b/src/hotspot/share/runtime/atomicAccess.hpp
index 72b7f92cf04..fb06f084366 100644
--- a/src/hotspot/share/runtime/atomicAccess.hpp
+++ b/src/hotspot/share/runtime/atomicAccess.hpp
@@ -419,8 +419,8 @@ private:
   struct XchgImpl;
 
   // Platform-specific implementation of xchg.  Support for sizes
-  // of 4, and sizeof(intptr_t) are required.  The class is a function
-  // object that must be default constructable, with these requirements:
+  // of 1, 4, and 8 are required.  The class is a function object
+  // that must be default constructable, with these requirements:
   //
   // - dest is of type T*.
   // - exchange_value is of type T.
diff --git a/test/hotspot/gtest/runtime/test_atomic.cpp b/test/hotspot/gtest/runtime/test_atomic.cpp
index dc492e523d1..b37c14d41a7 100644
--- a/test/hotspot/gtest/runtime/test_atomic.cpp
+++ b/test/hotspot/gtest/runtime/test_atomic.cpp
@@ -101,10 +101,10 @@ TEST_VM(AtomicIntegerTest, arith_uint64) {
 }
 
 template<typename T>
-struct AtomicIntegerXchgTestSupport {
+struct AtomicByteAndIntegerXchgTestSupport {
   Atomic<T> _test_value;
 
-  AtomicIntegerXchgTestSupport() : _test_value{} {}
+  AtomicByteAndIntegerXchgTestSupport() : _test_value{} {}
 
   void test() {
     T zero = 0;
@@ -116,13 +116,18 @@ struct AtomicIntegerXchgTestSupport {
   }
 };
 
+TEST_VM(AtomicIntegerTest, xchg_char) {
+  using Support = AtomicByteAndIntegerXchgTestSupport<char>;
+  Support().test();
+}
+
 TEST_VM(AtomicIntegerTest, xchg_int32) {
-  using Support = AtomicIntegerXchgTestSupport<int32_t>;
+  using Support = AtomicByteAndIntegerXchgTestSupport<int32_t>;
   Support().test();
 }
 
 TEST_VM(AtomicIntegerTest, xchg_int64) {
-  using Support = AtomicIntegerXchgTestSupport<int64_t>;
+  using Support = AtomicByteAndIntegerXchgTestSupport<int64_t>;
   Support().test();
 }
@@ -153,18 +158,16 @@ TEST_VM(AtomicIntegerTest, cmpxchg_int32) {
 }
 
 TEST_VM(AtomicIntegerTest, cmpxchg_int64) {
   // Check if 64-bit atomics are available on the machine.
-  if (!VM_Version::supports_cx8()) return;
-
   using Support = AtomicIntegerCmpxchgTestSupport<int64_t>;
   Support().test();
 }
 
-struct AtomicCmpxchg1ByteStressSupport {
+struct AtomicXchgAndCmpxchg1ByteStressSupport {
   char _default_val;
   int _base;
   Atomic<char> _array[7+32+7];
 
-  AtomicCmpxchg1ByteStressSupport() : _default_val(0x7a), _base(7) {}
+  AtomicXchgAndCmpxchg1ByteStressSupport() : _default_val(0x7a), _base(7) {}
 
   void validate(char val, char val2, int index) {
     for (int i = 0; i < 7; i++) {
@@ -182,35 +185,60 @@ struct AtomicCmpxchg1ByteStressSupport {
     }
   }
 
+  template<typename Exchange>
   void test_index(int index) {
+    Exchange exchange;
    char one = 1;
-    _array[index].compare_exchange(_default_val, one);
+    exchange(_array[index], _default_val, one);
    validate(_default_val, one, index);
 
-    _array[index].compare_exchange(one, _default_val);
+    exchange(_array[index], one, _default_val);
    validate(_default_val, _default_val, index);
   }
 
+  template<typename Exchange>
   void test() {
    for (size_t i = 0; i < ARRAY_SIZE(_array); ++i) {
      _array[i].store_relaxed(_default_val);
    }
    for (int i = _base; i < (_base+32); i++) {
-     test_index(i);
+     test_index<Exchange>(i);
    }
   }
+
+  void test_exchange() {
+    struct StressWithExchange {
+      void operator()(Atomic<char>& atomic, char compare_value, char new_value) {
+        EXPECT_EQ(compare_value, atomic.exchange(new_value));
+      }
+    };
+    test<StressWithExchange>();
+  }
+
+  void test_compare_exchange() {
+    struct StressWithCompareExchange {
+      void operator()(Atomic<char>& atomic, char compare_value, char new_value) {
+        EXPECT_EQ(compare_value, atomic.compare_exchange(compare_value, new_value));
+      }
+    };
+    test<StressWithCompareExchange>();
+  }
 };
 
-TEST_VM(AtomicCmpxchg1Byte, stress) {
-  AtomicCmpxchg1ByteStressSupport support;
-  support.test();
+TEST_VM(AtomicByteTest, stress_xchg) {
+  AtomicXchgAndCmpxchg1ByteStressSupport support;
+  support.test_exchange();
+}
+
+TEST_VM(AtomicByteTest, stress_cmpxchg) {
+  AtomicXchgAndCmpxchg1ByteStressSupport support;
+  support.test_compare_exchange();
 }
 
 template<typename T>
-struct AtomicEnumTestSupport {
+struct AtomicTestSupport {
   Atomic<T> _test_value;
 
-  AtomicEnumTestSupport() : _test_value{} {}
+  AtomicTestSupport() : _test_value{} {}
 
   void test_store_load(T value) {
     EXPECT_NE(value, _test_value.load_relaxed());
@@ -233,6 +261,13 @@ struct AtomicEnumTestSupport {
     EXPECT_EQ(value1, _test_value.exchange(value2));
     EXPECT_EQ(value2, _test_value.load_relaxed());
   }
+
+  template<T B, T C>
+  static void test() {
+    AtomicTestSupport().test_store_load(B);
+    AtomicTestSupport().test_cmpxchg(B, C);
+    AtomicTestSupport().test_xchg(B, C);
+  }
 };
 
 namespace AtomicEnumTestUnscoped { // Scope the enumerators.
@@ -241,11 +276,7 @@ namespace AtomicEnumTestUnscoped { // Scope the enumerators.
 
 TEST_VM(AtomicEnumTest, unscoped_enum) {
   using namespace AtomicEnumTestUnscoped;
-  using Support = AtomicEnumTestSupport;
-
-  Support().test_store_load(B);
-  Support().test_cmpxchg(B, C);
-  Support().test_xchg(B, C);
+  AtomicTestSupport::test();
 }
 
 enum class AtomicEnumTestScoped { A, B, C };
@@ -253,11 +284,35 @@ enum class AtomicEnumTestScoped { A, B, C };
 TEST_VM(AtomicEnumTest, scoped_enum) {
   const AtomicEnumTestScoped B = AtomicEnumTestScoped::B;
   const AtomicEnumTestScoped C = AtomicEnumTestScoped::C;
-  using Support = AtomicEnumTestSupport<AtomicEnumTestScoped>;
+  AtomicTestSupport<AtomicEnumTestScoped>::test<B, C>();
 }
-
-  Support().test_store_load(B);
-  Support().test_cmpxchg(B, C);
-  Support().test_xchg(B, C);
+enum class AtomicEnumTestScoped64Bit : uint64_t { A, B, C };
+
+TEST_VM(AtomicEnumTest, scoped_enum_64_bit) {
+  const AtomicEnumTestScoped64Bit B = AtomicEnumTestScoped64Bit::B;
+  const AtomicEnumTestScoped64Bit C = AtomicEnumTestScoped64Bit::C;
+  AtomicTestSupport<AtomicEnumTestScoped64Bit>::test<B, C>();
+}
+
+enum class AtomicEnumTestScoped8Bit : uint8_t { A, B, C };
+
+TEST_VM(AtomicEnumTest, scoped_enum_8_bit) {
+  const AtomicEnumTestScoped8Bit B = AtomicEnumTestScoped8Bit::B;
+  const AtomicEnumTestScoped8Bit C = AtomicEnumTestScoped8Bit::C;
+  AtomicTestSupport<AtomicEnumTestScoped8Bit>::test<B, C>();
+}
+
+TEST_VM(AtomicByteTest, char_test) {
+  const char B = 0xB;
+  const char C = 0xC;
+  AtomicTestSupport<char>::test<B, C>();
+}
+
+TEST_VM(AtomicByteTest, bool_test) {
+  const bool B = true;
+  const bool C = false;
+  AtomicTestSupport<bool>::test<B, C>();
 }
 
 template
@@ -514,40 +569,6 @@ struct PrimitiveConversions::Translate
   static Value recover(Decayed x) { return Value(x); }
 };
 
-// Test whether Atomic has exchange().
-// Note: This is intentionally a different implementation from what is used
-// by the atomic translated type to decide whether to provide exchange().
-// The intent is to make related testing non-tautological.
-// The two implementations must agree; it's a bug if they don't.
-template
-class AtomicTypeHasExchange {
-  template,
-           typename = decltype(declval().exchange(declval()))>
-  static char* test(int);
-
-  template static char test(...);
-
-  using test_type = decltype(test(0));
-
-public:
-  static constexpr bool value = std::is_pointer_v;
-};
-
-// Unit tests for AtomicTypeHasExchange.
-static_assert(AtomicTypeHasExchange::value);
-static_assert(AtomicTypeHasExchange::value);
-static_assert(AtomicTypeHasExchange::value);
-static_assert(AtomicTypeHasExchange::value);
-static_assert(!AtomicTypeHasExchange::value);
-
-// Verify translated byte type *doesn't* have exchange.
-static_assert(!AtomicTypeHasExchange::value);
-
-// Verify that explicit instantiation doesn't attempt to reference the
-// non-existent exchange of the atomic decayed type.
-template class AtomicImpl::Atomic;
-
 template<typename T>
 static void test_atomic_translated_type() {
   // This works even if T is not default constructible.
@@ -562,10 +583,8 @@ static void test_atomic_translated_type() {
                                             Translated::recover(10))));
   EXPECT_EQ(10, Translated::decay(_test_value.load_relaxed()));
 
-  if constexpr (AtomicTypeHasExchange<T>::value) {
-    EXPECT_EQ(10, Translated::decay(_test_value.exchange(Translated::recover(20))));
-    EXPECT_EQ(20, Translated::decay(_test_value.load_relaxed()));
-  }
+  EXPECT_EQ(10, Translated::decay(_test_value.exchange(Translated::recover(20))));
+  EXPECT_EQ(20, Translated::decay(_test_value.load_relaxed()));
 }
 
 TEST_VM(AtomicTranslatedTypeTest, int_test) {
diff --git a/test/hotspot/gtest/runtime/test_atomicAccess.cpp b/test/hotspot/gtest/runtime/test_atomicAccess.cpp
index 6e05f429970..a489be71b19 100644
--- a/test/hotspot/gtest/runtime/test_atomicAccess.cpp
+++ b/test/hotspot/gtest/runtime/test_atomicAccess.cpp
@@ -100,6 +100,11 @@ struct AtomicAccessXchgTestSupport {
   }
 };
 
+TEST_VM(AtomicAccessXchgTest, int8) {
+  using Support = AtomicAccessXchgTestSupport<int8_t>;
+  Support().test();
+}
+
 TEST_VM(AtomicAccessXchgTest, int32) {
   using Support = AtomicAccessXchgTestSupport<int32_t>;
   Support().test();
@@ -136,9 +141,6 @@ TEST_VM(AtomicAccessCmpxchgTest, int32) {
 }
 
 TEST_VM(AtomicAccessCmpxchgTest, int64) {
-  // Check if 64-bit atomics are available on the machine.
-  if (!VM_Version::supports_cx8()) return;
-
   using Support = AtomicAccessCmpxchgTestSupport<int64_t>;
   Support().test();
 }
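
Note (not part of the patch): every platform file above implements PlatformXchg<1> by inheriting from AtomicAccess::XchgUsingCmpxchg<1>, whose definition lives in atomicAccess.hpp and is not shown in this diff. The sketch below only illustrates the assumed technique, emulating a one-byte exchange with a compare-exchange retry loop; the function name and details here are illustrative, not the actual helper.

// Illustrative sketch only -- the real helper is AtomicAccess::XchgUsingCmpxchg<1>,
// defined elsewhere in atomicAccess.hpp; it may differ in naming and detail.
template<typename T>
T xchg_by_cmpxchg(T volatile* dest, T exchange_value, atomic_memory_order order) {
  T old_value = *dest;                  // initial guess at the current contents
  while (true) {
    // cmpxchg returns the value found in memory; it equals old_value exactly
    // when the replacement succeeded.
    T found = AtomicAccess::cmpxchg(dest, old_value, exchange_value, order);
    if (found == old_value) {
      return found;                     // success: report the previous value
    }
    old_value = found;                  // lost a race: retry with the fresh value
  }
}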
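
A short usage sketch (also not part of the patch), assuming the Atomic<T> and AtomicAccess APIs as documented in the atomic.hpp and atomicAccess.hpp changes above and mirroring the new gtest cases:

// Illustrative only: exercises the 1-byte exchange paths enabled by this patch.
static void byte_exchange_example() {
  Atomic<char> flag;                       // atomic byte, as in the new tests
  flag.store_relaxed((char)0);
  char prev = flag.exchange((char)1);      // new: exchange() on a byte atomic
  assert(prev == 0, "exchange returns the previously stored value");

  volatile int8_t raw = 0;
  int8_t old_raw = AtomicAccess::xchg(&raw, (int8_t)1);  // goes through PlatformXchg<1>
  assert(old_raw == 0, "xchg returns the prior contents");
}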