diff --git a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
index b39f03fd0d2..4d5d97fddcd 100644
--- a/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
+++ b/src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp
@@ -286,31 +286,19 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
 }
 
 // Atomically copy 64 bits of data
-static void atomic_copy64(const volatile void *src, volatile void *dst) {
-#if defined(PPC32)
-  double tmp;
-  asm volatile ("lfd  %0, 0(%1)\n"
-                "stfd %0, 0(%2)\n"
-                : "=f"(tmp)
-                : "b"(src), "b"(dst));
-#elif defined(S390) && !defined(_LP64)
-  double tmp;
-  asm volatile ("ld  %0, 0(%1)\n"
-                "std %0, 0(%2)\n"
-                : "=r"(tmp)
-                : "a"(src), "a"(dst));
-#else
-  *(jlong *) dst = *(const jlong *) src;
-#endif
+inline void atomic_copy64(const volatile void *src, volatile void *dst) {
+  int64_t tmp;
+  __atomic_load(reinterpret_cast<const volatile int64_t*>(src), &tmp, __ATOMIC_RELAXED);
+  __atomic_store(reinterpret_cast<volatile int64_t*>(dst), &tmp, __ATOMIC_RELAXED);
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile int64_t dest;
-  atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
-  return PrimitiveConversions::cast<T>(dest);
+  T dest;
+  __atomic_load(const_cast<T volatile*>(src), &dest, __ATOMIC_RELAXED);
+  return dest;
 }
 
 template<>
@@ -318,7 +306,7 @@ template<typename T>
 inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                  T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
-  atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
+  __atomic_store(dest, &store_value, __ATOMIC_RELAXED);
 }
 
 #endif // OS_CPU_BSD_ZERO_ATOMIC_BSD_ZERO_HPP
diff --git a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
index eefa8d5d062..7b7beba6565 100644
--- a/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
+++ b/src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp
@@ -134,54 +134,18 @@ inline T Atomic::PlatformCmpxchg<8>::operator()(T volatile* dest,
 
 // Atomically copy 64 bits of data
 inline void atomic_copy64(const volatile void *src, volatile void *dst) {
-#if defined(PPC32) && !defined(__SPE__)
-  double tmp;
-  asm volatile ("lfd  %0, %2\n"
-                "stfd %0, %1\n"
-                : "=&f"(tmp), "=Q"(*(volatile double*)dst)
-                : "Q"(*(volatile double*)src));
-#elif defined(PPC32) && defined(__SPE__)
-  long tmp;
-  asm volatile ("evldd  %0, %2\n"
-                "evstdd %0, %1\n"
-                : "=&r"(tmp), "=Q"(*(volatile long*)dst)
-                : "Q"(*(volatile long*)src));
-#elif defined(S390) && !defined(_LP64)
-  double tmp;
-  asm volatile ("ld  %0, %2\n"
-                "std %0, %1\n"
-                : "=&f"(tmp), "=Q"(*(volatile double*)dst)
-                : "Q"(*(volatile double*)src));
-#elif defined(__ARM_ARCH_7A__)
-  // The only way to perform the atomic 64-bit load/store
-  // is to use ldrexd/strexd for both reads and writes.
-  // For store, we need to have the matching (fake) load first.
-  // Put clrex between exclusive ops on src and dst for clarity.
-  uint64_t tmp_r, tmp_w;
-  uint32_t flag_w;
-  asm volatile ("ldrexd %[tmp_r], [%[src]]\n"
-                "clrex\n"
-                "1:\n"
-                "ldrexd %[tmp_w], [%[dst]]\n"
-                "strexd %[flag_w], %[tmp_r], [%[dst]]\n"
-                "cmp    %[flag_w], 0\n"
-                "bne    1b\n"
-                : [tmp_r] "=&r" (tmp_r), [tmp_w] "=&r" (tmp_w),
-                  [flag_w] "=&r" (flag_w)
-                : [src] "r" (src), [dst] "r" (dst)
-                : "cc", "memory");
-#else
-  *(jlong *) dst = *(const jlong *) src;
-#endif
+  int64_t tmp;
+  __atomic_load(reinterpret_cast<const volatile int64_t*>(src), &tmp, __ATOMIC_RELAXED);
+  __atomic_store(reinterpret_cast<volatile int64_t*>(dst), &tmp, __ATOMIC_RELAXED);
 }
 
 template<>
 template<typename T>
 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
   STATIC_ASSERT(8 == sizeof(T));
-  volatile int64_t dest;
-  atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
-  return PrimitiveConversions::cast<T>(dest);
+  T dest;
+  __atomic_load(const_cast<T volatile*>(src), &dest, __ATOMIC_RELAXED);
+  return dest;
 }
 
 template<>
@@ -189,7 +153,7 @@ template<typename T>
 inline void Atomic::PlatformStore<8>::operator()(T volatile* dest,
                                                  T store_value) const {
   STATIC_ASSERT(8 == sizeof(T));
-  atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
+  __atomic_store(dest, &store_value, __ATOMIC_RELAXED);
 }
 
 #endif // OS_CPU_LINUX_ZERO_ATOMIC_LINUX_ZERO_HPP
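For context (this note and the sketch below are not part of the patch): both files now delegate the 64-bit copy to the GCC/Clang generic builtins __atomic_load and __atomic_store with __ATOMIC_RELAXED ordering, instead of per-architecture inline assembly. A minimal standalone C++ sketch of what those builtins guarantee, assuming a GCC-compatible compiler:

// Standalone sketch, not part of the patch: demonstrates the
// __atomic_load/__atomic_store builtins with relaxed ordering.
// 32-bit targets without native 64-bit atomics may need -latomic.
#include <cstdint>
#include <cstdio>

int main() {
  volatile int64_t shared = INT64_C(0x1122334455667788);

  // Relaxed atomic 64-bit load: the value is read in a single indivisible
  // access (no tearing on 32-bit targets), but no ordering with respect to
  // other memory operations is implied.
  int64_t observed;
  __atomic_load(&shared, &observed, __ATOMIC_RELAXED);

  // Relaxed atomic 64-bit store of a new value.
  int64_t next = INT64_C(0x0123456789abcdef);
  __atomic_store(&shared, &next, __ATOMIC_RELAXED);

  std::printf("observed: %#llx\n", static_cast<unsigned long long>(observed));
  return 0;
}

This matches what the removed assembly provided: the PPC32, S390, and ARMv7 sequences only made the 64-bit transfer indivisible, with no barriers. The compiler (or libatomic) is expected to emit an equivalent per-target instruction sequence, e.g. ldrexd/strexd on ARMv7.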