/*
 * Copyright (c) 2003, 2025, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ZERO_ATOMICACCESS_LINUX_ZERO_HPP
#define OS_CPU_LINUX_ZERO_ATOMICACCESS_LINUX_ZERO_HPP

#include "orderAccess_linux_zero.hpp"

// Implementation of class AtomicAccess

template<size_t byte_size>
struct AtomicAccess::PlatformAdd {
  template<typename D, typename I>
  D add_then_fetch(D volatile* dest, I add_value, atomic_memory_order order) const;

  template<typename D, typename I>
  D fetch_then_add(D volatile* dest, I add_value, atomic_memory_order order) const {
    return add_then_fetch(dest, add_value, order) - add_value;
  }
};

template<>
template<typename D, typename I>
inline D AtomicAccess::PlatformAdd<4>::add_then_fetch(D volatile* dest, I add_value,
                                                      atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));

  D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

template<>
template<typename D, typename I>
inline D AtomicAccess::PlatformAdd<8>::add_then_fetch(D volatile* dest, I add_value,
                                                      atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}

template<>
template<typename T>
inline T AtomicAccess::PlatformXchg<4>::operator()(T volatile* dest,
                                                   T exchange_value,
                                                   atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  FULL_MEM_BARRIER;
  T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
  FULL_MEM_BARRIER;
  return result;
}

template<>
template<typename T>
inline T AtomicAccess::PlatformXchg<8>::operator()(T volatile* dest,
                                                   T exchange_value,
                                                   atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  FULL_MEM_BARRIER;
  T result = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELAXED);
  FULL_MEM_BARRIER;
  return result;
}
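// A note on the ordering pattern above, with a minimal usage sketch.
// The add variants combine __ATOMIC_RELEASE with a trailing
// FULL_MEM_BARRIER, while the exchange variants bracket a relaxed
// builtin with FULL_MEM_BARRIER on both sides; either way the result
// is conservative, fence-equivalent ordering that does not depend on
// the 'order' argument. Assuming the shared atomicAccess.hpp
// dispatches the public API to these specializations as on other
// ports, a caller sees roughly this (sketch only, not part of this
// header's API):
//
//   volatile int32_t flag = 0;
//   int32_t prev = AtomicAccess::xchg(&flag, 1);
//   // which here expands to approximately:
//   //   FULL_MEM_BARRIER;
//   //   prev = __atomic_exchange_n(&flag, 1, __ATOMIC_RELAXED);
//   //   FULL_MEM_BARRIER;
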
// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct AtomicAccess::PlatformCmpxchg<1> : AtomicAccess::CmpxchgByteUsingInt {};

template<>
template<typename T>
inline T AtomicAccess::PlatformCmpxchg<4>::operator()(T volatile* dest,
                                                      T compare_value,
                                                      T exchange_value,
                                                      atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  T value = compare_value;
  FULL_MEM_BARRIER;
  __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  FULL_MEM_BARRIER;
  return value;
}

template<>
template<typename T>
inline T AtomicAccess::PlatformCmpxchg<8>::operator()(T volatile* dest,
                                                      T compare_value,
                                                      T exchange_value,
                                                      atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  FULL_MEM_BARRIER;
  T value = compare_value;
  __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                            __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  FULL_MEM_BARRIER;
  return value;
}

// Atomically copy 64 bits of data
inline void atomic_copy64(const volatile void *src, volatile void *dst) {
  int64_t tmp;
  __atomic_load(reinterpret_cast<const volatile int64_t*>(src), &tmp, __ATOMIC_RELAXED);
  __atomic_store(reinterpret_cast<volatile int64_t*>(dst), &tmp, __ATOMIC_RELAXED);
}

template<>
template<typename T>
inline T AtomicAccess::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  T dest;
  __atomic_load(const_cast<T volatile*>(src), &dest, __ATOMIC_RELAXED);
  return dest;
}

template<>
template<typename T>
inline void AtomicAccess::PlatformStore<8>::operator()(T volatile* dest,
                                                       T store_value) const {
  STATIC_ASSERT(8 == sizeof(T));
  __atomic_store(dest, &store_value, __ATOMIC_RELAXED);
}

#endif // OS_CPU_LINUX_ZERO_ATOMICACCESS_LINUX_ZERO_HPP
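// Usage sketch for the 64-bit helpers (comment only; the variable
// names below are hypothetical). On 32-bit Zero targets a plain
// 64-bit assignment may be split into two 32-bit accesses, which is
// why 64-bit values are moved through atomic_copy64 and the
// PlatformLoad<8>/PlatformStore<8> specializations; GCC may in turn
// lower these __atomic builtins to libatomic calls on such targets:
//
//   volatile int64_t src_val = 42;
//   volatile int64_t dst_val;
//   atomic_copy64(&src_val, &dst_val);  // one atomic 64-bit load,
//                                       // then one atomic 64-bit store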