/***
    Copyright (C) 2022 J Reece Wilson (a/k/a "Reece"). All rights reserved.

    File: auAtomic.hpp
    Date: 2022-2-1
    Author: Reece
***/
#pragma once

#if defined(AURORA_COMPILER_CLANG)
#include <stdatomic.h>
#endif

// note: the "|| 1" below forces the volatile typedef path on every compiler;
//       the C11 _Atomic typedefs in the #else branch are currently unused
#if defined(AURORA_COMPILER_MSVC) || 1

using AuAInt8 = volatile AuInt8;
using AuAUInt8 = volatile AuUInt8;

using AuAInt16 = volatile AuInt16;
using AuAUInt16 = volatile AuUInt16;

using AuAInt32 = volatile AuInt32;
using AuAUInt32 = volatile AuUInt32;

#if defined(AURORA_IS_64BIT)
using AuAInt64 = volatile AuInt64;
using AuAUInt64 = volatile AuUInt64;
#endif

#define AU_ATOMIC_INIT(n) n

#else

using AuAInt8 = _Atomic(int8_t);
using AuAUInt8 = _Atomic(uint8_t);

using AuAInt16 = _Atomic(int16_t);
using AuAUInt16 = _Atomic(uint16_t);

using AuAInt32 = _Atomic(int32_t);
using AuAUInt32 = _Atomic(uint32_t);

#if defined(AURORA_IS_64BIT)
using AuAInt64 = _Atomic(int64_t);
using AuAUInt64 = _Atomic(uint64_t);
#endif

#define AU_ATOMIC_INIT ATOMIC_VAR_INIT

#endif
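
// Illustrative usage (hypothetical variable, not part of this header):
// AU_ATOMIC_INIT keeps initializers portable across both typedef families above, e.g.
//     static AuAUInt32 gCounter = AU_ATOMIC_INIT(0);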

// Defines:
// AuAtomicCompareExchange (no weak variants yet)
// -
// AuAtomicTestAndSet
// AuAtomicClearU8Lock
// AuAtomicSet
// AuAtomicUnset (ret: bool)
// AuAtomicAndUnsetBit (ret: T)
// -
// AuAtomicLoadWeak
// AuAtomicStoreWeak
// -
// AuAtomicLoad
// AuAtomicStore
// -
// AuAtomicOrSetBit
// AuAtomicOr
// AuAtomicAnd
// AuAtomicAdd (warn: returns post op atomic value)
// AuAtomicSub (warn: returns post op atomic value)
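//
// Illustrative example of the return-value conventions above (hypothetical counter,
// not part of this header). Given: AuAUInt32 uCount = AU_ATOMIC_INIT(1);
//     AuAtomicAdd(&uCount, 2u);                 // -> 3  (post-op value)
//     AuAtomicSub(&uCount, 1u);                 // -> 2  (post-op value)
//     AuAtomicCompareExchange(&uCount, 5u, 2u); // -> 2  (original value; 5 is stored)
//     AuAtomicCompareExchange(&uCount, 9u, 2u); // -> 5  (compare failed; nothing stored)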

template <class T>
struct AuAtomicUtils
{
    /**
     * @brief Generic bitwise OR of (1 << offset) into *in
     * @return original value
     * @warning T is bound by platform and compiler constraints
     */
    static T Set(T *in, AuUInt8 offset);

    /**
     * @brief Bitwise ORs orValue into *in
     * @param in
     * @param orValue
     * @return original value
     */
    static T Or(T *in, T orValue);

    /**
     * @brief Bitwise ANDs andValue into *in
     * @param in
     * @param andValue
     * @return original value
     */
    static T And(T *in, T andValue);

    /**
     * @brief Adds addend to in
     * @return updated value
     * @warning T is bound by platform and compiler constraints
     */
    static T Add(T *in, T addend);

    /**
     * @brief Subtracts the minuend from in
     * @return updated value
     * @warning T is bound by platform and compiler constraints
     */
    static T Sub(T *in, T minuend);

    /**
     * @brief Generic compare exchange
     * @param replace replacement value for in if in matches compare
     * @param compare required reference value
     * @return original value
     * @warning T is bound by platform and compiler constraints
     */
    static T CompareExchange(T *in, T replace, T compare);

    /**
     * @brief { return *in & (1 << offset); in |= (1 << offset) }
     * @param in
     * @param offset Bit index
     * @return *in & (1 << offset)
     * @warning T is bound by platform and compiler constraints
     */
    static bool TestAndSet(T *in, const AuUInt8 offset);

    // Atomic load with acquire semantics
    static T Load(T *in);

    // Atomic store with release semantics
    static void Store(T *in, T value);

    // Releases a byte-sized lock by storing zero with release semantics
    static void ClearU8Lock(T *in);

    // Weak variants: LoadWeak currently forwards to Load; StoreWeak is a plain,
    // unordered store
    static T LoadWeak(T *in);

    static void StoreWeak(T *in, T value);
};
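
// Illustrative sketch (not part of this header's API): a minimal spin lock built
// from the primitives above, assuming a zero-initialized AuAUInt32 lock word.
//
//     void Lock(AuAUInt32 &uLock)
//     {
//         while (AuAtomicTestAndSet(&uLock, 0 /* bit index */))
//         {
//             // bit 0 was already set; another thread holds the lock, keep spinning
//         }
//     }
//
//     void Unlock(AuAUInt32 &uLock)
//     {
//         AuAtomicClearU8Lock(&uLock); // release-store zero over the lock byte
//     }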

#if defined(AURORA_COMPILER_MSVC)

template <>
inline auline AuUInt64 AuAtomicUtils<AuUInt64>::CompareExchange(AuUInt64 *in, AuUInt64 replace, AuUInt64 compare)
{
    return static_cast<AuUInt64>(_InterlockedCompareExchange64(reinterpret_cast<long long volatile *>(in), static_cast<long long>(replace), static_cast<long long>(compare)));
}

template <>
inline auline AuUInt32 AuAtomicUtils<AuUInt32>::CompareExchange(AuUInt32 *in, AuUInt32 replace, AuUInt32 compare)
{
    return static_cast<AuUInt32>(_InterlockedCompareExchange(reinterpret_cast<long volatile *>(in), static_cast<long>(replace), static_cast<long>(compare)));
}

template <>
inline auline AuUInt16 AuAtomicUtils<AuUInt16>::CompareExchange(AuUInt16 *in, AuUInt16 replace, AuUInt16 compare)
{
    return static_cast<AuUInt16>(_InterlockedCompareExchange16(reinterpret_cast<short volatile *>(in), static_cast<short>(replace), static_cast<short>(compare)));
}

template <>
inline auline AuUInt8 AuAtomicUtils<AuUInt8>::CompareExchange(AuUInt8 *in, AuUInt8 replace, AuUInt8 compare)
{
    return static_cast<AuUInt8>(_InterlockedCompareExchange8(reinterpret_cast<char volatile *>(in), static_cast<char>(replace), static_cast<char>(compare)));
}

template <>
inline auline AuInt64 AuAtomicUtils<AuInt64>::CompareExchange(AuInt64 *in, AuInt64 replace, AuInt64 compare)
{
    return _InterlockedCompareExchange64(reinterpret_cast<long long volatile *>(in), static_cast<long long>(replace), static_cast<long long>(compare));
}

template <>
inline auline AuInt32 AuAtomicUtils<AuInt32>::CompareExchange(AuInt32 *in, AuInt32 replace, AuInt32 compare)
{
    return _InterlockedCompareExchange(reinterpret_cast<long volatile *>(in), static_cast<long>(replace), static_cast<long>(compare));
}

template <>
inline auline AuInt16 AuAtomicUtils<AuInt16>::CompareExchange(AuInt16 *in, AuInt16 replace, AuInt16 compare)
{
    return _InterlockedCompareExchange16(reinterpret_cast<short volatile *>(in), static_cast<short>(replace), static_cast<short>(compare));
}

template <>
inline auline AuInt8 AuAtomicUtils<AuInt8>::CompareExchange(AuInt8 *in, AuInt8 replace, AuInt8 compare)
{
    return _InterlockedCompareExchange8(reinterpret_cast<char volatile *>(in), static_cast<char>(replace), static_cast<char>(compare));
}

#if !defined(AURORA_IS_32BIT)
template <>
inline auline AuUInt64 AuAtomicUtils<AuUInt64>::Add(AuUInt64 *in, AuUInt64 addend)
{
    return static_cast<AuUInt64>(_InterlockedExchangeAdd64(reinterpret_cast<long long volatile *>(in), static_cast<long long>(addend)) + static_cast<long long>(addend));
}
#endif

template <>
inline auline AuUInt32 AuAtomicUtils<AuUInt32>::Add(AuUInt32 *in, AuUInt32 addend)
{
    return static_cast<AuUInt32>(_InterlockedExchangeAdd(reinterpret_cast<long volatile *>(in), static_cast<long>(addend)) + static_cast<long>(addend));
}

#if !defined(AURORA_IS_32BIT)
template <>
inline auline AuInt64 AuAtomicUtils<AuInt64>::Add(AuInt64 *in, AuInt64 addend)
{
    return _InterlockedExchangeAdd64(reinterpret_cast<long long volatile *>(in), static_cast<long long>(addend)) + static_cast<long long>(addend);
}
#endif

template <>
inline auline AuInt32 AuAtomicUtils<AuInt32>::Add(AuInt32 *in, AuInt32 addend)
{
    return _InterlockedExchangeAdd(reinterpret_cast<long volatile *>(in), static_cast<long>(addend)) + static_cast<long>(addend);
}

// TODO:
#if 0
template <>
inline auline AuUInt16 AuAtomicUtils<AuUInt16>::Add(AuUInt16 *in, AuUInt16 addend)
{
    return {};
}
#endif

template <>
inline auline AuUInt64 AuAtomicUtils<AuUInt64>::Sub(AuUInt64 *in, AuUInt64 minuend)
{
    return Add(in, AuUInt64(0) - minuend);
}

template <>
inline auline AuUInt32 AuAtomicUtils<AuUInt32>::Sub(AuUInt32 *in, AuUInt32 minuend)
{
    return Add(in, AuUInt32(0) - minuend);
}

template <>
inline auline AuInt64 AuAtomicUtils<AuInt64>::Sub(AuInt64 *in, AuInt64 minuend)
{
    return Add(in, AuInt64(0) - minuend);
}

template <>
inline auline AuInt32 AuAtomicUtils<AuInt32>::Sub(AuInt32 *in, AuInt32 minuend)
{
    return Add(in, AuInt32(0) - minuend);
}

// TODO:
#if 0
template <>
inline auline AuUInt16 AuAtomicUtils<AuUInt16>::Sub(AuUInt16 *in, AuUInt16 minuend)
{
    return {};
}
#endif

#if !defined(AURORA_IS_32BIT)
template <>
inline auline AuUInt64 AuAtomicUtils<AuUInt64>::Or(AuUInt64 *in, AuUInt64 orValue)
{
    return _InterlockedOr64(reinterpret_cast<long long volatile *>(in), orValue);
}
#endif

template <>
inline auline AuUInt32 AuAtomicUtils<AuUInt32>::Or(AuUInt32 *in, AuUInt32 orValue)
{
    return _InterlockedOr(reinterpret_cast<long volatile *>(in), orValue);
}

template <>
inline auline AuUInt16 AuAtomicUtils<AuUInt16>::Or(AuUInt16 *in, AuUInt16 orValue)
{
    return _InterlockedOr16(reinterpret_cast<short volatile *>(in), orValue);
}

#if !defined(AURORA_IS_32BIT)
template <>
inline auline AuInt64 AuAtomicUtils<AuInt64>::Or(AuInt64 *in, AuInt64 orValue)
{
    return _InterlockedOr64(reinterpret_cast<long long volatile *>(in), orValue);
}
#endif

template <>
inline auline AuInt32 AuAtomicUtils<AuInt32>::Or(AuInt32 *in, AuInt32 orValue)
{
    return _InterlockedOr(reinterpret_cast<long volatile *>(in), orValue);
}

template <>
inline auline long AuAtomicUtils<long>::Or(long *in, long orValue)
{
    return _InterlockedOr(reinterpret_cast<long volatile *>(in), orValue);
}

template <>
inline auline unsigned long AuAtomicUtils<unsigned long>::Or(unsigned long *in, unsigned long orValue)
{
    return _InterlockedOr(reinterpret_cast<long volatile *>(in), orValue);
}

template <>
inline auline AuInt16 AuAtomicUtils<AuInt16>::Or(AuInt16 *in, AuInt16 orValue)
{
    return _InterlockedOr16(reinterpret_cast<short volatile *>(in), orValue);
}

template <class T>
inline auline T AuAtomicUtils<T>::Set(T *in, AuUInt8 offset)
{
    return AuAtomicUtils<T>::Or(in, T(1) << offset);
}

#if !defined(AURORA_IS_32BIT)
template <>
inline auline AuUInt64 AuAtomicUtils<AuUInt64>::And(AuUInt64 *in, AuUInt64 AndValue)
{
    return _InterlockedAnd64(reinterpret_cast<long long volatile *>(in), AndValue);
}
#endif

template <>
inline auline AuUInt32 AuAtomicUtils<AuUInt32>::And(AuUInt32 *in, AuUInt32 AndValue)
{
    return _InterlockedAnd(reinterpret_cast<long volatile *>(in), AndValue);
}

template <>
inline auline AuUInt16 AuAtomicUtils<AuUInt16>::And(AuUInt16 *in, AuUInt16 AndValue)
{
    return _InterlockedAnd16(reinterpret_cast<short volatile *>(in), AndValue);
}

#if !defined(AURORA_IS_32BIT)
template <>
inline auline AuInt64 AuAtomicUtils<AuInt64>::And(AuInt64 *in, AuInt64 AndValue)
{
    return _InterlockedAnd64(reinterpret_cast<long long volatile *>(in), AndValue);
}
#endif

template <>
inline auline AuInt32 AuAtomicUtils<AuInt32>::And(AuInt32 *in, AuInt32 AndValue)
{
    return _InterlockedAnd(reinterpret_cast<long volatile *>(in), AndValue);
}

template <>
inline auline long AuAtomicUtils<long>::And(long *in, long AndValue)
{
    return _InterlockedAnd(reinterpret_cast<long volatile *>(in), AndValue);
}

template <>
inline auline unsigned long AuAtomicUtils<unsigned long>::And(unsigned long *in, unsigned long AndValue)
{
    return _InterlockedAnd(reinterpret_cast<long volatile *>(in), AndValue);
}

template <>
inline auline AuInt16 AuAtomicUtils<AuInt16>::And(AuInt16 *in, AuInt16 AndValue)
{
    return _InterlockedAnd16(reinterpret_cast<short volatile *>(in), AndValue);
}

#elif defined(AURORA_COMPILER_CLANG) || defined(AURORA_COMPILER_GCC)

template <class T>
inline auline T AuAtomicUtils<T>::CompareExchange(T *in, T replace, T compare)
{
    return __sync_val_compare_and_swap(in, compare, replace);
}

template <class T>
inline auline T AuAtomicUtils<T>::Add(T *in, T addend)
{
    return __sync_add_and_fetch(in, addend);
}

template <class T>
inline auline T AuAtomicUtils<T>::Sub(T *in, T minuend)
{
    return __sync_sub_and_fetch(in, minuend);
}

template <class T>
inline auline T AuAtomicUtils<T>::Set(T *in, AuUInt8 offset)
{
    return __sync_fetch_and_or(in, T(1) << offset);
}

template <class T>
inline auline T AuAtomicUtils<T>::Or(T *in, T value)
{
    return __sync_fetch_and_or(in, value);
}

template <class T>
inline auline T AuAtomicUtils<T>::And(T *in, T value)
{
    return __sync_fetch_and_and(in, value);
}

#endif

template <class T>
inline auline T AuAtomicUtils<T>::LoadWeak(T *in)
{
    return AuAtomicUtils<T>::Load(in);
}

template <class T>
inline void AuAtomicUtils<T>::StoreWeak(T *in, T val)
{
    *in = val;
}

#if defined(AURORA_COMPILER_CLANG)
#define ATOMIC_PREFIX_HAX(name) __c11_ ## name
#else
#define ATOMIC_PREFIX_HAX(name) __ ## name ## _explicit
#endif
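
// Example expansion (illustrative): ATOMIC_PREFIX_HAX(atomic_load)(pAtomic, __ATOMIC_ACQUIRE)
//   Clang:     __c11_atomic_load(pAtomic, __ATOMIC_ACQUIRE)
//   otherwise: __atomic_load_explicit(pAtomic, __ATOMIC_ACQUIRE)
// where pAtomic is a hypothetical _Atomic(...) pointer.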

template <class T>
inline auline T AuAtomicUtils<T>::Load(T *in)
{
    if constexpr (AuIsPointer_v<T>)
    {
        return (T)AuAtomicUtils<AuUInt>::Load((AuUInt *)in);
    }
    else
    {
#if defined(AURORA_COMPILER_MSVC)
        return *(volatile T*)in;
#else

#if defined(AURORA_COMPILER_CLANG)
        if constexpr (AuIsSame_v<AuUInt8, T>)
        {
            return ATOMIC_PREFIX_HAX(atomic_load)((_Atomic(AuUInt8) *)(in), __ATOMIC_ACQUIRE);
        }
        else if constexpr (AuIsSame_v<AuInt8, T>)
        {
            return ATOMIC_PREFIX_HAX(atomic_load)((_Atomic(AuInt8) *)(in), __ATOMIC_ACQUIRE);
        }
        else if constexpr (AuIsSame_v<AuUInt16, T>)
        {
            return ATOMIC_PREFIX_HAX(atomic_load)((_Atomic(AuUInt16) *)(in), __ATOMIC_ACQUIRE);
        }
        else if constexpr (AuIsSame_v<AuInt16, T>)
        {
            return ATOMIC_PREFIX_HAX(atomic_load)((_Atomic(AuInt16) *)(in), __ATOMIC_ACQUIRE);
        }
        else if constexpr (AuIsSame_v<AuUInt32, T>)
        {
            return ATOMIC_PREFIX_HAX(atomic_load)((_Atomic(AuUInt32) *)(in), __ATOMIC_ACQUIRE);
        }
        else if constexpr (AuIsSame_v<AuInt32, T>)
        {
            return ATOMIC_PREFIX_HAX(atomic_load)((_Atomic(AuInt32) *)(in), __ATOMIC_ACQUIRE);
        }
        else if constexpr (AuIsSame_v<AuUInt64, T>)
        {
            return ATOMIC_PREFIX_HAX(atomic_load)((_Atomic(AuUInt64) *)(in), __ATOMIC_ACQUIRE);
        }
        else if constexpr (AuIsSame_v<AuInt64, T>)
        {
            return ATOMIC_PREFIX_HAX(atomic_load)((_Atomic(AuInt64) *)(in), __ATOMIC_ACQUIRE);
        }
        else
        {
            static_assert(AuIsVoid_v<T>, "T");
        }
#else
        return __sync_val_compare_and_swap(in, 0, 0);
#endif
#endif
    }
}

template <class T>
inline auline void AuAtomicUtils<T>::Store(T *in, T val)
{
    if constexpr (AuIsPointer_v<T>)
    {
        AuAtomicUtils<AuUInt>::Store((AuUInt *)in, (AuUInt)val);
        return;
    }
    else
    {
#if defined(AURORA_COMPILER_MSVC) && (defined(AURORA_ARCH_X64) || defined(AURORA_ARCH_X86))
        *(volatile T*)in = val;
#elif defined(AURORA_COMPILER_MSVC)
#if 0
        ::MemoryBarrier();
        *(volatile T *)in = val;
#else
        // assume /volatile:iso
        MemoryBarrier();

        if constexpr (AuIsSame_v<AuUInt8, T>)
        {
            ::InterlockedExchange8((CHAR volatile *)in, (CHAR)val);
        }
        else if constexpr (AuIsSame_v<AuInt8, T>)
        {
            ::InterlockedExchange8((CHAR volatile *)in, (CHAR)val);
        }
        else if constexpr (AuIsSame_v<AuUInt16, T>)
        {
            ::InterlockedExchange16((SHORT volatile *)in, (SHORT)val);
        }
        else if constexpr (AuIsSame_v<AuInt16, T>)
        {
            ::InterlockedExchange16((SHORT volatile *)in, (SHORT)val);
        }
        else if constexpr (AuIsSame_v<AuUInt32, T>)
        {
            ::InterlockedExchange32((LONG32 volatile *)in, (LONG32)val);
        }
        else if constexpr (AuIsSame_v<AuInt32, T>)
        {
            ::InterlockedExchange32((LONG32 volatile *)in, (LONG32)val);
        }
        else if constexpr (AuIsSame_v<AuUInt64, T>)
        {
            ::InterlockedExchange64((LONG64 volatile *)in, (LONG64)val);
        }
        else if constexpr (AuIsSame_v<AuInt64, T>)
        {
            ::InterlockedExchange64((LONG64 volatile *)in, (LONG64)val);
        }
        else
        {
            static_assert(AuIsVoid_v<T>, "T");
        }

#endif
#else
#if !(defined(AURORA_ARCH_X64) || defined(AURORA_ARCH_X86))
        __sync_synchronize();
#endif
        if constexpr (AuIsSame_v<AuUInt8, T>)
        {
            ATOMIC_PREFIX_HAX(atomic_store)((_Atomic(AuUInt8) *)(in), val, __ATOMIC_RELEASE);
        }
        else if constexpr (AuIsSame_v<AuInt8, T>)
        {
            ATOMIC_PREFIX_HAX(atomic_store)((_Atomic(AuInt8) *)(in), val, __ATOMIC_RELEASE);
        }
        else if constexpr (AuIsSame_v<AuUInt16, T>)
        {
            ATOMIC_PREFIX_HAX(atomic_store)((_Atomic(AuUInt16) *)(in), val, __ATOMIC_RELEASE);
        }
        else if constexpr (AuIsSame_v<AuInt16, T>)
        {
            ATOMIC_PREFIX_HAX(atomic_store)((_Atomic(AuInt16) *)(in), val, __ATOMIC_RELEASE);
        }
        else if constexpr (AuIsSame_v<AuUInt32, T>)
        {
            ATOMIC_PREFIX_HAX(atomic_store)((_Atomic(AuUInt32) *)(in), val, __ATOMIC_RELEASE);
        }
        else if constexpr (AuIsSame_v<AuInt32, T>)
        {
            ATOMIC_PREFIX_HAX(atomic_store)((_Atomic(AuInt32) *)(in), val, __ATOMIC_RELEASE);
        }
        else if constexpr (AuIsSame_v<AuUInt64, T>)
        {
            ATOMIC_PREFIX_HAX(atomic_store)((_Atomic(AuUInt64) *)(in), val, __ATOMIC_RELEASE);
        }
        else if constexpr (AuIsSame_v<AuInt64, T>)
        {
            ATOMIC_PREFIX_HAX(atomic_store)((_Atomic(AuInt64) *)(in), val, __ATOMIC_RELEASE);
        }
        else
        {
            static_assert(AuIsVoid_v<T>, "T");
        }
#endif
    }
}

template <>
inline auline
void AuAtomicUtils<AuUInt8>::ClearU8Lock(AuUInt8 *in)
{
#if defined(AURORA_COMPILER_MSVC) && (defined(AURORA_ARCH_X64) || defined(AURORA_ARCH_X86))
    *(volatile AuUInt8 *)in = 0;
#elif defined(AURORA_COMPILER_MSVC)
    // assume /volatile:iso
    MemoryBarrier();
    ::_InterlockedExchange8((volatile char *)in, 0);
    // I think this will work on aarch64 and most RISC architectures
    //InterlockedAndRelease((volatile LONG *)in, ~0xFF);
#else
    // Flush non-atomic operations within L1 or other store/load queue
#if !(defined(AURORA_ARCH_X64) || defined(AURORA_ARCH_X86))
    __sync_synchronize();
#endif

    // Clear the lock
    ATOMIC_PREFIX_HAX(atomic_store)((_Atomic(AuUInt8) *)(in), 0, __ATOMIC_RELEASE);
#endif
}

template <>
inline auline
void AuAtomicUtils<AuUInt32>::ClearU8Lock(AuUInt32 *in)
{
#if defined(AU_CPU_ENDIAN_LITTLE)
    AuAtomicUtils<AuUInt8>::ClearU8Lock((AuUInt8 *)in);
#else
    AuAtomicUtils<AuUInt8>::ClearU8Lock(((AuUInt8 *)in) + 3);
#endif
}

template <class T>
inline auline bool AuAtomicUtils<T>::TestAndSet(T *in, const AuUInt8 offset)
{
    return AuAtomicUtils<T>::Set(in, offset) & (T(1) << offset);
}

#if defined(AURORA_COMPILER_MSVC) && (defined(AURORA_ARCH_X64) || defined(AURORA_ARCH_X86))

template <>
inline auline bool AuAtomicUtils<unsigned long>::TestAndSet(unsigned long *in, const AuUInt8 offset)
{
    return _interlockedbittestandset(reinterpret_cast<volatile long *>(in), offset);
}

template <>
inline auline bool AuAtomicUtils<long>::TestAndSet(long *in, const AuUInt8 offset)
{
    return _interlockedbittestandset(reinterpret_cast<volatile long *>(in), offset);
}

template <>
inline auline bool AuAtomicUtils<AuUInt32>::TestAndSet(AuUInt32 *in, const AuUInt8 offset)
{
    return _interlockedbittestandset(reinterpret_cast<volatile long *>(in), offset);
}

template <>
inline auline bool AuAtomicUtils<AuInt32>::TestAndSet(AuInt32 *in, const AuUInt8 offset)
{
    return _interlockedbittestandset(reinterpret_cast<volatile long *>(in), offset);
}

#if !defined(AURORA_IS_32BIT)
template <>
inline auline bool AuAtomicUtils<AuUInt64>::TestAndSet(AuUInt64 *in, const AuUInt8 offset)
{
    return _interlockedbittestandset64(reinterpret_cast<volatile long long *>(in), offset);
}

template <>
inline auline bool AuAtomicUtils<AuInt64>::TestAndSet(AuInt64 *in, const AuUInt8 offset)
{
    return _interlockedbittestandset64(reinterpret_cast<volatile long long *>(in), offset);
}
#endif

#endif

template <class T>
auline
void AuAtomicStore(T *in, T value)
{
    AuAtomicUtils<T>::Store(in, value);
}

template <class T>
auline
T AuAtomicLoad(T *in)
{
    return AuAtomicUtils<T>::Load(in);
}

template <class T>
auline
T AuAtomicLoadWeak(T *in)
{
    return AuAtomicUtils<T>::LoadWeak(in);
}

template <class T>
auline
void AuAtomicStoreWeak(T *in, T value)
{
    AuAtomicUtils<T>::StoreWeak(in, value);
}

template <class T>
auline
void AuAtomicClearU8Lock(T *in)
{
    AuAtomicUtils<T>::ClearU8Lock(in);
}

template <class T>
auline
T AuAtomicOrSetBit(T *in, AuUInt8 offset)
{
    return AuAtomicUtils<T>::Set(in, offset);
}

template <class T>
auline
bool AuAtomicSet(T *in, AuUInt8 offset)
{
    return AuAtomicOrSetBit(in, offset) & (T(1) << offset);
}

template <class T>
auline
T AuAtomicAndUnsetBit(T *in, AuUInt8 offset)
{
    return AuAtomicUtils<T>::And(in, ~(T(1) << T(offset)));
}

template <class T>
auline
bool AuAtomicUnset(T *in, AuUInt8 offset)
{
    auto uBit = T(1) << T(offset);
    return AuAtomicUtils<T>::And(in, ~(uBit)) & uBit;
}

template <class T>
auline
T AuAtomicOr(T *in, T value)
{
    return AuAtomicUtils<T>::Or(in, value);
}

template <class T>
auline
T AuAtomicAnd(T *in, T value)
{
    return AuAtomicUtils<T>::And(in, value);
}

template <class T>
auline
T AuAtomicAdd(T *in, T addend)
{
    return AuAtomicUtils<T>::Add(in, addend);
}

template <class T>
auline
T AuAtomicSub(T *in, T minuend)
{
    return AuAtomicUtils<T>::Sub(in, minuend);
}

template <class T>
auline
T AuAtomicCompareExchange(T *in, T replace, T compare)
{
    return AuAtomicUtils<T>::CompareExchange(in, replace, compare);
}

template <class T>
auline
bool AuAtomicTestAndSet(T *in, AuUInt8 offset)
{
    return AuAtomicUtils<T>::TestAndSet(in, offset);
}
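
// Illustrative sketch (hypothetical helper, not part of this header): a typical
// compare-exchange retry loop built on the wrappers above, here a saturating add.
//
//     AuUInt32 SaturatingAdd(AuAUInt32 &uValue, AuUInt32 uAddend, AuUInt32 uCeiling)
//     {
//         while (true)
//         {
//             auto uCurrent = AuAtomicLoadWeak(&uValue);
//             auto uNext    = (uCurrent + uAddend > uCeiling) ? uCeiling : (uCurrent + uAddend);
//             if (AuAtomicCompareExchange(&uValue, uNext, uCurrent) == uCurrent)
//             {
//                 return uNext; // the swap took effect
//             }
//             // another thread updated uValue first; reload and retry
//         }
//     }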

template <class T>
auline
void AuAtomicStore(volatile T *in, T value)
{
    AuAtomicUtils<T>::Store((T *)in, value);
}

template <class T>
auline
T AuAtomicLoad(volatile T *in)
{
    return AuAtomicUtils<T>::Load((T *)in);
}

template <class T>
auline
T AuAtomicLoadWeak(volatile T *in)
{
    return AuAtomicUtils<T>::LoadWeak((T *)in);
}

template <class T>
auline
void AuAtomicStoreWeak(volatile T *in, T value)
{
    AuAtomicUtils<T>::StoreWeak((T *)in, value);
}

template <class T>
auline
void AuAtomicClearU8Lock(volatile T *in)
{
    AuAtomicUtils<T>::ClearU8Lock((T *)in);
}

template <class T>
auline
T AuAtomicOrSetBit(volatile T *in, AuUInt8 offset)
{
    return AuAtomicOrSetBit((T *)in, offset);
}

template <class T>
auline
bool AuAtomicSet(volatile T *in, AuUInt8 offset)
{
    return AuAtomicSet((T *)in, offset);
}

template <class T>
auline
T AuAtomicAndUnsetBit(volatile T *in, AuUInt8 offset)
{
    return AuAtomicAndUnsetBit((T *)in, offset);
}

template <class T>
auline
bool AuAtomicUnset(volatile T *in, AuUInt8 offset)
{
    return AuAtomicUnset((T *)in, offset);
}

template <class T>
auline
T AuAtomicOr(volatile T *in, T value)
{
    return AuAtomicOr((T *)in, value);
}

template <class T>
auline
T AuAtomicAnd(volatile T *in, T value)
{
    return AuAtomicAnd((T *)in, value);
}

template <class T>
auline
T AuAtomicAdd(volatile T *in, T addend)
{
    return AuAtomicUtils<T>::Add((T *)(in), addend);
}

template <class T>
auline
T AuAtomicSub(volatile T *in, T minuend)
{
    return AuAtomicUtils<T>::Sub((T *)(in), minuend);
}

template <class T>
auline
T AuAtomicCompareExchange(volatile T *in, T replace, T compare)
{
    return AuAtomicUtils<T>::CompareExchange((T *)(in), replace, compare);
}

template <class T>
auline
bool AuAtomicTestAndSet(volatile T *in, AuUInt8 offset)
{
    return AuAtomicUtils<T>::TestAndSet((T *)(in), offset);
}
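
// The volatile overloads above let the AuA* typedefs be passed directly, e.g.
// (hypothetical reference-counted object, illustrative only):
//     struct Example { AuAUInt32 uRefCount { AU_ATOMIC_INIT(1) }; } object;
//     AuAtomicAdd(&object.uRefCount, 1u);              // take a reference (returns new count)
//     if (AuAtomicSub(&object.uRefCount, 1u) == 0)     // drop a reference
//     {
//         // last reference released
//     }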