diff --git a/src/ports/SkBarriers_tsan.h b/src/ports/SkBarriers_tsan.h
index ae68b3daf9..6f273907ab 100644
--- a/src/ports/SkBarriers_tsan.h
+++ b/src/ports/SkBarriers_tsan.h
@@ -8,41 +8,18 @@
 #ifndef SkBarriers_tsan_DEFINED
 #define SkBarriers_tsan_DEFINED
 
-#include <sanitizer/tsan_interface_atomic.h>
-
 static inline void sk_compiler_barrier() { asm volatile("" : : : "memory"); }
 
-// We'd do this as separate functions, but you can't partially specialize functions...
-template <typename T, int bits>
-struct SkBarriers {
-    static T AcquireLoad(T*);
-    static void ReleaseStore(T*, T);
-};
-
-#define SK_BARRIERS(BITS) \
-    template <typename T> \
-    struct SkBarriers<T, BITS> { \
-        static T AcquireLoad(T* ptr) { \
-            return (T)__tsan_atomic ## BITS ## _load((__tsan_atomic ## BITS*)ptr, \
-                                                     __tsan_memory_order_acquire); \
-        } \
-        static void ReleaseStore(T* ptr, T val) { \
-            __tsan_atomic ## BITS ## _store((__tsan_atomic ## BITS*)ptr, \
-                                            val, \
-                                            __tsan_memory_order_release); \
-        } \
-    }
-SK_BARRIERS(8);
-SK_BARRIERS(16);
-SK_BARRIERS(32);
-SK_BARRIERS(64);
-#undef SK_BARRIERS
+template <typename T>
+T sk_acquire_load(T* ptr) {
+    SkASSERT(__atomic_always_lock_free(sizeof(T), ptr));
+    return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
 
 template <typename T>
-T sk_acquire_load(T* ptr) { return SkBarriers<T, 8*sizeof(T)>::AcquireLoad(ptr); }
-
-template <typename T>
-void sk_release_store(T* ptr, T val) { SkBarriers<T, 8*sizeof(T)>::ReleaseStore(ptr, val); }
-
+void sk_release_store(T* ptr, T val) {
+    SkASSERT(__atomic_always_lock_free(sizeof(T), ptr));
+    return __atomic_store_n(ptr, val, __ATOMIC_RELEASE);
+}
 
 #endif//SkBarriers_tsan_DEFINED