/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_DEFINED
#define SkAtomics_DEFINED

// This file is not part of the public Skia API.
#include "../private/SkNoncopyable.h"
#include "SkTypes.h"
#include <atomic>

// ~~~~~~~~ APIs ~~~~~~~~~

enum sk_memory_order {
    sk_memory_order_relaxed,
    sk_memory_order_consume,
    sk_memory_order_acquire,
    sk_memory_order_release,
    sk_memory_order_acq_rel,
    sk_memory_order_seq_cst,
};

template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);

// A little wrapper class for small T (think, builtins: int, float, void*) to
// ensure they're always used atomically.  This is our stand-in for std::atomic<T>.
// !!! Please _really_ know what you're doing if you change default_memory_order. !!!
template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst>
class SkAtomic : SkNoncopyable {
public:
    SkAtomic() {}
    explicit SkAtomic(const T& val) : fVal(val) {}

    // It is essential we return by value rather than by const&.  fVal may change at any time.
    T load(sk_memory_order mo = default_memory_order) const {
        return sk_atomic_load(&fVal, mo);
    }

    void store(const T& val, sk_memory_order mo = default_memory_order) {
        sk_atomic_store(&fVal, val, mo);
    }

    // Alias for .load(default_memory_order).
    operator T() const { return this->load(); }

    // Alias for .store(v, default_memory_order).
    T operator=(const T& v) {
        this->store(v);
        return v;
    }

private:
    T fVal;
};

// ~~~~~~~~ Implementations ~~~~~~~~~

template <typename T>
T sk_atomic_load(const T* ptr, sk_memory_order mo) {
    SkASSERT(mo == sk_memory_order_relaxed ||
             mo == sk_memory_order_seq_cst ||
             mo == sk_memory_order_acquire ||
             mo == sk_memory_order_consume);
    const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr);
    return std::atomic_load_explicit(ap, (std::memory_order)mo);
}

template <typename T>
void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
    SkASSERT(mo == sk_memory_order_relaxed ||
             mo == sk_memory_order_seq_cst ||
             mo == sk_memory_order_release);
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_store_explicit(ap, val, (std::memory_order)mo);
}

template <typename T>
T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
    // All values of mo are valid.
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo);
}

template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
                                sk_memory_order success,
                                sk_memory_order failure) {
    // All values of success are valid.
    SkASSERT(failure == sk_memory_order_relaxed ||
             failure == sk_memory_order_seq_cst ||
             failure == sk_memory_order_acquire ||
             failure == sk_memory_order_consume);
    SkASSERT(failure <= success);
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_compare_exchange_strong_explicit(ap, expected, desired,
                                                        (std::memory_order)success,
                                                        (std::memory_order)failure);
}

// ~~~~~~~~ Legacy APIs ~~~~~~~~~

// From here down we have shims for our old atomics API, to be weaned off of.
// We use the default sequentially-consistent memory order to make things simple
// and to match the practical reality of our old _sync and _win implementations.
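// Usage sketch (editor's illustration, not part of the original header): the
// shims below return the value *before* the update, matching fetch_add
// semantics, with seq_cst ordering.  The ref-count example is hypothetical.
//
//   int32_t refCnt = 1;
//   int32_t prev = sk_atomic_inc(&refCnt);   // refCnt is now 2; prev == 1
//   if (1 == sk_atomic_dec(&refCnt)) {       // returns 2 here, so no cleanup
//       // Last owner released: safe to destroy the shared object.
//   }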
inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); }
inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); }

#endif//SkAtomics_DEFINED
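// Appendix (editor's illustrative sketch, not from the original header):
// SkAtomic<T> in use, plus a CAS retry loop built on sk_atomic_compare_exchange.
// The names fMax and sk_example_fetch_max are hypothetical.
//
//   SkAtomic<int32_t> fMax(0);   // all operations default to seq_cst
//   fMax = 5;                    // operator= is an alias for .store()
//   int32_t cur = fMax;          // operator T() is an alias for .load()
//
//   // On failure, sk_atomic_compare_exchange refreshes *expected with the
//   // observed value, so each iteration re-tests against the latest contents.
//   static int32_t sk_example_fetch_max(int32_t* ptr, int32_t val) {
//       int32_t prev = sk_atomic_load(ptr);
//       while (prev < val && !sk_atomic_compare_exchange(ptr, &prev, val)) {}
//       return prev;   // the value observed just before val (possibly) stuck
//   }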