/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_DEFINED
#define SkAtomics_DEFINED

// This file is not part of the public Skia API.
#include "SkTypes.h"
#include <atomic>

// ~~~~~~~~ APIs ~~~~~~~~~

enum sk_memory_order {
    sk_memory_order_relaxed,
    sk_memory_order_consume,
    sk_memory_order_acquire,
    sk_memory_order_release,
    sk_memory_order_acq_rel,
    sk_memory_order_seq_cst,
};
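
// Note: these enumerators are meant to correspond one-to-one with
// std::memory_order (relaxed, consume, acquire, release, acq_rel, seq_cst);
// the implementations below cast sk_memory_order directly to std::memory_order
// and rely on that correspondence.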

template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);
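
// A minimal usage sketch of the free functions (illustrative only, not part of
// this header's API surface):
//
//     int32_t n = 0;
//     sk_atomic_store(&n, 42, sk_memory_order_release);
//     int32_t v   = sk_atomic_load(&n, sk_memory_order_acquire);  // v == 42
//     int32_t old = sk_atomic_fetch_add(&n, 1);                   // returns 42; n is now 43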

// A little wrapper class for small T (think, builtins: int, float, void*) to
// ensure they're always used atomically.  This is our stand-in for std::atomic<T>.
// !!! Please _really_ know what you're doing if you change default_memory_order. !!!
template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst>
class SkAtomic : SkNoncopyable {
public:
    SkAtomic() {}
    explicit SkAtomic(const T& val) : fVal(val) {}

    // It is essential we return by value rather than by const&.  fVal may change at any time.
    T load(sk_memory_order mo = default_memory_order) const {
        return sk_atomic_load(&fVal, mo);
    }

    void store(const T& val, sk_memory_order mo = default_memory_order) {
        sk_atomic_store(&fVal, val, mo);
    }

    // Alias for .load(default_memory_order).
    operator T() const {
        return this->load();
    }

    // Alias for .store(v, default_memory_order).
    T operator=(const T& v) {
        this->store(v);
        return v;
    }

    T fetch_add(const T& val, sk_memory_order mo = default_memory_order) {
        return sk_atomic_fetch_add(&fVal, val, mo);
    }

    T fetch_sub(const T& val, sk_memory_order mo = default_memory_order) {
        return sk_atomic_fetch_sub(&fVal, val, mo);
    }

    bool compare_exchange(T* expected, const T& desired,
                          sk_memory_order success = default_memory_order,
                          sk_memory_order failure = default_memory_order) {
        return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
    }

private:
    T fVal;
};
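
// A minimal usage sketch of SkAtomic (illustrative only):
//
//     SkAtomic<int32_t> counter(0);
//     counter.fetch_add(1);       // seq_cst by default; counter is now 1
//     int32_t n = counter;        // operator T(): a seq_cst load
//
//     int32_t expected = 1;
//     if (counter.compare_exchange(&expected, 2)) {
//         // counter was 1 and is now 2; on failure, expected holds the observed value.
//     }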

// ~~~~~~~~ Implementations ~~~~~~~~~

template <typename T>
T sk_atomic_load(const T* ptr, sk_memory_order mo) {
    SkASSERT(mo == sk_memory_order_relaxed ||
             mo == sk_memory_order_seq_cst ||
             mo == sk_memory_order_acquire ||
             mo == sk_memory_order_consume);
    const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr);
    return std::atomic_load_explicit(ap, (std::memory_order)mo);
}

template <typename T>
void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
    SkASSERT(mo == sk_memory_order_relaxed ||
             mo == sk_memory_order_seq_cst ||
             mo == sk_memory_order_release);
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    std::atomic_store_explicit(ap, val, (std::memory_order)mo);
}

template <typename T>
T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) {
    // All values of mo are valid.
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo);
}

template <typename T>
T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) {
    // All values of mo are valid.
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_fetch_sub_explicit(ap, val, (std::memory_order)mo);
}

template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
                                sk_memory_order success,
                                sk_memory_order failure) {
    // All values of success are valid.
    SkASSERT(failure == sk_memory_order_relaxed ||
             failure == sk_memory_order_seq_cst ||
             failure == sk_memory_order_acquire ||
             failure == sk_memory_order_consume);
    // The failure ordering may not be stronger than the success ordering;
    // the enumerator order makes <= a cheap approximation of that rule.
    SkASSERT(failure <= success);
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_compare_exchange_strong_explicit(ap, expected, desired,
                                                        (std::memory_order)success,
                                                        (std::memory_order)failure);
}

template <typename T>
T sk_atomic_exchange(T* ptr, T val, sk_memory_order mo) {
    // All values of mo are valid.
    std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
    return std::atomic_exchange_explicit(ap, val, (std::memory_order)mo);
}

// ~~~~~~~~ Legacy APIs ~~~~~~~~~

// From here down we have shims for our old atomics API, to be weaned off of.
// We use the default sequentially-consistent memory order to make things simple
// and to match the practical reality of our old _sync and _win implementations.

inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); }
inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); }
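
// Both shims have fetch semantics: they return the previous value, e.g.
// (illustrative):
//
//     int32_t refs = 1;
//     int32_t prev = sk_atomic_inc(&refs);   // prev == 1, refs == 2
//     prev = sk_atomic_dec(&refs);           // prev == 2, refs == 1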

#endif//SkAtomics_DEFINED