/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_sync_DEFINED
#define SkAtomics_sync_DEFINED

// This file is mostly a shim. We'd like to delete it. Please don't put much
// effort into maintaining it, and if you find bugs in it, the right fix is to
// delete this file and upgrade your compiler to something that supports
// __atomic builtins or std::atomic.
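//
// For reference only, a std::atomic-based replacement would look roughly like
// the sketch below. The names are illustrative, not part of Skia; a real port
// would use std::atomic<T> members directly rather than free functions.
//
//     #include <atomic>
//
//     template <typename T>
//     T sk_atomic_load_std(const std::atomic<T>* ptr, std::memory_order mo) {
//         return ptr->load(mo);
//     }
//
//     template <typename T>
//     void sk_atomic_store_std(std::atomic<T>* ptr, T val, std::memory_order mo) {
//         ptr->store(val, mo);
//     }
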
static inline void barrier(sk_memory_order mo) {
    asm volatile("" : : : "memory"); // Prevents the compiler from reordering code.
#if SK_CPU_X86
    // On x86, we generally don't need an extra memory barrier for loads or stores.
    if (sk_memory_order_seq_cst == mo) { __sync_synchronize(); }
#else
    // On other platforms (e.g. ARM) we do unless the memory order is relaxed.
    if (sk_memory_order_relaxed != mo) { __sync_synchronize(); }
#endif
}

// These barriers only support our majority use cases: acquire and relaxed loads, release stores.
// For anything more complicated, please consider deleting this file and upgrading your compiler.

template <typename T>
T sk_atomic_load(const T* ptr, sk_memory_order mo) {
    T val = *ptr;
    barrier(mo);
    return val;
}

template <typename T>
void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
    barrier(mo);
    *ptr = val;
}
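
// Illustrative only (gReady, gPayload, and use() are hypothetical, not part of
// this file): the "majority use case" above is a release store that publishes
// data which an acquire load later observes, e.g.
//
//     static int  gPayload;
//     static bool gReady;
//
//     // writer thread
//     gPayload = 42;                                             // plain write
//     sk_atomic_store(&gReady, true, sk_memory_order_release);   // publish
//
//     // reader thread
//     if (sk_atomic_load(&gReady, sk_memory_order_acquire)) {    // observe flag
//         use(gPayload);  // guaranteed to see 42 once gReady reads as true
//     }
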
template <typename T>
T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order) {
    return __sync_fetch_and_add(ptr, val);
}

template <typename T>
T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order) {
    return __sync_fetch_and_sub(ptr, val);
}
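
// Illustrative only (ref()/unref() are hypothetical helpers, and the full
// sk_memory_order enum is assumed to mirror std::memory_order): both functions
// above ignore the requested order and rely on the fully-fenced __sync
// builtins, which is stronger than needed but correct. A typical use is
// reference counting, e.g.
//
//     void ref(int32_t* refCnt) {
//         sk_atomic_fetch_add(refCnt, int32_t(1), sk_memory_order_relaxed);
//     }
//     bool unref(int32_t* refCnt) {
//         // acq_rel: the thread doing the last unref must see all writes made while refs were held.
//         return 1 == sk_atomic_fetch_sub(refCnt, int32_t(1), sk_memory_order_acq_rel);
//     }
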
template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, sk_memory_order, sk_memory_order) {
    T prev = __sync_val_compare_and_swap(ptr, *expected, desired);
    if (prev == *expected) {
        return true;
    }
    *expected = prev;
    return false;
}
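
// Illustrative only (setMax() is a hypothetical helper): on failure the
// function above writes the value it actually found back into *expected,
// which makes retry loops straightforward, e.g. atomically raising a maximum:
//
//     void setMax(int32_t* maxSoFar, int32_t candidate) {
//         int32_t prev = sk_atomic_load(maxSoFar, sk_memory_order_relaxed);
//         while (prev < candidate &&
//                !sk_atomic_compare_exchange(maxSoFar, &prev, candidate,
//                                            sk_memory_order_relaxed, sk_memory_order_relaxed)) {
//             // CAS failed: prev now holds the latest value, so re-check and retry.
//         }
//     }
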
template <typename T>
T sk_atomic_exchange(T* ptr, T val, sk_memory_order) {
    // There is no __sync exchange. Emulate it with a CAS loop.
    // The load and CAS pass sk_memory_order_seq_cst explicitly, since the
    // templates defined above take the memory order as a required argument.
    T prev;
    do {
        prev = sk_atomic_load(ptr, sk_memory_order_seq_cst);
    } while (!sk_atomic_compare_exchange(ptr, &prev, val,
                                         sk_memory_order_seq_cst, sk_memory_order_seq_cst));
    return prev;
}

#endif//SkAtomics_sync_DEFINED