skia2/include/ports/SkAtomics_sync.h
herb cd7f035974 Revert of Parallel cache - preliminary (patchset #24 id:460001 of https://codereview.chromium.org/1264103003/ )
Reason for revert:
Breaks DrMemory in the chrome roll.

Original issue's description:
> Parallel cache.
>
> TBR=reed@google.com
>
> BUG=skia:1330,528560
>
> Committed: https://skia.googlesource.com/skia/+/6f2a486040cb25465990196c229feb47e668e87f
>
> Committed: https://skia.googlesource.com/skia/+/bf2988833e5a36c6b430da6fdd2cfebd0015adec
>
> Committed: https://skia.googlesource.com/skia/+/014ffdb01ea5317614a1569efc30c50f06434222

TBR=reed@google.com,mtklein@google.com,mtklein@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=skia:1330,528560

Review URL: https://codereview.chromium.org/1345903002
2015-09-15 15:15:41 -07:00


/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_sync_DEFINED
#define SkAtomics_sync_DEFINED

// This file is mostly a shim. We'd like to delete it. Please don't put much
// effort into maintaining it, and if you find bugs in it, the right fix is to
// delete this file and upgrade your compiler to something that supports
// __atomic builtins or std::atomic.

static inline void barrier(sk_memory_order mo) {
    asm volatile("" : : : "memory");  // Prevents the compiler from reordering code.
#if SK_CPU_X86
    // On x86, we generally don't need an extra memory barrier for loads or stores.
    if (sk_memory_order_seq_cst == mo) { __sync_synchronize(); }
#else
    // On other platforms (e.g. ARM) we do unless the memory order is relaxed.
    if (sk_memory_order_relaxed != mo) { __sync_synchronize(); }
#endif
}

// These barriers only support our majority use cases: acquire and relaxed loads, release stores.
// For anything more complicated, please consider deleting this file and upgrading your compiler.

template <typename T>
T sk_atomic_load(const T* ptr, sk_memory_order mo) {
    T val = *ptr;
    barrier(mo);
    return val;
}

template <typename T>
void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
    barrier(mo);
    *ptr = val;
}

template <typename T>
T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order) {
    return __sync_fetch_and_add(ptr, val);
}

template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, sk_memory_order, sk_memory_order) {
    T prev = __sync_val_compare_and_swap(ptr, *expected, desired);
    if (prev == *expected) {
        return true;
    }
    *expected = prev;
    return false;
}

template <typename T>
T sk_atomic_exchange(T* ptr, T val, sk_memory_order) {
    // There is no __sync exchange. Emulate it with a CAS loop.
    // (The memory-order arguments omitted from these two calls are supplied by the
    // defaults on the declarations in SkAtomics.h, which is what includes this file.)
    T prev;
    do {
        prev = sk_atomic_load(ptr);
    } while (!sk_atomic_compare_exchange(ptr, &prev, val));
    return prev;
}

#endif//SkAtomics_sync_DEFINED
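
For context, here is a minimal caller-side sketch of how these shims are meant to be used, per the comment above: acquire loads pair with release stores, and the memory-order parameters on the __sync-based functions are effectively ignored. The names below (gPayload, gReady, publish_payload, read_payload) are hypothetical illustrations, not part of this file or of Skia.

// Hypothetical usage sketch -- not part of SkAtomics_sync.h.
#include "SkAtomics.h"

static int  gPayload = 0;
static bool gReady   = false;

void publish_payload(int value) {
    gPayload = value;                                         // Plain write...
    sk_atomic_store(&gReady, true, sk_memory_order_release);  // ...published by a release store.
}

bool read_payload(int* out) {
    if (sk_atomic_load(&gReady, sk_memory_order_acquire)) {   // Acquire load pairs with the release store.
        *out = gPayload;                                      // The plain value is now safe to read.
        return true;
    }
    return false;
}

On this __sync shim the release store is just a compiler barrier followed by a plain store (plus __sync_synchronize() on non-x86), and the acquire load is a plain load followed by the same barrier, which is exactly the "acquire/relaxed loads, release stores" subset the comment in the file promises.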