remove sk_atomic_load and sk_atomic_store

Change-Id: I6eb4469df0315283f96631ff6023170fa80aaa8a
Reviewed-on: https://skia-review.googlesource.com/c/174281
Reviewed-by: Brian Osman <brianosman@google.com>
Commit-Queue: Mike Klein <mtklein@google.com>
Mike Klein 2018-12-04 09:31:31 -05:00 committed by Skia Commit-Bot
parent 9603395183
commit 820e79b86c
5 changed files with 36 additions and 55 deletions
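For readers skimming the diff: the migration is mechanical. Fields that were plain integers read and written through the sk_atomic_* free functions become std::atomic<T> members, and callers invoke load/store/fetch_add directly with the same memory orders. A minimal before/after sketch, with hypothetical names rather than code from this patch:

#include <atomic>
#include <cstdint>

// Before: a plain field driven through the free-function wrappers.
//   uint32_t fCount;
//   uint32_t get() const     { return sk_atomic_load(&fCount, std::memory_order_relaxed); }
//   void     set(uint32_t n) { sk_atomic_store(&fCount, n, std::memory_order_relaxed); }

// After: the field itself is atomic and its member functions are used directly.
struct Example {
    std::atomic<uint32_t> fCount{0};

    uint32_t get() const     { return fCount.load(std::memory_order_relaxed); }
    void     set(uint32_t n) { fCount.store(n, std::memory_order_relaxed); }
    uint32_t bump()          { return fCount.fetch_add(1, std::memory_order_relaxed); }
};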


@@ -16,25 +16,6 @@
// Please use types from <atomic> for any new code.
// That's all this file ends up doing under the hood.
- template <typename T>
- T sk_atomic_load(const T* ptr, std::memory_order mo = std::memory_order_seq_cst) {
- SkASSERT(mo == std::memory_order_relaxed ||
- mo == std::memory_order_seq_cst ||
- mo == std::memory_order_acquire ||
- mo == std::memory_order_consume);
- const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr);
- return std::atomic_load_explicit(ap, mo);
- }
- template <typename T>
- void sk_atomic_store(T* ptr, T val, std::memory_order mo = std::memory_order_seq_cst) {
- SkASSERT(mo == std::memory_order_relaxed ||
- mo == std::memory_order_seq_cst ||
- mo == std::memory_order_release);
- std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr);
- return std::atomic_store_explicit(ap, val, mo);
- }
template <typename T>
T sk_atomic_fetch_add(T* ptr, T val, std::memory_order mo = std::memory_order_seq_cst) {
// All values of mo are valid.
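As the removed bodies above show, the helpers were thin shims that reinterpret_cast a plain T* to std::atomic<T>* and assert that the requested memory order is legal for the operation. Those constraints carry over unchanged when calling std::atomic directly; a small standalone sketch (not Skia code) of the orders each operation accepts:

#include <atomic>
#include <cstdio>

int main() {
    std::atomic<int> v{0};

    v.store(41, std::memory_order_release);        // stores: relaxed, release, or seq_cst
    int got = v.load(std::memory_order_acquire);   // loads: relaxed, consume, acquire, or seq_cst
    v.fetch_add(1, std::memory_order_relaxed);     // read-modify-write ops accept any order

    std::printf("%d %d\n", got, v.load());         // prints "41 42"
    return 0;
}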


@@ -5,6 +5,7 @@
* found in the LICENSE file.
*/
+ #include "SkAtomics.h"
#include "SkBitmapCache.h"
#include "SkMutex.h"
#include "SkPixelRef.h"


@@ -10,9 +10,9 @@
#ifndef SkTraceEvent_DEFINED
#define SkTraceEvent_DEFINED
- #include "SkAtomics.h"
#include "SkEventTracer.h"
#include "SkTraceEventCommon.h"
+ #include <atomic>
////////////////////////////////////////////////////////////////////////////////
// Implementation specific tracing API definitions.
@@ -70,11 +70,6 @@
#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
SkEventTracer::GetInstance()->updateTraceEventDuration
- #define TRACE_EVENT_API_ATOMIC_WORD intptr_t
- #define TRACE_EVENT_API_ATOMIC_LOAD(var) sk_atomic_load(&var, std::memory_order_relaxed)
- #define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
- sk_atomic_store(&var, value, std::memory_order_relaxed)
// Defines visibility for classes in trace_event.h
#define TRACE_EVENT_API_CLASS_EXPORT SK_API
@@ -101,15 +96,15 @@
#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
category_group, atomic, category_group_enabled) \
category_group_enabled = \
- reinterpret_cast<const uint8_t*>(TRACE_EVENT_API_ATOMIC_LOAD(atomic)); \
+ reinterpret_cast<const uint8_t*>(atomic.load(std::memory_order_relaxed)); \
if (!category_group_enabled) { \
category_group_enabled = TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
- TRACE_EVENT_API_ATOMIC_STORE(atomic, \
- reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(category_group_enabled)); \
+ atomic.store(reinterpret_cast<intptr_t>(category_group_enabled), \
+ std::memory_order_relaxed); \
}
#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
- static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
+ static std::atomic<intptr_t> INTERNAL_TRACE_EVENT_UID(atomic){0}; \
const uint8_t* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
TRACE_CATEGORY_PREFIX category_group, \

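The rewritten macros above keep the original fast path: a relaxed load of a per-call-site cached pointer, with a one-time slow lookup and a relaxed store to fill the cache on first use. A standalone sketch of that pattern with hypothetical names (lookup_category stands in for the real TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED, and a function-local static plays the role of the per-call-site INTERNAL_TRACE_EVENT_UID(atomic)):

#include <atomic>
#include <cstdint>
#include <cstdio>

// Hypothetical slow-path lookup; the real macro calls into SkEventTracer.
static const uint8_t* lookup_category(const char* /*name*/) {
    static const uint8_t enabled = 1;
    return &enabled;
}

static const uint8_t* get_category_enabled(const char* name) {
    static std::atomic<intptr_t> cached{0};
    // Fast path: relaxed load of the cached pointer.
    auto* ptr = reinterpret_cast<const uint8_t*>(cached.load(std::memory_order_relaxed));
    if (!ptr) {
        // Slow path on first use: look it up and cache it with a relaxed store.
        ptr = lookup_category(name);
        cached.store(reinterpret_cast<intptr_t>(ptr), std::memory_order_relaxed);
    }
    return ptr;
}

int main() {
    std::printf("category enabled: %d\n", *get_category_enabled("gpu"));
    return 0;
}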

@@ -10,9 +10,9 @@
#include "GrVkVulkan.h"
- #include "SkAtomics.h"
#include "SkRandom.h"
#include "SkTHash.h"
+ #include <atomic>
class GrVkGpu;
@@ -62,14 +62,14 @@ public:
SkTHashSet<const GrVkResource*, GrVkResource::Hash> fHashSet;
};
- static uint32_t fKeyCounter;
+ static std::atomic<uint32_t> fKeyCounter;
#endif
/** Default construct, initializing the reference count to 1.
*/
GrVkResource() : fRefCnt(1) {
#ifdef SK_TRACE_VK_RESOURCES
- fKey = sk_atomic_fetch_add(&fKeyCounter, 1u, std::memory_order_relaxed);
+ fKey = fKeyCounter.fetch_add(+1, std::memory_order_relaxed);
GetTrace()->add(this);
#endif
}
@@ -78,35 +78,33 @@ public:
*/
virtual ~GrVkResource() {
#ifdef SK_DEBUG
- SkASSERTF(fRefCnt == 1, "fRefCnt was %d", fRefCnt);
- fRefCnt = 0; // illegal value, to catch us if we reuse after delete
+ auto count = this->getRefCnt();
+ SkASSERTF(count == 1, "fRefCnt was %d", count);
+ fRefCnt.store(0); // illegal value, to catch us if we reuse after delete
#endif
}
#ifdef SK_DEBUG
/** Return the reference count. Use only for debugging. */
- int32_t getRefCnt() const { return fRefCnt; }
+ int32_t getRefCnt() const { return fRefCnt.load(); }
#endif
/** May return true if the caller is the only owner.
* Ensures that all previous owner's actions are complete.
*/
bool unique() const {
- if (1 == sk_atomic_load(&fRefCnt, std::memory_order_acquire)) {
- // The acquire barrier is only really needed if we return true. It
- // prevents code conditioned on the result of unique() from running
- // until previous owners are all totally done calling unref().
- return true;
- }
- return false;
+ // The acquire barrier is only really needed if we return true. It
+ // prevents code conditioned on the result of unique() from running
+ // until previous owners are all totally done calling unref().
+ return 1 == fRefCnt.load(std::memory_order_acquire);
}
/** Increment the reference count.
Must be balanced by a call to unref() or unrefAndFreeResources().
*/
void ref() const {
- SkASSERT(fRefCnt > 0);
- (void)sk_atomic_fetch_add(&fRefCnt, +1, std::memory_order_relaxed); // No barrier required.
+ SkASSERT(this->getRefCnt() > 0);
+ (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); // No barrier required.
}
/** Decrement the reference count. If the reference count is 1 before the
@@ -115,10 +113,10 @@ public:
Any GPU data associated with this resource will be freed before it's deleted.
*/
void unref(const GrVkGpu* gpu) const {
- SkASSERT(fRefCnt > 0);
+ SkASSERT(this->getRefCnt() > 0);
SkASSERT(gpu);
// A release here acts in place of all releases we "should" have been doing in ref().
- if (1 == sk_atomic_fetch_add(&fRefCnt, -1, std::memory_order_acq_rel)) {
+ if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
// Like unique(), the acquire is only needed on success, to make sure
// code in internal_dispose() doesn't happen before the decrement.
this->internal_dispose(gpu);
@@ -127,9 +125,9 @@ public:
/** Unref without freeing GPU data. Used only when we're abandoning the resource */
void unrefAndAbandon() const {
- SkASSERT(fRefCnt > 0);
+ SkASSERT(this->getRefCnt() > 0);
// A release here acts in place of all releases we "should" have been doing in ref().
- if (1 == sk_atomic_fetch_add(&fRefCnt, -1, std::memory_order_acq_rel)) {
+ if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
// Like unique(), the acquire is only needed on success, to make sure
// code in internal_dispose() doesn't happen before the decrement.
this->internal_dispose();
@@ -138,7 +136,7 @@ public:
#ifdef SK_DEBUG
void validate() const {
- SkASSERT(fRefCnt > 0);
+ SkASSERT(this->getRefCnt() > 0);
}
#endif
@@ -176,8 +174,11 @@ private:
#ifdef SK_TRACE_VK_RESOURCES
GetTrace()->remove(this);
#endif
- SkASSERT(0 == fRefCnt);
- fRefCnt = 1;
+ #ifdef SK_DEBUG
+ SkASSERT(0 == this->getRefCnt());
+ fRefCnt.store(1);
+ #endif
delete this;
}
@@ -189,12 +190,15 @@ private:
#ifdef SK_TRACE_VK_RESOURCES
GetTrace()->remove(this);
#endif
- SkASSERT(0 == fRefCnt);
- fRefCnt = 1;
+ #ifdef SK_DEBUG
+ SkASSERT(0 == this->getRefCnt());
+ fRefCnt.store(1);
+ #endif
delete this;
}
- mutable int32_t fRefCnt;
+ mutable std::atomic<int32_t> fRefCnt;
#ifdef SK_TRACE_VK_RESOURCES
uint32_t fKey;
#endif
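The memory-ordering scheme is unchanged by the port: ref() can use a relaxed increment because adding an owner needs no ordering, while unref() decrements with acq_rel so the last owner both publishes its writes (release) and observes every other owner's writes (acquire) before tearing the object down. A simplified, self-contained sketch of that scheme, using a hypothetical class rather than GrVkResource itself:

#include <atomic>
#include <cassert>
#include <cstdint>
#include <cstdio>

class RefCounted {
public:
    RefCounted() : fRefCnt(1) {}
    ~RefCounted() { assert(1 == fRefCnt.load()); }

    void ref() const {
        assert(fRefCnt.load() > 0);
        fRefCnt.fetch_add(+1, std::memory_order_relaxed);  // no ordering needed to add an owner
    }

    void unref() const {
        assert(fRefCnt.load() > 0);
        // release publishes this owner's writes; the acquire half matters only
        // for the final decrement, right before the object is destroyed.
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            fRefCnt.store(1);  // mirrors internal_dispose(): keep the destructor's sanity check happy
            delete this;
        }
    }

    bool unique() const {
        // As in GrVkResource::unique(), the acquire only matters when this returns true.
        return 1 == fRefCnt.load(std::memory_order_acquire);
    }

private:
    mutable std::atomic<int32_t> fRefCnt;
};

int main() {
    auto* r = new RefCounted;
    r->ref();
    std::printf("unique? %d\n", r->unique());  // 0: two owners
    r->unref();
    std::printf("unique? %d\n", r->unique());  // 1: one owner left
    r->unref();                                // final unref frees the object
    return 0;
}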


@@ -17,7 +17,7 @@
#include "GrVkUtil.h"
#ifdef SK_TRACE_VK_RESOURCES
- uint32_t GrVkResource::fKeyCounter = 0;
+ std::atomic<uint32_t> GrVkResource::fKeyCounter{0};
#endif
GrVkResourceProvider::GrVkResourceProvider(GrVkGpu* gpu)
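fKeyCounter exists only to hand out distinct debug keys, so a relaxed fetch_add is all it needs; the key is an identifier, not a synchronization point. A tiny standalone sketch of that idiom, with hypothetical names rather than the actual Skia declarations:

#include <atomic>
#include <cstdint>
#include <cstdio>

// Stand-ins for GrVkResource::fKeyCounter and its use in the constructor.
static std::atomic<uint32_t> gKeyCounter{0};

static uint32_t next_key() {
    // fetch_add returns the previous value, so concurrent callers each get a
    // distinct key without any locking.
    return gKeyCounter.fetch_add(1, std::memory_order_relaxed);
}

int main() {
    uint32_t a = next_key();
    uint32_t b = next_key();
    uint32_t c = next_key();
    std::printf("%u %u %u\n", a, b, c);  // prints "0 1 2"
    return 0;
}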