Convert SkRefCnt to std::atomic.

This enables removing the more complicated atomic shims from SkAtomics.h.

TBR=reed
This doesn't actually change any API.

CQ_EXTRA_TRYBOTS=client.skia:Test-Ubuntu-GCC-GCE-CPU-AVX2-x86_64-Release-TSAN-Trybot,Test-Ubuntu-GCC-Golo-GPU-GT610-x86_64-Release-TSAN-Trybot

Review URL: https://codereview.chromium.org/1867863002
bungeman 2016-04-08 06:58:51 -07:00 committed by Commit bot
parent 6a5d7139ff
commit 2c4bd0798e
9 changed files with 77 additions and 79 deletions
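
As an aside, the shape of the conversion can be sketched standalone. The class below is illustrative only (ToyRefCnt and its main() are made-up names, not Skia API): the count becomes a std::atomic<int32_t>, ref() uses a relaxed increment, and unref() uses an acq_rel decrement so the final owner's acquire orders destruction after every other owner's release.

#include <atomic>
#include <cstdint>

class ToyRefCnt {
public:
    ToyRefCnt() : fRefCnt(1) {}

    void ref() const {
        // Taking another reference needs no ordering.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    void unref() const {
        // acq_rel: the release publishes this owner's writes; the acquire taken
        // on the final decrement orders the delete after all those releases.
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            delete this;
        }
    }

protected:
    virtual ~ToyRefCnt() {}

private:
    mutable std::atomic<int32_t> fRefCnt;
};

int main() {
    ToyRefCnt* obj = new ToyRefCnt();  // starts with one reference
    obj->ref();
    obj->unref();
    obj->unref();                      // final unref deletes the object
    return 0;
}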


@@ -9,6 +9,7 @@
#ifndef SkPathRef_DEFINED
#define SkPathRef_DEFINED
#include "../private/SkAtomics.h"
#include "../private/SkTDArray.h"
#include "SkMatrix.h"
#include "SkPoint.h"


@@ -8,9 +8,9 @@
#ifndef SkRefCnt_DEFINED
#define SkRefCnt_DEFINED
#include "../private/SkAtomics.h"
#include "../private/SkTLogic.h"
#include "SkTypes.h"
#include <atomic>
#include <functional>
#include <memory>
#include <utility>
@@ -37,21 +37,28 @@ public:
*/
virtual ~SkRefCntBase() {
#ifdef SK_DEBUG
SkASSERTF(fRefCnt == 1, "fRefCnt was %d", fRefCnt);
fRefCnt = 0; // illegal value, to catch us if we reuse after delete
SkASSERTF(getRefCnt() == 1, "fRefCnt was %d", getRefCnt());
// illegal value, to catch us if we reuse after delete
fRefCnt.store(0, std::memory_order_relaxed);
#endif
}
#ifdef SK_DEBUG
/** Return the reference count. Use only for debugging. */
int32_t getRefCnt() const { return fRefCnt; }
int32_t getRefCnt() const {
return fRefCnt.load(std::memory_order_relaxed);
}
void validate() const {
SkASSERT(getRefCnt() > 0);
}
#endif
/** May return true if the caller is the only owner.
* Ensures that all previous owner's actions are complete.
*/
bool unique() const {
if (1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire)) {
if (1 == fRefCnt.load(std::memory_order_acquire)) {
// The acquire barrier is only really needed if we return true. It
// prevents code conditioned on the result of unique() from running
// until previous owners are all totally done calling unref().
@@ -68,11 +75,12 @@ public:
// go to zero, but not below, prior to reusing the object. This breaks
// the use of unique() on such objects and as such should be removed
// once the Android code is fixed.
SkASSERT(fRefCnt >= 0);
SkASSERT(getRefCnt() >= 0);
#else
SkASSERT(fRefCnt > 0);
SkASSERT(getRefCnt() > 0);
#endif
(void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed); // No barrier required.
// No barrier required.
(void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
}
/** Decrement the reference count. If the reference count is 1 before the
@@ -80,21 +88,15 @@ public:
the object needs to have been allocated via new, and not on the stack.
*/
void unref() const {
SkASSERT(fRefCnt > 0);
SkASSERT(getRefCnt() > 0);
// A release here acts in place of all releases we "should" have been doing in ref().
if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
// Like unique(), the acquire is only needed on success, to make sure
// code in internal_dispose() doesn't happen before the decrement.
this->internal_dispose();
}
}
#ifdef SK_DEBUG
void validate() const {
SkASSERT(fRefCnt > 0);
}
#endif
protected:
/**
* Allow subclasses to call this if they've overridden internal_dispose
@@ -104,8 +106,8 @@ protected:
*/
void internal_dispose_restore_refcnt_to_1() const {
#ifdef SK_DEBUG
SkASSERT(0 == fRefCnt);
fRefCnt = 1;
SkASSERT(0 == getRefCnt());
fRefCnt.store(1, std::memory_order_relaxed);
#endif
}
@@ -122,7 +124,7 @@ private:
// and conditionally call SkRefCnt::internal_dispose().
friend class SkWeakRefCnt;
mutable int32_t fRefCnt;
mutable std::atomic<int32_t> fRefCnt;
typedef SkNoncopyable INHERITED;
};
@@ -215,25 +217,29 @@ template <typename Derived>
class SkNVRefCnt : SkNoncopyable {
public:
SkNVRefCnt() : fRefCnt(1) {}
~SkNVRefCnt() { SkASSERTF(1 == fRefCnt, "NVRefCnt was %d", fRefCnt); }
~SkNVRefCnt() { SkASSERTF(1 == getRefCnt(), "NVRefCnt was %d", getRefCnt()); }
// Implementation is pretty much the same as SkRefCntBase. All required barriers are the same:
// - unique() needs acquire when it returns true, and no barrier if it returns false;
// - ref() doesn't need any barrier;
// - unref() needs a release barrier, and an acquire if it's going to call delete.
bool unique() const { return 1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire); }
void ref() const { (void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed); }
bool unique() const { return 1 == fRefCnt.load(std::memory_order_acquire); }
void ref() const { (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); }
void unref() const {
if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
SkDEBUGCODE(fRefCnt = 1;) // restore the 1 for our destructor's assert
if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
// restore the 1 for our destructor's assert
SkDEBUGCODE(fRefCnt.store(1, std::memory_order_relaxed));
delete (const Derived*)this;
}
}
void deref() const { this->unref(); }
private:
mutable int32_t fRefCnt;
mutable std::atomic<int32_t> fRefCnt;
int32_t getRefCnt() const {
return fRefCnt.load(std::memory_order_relaxed);
}
};
///////////////////////////////////////////////////////////////////////////////////////////////////

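The barrier requirements spelled out in the comments above (acquire in unique() only when it returns true, no ordering in ref(), acq_rel in unref()) can be seen in isolation in this standalone sketch; it is illustrative only, with made-up names (Shared, payload), not Skia code.

#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

struct Shared {
    std::atomic<int32_t> refCnt{2};    // two owners
    int payload = 0;

    bool unique() const { return 1 == refCnt.load(std::memory_order_acquire); }
    void unref()        { (void)refCnt.fetch_add(-1, std::memory_order_acq_rel); }
};

int main() {
    Shared s;
    std::thread other([&s] {
        s.payload = 42;                // plain write by the other owner...
        s.unref();                     // ...published by the release half of acq_rel
    });
    while (!s.unique()) {}             // spin until we are the only owner left
    assert(s.payload == 42);           // acquire in unique() makes the write visible
    other.join();
    return 0;
}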

@@ -159,32 +159,4 @@ inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_a
inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); }
inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) {
return sk_atomic_compare_exchange(ptr, &expected, desired);
}
inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) {
(void)sk_atomic_compare_exchange(ptr, &expected, desired);
return expected;
}
inline int32_t sk_atomic_conditional_inc(int32_t* ptr) {
int32_t prev = sk_atomic_load(ptr);
do {
if (0 == prev) {
break;
}
} while(!sk_atomic_compare_exchange(ptr, &prev, prev+1));
return prev;
}
template <typename T>
T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); }
template <typename T>
void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); }
inline void sk_membar_acquire__after_atomic_dec() {}
inline void sk_membar_acquire__after_atomic_conditional_inc() {}
#endif//SkAtomics_DEFINED
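
For orientation, the deleted shims map onto std::atomic roughly as sketched below (illustrative free functions with made-up names, not Skia API); sk_atomic_conditional_inc in particular becomes the compare_exchange_weak loop added to SkWeakRefCnt.h further down.

#include <atomic>
#include <cstdint>

template <typename T>
T acquire_load(const std::atomic<T>& v) {            // was sk_acquire_load
    return v.load(std::memory_order_acquire);
}

template <typename T>
void release_store(std::atomic<T>& v, T val) {       // was sk_release_store
    v.store(val, std::memory_order_release);
}

inline bool cas(std::atomic<int32_t>& v, int32_t expected, int32_t desired) {
    // was sk_atomic_cas: true if the value was exchanged
    return v.compare_exchange_strong(expected, desired);
}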


@@ -83,7 +83,7 @@ static void sk_once_slow(bool* done, Lock* lock, void (*f)(Arg), Arg arg) {
//
// We'll use this in the fast path to make sure f(arg)'s effects are
// observable whenever we observe *done == true.
sk_release_store(done, true);
sk_atomic_store(done, true, sk_memory_order_release);
}
lock->release();
}
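
The release-store/acquire-load pairing the comment above relies on looks like this in a self-contained once-flag sketch (illustrative only; the real SkOnce keeps its own lock and the sk_atomic_* calls shown in the diff): any thread that observes the flag as true on the fast path also sees every side effect made before the release store.

#include <atomic>
#include <mutex>

static std::atomic<bool> gDone{false};
static std::mutex gLock;
static int gValue = 0;                 // stand-in for f(arg)'s side effects

void compute_once() {
    if (gDone.load(std::memory_order_acquire)) {  // fast path
        return;                                   // gValue is fully visible here
    }
    std::lock_guard<std::mutex> guard(gLock);     // slow path: one thread computes
    if (!gDone.load(std::memory_order_relaxed)) { // re-check under the lock
        gValue = 42;
        // Release makes gValue visible to any thread that later sees gDone == true.
        gDone.store(true, std::memory_order_release);
    }
}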


@@ -9,7 +9,7 @@
#define SkWeakRefCnt_DEFINED
#include "SkRefCnt.h"
#include "../private/SkAtomics.h"
#include <atomic>
/** \class SkWeakRefCnt
@@ -62,22 +62,39 @@ public:
*/
virtual ~SkWeakRefCnt() {
#ifdef SK_DEBUG
SkASSERT(fWeakCnt == 1);
fWeakCnt = 0;
SkASSERT(getWeakCnt() == 1);
fWeakCnt.store(0, std::memory_order_relaxed);
#endif
}
/** Return the weak reference count.
*/
int32_t getWeakCnt() const { return fWeakCnt; }
#ifdef SK_DEBUG
/** Return the weak reference count. */
int32_t getWeakCnt() const {
return fWeakCnt.load(std::memory_order_relaxed);
}
void validate() const {
this->INHERITED::validate();
SkASSERT(fWeakCnt > 0);
SkASSERT(getWeakCnt() > 0);
}
#endif
private:
/** If fRefCnt is 0, returns 0.
* Otherwise increments fRefCnt, acquires, and returns the old value.
*/
int32_t atomic_conditional_acquire_strong_ref() const {
int32_t prev = fRefCnt.load(std::memory_order_relaxed);
do {
if (0 == prev) {
break;
}
} while(!fRefCnt.compare_exchange_weak(prev, prev+1, std::memory_order_acquire,
std::memory_order_relaxed));
return prev;
}
public:
/** Creates a strong reference from a weak reference, if possible. The
caller must already be an owner. If try_ref() returns true the owner
is in possession of an additional strong reference. Both the original
@@ -86,10 +103,9 @@ public:
reference is in the same state as before the call.
*/
bool SK_WARN_UNUSED_RESULT try_ref() const {
if (sk_atomic_conditional_inc(&fRefCnt) != 0) {
if (atomic_conditional_acquire_strong_ref() != 0) {
// Acquire barrier (L/SL), if not provided above.
// Prevents subsequent code from happening before the increment.
sk_membar_acquire__after_atomic_conditional_inc();
return true;
}
return false;
@@ -99,9 +115,10 @@ public:
weak_unref().
*/
void weak_ref() const {
SkASSERT(fRefCnt > 0);
SkASSERT(fWeakCnt > 0);
sk_atomic_inc(&fWeakCnt); // No barrier required.
SkASSERT(getRefCnt() > 0);
SkASSERT(getWeakCnt() > 0);
// No barrier required.
(void)fWeakCnt.fetch_add(+1, std::memory_order_relaxed);
}
/** Decrement the weak reference count. If the weak reference count is 1
@@ -110,15 +127,14 @@ public:
not on the stack.
*/
void weak_unref() const {
SkASSERT(fWeakCnt > 0);
// Release barrier (SL/S), if not provided below.
if (sk_atomic_dec(&fWeakCnt) == 1) {
// Acquire barrier (L/SL), if not provided above.
// Prevents code in destructor from happening before the decrement.
sk_membar_acquire__after_atomic_dec();
SkASSERT(getWeakCnt() > 0);
// A release here acts in place of all releases we "should" have been doing in ref().
if (1 == fWeakCnt.fetch_add(-1, std::memory_order_acq_rel)) {
// Like try_ref(), the acquire is only needed on success, to make sure
// code in internal_dispose() doesn't happen before the decrement.
#ifdef SK_DEBUG
// so our destructor won't complain
fWeakCnt = 1;
fWeakCnt.store(1, std::memory_order_relaxed);
#endif
this->INHERITED::internal_dispose();
}
@@ -128,7 +144,7 @@ public:
is the case all future calls to try_ref() will return false.
*/
bool weak_expired() const {
return fRefCnt == 0;
return fRefCnt.load(std::memory_order_relaxed) == 0;
}
protected:
@@ -151,7 +167,7 @@ private:
}
/* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
mutable int32_t fWeakCnt;
mutable std::atomic<int32_t> fWeakCnt;
typedef SkRefCnt INHERITED;
};
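
The conditional increment that try_ref() builds on can also be exercised standalone; the toy below is illustrative only (ToyWeakTarget and try_acquire are made-up names). It bumps the strong count only while it is still non-zero and, like the code above, only needs acquire ordering when the increment succeeds.

#include <atomic>
#include <cstdint>
#include <cstdio>

struct ToyWeakTarget {
    std::atomic<int32_t> strongCnt{1};

    // Returns the old count, incrementing only when it was non-zero.
    int32_t try_acquire() {
        int32_t prev = strongCnt.load(std::memory_order_relaxed);
        do {
            if (0 == prev) {
                break;                 // already expired; do not revive it
            }
        } while (!strongCnt.compare_exchange_weak(prev, prev + 1,
                                                  std::memory_order_acquire,
                                                  std::memory_order_relaxed));
        return prev;
    }
};

int main() {
    ToyWeakTarget t;
    if (t.try_acquire() != 0) {
        std::printf("got a strong ref, count is now %d\n",
                    (int)t.strongCnt.load(std::memory_order_relaxed));
    }
    return 0;
}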


@@ -5,6 +5,7 @@
* found in the LICENSE file.
*/
#include "SkAtomics.h"
#include "SkRWBuffer.h"
#include "SkStream.h"


@@ -8,12 +8,13 @@
#ifndef GrBatch_DEFINED
#define GrBatch_DEFINED
#include <new>
#include "../private/SkAtomics.h"
#include "GrNonAtomicRef.h"
#include "SkRect.h"
#include "SkString.h"
#include <new>
class GrCaps;
class GrBatchFlushState;
class GrRenderTarget;


@@ -71,7 +71,7 @@ static void test_weakRefCnt(skiatest::Reporter* reporter) {
thing4.join();
REPORTER_ASSERT(reporter, ref->unique());
REPORTER_ASSERT(reporter, ref->getWeakCnt() == 1);
SkDEBUGCODE(REPORTER_ASSERT(reporter, ref->getWeakCnt() == 1));
ref->unref();
}


@@ -5,6 +5,7 @@
* found in the LICENSE file.
*/
#include "SkAtomics.h"
#include "SkGraphics.h"
#include "SkPaint.h"
#include "SkTLS.h"