/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
|
|
|
|
|
|
|
|
#ifndef SkRefCnt_DEFINED
#define SkRefCnt_DEFINED

#include "../private/SkAtomics.h"
#include "../private/SkTLogic.h"
#include "SkTypes.h"
#include <functional>
#include <memory>
#include <utility>

#define SK_SUPPORT_TRANSITION_TO_SP_INTERFACES
|
|
|
|
|
2013-10-16 15:15:58 +00:00
|
|
|
/** \class SkRefCntBase

    SkRefCntBase is the base class for objects that may be shared by multiple
    objects. When an existing owner wants to share a reference, it calls ref().
    When an owner wants to release its reference, it calls unref(). When the
    shared object's reference count goes to zero as the result of an unref()
    call, its (virtual) destructor is called. It is an error for the
    destructor to be called explicitly (or via the object going out of scope on
    the stack or calling delete) if getRefCnt() > 1.
*/
class SK_API SkRefCntBase : SkNoncopyable {
public:
    /** Default construct, initializing the reference count to 1.
    */
    SkRefCntBase() : fRefCnt(1) {}

    /** Destruct, asserting that the reference count is 1.
    */
    virtual ~SkRefCntBase() {
#ifdef SK_DEBUG
        SkASSERTF(fRefCnt == 1, "fRefCnt was %d", fRefCnt);
        fRefCnt = 0;    // illegal value, to catch us if we reuse after delete
#endif
    }

#ifdef SK_DEBUG
    /** Return the reference count. Use only for debugging. */
    int32_t getRefCnt() const { return fRefCnt; }
#endif

    /** May return true if the caller is the only owner.
     *  Ensures that all previous owner's actions are complete.
     */
    bool unique() const {
        // fRefCnt is a plain int32_t; all cross-thread access goes through the
        // sk_atomic_* helpers so the memory ordering is chosen per call site.
        if (1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true.  It
            // prevents code conditioned on the result of unique() from running
            // until previous owners are all totally done calling unref().
            return true;
        }
        return false;
    }

    /** Increment the reference count. Must be balanced by a call to unref().
    */
    void ref() const {
#ifdef SK_BUILD_FOR_ANDROID_FRAMEWORK
        // Android employs some special subclasses that enable the fRefCnt to
        // go to zero, but not below, prior to reusing the object.  This breaks
        // the use of unique() on such objects and as such should be removed
        // once the Android code is fixed.
        SkASSERT(fRefCnt >= 0);
#else
        SkASSERT(fRefCnt > 0);
#endif
        (void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed);  // No barrier required.
    }

    /** Decrement the reference count. If the reference count is 1 before the
        decrement, then delete the object. Note that if this is the case, then
        the object needs to have been allocated via new, and not on the stack.
    */
    void unref() const {
        SkASSERT(fRefCnt > 0);
        // A release here acts in place of all releases we "should" have been doing in ref().
        if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
            // Like unique(), the acquire is only needed on success, to make sure
            // code in internal_dispose() doesn't happen before the decrement.
            this->internal_dispose();
        }
    }

#ifdef SK_DEBUG
    /** Debug-only sanity check: the object must still be alive (count > 0). */
    void validate() const {
        SkASSERT(fRefCnt > 0);
    }
#endif

protected:
    /**
     *  Allow subclasses to call this if they've overridden internal_dispose
     *  so they can reset fRefCnt before the destructor is called. Should only
     *  be called right before calling through to inherited internal_dispose()
     *  or before calling the destructor.
     */
    void internal_dispose_restore_refcnt_to_1() const {
#ifdef SK_DEBUG
        // Restore the 1 so the debug assert in ~SkRefCntBase() passes.
        SkASSERT(0 == fRefCnt);
        fRefCnt = 1;
#endif
    }

private:
    /**
     *  Called when the ref count goes to 0.
     */
    virtual void internal_dispose() const {
        this->internal_dispose_restore_refcnt_to_1();
        delete this;
    }

    // The following friends are those which override internal_dispose()
    // and conditionally call SkRefCnt::internal_dispose().
    friend class SkWeakRefCnt;

    // mutable so that ref()/unref() can be const member functions.
    mutable int32_t fRefCnt;

    typedef SkNoncopyable INHERITED;
};
|
|
|
|
|
2013-10-25 18:40:24 +00:00
|
|
|
#ifdef SK_REF_CNT_MIXIN_INCLUDE
// It is the responsibility of the following include to define the type SkRefCnt.
// This SkRefCnt should normally derive from SkRefCntBase.
#include SK_REF_CNT_MIXIN_INCLUDE
#else
// Default: SkRefCnt is simply SkRefCntBase with no extra mixin behavior.
class SK_API SkRefCnt : public SkRefCntBase { };
#endif
|
|
|
|
|
2011-07-15 15:25:22 +00:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
/** Helper macro to safely assign one SkRefCnt[TS]* to another, checking for
    null on each side of the assignment, and ensuring that ref() is called
    before unref(), in case the two pointers point to the same object.
*/
#define SkRefCnt_SafeAssign(dst, src)   \
    do {                                \
        if (src) src->ref();            \
        if (dst) dst->unref();          \
        dst = src;                      \
    } while (0)
|
|
|
|
|
|
|
|
|
2015-10-01 19:28:49 +00:00
|
|
|
/** Increment the reference count of the argument and pass it through.
    The pointer must not be nullptr; use SkSafeRef() when it might be.
*/
template <typename T> static inline T* SkRef(T* ptr) {
    SkASSERT(ptr);   // a null argument here is a caller bug
    ptr->ref();
    return ptr;
}
|
|
|
|
|
|
|
|
/** If the argument is non-null, call ref() on it; returns the argument either way.
*/
template <typename T> static inline T* SkSafeRef(T* ptr) {
    if (ptr != nullptr) {
        ptr->ref();
    }
    return ptr;
}
|
|
|
|
|
|
|
|
/** If the argument is non-null, call unref() on it; a no-op on nullptr.
*/
template <typename T> static inline void SkSafeUnref(T* ptr) {
    if (ptr != nullptr) {
        ptr->unref();
    }
}
|
|
|
|
|
2013-09-09 13:38:37 +00:00
|
|
|
/** Unref the pointee (if any) and reset the caller's pointer variable to nullptr. */
template<typename T> static inline void SkSafeSetNull(T*& ptr) {
    if (nullptr != ptr) {
        ptr->unref();
        ptr = nullptr;
    }
}
|
|
|
|
|
2011-07-15 15:25:22 +00:00
|
|
|
///////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
2015-10-01 19:28:49 +00:00
|
|
|
/** Function object that unref()s its argument; usable as a deleter type. */
template <typename T> struct SkTUnref {
    void operator()(T* obj) { obj->unref(); }
};
|
|
|
|
|
2011-02-25 18:10:29 +00:00
|
|
|
/**
 *  Utility class that simply unref's its argument in the destructor.
 *  Implemented on top of std::unique_ptr with SkTUnref as the deleter, so the
 *  usual get()/reset()/release() vocabulary applies.
 */
template <typename T> class SkAutoTUnref : public std::unique_ptr<T, SkTUnref<T>> {
public:
    // Adopts obj (no ref() is taken); unref() runs when this goes out of scope.
    explicit SkAutoTUnref(T* obj = nullptr) : std::unique_ptr<T, SkTUnref<T>>(obj) {}

    // Implicit conversion to the bare pointer, for drop-in use where a T* is expected.
    operator T*() const { return this->get(); }

#if defined(SK_BUILD_FOR_ANDROID_FRAMEWORK)
    // Legacy alias for release(). Need to update graphics/Shader.cpp.
    T* detach() { return this->release(); }
#endif

    // Android's std::unique_ptr's operator bool() is sometimes not explicit...
    // so override it with our own explicitly explicit version.
    explicit operator bool() const { return this->get() != nullptr; }
};
|
2013-11-18 16:03:59 +00:00
|
|
|
// Can't use the #define trick below to guard a bare SkAutoTUnref(...) because it's templated. :(

/** Non-template convenience wrapper for SkAutoTUnref<SkRefCnt>. */
class SkAutoUnref : public SkAutoTUnref<SkRefCnt> {
public:
    SkAutoUnref(SkRefCnt* obj) : SkAutoTUnref<SkRefCnt>(obj) {}
};
// Guard against declaring an unnamed temporary (which would unref immediately).
#define SkAutoUnref(...) SK_REQUIRE_LOCAL_VAR(SkAutoUnref)
|
2008-12-17 15:59:43 +00:00
|
|
|
|
2014-11-20 17:18:31 +00:00
|
|
|
// This is a variant of SkRefCnt that's Not Virtual, so weighs 4 bytes instead of 8 or 16.
// There's only benefit to using this if the deriving class does not otherwise need a vtable.
// CRTP: Derived must be the class that inherits from SkNVRefCnt<Derived>, so that
// unref() can delete through the correct (non-virtual) type.
template <typename Derived>
class SkNVRefCnt : SkNoncopyable {
public:
    SkNVRefCnt() : fRefCnt(1) {}
    ~SkNVRefCnt() { SkASSERTF(1 == fRefCnt, "NVRefCnt was %d", fRefCnt); }

    // Implementation is pretty much the same as SkRefCntBase. All required barriers are the same:
    //   - unique() needs acquire when it returns true, and no barrier if it returns false;
    //   - ref() doesn't need any barrier;
    //   - unref() needs a release barrier, and an acquire if it's going to call delete.

    bool unique() const { return 1 == sk_atomic_load(&fRefCnt, sk_memory_order_acquire); }
    void ref() const { (void)sk_atomic_fetch_add(&fRefCnt, +1, sk_memory_order_relaxed); }
    void unref() const {
        if (1 == sk_atomic_fetch_add(&fRefCnt, -1, sk_memory_order_acq_rel)) {
            SkDEBUGCODE(fRefCnt = 1;)   // restore the 1 for our destructor's assert
            delete (const Derived*)this;
        }
    }
    // Alias for unref().
    void  deref() const { this->unref(); }

private:
    mutable int32_t fRefCnt;
};
|
|
|
|
|
2016-03-01 15:28:51 +00:00
|
|
|
///////////////////////////////////////////////////////////////////////////////////////////////////
|
|
|
|
|
|
|
|
/**
 *  Shared pointer class to wrap classes that support a ref()/unref() interface.
 *
 *  This can be used for classes inheriting from SkRefCnt, but it also works for other
 *  classes that match the interface, but have different internal choices: e.g. the hosted class
 *  may have its ref/unref be thread-safe, but that is not assumed/imposed by sk_sp.
 */
template <typename T> class sk_sp {
    /** Supports safe bool idiom. Obsolete with explicit operator bool. */
    using unspecified_bool_type = T* sk_sp::*;
public:
    using element_type = T;

    sk_sp() : fPtr(nullptr) {}
    sk_sp(std::nullptr_t) : fPtr(nullptr) {}

    /**
     *  Shares the underlying object by calling ref(), so that both the argument and the newly
     *  created sk_sp both have a reference to it.
     */
    sk_sp(const sk_sp<T>& that) : fPtr(SkSafeRef(that.get())) {}
    // Converting copy: only participates when U* is implicitly convertible to T*.
    template <typename U, typename = skstd::enable_if_t<skstd::is_convertible<U*, T*>::value>>
    sk_sp(const sk_sp<U>& that) : fPtr(SkSafeRef(that.get())) {}

    /**
     *  Move the underlying object from the argument to the newly created sk_sp. Afterwards only
     *  the new sk_sp will have a reference to the object, and the argument will point to null.
     *  No call to ref() or unref() will be made.
     */
    sk_sp(sk_sp<T>&& that) : fPtr(that.release()) {}
    template <typename U, typename = skstd::enable_if_t<skstd::is_convertible<U*, T*>::value>>
    sk_sp(sk_sp<U>&& that) : fPtr(that.release()) {}

    /**
     *  Adopt the bare pointer into the newly created sk_sp.
     *  No call to ref() or unref() will be made.
     */
    explicit sk_sp(T* obj) : fPtr(obj) {}

    /**
     *  Calls unref() on the underlying object pointer.
     */
    ~sk_sp() {
        SkSafeUnref(fPtr);
        SkDEBUGCODE(fPtr = nullptr);
    }

    sk_sp<T>& operator=(std::nullptr_t) { this->reset(); return *this; }

    /**
     *  Shares the underlying object referenced by the argument by calling ref() on it. If this
     *  sk_sp previously had a reference to an object (i.e. not null) it will call unref() on that
     *  object.
     */
    sk_sp<T>& operator=(const sk_sp<T>& that) {
        // ref-before-reset keeps self-assignment safe.
        this->reset(SkSafeRef(that.get()));
        return *this;
    }
    template <typename U, typename = skstd::enable_if_t<skstd::is_convertible<U*, T*>::value>>
    sk_sp<T>& operator=(const sk_sp<U>& that) {
        this->reset(SkSafeRef(that.get()));
        return *this;
    }

    /**
     *  Move the underlying object from the argument to the sk_sp. If the sk_sp previously held
     *  a reference to another object, unref() will be called on that object. No call to ref()
     *  will be made.
     */
    sk_sp<T>& operator=(sk_sp<T>&& that) {
        this->reset(that.release());
        return *this;
    }
    template <typename U, typename = skstd::enable_if_t<skstd::is_convertible<U*, T*>::value>>
    sk_sp<T>& operator=(sk_sp<U>&& that) {
        this->reset(that.release());
        return *this;
    }

    T& operator*() const {
        SkASSERT(this->get() != nullptr);
        return *this->get();
    }

    // MSVC 2013 does not work correctly with explicit operator bool.
    // https://chromium-cpp.appspot.com/#core-blacklist
    // When explicit operator bool can be used, remove operator! and operator unspecified_bool_type.
    //explicit operator bool() const { return this->get() != nullptr; }
    operator unspecified_bool_type() const { return this->get() ? &sk_sp::fPtr : nullptr; }
    bool operator!() const { return this->get() == nullptr; }

    T* get() const { return fPtr; }
    T* operator->() const { return fPtr; }

    /**
     *  Adopt the new bare pointer, and call unref() on any previously held object (if not null).
     *  No call to ref() will be made.
     */
    void reset(T* ptr = nullptr) {
        // Calling fPtr->unref() may call this->~() or this->reset(T*).
        // http://wg21.cmeerw.net/lwg/issue998
        // http://wg21.cmeerw.net/lwg/issue2262
        // So update fPtr before the unref to avoid touching freed state.
        T* oldPtr = fPtr;
        fPtr = ptr;
        SkSafeUnref(oldPtr);
    }

    /**
     *  Return the bare pointer, and set the internal object pointer to nullptr.
     *  The caller must assume ownership of the object, and manage its reference count directly.
     *  No call to unref() will be made.
     */
    T* SK_WARN_UNUSED_RESULT release() {
        T* ptr = fPtr;
        fPtr = nullptr;
        return ptr;
    }

    void swap(sk_sp<T>& that) /*noexcept*/ {
        using std::swap;
        swap(fPtr, that.fPtr);
    }

private:
    T* fPtr;
};
|
|
|
|
|
2016-03-08 16:35:23 +00:00
|
|
|
/** ADL-findable free swap for sk_sp; forwards to the member swap. */
template <typename T> inline void swap(sk_sp<T>& a, sk_sp<T>& b) /*noexcept*/ {
    a.swap(b);
}
|
|
|
|
|
|
|
|
// Equality compares the stored pointers, not the pointees.
template <typename T, typename U> inline bool operator==(const sk_sp<T>& a, const sk_sp<U>& b) {
    return a.get() == b.get();
}
template <typename T> inline bool operator==(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
    return !a;
}
template <typename T> inline bool operator==(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
    return !b;
}

template <typename T, typename U> inline bool operator!=(const sk_sp<T>& a, const sk_sp<U>& b) {
    return a.get() != b.get();
}
template <typename T> inline bool operator!=(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
    return static_cast<bool>(a);
}
template <typename T> inline bool operator!=(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
    return static_cast<bool>(b);
}
|
|
|
|
|
|
|
|
// Ordering goes through std::less on the pointers' common type, which (unlike
// a raw < on unrelated pointers) yields a defined total order.
template <typename T, typename U> inline bool operator<(const sk_sp<T>& a, const sk_sp<U>& b) {
    // Provide defined total order on sk_sp.
    // http://wg21.cmeerw.net/lwg/issue1297
    // http://wg21.cmeerw.net/lwg/issue1401 .
    return std::less<skstd::common_type_t<T*, U*>>()(a.get(), b.get());
}
template <typename T> inline bool operator<(const sk_sp<T>& a, std::nullptr_t) {
    return std::less<T*>()(a.get(), nullptr);
}
template <typename T> inline bool operator<(std::nullptr_t, const sk_sp<T>& b) {
    return std::less<T*>()(nullptr, b.get());
}

// The remaining comparisons are all derived from operator< in the usual way.
template <typename T, typename U> inline bool operator<=(const sk_sp<T>& a, const sk_sp<U>& b) {
    return !(b < a);
}
template <typename T> inline bool operator<=(const sk_sp<T>& a, std::nullptr_t) {
    return !(nullptr < a);
}
template <typename T> inline bool operator<=(std::nullptr_t, const sk_sp<T>& b) {
    return !(b < nullptr);
}

template <typename T, typename U> inline bool operator>(const sk_sp<T>& a, const sk_sp<U>& b) {
    return b < a;
}
template <typename T> inline bool operator>(const sk_sp<T>& a, std::nullptr_t) {
    return nullptr < a;
}
template <typename T> inline bool operator>(std::nullptr_t, const sk_sp<T>& b) {
    return b < nullptr;
}

template <typename T, typename U> inline bool operator>=(const sk_sp<T>& a, const sk_sp<U>& b) {
    return !(a < b);
}
template <typename T> inline bool operator>=(const sk_sp<T>& a, std::nullptr_t) {
    return !(a < nullptr);
}
template <typename T> inline bool operator>=(std::nullptr_t, const sk_sp<T>& b) {
    return !(nullptr < b);
}
|
|
|
|
|
2016-03-02 16:06:20 +00:00
|
|
|
template <typename T, typename... Args>
|
|
|
|
sk_sp<T> sk_make_sp(Args&&... args) {
|
|
|
|
return sk_sp<T>(new T(std::forward<Args>(args)...));
|
|
|
|
}
|
|
|
|
|
2016-03-08 20:54:48 +00:00
|
|
|
#ifdef SK_SUPPORT_TRANSITION_TO_SP_INTERFACES
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Returns a sk_sp wrapping the provided ptr AND calls ref on it (if not null).
|
|
|
|
*
|
|
|
|
* This is different than the semantics of the constructor for sk_sp, which just wraps the ptr,
|
|
|
|
* effectively "adopting" it.
|
|
|
|
*
|
|
|
|
* This function may be helpful while we convert callers from ptr-based to sk_sp-based parameters.
|
|
|
|
*/
|
|
|
|
template <typename T> sk_sp<T> sk_ref_sp(T* obj) {
|
|
|
|
return sk_sp<T>(SkSafeRef(obj));
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
2008-12-17 15:59:43 +00:00
|
|
|
#endif
|