/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkRefCnt_DEFINED
#define SkRefCnt_DEFINED

#include "include/core/SkTypes.h"

#include <atomic>
#include <cstddef>
#include <functional>
#include <memory>
#include <ostream>
#include <type_traits>
#include <utility>

/** \class SkRefCntBase

    SkRefCntBase is the base class for objects that may be shared by multiple
    objects. When an existing owner wants to share a reference, it calls ref().
    When an owner wants to release its reference, it calls unref(). When the
    shared object's reference count goes to zero as the result of an unref()
    call, its (virtual) destructor is called. It is an error for the
    destructor to be called explicitly (or via the object going out of scope on
    the stack or calling delete) if getRefCnt() > 1.
*/
class SK_API SkRefCntBase {
public:
    /** Default construct, initializing the reference count to 1.
    */
    SkRefCntBase() : fRefCnt(1) {}

    /** Destruct, asserting that the reference count is 1.
    */
    virtual ~SkRefCntBase() {
    #ifdef SK_DEBUG
        SkASSERTF(this->getRefCnt() == 1, "fRefCnt was %d", this->getRefCnt());
        // Set to an illegal value, to catch us if we reuse after delete.
        fRefCnt.store(0, std::memory_order_relaxed);
    #endif
    }

    /** May return true if the caller is the only owner.
     *  Ensures that all previous owners' actions are complete.
     */
    bool unique() const {
        if (1 == fRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of unique() from running
            // until previous owners are all totally done calling unref().
            return true;
        }
        return false;
    }

    /** Increment the reference count. Must be balanced by a call to unref().
    */
    void ref() const {
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    /** Decrement the reference count. If the reference count is 1 before the
        decrement, then delete the object. Note that if this is the case, then
        the object needs to have been allocated via new, and not on the stack.
    */
    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        // A release here acts in place of all releases we "should" have been doing in ref().
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            // Like unique(), the acquire is only needed on success, to make sure
            // code in internal_dispose() doesn't happen before the decrement.
            this->internal_dispose();
        }
    }

private:

#ifdef SK_DEBUG
    /** Return the reference count. Use only for debugging. */
    int32_t getRefCnt() const {
        return fRefCnt.load(std::memory_order_relaxed);
    }
#endif

    /**
     *  Called when the ref count goes to 0.
     */
    virtual void internal_dispose() const {
    #ifdef SK_DEBUG
        SkASSERT(0 == this->getRefCnt());
        fRefCnt.store(1, std::memory_order_relaxed);
    #endif
        delete this;
    }

    // The following friends are those which override internal_dispose()
    // and conditionally call SkRefCnt::internal_dispose().
    friend class SkWeakRefCnt;

    mutable std::atomic<int32_t> fRefCnt;

    SkRefCntBase(SkRefCntBase&&) = delete;
    SkRefCntBase(const SkRefCntBase&) = delete;
    SkRefCntBase& operator=(SkRefCntBase&&) = delete;
    SkRefCntBase& operator=(const SkRefCntBase&) = delete;
};
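
// A minimal sketch of the manual ref/unref protocol (MyObject is a
// hypothetical SkRefCnt subclass, shown only for illustration):
//
//     MyObject* obj = new MyObject;   // constructed with a reference count of 1
//     obj->ref();                     // a second owner takes a reference: count == 2
//     obj->unref();                   // one owner releases: count == 1
//     obj->unref();                   // last owner releases: count == 0, obj is deleted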

#ifdef SK_REF_CNT_MIXIN_INCLUDE
// It is the responsibility of the following include to define the type SkRefCnt.
// This SkRefCnt should normally derive from SkRefCntBase.
#include SK_REF_CNT_MIXIN_INCLUDE
#else
class SK_API SkRefCnt : public SkRefCntBase {
    // "#include SK_REF_CNT_MIXIN_INCLUDE" doesn't work with this build system.
    #if defined(SK_BUILD_FOR_GOOGLE3)
public:
    void deref() const { this->unref(); }
    #endif
};
#endif

///////////////////////////////////////////////////////////////////////////////

/** Call obj->ref() and return obj. The obj must not be nullptr.
 */
template <typename T> static inline T* SkRef(T* obj) {
    SkASSERT(obj);
    obj->ref();
    return obj;
}

/** Check if the argument is non-null, and if so, call obj->ref() and return obj.
 */
template <typename T> static inline T* SkSafeRef(T* obj) {
    if (obj) {
        obj->ref();
    }
    return obj;
}

/** Check if the argument is non-null, and if so, call obj->unref().
 */
template <typename T> static inline void SkSafeUnref(T* obj) {
    if (obj) {
        obj->unref();
    }
}
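
// A sketch of where the null-safe variants help: assigning into a member
// pointer that may hold null (fObj and setObject() are hypothetical).
// Reffing the new value before unreffing the old one keeps self-assignment
// safe:
//
//     void setObject(SkRefCnt* obj) {
//         SkSafeRef(obj);      // ref the incoming pointer, if any
//         SkSafeUnref(fObj);   // release the old pointer, if any
//         fObj = obj;
//     }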

///////////////////////////////////////////////////////////////////////////////

// This is a variant of SkRefCnt that's Not Virtual, so it weighs 4 bytes instead of 8 or 16.
// There's only benefit to using this if the deriving class does not otherwise need a vtable.
template <typename Derived>
class SkNVRefCnt {
public:
    SkNVRefCnt() : fRefCnt(1) {}
    ~SkNVRefCnt() {
    #ifdef SK_DEBUG
        int rc = fRefCnt.load(std::memory_order_relaxed);
        SkASSERTF(rc == 1, "NVRefCnt was %d", rc);
    #endif
    }

    // Implementation is pretty much the same as SkRefCntBase. All required barriers are the same:
    //   - unique() needs acquire when it returns true, and no barrier if it returns false;
    //   - ref() doesn't need any barrier;
    //   - unref() needs a release barrier, and an acquire if it's going to call delete.

    bool unique() const { return 1 == fRefCnt.load(std::memory_order_acquire); }
    void ref() const { (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); }
    void unref() const {
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            // Restore the 1 for our destructor's assert.
            SkDEBUGCODE(fRefCnt.store(1, std::memory_order_relaxed));
            delete (const Derived*)this;
        }
    }
    void deref() const { this->unref(); }

private:
    mutable std::atomic<int32_t> fRefCnt;

    SkNVRefCnt(SkNVRefCnt&&) = delete;
    SkNVRefCnt(const SkNVRefCnt&) = delete;
    SkNVRefCnt& operator=(SkNVRefCnt&&) = delete;
    SkNVRefCnt& operator=(const SkNVRefCnt&) = delete;
};
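
// SkNVRefCnt is used via CRTP: the deriving class names itself as Derived so
// that unref() can delete through the correct type without a virtual
// destructor. A minimal sketch (SkFoo is hypothetical):
//
//     class SkFoo : public SkNVRefCnt<SkFoo> {
//         // ... no vtable required; the count is the only per-object overhead
//     };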

///////////////////////////////////////////////////////////////////////////////////////////////////

/**
 *  Shared pointer class to wrap classes that support a ref()/unref() interface.
 *
 *  This can be used for classes inheriting from SkRefCnt, but it also works for other
 *  classes that match the interface yet make different internal choices: e.g. the hosted
 *  class may or may not make its ref/unref thread-safe; sk_sp neither assumes nor imposes that.
 */
template <typename T> class sk_sp {
public:
    using element_type = T;

    constexpr sk_sp() : fPtr(nullptr) {}
    constexpr sk_sp(std::nullptr_t) : fPtr(nullptr) {}

    /**
     *  Shares the underlying object by calling ref(), so that both the argument and the newly
     *  created sk_sp have a reference to it.
     */
    sk_sp(const sk_sp<T>& that) : fPtr(SkSafeRef(that.get())) {}
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp(const sk_sp<U>& that) : fPtr(SkSafeRef(that.get())) {}

    /**
     *  Move the underlying object from the argument to the newly created sk_sp. Afterwards only
     *  the new sk_sp will have a reference to the object, and the argument will point to null.
     *  No call to ref() or unref() will be made.
     */
    sk_sp(sk_sp<T>&& that) : fPtr(that.release()) {}
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp(sk_sp<U>&& that) : fPtr(that.release()) {}

    /**
     *  Adopt the bare pointer into the newly created sk_sp.
     *  No call to ref() or unref() will be made.
     */
    explicit sk_sp(T* obj) : fPtr(obj) {}

    /**
     *  Calls unref() on the underlying object pointer.
     */
    ~sk_sp() {
        SkSafeUnref(fPtr);
        SkDEBUGCODE(fPtr = nullptr);
    }

    sk_sp<T>& operator=(std::nullptr_t) { this->reset(); return *this; }

    /**
     *  Shares the underlying object referenced by the argument by calling ref() on it. If this
     *  sk_sp previously had a reference to an object (i.e. not null) it will call unref() on that
     *  object.
     */
    sk_sp<T>& operator=(const sk_sp<T>& that) {
        if (this != &that) {
            this->reset(SkSafeRef(that.get()));
        }
        return *this;
    }
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp<T>& operator=(const sk_sp<U>& that) {
        this->reset(SkSafeRef(that.get()));
        return *this;
    }

    /**
     *  Move the underlying object from the argument to the sk_sp. If the sk_sp previously held
     *  a reference to another object, unref() will be called on that object. No call to ref()
     *  will be made.
     */
    sk_sp<T>& operator=(sk_sp<T>&& that) {
        this->reset(that.release());
        return *this;
    }
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp<T>& operator=(sk_sp<U>&& that) {
        this->reset(that.release());
        return *this;
    }

    T& operator*() const {
        SkASSERT(this->get() != nullptr);
        return *this->get();
    }

    explicit operator bool() const { return this->get() != nullptr; }

    T* get() const { return fPtr; }
    T* operator->() const { return fPtr; }

    /**
     *  Adopt the new bare pointer, and call unref() on any previously held object (if not null).
     *  No call to ref() will be made.
     */
    void reset(T* ptr = nullptr) {
        // Calling fPtr->unref() may call this->~() or this->reset(T*).
        // http://wg21.cmeerw.net/lwg/issue998
        // http://wg21.cmeerw.net/lwg/issue2262
        T* oldPtr = fPtr;
        fPtr = ptr;
        SkSafeUnref(oldPtr);
    }

    /**
     *  Return the bare pointer, and set the internal object pointer to nullptr.
     *  The caller must assume ownership of the object, and manage its reference count directly.
     *  No call to unref() will be made.
     */
    T* SK_WARN_UNUSED_RESULT release() {
        T* ptr = fPtr;
        fPtr = nullptr;
        return ptr;
    }

    void swap(sk_sp<T>& that) /*noexcept*/ {
        using std::swap;
        swap(fPtr, that.fPtr);
    }

private:
    T* fPtr;
};
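
// A brief sketch of typical sk_sp use (SkFoo is hypothetical):
//
//     sk_sp<SkFoo> a(new SkFoo);   // adopts the initial reference: count == 1
//     sk_sp<SkFoo> b = a;          // copy shares via ref():        count == 2
//     b = nullptr;                 // b releases via unref():       count == 1
//     // when a goes out of scope its destructor unrefs, deleting the object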

template <typename T> inline void swap(sk_sp<T>& a, sk_sp<T>& b) /*noexcept*/ {
    a.swap(b);
}

template <typename T, typename U> inline bool operator==(const sk_sp<T>& a, const sk_sp<U>& b) {
    return a.get() == b.get();
}
template <typename T> inline bool operator==(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
    return !a;
}
template <typename T> inline bool operator==(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
    return !b;
}

template <typename T, typename U> inline bool operator!=(const sk_sp<T>& a, const sk_sp<U>& b) {
    return a.get() != b.get();
}
template <typename T> inline bool operator!=(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
    return static_cast<bool>(a);
}
template <typename T> inline bool operator!=(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
    return static_cast<bool>(b);
}

template <typename T, typename U> inline bool operator<(const sk_sp<T>& a, const sk_sp<U>& b) {
    // Provide defined total order on sk_sp.
    // http://wg21.cmeerw.net/lwg/issue1297
    // http://wg21.cmeerw.net/lwg/issue1401
    return std::less<typename std::common_type<T*, U*>::type>()(a.get(), b.get());
}
template <typename T> inline bool operator<(const sk_sp<T>& a, std::nullptr_t) {
    return std::less<T*>()(a.get(), nullptr);
}
template <typename T> inline bool operator<(std::nullptr_t, const sk_sp<T>& b) {
    return std::less<T*>()(nullptr, b.get());
}

template <typename T, typename U> inline bool operator<=(const sk_sp<T>& a, const sk_sp<U>& b) {
    return !(b < a);
}
template <typename T> inline bool operator<=(const sk_sp<T>& a, std::nullptr_t) {
    return !(nullptr < a);
}
template <typename T> inline bool operator<=(std::nullptr_t, const sk_sp<T>& b) {
    return !(b < nullptr);
}

template <typename T, typename U> inline bool operator>(const sk_sp<T>& a, const sk_sp<U>& b) {
    return b < a;
}
template <typename T> inline bool operator>(const sk_sp<T>& a, std::nullptr_t) {
    return nullptr < a;
}
template <typename T> inline bool operator>(std::nullptr_t, const sk_sp<T>& b) {
    return b < nullptr;
}

template <typename T, typename U> inline bool operator>=(const sk_sp<T>& a, const sk_sp<U>& b) {
    return !(a < b);
}
template <typename T> inline bool operator>=(const sk_sp<T>& a, std::nullptr_t) {
    return !(a < nullptr);
}
template <typename T> inline bool operator>=(std::nullptr_t, const sk_sp<T>& b) {
    return !(nullptr < b);
}
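
// Because operator< defines a total order via std::less, sk_sp works directly
// as a key in ordered containers. A small sketch (SkFoo is hypothetical):
//
//     std::set<sk_sp<SkFoo>> live;   // ordered by pointer value
//     live.insert(foo);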

template <typename C, typename CT, typename T>
auto operator<<(std::basic_ostream<C, CT>& os, const sk_sp<T>& sp) -> decltype(os << sp.get()) {
    return os << sp.get();
}

template <typename T, typename... Args>
sk_sp<T> sk_make_sp(Args&&... args) {
    return sk_sp<T>(new T(std::forward<Args>(args)...));
}
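
// sk_make_sp forwards its arguments to T's constructor and adopts the result,
// so the initial reference is owned by the returned sk_sp. A sketch (SkFoo
// and its int constructor are hypothetical):
//
//     sk_sp<SkFoo> foo = sk_make_sp<SkFoo>(42);   // no manual ref()/unref()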

/*
 *  Returns a sk_sp wrapping the provided ptr AND calls ref on it (if not null).
 *
 *  This is different from the semantics of the constructor for sk_sp, which just wraps the ptr,
 *  effectively "adopting" it.
 */
template <typename T> sk_sp<T> sk_ref_sp(T* obj) {
    return sk_sp<T>(SkSafeRef(obj));
}

template <typename T> sk_sp<T> sk_ref_sp(const T* obj) {
    return sk_sp<T>(const_cast<T*>(SkSafeRef(obj)));
}
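
// Adopt vs. ref, as two alternatives (obj is a hypothetical SkFoo* that
// already has one owner):
//
//     sk_sp<SkFoo> adopted(obj);             // takes over the existing reference
//     sk_sp<SkFoo> shared = sk_ref_sp(obj);  // adds a new reference of its own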

#endif