Add gr_cb class to help track command buffer uses of GrGpuResources.

Essentially, GrGpuResources now carry two counts. The original fRefCnt is
unchanged and is still used for things like deciding whether a scratch
texture can be reused. The new fCommandBufferUsageCnt tracks when a resource
is in use on a command buffer (or on the GPU in general). We now delay
calling notifyRefCntIsZero until both counts are zero.

Bug: skia:11038
Change-Id: I1df62f28e4b98e8c1a5ab2fd33d4aead19788d93
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/343098
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Brian Salomon <bsalomon@google.com>
commit 1fd8ac87dd (parent 14a25eff99)
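To illustrate the two-counter rule described in the commit message, here is a minimal standalone C++ sketch, not Skia code: DualCountedResource, its logging, and main() are invented for the example. A resource only reports "fully idle" once both the logical ref count and the command buffer usage count have reached zero, whichever of the two drops last.

// Hypothetical standalone sketch of the two-counter idea; not the Skia implementation.
#include <atomic>
#include <cstdint>
#include <cstdio>

class DualCountedResource {
public:
    void ref() const { fRefCnt.fetch_add(+1, std::memory_order_relaxed); }

    void unref() const {
        // Only report "zero" if the GPU is also done with us.
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel) &&
            0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
            this->notifyWillBeZero();
        }
    }

    void addCommandBufferUsage() const {
        fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    void removeCommandBufferUsage() const {
        // Only report "zero" if there are no logical refs left either.
        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel) &&
            0 == fRefCnt.load(std::memory_order_acquire)) {
            this->notifyWillBeZero();
        }
    }

private:
    void notifyWillBeZero() const {
        // In the real patch this is where the notifyRefCntWillBeZero()/notifyRefCntIsZero()
        // machinery runs; here we just log that the object became fully idle.
        std::printf("both counts are zero; resource can be returned to the cache\n");
    }

    mutable std::atomic<int32_t> fRefCnt{1};
    mutable std::atomic<int32_t> fCommandBufferUsageCnt{0};
};

int main() {
    DualCountedResource res;
    res.addCommandBufferUsage();     // recorded into a command buffer
    res.unref();                     // last logical ref goes away: not yet "zero"
    res.removeCommandBufferUsage();  // GPU finishes: now both counts are zero
}

The same "last count to reach zero wins" check appears in both unref() and removeCommandBufferUsage() in the patch below.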
@@ -71,6 +71,7 @@ skia_gpu_sources = [
   "$_src/gpu/GrColorInfo.h",
   "$_src/gpu/GrColorSpaceXform.cpp",
   "$_src/gpu/GrColorSpaceXform.h",
+  "$_src/gpu/GrCommandBufferRef.h",
   "$_src/gpu/GrContextThreadSafeProxy.cpp",
   "$_src/gpu/GrContextThreadSafeProxyPriv.h",
   "$_src/gpu/GrContext_Base.cpp",
src/gpu/GrCommandBufferRef.h (new file, 180 lines)
@@ -0,0 +1,180 @@
/*
 * Copyright 2020 Google LLC
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrCommandBufferRef_DEFINED
#define GrCommandBufferRef_DEFINED

#include "include/core/SkRefCnt.h"

/** Check if the argument is non-null, and if so, call obj->addCommandBufferUsage() and return obj.
 */
template <typename T> static inline T* GrSafeRefCBUsage(T* obj) {
    if (obj) {
        obj->addCommandBufferUsage();
    }
    return obj;
}

/** Check if the argument is non-null, and if so, call obj->removeCommandBufferUsage().
 */
template <typename T> static inline void GrSafeUnrefCBUsage(T* obj) {
    if (obj) {
        obj->removeCommandBufferUsage();
    }
}

/**
 * Shared pointer class to wrap classes that support an addCommandBufferUsage() and
 * removeCommandBufferUsage() interface.
 *
 * This class supports copying, moving, and assigning an sk_sp into it. In general these operations
 * do not modify the sk_sp at all; they just call addCommandBufferUsage() on the underlying object.
 *
 * This class is designed to be used by GrGpuResources that need to track when they are in use on
 * the GPU (usually via a command buffer) separately from whether there are any current logical
 * usages in Ganesh. This allows a scratch GrGpuResource to be reused for new draw calls even
 * if it is still in use on the GPU.
 */
template <typename T> class gr_cb {
public:
    using element_type = T;

    constexpr gr_cb() : fPtr(nullptr) {}
    constexpr gr_cb(std::nullptr_t) : fPtr(nullptr) {}

    /**
     * Shares the underlying object by calling addCommandBufferUsage(), so that the argument
     * and the newly created gr_cb both have a reference to it.
     */
    gr_cb(const gr_cb<T>& that) : fPtr(GrSafeRefCBUsage(that.get())) {}

    gr_cb(const sk_sp<T>& that) : fPtr(GrSafeRefCBUsage(that.get())) {}

    /**
     * Move the underlying object from the argument to the newly created gr_cb. Afterwards only
     * the new gr_cb will have a reference to the object, and the argument will point to null.
     * No call to addCommandBufferUsage() or removeCommandBufferUsage() will be made.
     */
    gr_cb(gr_cb<T>&& that) : fPtr(that.release()) {}

    /**
     * Copies the underlying object pointer from the argument to the gr_cb. It will then call
     * addCommandBufferUsage() on the new object.
     */
    gr_cb(sk_sp<T>&& that) : fPtr(GrSafeRefCBUsage(that.get())) {}

    /**
     * Calls removeCommandBufferUsage() on the underlying object pointer.
     */
    ~gr_cb() {
        GrSafeUnrefCBUsage(fPtr);
        SkDEBUGCODE(fPtr = nullptr);
    }

    gr_cb<T>& operator=(std::nullptr_t) {
        this->reset();
        return *this;
    }

    /**
     * Shares the underlying object referenced by the argument by calling addCommandBufferUsage()
     * on it. If this gr_cb previously had a reference to an object (i.e. not null) it will call
     * removeCommandBufferUsage() on that object.
     */
    gr_cb<T>& operator=(const gr_cb<T>& that) {
        if (this != &that) {
            this->reset(GrSafeRefCBUsage(that.get()));
        }
        return *this;
    }

    /**
     * Copies the underlying object pointer from the argument to the gr_cb. If the gr_cb previously
     * held a reference to another object, removeCommandBufferUsage() will be called on that
     * object. It will then call addCommandBufferUsage() on the new object.
     */
    gr_cb<T>& operator=(const sk_sp<T>& that) {
        this->reset(GrSafeRefCBUsage(that.get()));
        return *this;
    }

    /**
     * Move the underlying object from the argument to the gr_cb. If the gr_cb previously held
     * a reference to another object, removeCommandBufferUsage() will be called on that object. No
     * call to addCommandBufferUsage() will be made.
     */
    gr_cb<T>& operator=(gr_cb<T>&& that) {
        this->reset(that.release());
        return *this;
    }

    /**
     * Copies the underlying object pointer from the argument to the gr_cb. If the gr_cb previously
     * held a reference to another object, removeCommandBufferUsage() will be called on that
     * object. It will then call addCommandBufferUsage() on the new object.
     */
    gr_cb<T>& operator=(sk_sp<T>&& that) {
        this->reset(GrSafeRefCBUsage(that.get()));
        return *this;
    }

    T& operator*() const {
        SkASSERT(this->get() != nullptr);
        return *this->get();
    }

    explicit operator bool() const { return this->get() != nullptr; }

    T* get() const { return fPtr; }
    T* operator->() const { return fPtr; }

private:
    /**
     * Adopt the new bare pointer, and call removeCommandBufferUsage() on any previously held
     * object (if not null). No call to addCommandBufferUsage() will be made.
     */
    void reset(T* ptr = nullptr) {
        T* oldPtr = fPtr;
        fPtr = ptr;
        GrSafeUnrefCBUsage(oldPtr);
    }

    /**
     * Return the bare pointer, and set the internal object pointer to nullptr.
     * The caller must assume ownership of the object, and manage its reference count directly.
     * No call to removeCommandBufferUsage() will be made.
     */
    T* SK_WARN_UNUSED_RESULT release() {
        T* ptr = fPtr;
        fPtr = nullptr;
        return ptr;
    }

    T* fPtr;
};

template <typename T, typename U> inline bool operator==(const gr_cb<T>& a, const gr_cb<U>& b) {
    return a.get() == b.get();
}
template <typename T> inline bool operator==(const gr_cb<T>& a, std::nullptr_t) /*noexcept*/ {
    return !a;
}
template <typename T> inline bool operator==(std::nullptr_t, const gr_cb<T>& b) /*noexcept*/ {
    return !b;
}

template <typename T, typename U> inline bool operator!=(const gr_cb<T>& a, const gr_cb<U>& b) {
    return a.get() != b.get();
}
template <typename T> inline bool operator!=(const gr_cb<T>& a, std::nullptr_t) /*noexcept*/ {
    return static_cast<bool>(a);
}
template <typename T> inline bool operator!=(std::nullptr_t, const gr_cb<T>& b) /*noexcept*/ {
    return static_cast<bool>(b);
}

#endif
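As a usage sketch of the gr_cb class added above: FakeResource and example() are hypothetical (in Skia the wrapped types are GrGpuResource-derived classes that already provide addCommandBufferUsage()/removeCommandBufferUsage()). Copying from an sk_sp adds a command buffer usage without touching the sk_sp's ref count, moving transfers the usage, and assigning nullptr removes it.

// Hypothetical usage sketch; FakeResource stands in for a GrGpuResource-like type.
#include "include/core/SkRefCnt.h"
#include "src/gpu/GrCommandBufferRef.h"

#include <utility>

class FakeResource : public SkRefCnt {
public:
    void addCommandBufferUsage() const { ++fCommandBufferUsages; }
    void removeCommandBufferUsage() const { --fCommandBufferUsages; }
    int commandBufferUsages() const { return fCommandBufferUsages; }

private:
    mutable int fCommandBufferUsages = 0;
};

void example() {
    sk_sp<FakeResource> owner(new FakeResource);      // logical ref held by Ganesh

    gr_cb<FakeResource> inFlight(owner);              // copy from sk_sp: adds a command buffer usage
    SkASSERT(owner->commandBufferUsages() == 1);

    gr_cb<FakeResource> moved(std::move(inFlight));   // move: no extra usage added
    SkASSERT(owner->commandBufferUsages() == 1);

    moved = nullptr;                                  // removes the usage; the sk_sp ref is untouched
    SkASSERT(owner->commandBufferUsages() == 0);
}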
@@ -97,11 +97,16 @@ bool GrGpuResource::isPurgeable() const {
     // Resources in the kUnbudgetedCacheable state are never purgeable when they have a unique
     // key. The key must be removed/invalidated to make them purgeable.
     return !this->hasRef() &&
+           this->hasNoCommandBufferUsages() &&
            !(fBudgetedType == GrBudgetedType::kUnbudgetedCacheable && fUniqueKey.isValid());
 }
 
 bool GrGpuResource::hasRef() const { return this->internalHasRef(); }
 
+bool GrGpuResource::hasNoCommandBufferUsages() const {
+    return this->internalHasNoCommandBufferUsages();
+}
+
 SkString GrGpuResource::getResourceName() const {
     // Dump resource as "skia/gpu_resources/resource_#".
     SkString resourceName("skia/gpu_resources/resource_");
@@ -39,19 +39,22 @@ public:
 
     void unref() const {
         SkASSERT(this->getRefCnt() > 0);
-        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
-            // At this point we better be the only thread accessing this resource.
-            // Trick out the notifyRefCntWillBeZero() call by adding back one more ref.
-            fRefCnt.fetch_add(+1, std::memory_order_relaxed);
-            static_cast<const DERIVED*>(this)->notifyRefCntWillBeZero();
-            // notifyRefCntWillBeZero() could have done anything, including re-refing this and
-            // passing on to another thread. Take away the ref-count we re-added above and see
-            // if we're back to zero.
-            // TODO: Consider making it so that refs can't be added and merge
-            // notifyRefCntWillBeZero()/willRemoveLastRef() with notifyRefCntIsZero().
-            if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
-                static_cast<const DERIVED*>(this)->notifyRefCntIsZero();
-            }
+        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel) &&
+            this->hasNoCommandBufferUsages()) {
+            this->notifyWillBeZero();
         }
     }
 
+    void addCommandBufferUsage() const {
+        // No barrier required.
+        (void)fCommandBufferUsageCnt.fetch_add(+1, std::memory_order_relaxed);
+    }
+
+    void removeCommandBufferUsage() const {
+        SkASSERT(!this->hasNoCommandBufferUsages());
+        if (1 == fCommandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel) &&
+            0 == this->getRefCnt()) {
+            this->notifyWillBeZero();
+        }
+    }
+
@@ -62,9 +65,12 @@ public:
 protected:
     friend class GrResourceCache; // for internalHasRef
 
-    GrIORef() : fRefCnt(1) {}
+    GrIORef() : fRefCnt(1), fCommandBufferUsageCnt(0) {}
 
     bool internalHasRef() const { return SkToBool(this->getRefCnt()); }
+    bool internalHasNoCommandBufferUsages() const {
+        return SkToBool(this->hasNoCommandBufferUsages());
+    }
 
     // Privileged method that allows going from ref count = 0 to ref count = 1.
     void addInitialRef() const {
@@ -74,9 +80,35 @@ protected:
     }
 
 private:
+    void notifyWillBeZero() const {
+        // At this point we better be the only thread accessing this resource.
+        // Trick out the notifyRefCntWillBeZero() call by adding back one more ref.
+        fRefCnt.fetch_add(+1, std::memory_order_relaxed);
+        static_cast<const DERIVED*>(this)->notifyRefCntWillBeZero();
+        // notifyRefCntWillBeZero() could have done anything, including re-refing this and
+        // passing on to another thread. Take away the ref-count we re-added above and see
+        // if we're back to zero.
+        // TODO: Consider making it so that refs can't be added and merge
+        // notifyRefCntWillBeZero()/willRemoveLastRef() with notifyRefCntIsZero().
+        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
+            static_cast<const DERIVED*>(this)->notifyRefCntIsZero();
+        }
+    }
+
     int32_t getRefCnt() const { return fRefCnt.load(std::memory_order_relaxed); }
 
+    bool hasNoCommandBufferUsages() const {
+        if (0 == fCommandBufferUsageCnt.load(std::memory_order_acquire)) {
+            // The acquire barrier is only really needed if we return true. It
+            // prevents code conditioned on the result of hasNoCommandBufferUsages() from running
+            // until previous owners are all totally done calling removeCommandBufferUsage().
+            return true;
+        }
+        return false;
+    }
+
     mutable std::atomic<int32_t> fRefCnt;
+    mutable std::atomic<int32_t> fCommandBufferUsageCnt;
 
     using INHERITED = SkNoncopyable;
 };
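The comment in hasNoCommandBufferUsages() above relies on release/acquire pairing: the release half of fetch_add(-1, std::memory_order_acq_rel) in removeCommandBufferUsage() synchronizes with the acquire load, so any writes made before the last usage was dropped are visible to code that observes a zero count. A minimal standalone sketch of that pairing, not Skia code (Payload and the token field are invented for the example):

// Standalone sketch of the release/acquire pairing; not Skia code.
#include <atomic>
#include <cassert>
#include <thread>

struct Payload {
    int lastSubmittedToken = 0;                  // plain, non-atomic data
    std::atomic<int> commandBufferUsageCnt{1};
};

int main() {
    Payload p;

    std::thread gpuDone([&p] {
        p.lastSubmittedToken = 42;  // writes that happen before the count drops
        p.commandBufferUsageCnt.fetch_add(-1, std::memory_order_acq_rel);  // release half
    });

    // Spin until the acquire load observes zero; the acquire guarantees the
    // write to lastSubmittedToken above is visible here before we touch the payload.
    while (p.commandBufferUsageCnt.load(std::memory_order_acquire) != 0) {
    }
    assert(p.lastSubmittedToken == 42);

    gpuDone.join();
    return 0;
}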
@@ -233,6 +265,7 @@ protected:
 private:
     bool isPurgeable() const;
     bool hasRef() const;
+    bool hasNoCommandBufferUsages() const;
 
     /**
      * Called by the registerWithCache if the resource is available to be used as scratch.
@@ -57,6 +57,9 @@ private:
 
     /** Is the resource ref'ed */
     bool hasRef() const { return fResource->hasRef(); }
+    bool hasRefOrCommandBufferUsage() const {
+        return this->hasRef() || !fResource->hasNoCommandBufferUsages();
+    }
 
     /** Called by the cache to make the unique key invalid. */
     void removeUniqueKey() { fResource->fUniqueKey.reset(); }
@@ -388,7 +388,7 @@ void GrResourceCache::refAndMakeResourceMRU(GrGpuResource* resource) {
         fPurgeableBytes -= resource->gpuMemorySize();
         fPurgeableQueue.remove(resource);
         this->addToNonpurgeableArray(resource);
-    } else if (!resource->cacheAccess().hasRef() &&
+    } else if (!resource->cacheAccess().hasRefOrCommandBufferUsage() &&
                resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
         SkASSERT(fNumBudgetedResourcesFlushWillMakePurgeable > 0);
         fNumBudgetedResourcesFlushWillMakePurgeable--;
@@ -487,7 +487,8 @@ void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
         fBudgetedHighWaterBytes = std::max(fBudgetedBytes, fBudgetedHighWaterBytes);
         fBudgetedHighWaterCount = std::max(fBudgetedCount, fBudgetedHighWaterCount);
 #endif
-        if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
+        if (!resource->resourcePriv().isPurgeable() &&
+            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
             ++fNumBudgetedResourcesFlushWillMakePurgeable;
         }
         this->purgeAsNeeded();
@@ -495,7 +496,8 @@ void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
         SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
         --fBudgetedCount;
         fBudgetedBytes -= size;
-        if (!resource->resourcePriv().isPurgeable() && !resource->cacheAccess().hasRef()) {
+        if (!resource->resourcePriv().isPurgeable() &&
+            !resource->cacheAccess().hasRefOrCommandBufferUsage()) {
             --fNumBudgetedResourcesFlushWillMakePurgeable;
         }
     }
@@ -909,7 +911,7 @@ void GrResourceCache::validate() const {
         SkASSERT(*fNonpurgeableResources[i]->cacheAccess().accessCacheIndex() == i);
         SkASSERT(!fNonpurgeableResources[i]->wasDestroyed());
         if (fNonpurgeableResources[i]->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted &&
-            !fNonpurgeableResources[i]->cacheAccess().hasRef() &&
+            !fNonpurgeableResources[i]->cacheAccess().hasRefOrCommandBufferUsage() &&
             fNewlyPurgeableResourceForValidation != fNonpurgeableResources[i]) {
             ++numBudgetedResourcesFlushWillMakePurgeable;
         }
@@ -464,6 +464,74 @@ static void test_purge_unlocked(skiatest::Reporter* reporter) {
     REPORTER_ASSERT(reporter, 0 == cache->getResourceBytes());
 }
 
+static void test_purge_command_buffer_usage(skiatest::Reporter* reporter) {
+    Mock mock(30000);
+    GrResourceCache* cache = mock.cache();
+    GrGpu* gpu = mock.gpu();
+
+    // Create two resources w/ scratch keys.
+    TestResource* a = TestResource::CreateScratch(gpu, SkBudgeted::kYes,
+                                                  TestResource::kA_SimulatedProperty, 11);
+
+    TestResource* b = TestResource::CreateScratch(gpu, SkBudgeted::kYes,
+                                                  TestResource::kA_SimulatedProperty, 12);
+
+    REPORTER_ASSERT(reporter, 2 == TestResource::NumAlive());
+    REPORTER_ASSERT(reporter, 2 == cache->getResourceCount());
+    REPORTER_ASSERT(reporter, a->gpuMemorySize() + b->gpuMemorySize() == cache->getResourceBytes());
+
+    // Should be safe to purge without deleting the resources since we still have refs.
+    cache->purgeUnlockedResources(true);
+    REPORTER_ASSERT(reporter, 2 == TestResource::NumAlive());
+
+    // Add command buffer usages to all resources.
+    a->addCommandBufferUsage();
+    b->addCommandBufferUsage();
+
+    // Should be safe to purge without deleting the resources since we still have refs and command
+    // buffer usages.
+    cache->purgeUnlockedResources(true);
+    REPORTER_ASSERT(reporter, 2 == TestResource::NumAlive());
+
+    // Unref the first resource.
+    a->unref();
+    REPORTER_ASSERT(reporter, 2 == TestResource::NumAlive());
+    REPORTER_ASSERT(reporter, 2 == cache->getResourceCount());
+    REPORTER_ASSERT(reporter, a->gpuMemorySize() + b->gpuMemorySize() == cache->getResourceBytes());
+
+    // Should be safe to purge without deleting the resources since we still have command buffer
+    // usages and the second still has a ref.
+    cache->purgeUnlockedResources(true);
+    REPORTER_ASSERT(reporter, 2 == TestResource::NumAlive());
+
+    // Remove command buffer usages.
+    a->removeCommandBufferUsage();
+    b->removeCommandBufferUsage();
+    REPORTER_ASSERT(reporter, 2 == TestResource::NumAlive());
+    REPORTER_ASSERT(reporter, 2 == cache->getResourceCount());
+    REPORTER_ASSERT(reporter, a->gpuMemorySize() + b->gpuMemorySize() == cache->getResourceBytes());
+
+    // This purge should remove the first resource since it no longer has any refs or command
+    // buffer usages.
+    cache->purgeUnlockedResources(true);
+    REPORTER_ASSERT(reporter, 1 == TestResource::NumAlive());
+    REPORTER_ASSERT(reporter, 1 == cache->getResourceCount());
+    REPORTER_ASSERT(reporter, b->gpuMemorySize() == cache->getResourceBytes());
+
+    // Unref the second resource.
+    b->unref();
+    REPORTER_ASSERT(reporter, 1 == TestResource::NumAlive());
+    REPORTER_ASSERT(reporter, 1 == cache->getResourceCount());
+    REPORTER_ASSERT(reporter, b->gpuMemorySize() == cache->getResourceBytes());
+
+    // Purge the last resource.
+    cache->purgeUnlockedResources(false);
+
+    REPORTER_ASSERT(reporter, 0 == TestResource::NumAlive());
+    REPORTER_ASSERT(reporter, 0 == cache->getResourceCount());
+    REPORTER_ASSERT(reporter, 0 == cache->getResourceBytes());
+}
+
 static void test_budgeting(skiatest::Reporter* reporter) {
     Mock mock(300);
     GrResourceCache* cache = mock.cache();
@@ -1515,6 +1583,7 @@ DEF_GPUTEST(ResourceCacheMisc, reporter, /* options */) {
     // The below tests create their own mock contexts.
     test_no_key(reporter);
     test_purge_unlocked(reporter);
+    test_purge_command_buffer_usage(reporter);
     test_budgeting(reporter);
     test_unbudgeted(reporter);
     test_unbudgeted_to_scratch(reporter);