Change the meaning of GrBudgetedType::kUnbudgetedCacheable.

kUnbudgetedCacheable now means that the resource is never purged until its unique key is removed. This fixes an issue where a cached texture for a promise image might get purged by cache pressure. This in turn could cause Skia to call the promise image's Fulfill proc multiple times with no intervening Release calls. The balancing Release calls would occur, but the policy is that each Fulfill should be balanced by a Release *before* another Fulfill.

Update/add unit tests.

Bug: chromium:922851
Change-Id: I6411e413b3104721ca4bb6e7f07b3b73d14cbcf9
Reviewed-on: https://skia-review.googlesource.com/c/186361
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Parent: a7cb690dc8
Commit: 9bc76d96f9
@@ -37,7 +37,7 @@ class SkTraceMemoryDump;
 * morphism using CRTP). Similarly when the ref (but not necessarily pending read/write) count
 * reaches 0 DERIVED::notifyRefCountIsZero() will be called. In the case when an unref() causes both
 * the ref cnt to reach zero and the other counts are zero, notifyRefCountIsZero() will be called
- * before notifyIsPurgeable(). Moreover, if notifyRefCountIsZero() returns false then
+ * before notifyAllCntsAreZero(). Moreover, if notifyRefCountIsZero() returns false then
 * notifyAllRefCntsAreZero() won't be called at all. notifyRefCountIsZero() must return false if the
 * object may be deleted after notifyRefCntIsZero() returns.
 *
@@ -298,7 +298,8 @@ protected:

private:
-    bool isPurgeable() const { return !this->internalHasRef() && !this->internalHasPendingIO(); }
+    bool isPurgeable() const;
+    bool hasRefOrPendingIO() const;

    /**
     * Called by the registerWithCache if the resource is available to be used as scratch.
@@ -306,7 +307,7 @@
     * resources and populate the scratchKey with the key.
     * By default resources are not recycled as scratch.
     **/
-    virtual void computeScratchKey(GrScratchKey*) const { }
+    virtual void computeScratchKey(GrScratchKey*) const {}

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
@@ -316,9 +317,9 @@
    virtual size_t onGpuMemorySize() const = 0;

    /**
-     * Called by GrResourceCache when a resource transitions from being unpurgeable to purgeable.
+     * Called by GrResourceCache when a resource loses its last ref or pending IO.
     */
-    virtual void becamePurgeable() {}
+    virtual void removedLastRefOrPendingIO() {}

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const GrUniqueKey&);
@@ -500,6 +500,8 @@
        "~^PromiseImageTest$",
        "~^PromiseImageTextureReuse$",
        "~^PromiseImageTextureReuseDifferentConfig$",
+       "~^PromiseImageTextureShutdown$",
+       "~^PromiseImageTextureFullCache$",
        "~^ResourceAllocatorTest$",
        "~^RGB565TextureTest$",
        "~^RGBA4444TextureTest$",
@@ -768,6 +768,8 @@ def dm_flags(api, bot):
    match.append('~^PromiseImageTest$')
    match.append('~^PromiseImageTextureReuse$')
    match.append('~^PromiseImageTextureReuseDifferentConfig$')
+   match.append('~^PromiseImageTextureShutdown$')
+   match.append('~^PromiseImageTextureFullCache$')
    match.append('~^ResourceAllocatorTest$')
    match.append('~^RGB565TextureTest$')
    match.append('~^RGBA4444TextureTest$')
@@ -261,7 +261,8 @@ void GrContext::abandonContext() {

bool GrContext::abandoned() const {
    ASSERT_SINGLE_OWNER
-    return fDrawingManager->wasAbandoned();
+    // If called from ~GrContext(), the drawing manager may already be gone.
+    return !fDrawingManager || fDrawingManager->wasAbandoned();
}

void GrContext::releaseResourcesAndAbandonContext() {
@@ -93,6 +93,17 @@ void GrGpuResource::dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump,
    this->setMemoryBacking(traceMemoryDump, resourceName);
}

+ bool GrGpuResource::isPurgeable() const {
+     // Resources in the kUnbudgetedCacheable state are never purgeable when they have a unique
+     // key. The key must be removed/invalidated to make them purgeable.
+     return !this->hasRefOrPendingIO() &&
+            !(fBudgetedType == GrBudgetedType::kUnbudgetedCacheable && fUniqueKey.isValid());
+ }
+
+ bool GrGpuResource::hasRefOrPendingIO() const {
+     return this->internalHasRef() || this->internalHasPendingIO();
+ }
+
SkString GrGpuResource::getResourceName() const {
    // Dump resource as "skia/gpu_resources/resource_#".
    SkString resourceName("skia/gpu_resources/resource_");
@@ -145,6 +156,8 @@ void GrGpuResource::setUniqueKey(const GrUniqueKey& key) {
}

void GrGpuResource::notifyAllCntsAreZero(CntType lastCntTypeToReachZero) const {
+    GrGpuResource* mutableThis = const_cast<GrGpuResource*>(this);
+    mutableThis->removedLastRefOrPendingIO();
    if (this->wasDestroyed()) {
        // We've already been removed from the cache. Goodbye cruel world!
        delete this;
@@ -154,7 +167,6 @@ void GrGpuResource::notifyAllCntsAreZero(CntType lastCntTypeToReachZero) const {
    // We should have already handled this fully in notifyRefCntIsZero().
    SkASSERT(kRef_CntType != lastCntTypeToReachZero);

-    GrGpuResource* mutableThis = const_cast<GrGpuResource*>(this);
    static const uint32_t kFlag =
            GrResourceCache::ResourceAccess::kAllCntsReachedZero_RefNotificationFlag;
    get_resource_cache(fGpu)->resourceAccess().notifyCntReachedZero(mutableThis, kFlag);
@@ -170,6 +182,7 @@ bool GrGpuResource::notifyRefCountIsZero() const {
    uint32_t flags = GrResourceCache::ResourceAccess::kRefCntReachedZero_RefNotificationFlag;
    if (!this->internalHasPendingIO()) {
        flags |= GrResourceCache::ResourceAccess::kAllCntsReachedZero_RefNotificationFlag;
+        mutableThis->removedLastRefOrPendingIO();
    }
    get_resource_cache(fGpu)->resourceAccess().notifyCntReachedZero(mutableThis, flags);

@@ -29,18 +29,12 @@ private:
               GrBudgetedType::kBudgeted == fResource->resourcePriv().budgetedType();
    }

-    /**
-     * Called by GrResourceCache when a resource becomes purgeable regardless of whether the cache
-     * has decided to keep the resource ot purge it immediately.
-     */
-    void becamePurgeable() { fResource->becamePurgeable(); }
-
    /**
     * Called by the cache to delete the resource under normal circumstances.
     */
    void release() {
        fResource->release();
-        if (fResource->isPurgeable()) {
+        if (!fResource->hasRefOrPendingIO()) {
            delete fResource;
        }
    }
@@ -50,7 +44,7 @@ private:
     */
    void abandon() {
        fResource->abandon();
-        if (fResource->isPurgeable()) {
+        if (!fResource->hasRefOrPendingIO()) {
            delete fResource;
        }
    }
@@ -72,6 +72,8 @@ public:

    bool isPurgeable() const { return fResource->isPurgeable(); }

+    bool hasRefOrPendingIO() const { return fResource->hasRefOrPendingIO(); }
+
protected:
    ResourcePriv(GrGpuResource* resource) : fResource(resource) { }
    ResourcePriv(const ResourcePriv& that) : fResource(that.fResource) {}
@@ -251,6 +251,7 @@ public:
     */
    sk_sp<GrTextureProxy> testingOnly_createInstantiatedProxy(const GrSurfaceDesc&, GrSurfaceOrigin,
                                                              SkBackingFit, SkBudgeted);
+    sk_sp<GrTextureProxy> testingOnly_createWrapped(sk_sp<GrTexture>, GrSurfaceOrigin);

private:
    friend class GrAHardwareBufferImageGenerator; // for createWrapped
@@ -187,9 +187,6 @@ void GrResourceCache::abandonAll() {
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
-        // If these resources we're relying on a purgeable notification to release something, notify
-        // them now. They aren't in the purgeable queue but they're getting purged anyway.
-        back->cacheAccess().becamePurgeable();
        back->cacheAccess().abandon();
    }

@@ -230,9 +227,6 @@ void GrResourceCache::releaseAll() {
    while (fNonpurgeableResources.count()) {
        GrGpuResource* back = *(fNonpurgeableResources.end() - 1);
        SkASSERT(!back->wasDestroyed());
-        // If these resources we're relying on a purgeable notification to release something, notify
-        // them now. They aren't in the purgeable queue but they're getting purged anyway.
-        back->cacheAccess().becamePurgeable();
        back->cacheAccess().release();
    }

@@ -319,11 +313,14 @@ void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
        fUniqueHash.remove(resource->getUniqueKey());
    }
    resource->cacheAccess().removeUniqueKey();

    if (resource->resourcePriv().getScratchKey().isValid()) {
        fScratchMap.insert(resource->resourcePriv().getScratchKey(), resource);
    }

+    // Removing a unique key from a kUnbudgetedCacheable resource would make the resource
+    // require purging. However, the resource must be ref'ed to get here and therefore can't
+    // be purgeable. We'll purge it when the refs reach zero.
+    SkASSERT(!resource->resourcePriv().isPurgeable());
    this->validate();
}
@@ -340,7 +337,8 @@ void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey
            old->resourcePriv().isPurgeable()) {
            old->cacheAccess().release();
        } else {
-            this->removeUniqueKey(old);
+            // removeUniqueKey expects an external owner of the resource.
+            this->removeUniqueKey(sk_ref_sp(old).get());
        }
    }
    SkASSERT(nullptr == fUniqueHash.find(newKey));
@@ -412,7 +410,11 @@ void GrResourceCache::notifyCntReachedZero(GrGpuResource* resource, uint32_t fla
        return;
    }

-    SkASSERT(resource->resourcePriv().isPurgeable());
+    if (!resource->resourcePriv().isPurgeable()) {
+        this->validate();
+        return;
+    }
+
    this->removeFromNonpurgeableArray(resource);
    fPurgeableQueue.insert(resource);
    resource->cacheAccess().setTimeWhenResourceBecomePurgeable();
@@ -420,35 +422,33 @@ void GrResourceCache::notifyCntReachedZero(GrGpuResource* resource, uint32_t fla

    bool hasUniqueKey = resource->getUniqueKey().isValid();

-    {
-        SkScopeExit notifyPurgeable([resource] { resource->cacheAccess().becamePurgeable(); });
-
-        auto budgetedType = resource->resourcePriv().budgetedType();
-        if (budgetedType == GrBudgetedType::kBudgeted) {
-            // Purge the resource immediately if we're over budget
-            // Also purge if the resource has neither a valid scratch key nor a unique key.
-            bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
-            if (!this->overBudget() && hasKey) {
-                return;
-            }
-        } else {
-            // We keep unbudgeted resources with a unique key in the purgable queue of the cache so
-            // they can be reused again by the image connected to the unique key.
-            if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
-                return;
-            }
-            // Check whether this resource could still be used as a scratch resource.
-            if (!resource->resourcePriv().refsWrappedObjects() &&
-                resource->resourcePriv().getScratchKey().isValid()) {
-                // We won't purge an existing resource to make room for this one.
-                if (fBudgetedCount < fMaxCount &&
-                    fBudgetedBytes + resource->gpuMemorySize() <= fMaxBytes) {
-                    resource->resourcePriv().makeBudgeted();
-                    return;
-                }
-            }
-        }
-    }
+    GrBudgetedType budgetedType = resource->resourcePriv().budgetedType();
+    if (budgetedType == GrBudgetedType::kBudgeted) {
+        // Purge the resource immediately if we're over budget
+        // Also purge if the resource has neither a valid scratch key nor a unique key.
+        bool hasKey = resource->resourcePriv().getScratchKey().isValid() || hasUniqueKey;
+        if (!this->overBudget() && hasKey) {
+            return;
+        }
+    } else {
+        // We keep unbudgeted resources with a unique key in the purgeable queue of the cache so
+        // they can be reused again by the image connected to the unique key.
+        if (hasUniqueKey && budgetedType == GrBudgetedType::kUnbudgetedCacheable) {
+            return;
+        }
+        // Check whether this resource could still be used as a scratch resource.
+        if (!resource->resourcePriv().refsWrappedObjects() &&
+            resource->resourcePriv().getScratchKey().isValid()) {
+            // We won't purge an existing resource to make room for this one.
+            if (fBudgetedCount < fMaxCount &&
+                fBudgetedBytes + resource->gpuMemorySize() <= fMaxBytes) {
+                resource->resourcePriv().makeBudgeted();
+                return;
+            }
+        }
+    }

    SkDEBUGCODE(int beforeCount = this->getResourceCount();)
    resource->cacheAccess().release();
    // We should at least free this resource, perhaps dependent resources as well.
@@ -462,8 +462,12 @@ void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
    SkASSERT(this->isInCache(resource));

    size_t size = resource->gpuMemorySize();

-    if (GrBudgetedType::kBudgeted == resource->resourcePriv().budgetedType()) {
+    // Changing from BudgetedType::kUnbudgetedCacheable to another budgeted type could make
+    // resource become purgeable. However, we should never allow that transition. Wrapped
+    // resources are the only resources that can be in that state and they aren't allowed to
+    // transition from one budgeted state to another.
+    SkDEBUGCODE(bool wasPurgeable = resource->resourcePriv().isPurgeable());
+    if (resource->resourcePriv().budgetedType() == GrBudgetedType::kBudgeted) {
        ++fBudgetedCount;
        fBudgetedBytes += size;
#if GR_CACHE_STATS
@@ -472,9 +476,11 @@ void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
#endif
        this->purgeAsNeeded();
    } else {
+        SkASSERT(resource->resourcePriv().budgetedType() != GrBudgetedType::kUnbudgetedCacheable);
        --fBudgetedCount;
        fBudgetedBytes -= size;
    }
+    SkASSERT(wasPurgeable == resource->resourcePriv().isPurgeable());
    TRACE_COUNTER2("skia.gpu.cache", "skia budget", "used",
                   fBudgetedBytes, "free", fMaxBytes - fBudgetedBytes);

@@ -137,7 +137,7 @@ private:
        fReleaseHelper.reset();
    }

-    void becamePurgeable() override {
+    void removedLastRefOrPendingIO() override {
        if (fIdleProc) {
            fIdleProc(fIdleProcContext);
            fIdleProc = nullptr;
@@ -77,7 +77,7 @@ protected:

    // protected so that GrMockTextureRenderTarget can call this to avoid "inheritance via
    // dominance" warning.
-    void becamePurgeable() override {
+    void removedLastRefOrPendingIO() override {
        if (fIdleProc) {
            fIdleProc(fIdleProcContext);
            fIdleProc = nullptr;
@@ -196,7 +196,7 @@ private:
    }

    // We implement this to avoid the inheritance via dominance warning.
-    void becamePurgeable() override { GrMockTexture::becamePurgeable(); }
+    void removedLastRefOrPendingIO() override { GrMockTexture::removedLastRefOrPendingIO(); }

    size_t onGpuMemorySize() const override {
        int numColorSamples = this->numColorSamples();
@@ -76,7 +76,7 @@ private:
        fReleaseHelper.reset();
    }

-    void becamePurgeable() override {
+    void removedLastRefOrPendingIO() override {
        if (fIdleProc) {
            fIdleProc(fIdleProcContext);
            fIdleProc = nullptr;
@@ -262,7 +262,7 @@ void GrVkImage::Resource::notifyRemovedFromCommandBuffer() const {
    if (--fNumCommandBufferOwners || !fIdleProc) {
        return;
    }
-    if (fOwningTexture && !fOwningTexture->resourcePriv().isPurgeable()) {
+    if (fOwningTexture && fOwningTexture->resourcePriv().hasRefOrPendingIO()) {
        return;
    }
    fIdleProc(fIdleProcContext);
@@ -139,6 +139,7 @@ public:
protected:
    void releaseImage(GrVkGpu* gpu);
    void abandonImage();
+    bool hasResource() const { return fResource; }

    GrVkImageInfo fInfo;
    uint32_t fInitialQueueFamily;
@@ -120,10 +120,13 @@ GrVkTexture::~GrVkTexture() {
}

void GrVkTexture::onRelease() {
-    // When there is an idle proc, the Resource will call the proc in releaseImage() so
-    // we clear it here.
-    fIdleProc = nullptr;
-    fIdleProcContext = nullptr;
+    // We're about to be severed from our GrVkResource. If there is an idle proc we have to decide
+    // who will handle it. If the resource is still tied to a command buffer we let it handle it.
+    // Otherwise, we handle it.
+    if (this->hasResource() && this->resource()->isOwnedByCommandBuffer()) {
+        fIdleProc = nullptr;
+        fIdleProcContext = nullptr;
+    }

    // we create this and don't hand it off, so we should always destroy it
    if (fTextureView) {
@@ -137,10 +140,14 @@ void GrVkTexture::onRelease() {
}

void GrVkTexture::onAbandon() {
-    // When there is an idle proc, the Resource will call the proc in abandonImage() so
-    // we clear it here.
-    fIdleProc = nullptr;
-    fIdleProcContext = nullptr;
+    // We're about to be severed from our GrVkResource. If there is an idle proc we have to decide
+    // who will handle it. If the resource is still tied to a command buffer we let it handle it.
+    // Otherwise, we handle it.
+    if (this->hasResource() && this->resource()->isOwnedByCommandBuffer()) {
+        fIdleProc = nullptr;
+        fIdleProcContext = nullptr;
+    }

    // we create this and don't hand it off, so we should always destroy it
    if (fTextureView) {
        fTextureView->unrefAndAbandon();
@@ -172,19 +179,20 @@ void GrVkTexture::setIdleProc(IdleProc proc, void* context) {
    }
}

-void GrVkTexture::becamePurgeable() {
+void GrVkTexture::removedLastRefOrPendingIO() {
    if (!fIdleProc) {
        return;
    }
    // This is called when the GrTexture is purgeable. However, we need to check whether the
    // Resource is still owned by any command buffers. If it is then it will call the proc.
-    auto* resource = this->resource();
-    SkASSERT(resource);
-    if (resource->isOwnedByCommandBuffer()) {
+    auto* resource = this->hasResource() ? this->resource() : nullptr;
+    if (resource && resource->isOwnedByCommandBuffer()) {
        return;
    }
    fIdleProc(fIdleProcContext);
    fIdleProc = nullptr;
    fIdleProcContext = nullptr;
-    resource->setIdleProc(nullptr, nullptr, nullptr);
+    if (resource) {
+        resource->setIdleProc(nullptr, nullptr, nullptr);
+    }
}
@@ -69,7 +69,7 @@ private:
                const GrVkImageView*, GrMipMapsStatus, GrBackendObjectOwnership, GrWrapCacheable,
                GrIOType);

-    void becamePurgeable() override;
+    void removedLastRefOrPendingIO() override;

    const GrVkImageView* fTextureView;
    GrTexture::IdleProc* fIdleProc = nullptr;
@@ -42,13 +42,15 @@ public:

protected:
    void onAbandon() override {
-        GrVkRenderTarget::onAbandon();
+        // In order to correctly handle calling texture idle procs, GrVkTexture must go first.
        GrVkTexture::onAbandon();
+        GrVkRenderTarget::onAbandon();
    }

    void onRelease() override {
-        GrVkRenderTarget::onRelease();
+        // In order to correctly handle calling texture idle procs, GrVkTexture must go first.
        GrVkTexture::onRelease();
+        GrVkRenderTarget::onRelease();
    }

private:
@@ -336,7 +336,6 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ReadOnlyTexture, reporter, context_info) {
}

DEF_GPUTEST(TextureIdleProcTest, reporter, options) {
-    GrContext* context;
    static const int kS = 10;

    // Helper to delete a backend texture in a GrTexture's release proc.
@@ -415,7 +414,7 @@ DEF_GPUTEST(TextureIdleProcTest, reporter, options) {
    for (int type = 0; type < sk_gpu_test::GrContextFactory::kContextTypeCnt; ++type) {
        sk_gpu_test::GrContextFactory factory;
        auto contextType = static_cast<sk_gpu_test::GrContextFactory::ContextType>(type);
-        context = factory.get(contextType);
+        GrContext* context = factory.get(contextType);
        if (!context) {
            continue;
        }
@@ -517,21 +516,92 @@ DEF_GPUTEST(TextureIdleProcTest, reporter, options) {
        // the texture it made should be idle.
        REPORTER_ASSERT(reporter, idleIDs.find(3) != idleIDs.end());

-        // Make sure we make the call during various shutdown scenarios.
-        texture = make(context, 4);
-        context->abandonContext();
-        REPORTER_ASSERT(reporter, idleIDs.find(4) != idleIDs.end());
-        factory.destroyContexts();
-        context = factory.get(contextType);
-
-        texture = make(context, 5);
-        factory.destroyContexts();
-        REPORTER_ASSERT(reporter, idleIDs.find(5) != idleIDs.end());
-        context = factory.get(contextType);
-
-        texture = make(context, 6);
-        factory.releaseResourcesAndAbandonContexts();
-        REPORTER_ASSERT(reporter, idleIDs.find(6) != idleIDs.end());
+        // Make sure we make the call during various shutdown scenarios where the texture
+        // might persist after context is destroyed, abandoned, etc. We test three
+        // variations of each scenario. One where the texture is just created. Another,
+        // where the texture has been used in a draw and then the context is flushed. And
+        // one where the the texture was drawn but the context is not flushed.
+        // In each scenario we test holding a ref beyond the context shutdown and not.
+
+        // These tests are difficult to get working with Vulkan. See http://skbug.com/8705
+        // and http://skbug.com/8275
+        GrBackendApi api = sk_gpu_test::GrContextFactory::ContextTypeBackend(contextType);
+        if (api == GrBackendApi::kVulkan) {
+            continue;
+        }
+
+        int id = 4;
+        enum class DrawType {
+            kNoDraw,
+            kDraw,
+            kDrawAndFlush,
+        };
+        for (auto drawType :
+             {DrawType::kNoDraw, DrawType::kDraw, DrawType::kDrawAndFlush}) {
+            for (bool unrefFirst : {false, true}) {
+                auto possiblyDrawAndFlush = [&context, &texture, drawType, unrefFirst] {
+                    if (drawType == DrawType::kNoDraw) {
+                        return;
+                    }
+                    SkImageInfo info = SkImageInfo::Make(kS, kS, kRGBA_8888_SkColorType,
+                                                         kPremul_SkAlphaType);
+                    auto rt = SkSurface::MakeRenderTarget(context, SkBudgeted::kNo, info, 0,
+                                                          nullptr);
+                    auto rtc =
+                            rt->getCanvas()
+                                    ->internal_private_accessTopLayerRenderTargetContext();
+                    auto proxy = context->contextPriv()
+                                         .proxyProvider()
+                                         ->testingOnly_createWrapped(
+                                                 texture, kTopLeft_GrSurfaceOrigin);
+                    rtc->drawTexture(GrNoClip(), proxy, GrSamplerState::Filter::kNearest,
+                                     SkPMColor4f(), SkRect::MakeWH(kS, kS),
+                                     SkRect::MakeWH(kS, kS), GrQuadAAFlags::kNone,
+                                     SkCanvas::kFast_SrcRectConstraint, SkMatrix::I(),
+                                     nullptr);
+                    if (drawType == DrawType::kDrawAndFlush) {
+                        context->flush();
+                    }
+                    if (unrefFirst) {
+                        texture.reset();
+                    }
+                };
+                texture = make(context, id);
+                possiblyDrawAndFlush();
+                context->abandonContext();
+                texture.reset();
+                REPORTER_ASSERT(reporter, idleIDs.find(id) != idleIDs.end());
+                factory.destroyContexts();
+                context = factory.get(contextType);
+                ++id;
+
+                // Similar to previous, but reset the texture after the context was
+                // abandoned and then destroyed.
+                texture = make(context, id);
+                possiblyDrawAndFlush();
+                context->abandonContext();
+                factory.destroyContexts();
+                texture.reset();
+                REPORTER_ASSERT(reporter, idleIDs.find(id) != idleIDs.end());
+                context = factory.get(contextType);
+                id++;
+
+                texture = make(context, id);
+                possiblyDrawAndFlush();
+                factory.destroyContexts();
+                texture.reset();
+                REPORTER_ASSERT(reporter, idleIDs.find(id) != idleIDs.end());
+                context = factory.get(contextType);
+                id++;
+
+                texture = make(context, id);
+                possiblyDrawAndFlush();
+                factory.releaseResourcesAndAbandonContexts();
+                texture.reset();
+                REPORTER_ASSERT(reporter, idleIDs.find(id) != idleIDs.end());
+                context = factory.get(contextType);
+                id++;
+            }
+        }
    }
}
@@ -17,12 +17,19 @@
using namespace sk_gpu_test;

struct PromiseTextureChecker {
-    explicit PromiseTextureChecker(const GrBackendTexture& tex)
+    // shared indicates whether the backend texture is used to fulfill more than one promise
+    // image.
+    explicit PromiseTextureChecker(const GrBackendTexture& tex, skiatest::Reporter* reporter,
+                                   bool shared)
            : fTexture(SkPromiseImageTexture::Make(tex))
+            , fReporter(reporter)
+            , fShared(shared)
            , fFulfillCount(0)
            , fReleaseCount(0)
            , fDoneCount(0) {}
    sk_sp<SkPromiseImageTexture> fTexture;
+    skiatest::Reporter* fReporter;
+    bool fShared;
    int fFulfillCount;
    int fReleaseCount;
    int fDoneCount;
@@ -37,7 +44,7 @@ struct PromiseTextureChecker {
    sk_sp<const SkPromiseImageTexture> replaceTexture(
            const GrBackendTexture& tex = GrBackendTexture()) {
        // Can't change this while in active fulfillment.
-        SkASSERT(fFulfillCount == fReleaseCount);
+        REPORTER_ASSERT(fReporter, fFulfillCount == fReleaseCount);
        auto temp = std::move(fTexture);
        fTexture = SkPromiseImageTexture::Make(tex);
        return std::move(temp);
@@ -53,7 +60,15 @@ struct PromiseTextureChecker {
        checker->fLastFulfilledTexture = checker->fTexture->backendTexture();
        return checker->fTexture;
    }
-    static void Release(void* self) { static_cast<PromiseTextureChecker*>(self)->fReleaseCount++; }
+    static void Release(void* self) {
+        auto checker = static_cast<PromiseTextureChecker*>(self);
+        checker->fReleaseCount++;
+        if (!checker->fShared) {
+            // This is only used in a single threaded fashion with a single promise image. So
+            // every fulfill should be balanced by a release before the next fulfill.
+            REPORTER_ASSERT(checker->fReporter, checker->fReleaseCount == checker->fFulfillCount);
+        }
+    }
    static void Done(void* self) {
        static_cast<PromiseTextureChecker*>(self)->fDoneCount++;
    }
@@ -129,7 +144,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(PromiseImageTest, reporter, ctxInfo) {
    GrBackendFormat backendFormat = backendTex.getBackendFormat();
    REPORTER_ASSERT(reporter, backendFormat.isValid());

-    PromiseTextureChecker promiseChecker(backendTex);
+    PromiseTextureChecker promiseChecker(backendTex, reporter, false);
    GrSurfaceOrigin texOrigin = kTopLeft_GrSurfaceOrigin;
    sk_sp<SkImage> refImg(
            SkImage_Gpu::MakePromiseTexture(ctx, backendFormat, kWidth, kHeight,
@@ -282,7 +297,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(PromiseImageTextureReuse, reporter, ctxInfo)
    REPORTER_ASSERT(reporter, backendFormat == backendTex2.getBackendFormat());
    REPORTER_ASSERT(reporter, backendFormat == backendTex3.getBackendFormat());

-    PromiseTextureChecker promiseChecker(backendTex1);
+    PromiseTextureChecker promiseChecker(backendTex1, reporter, true);
    GrSurfaceOrigin texOrigin = kTopLeft_GrSurfaceOrigin;
    sk_sp<SkImage> refImg(
            SkImage_Gpu::MakePromiseTexture(ctx, backendFormat, kWidth, kHeight,
@@ -549,7 +564,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(PromiseImageTextureReuseDifferentConfig, repo
        gpu->deleteTestingOnlyBackendTexture(backendTex2);
        return;
    }
-    PromiseTextureChecker promiseChecker(backendTex1);
+    PromiseTextureChecker promiseChecker(backendTex1, reporter, true);
    sk_sp<SkImage> alphaImg(SkImage_Gpu::MakePromiseTexture(
            ctx, backendTex1.getBackendFormat(), kWidth, kHeight, GrMipMapped::kNo,
            kTopLeft_GrSurfaceOrigin, kAlpha_8_SkColorType, kPremul_SkAlphaType, nullptr,
@@ -608,3 +623,134 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(PromiseImageTextureReuseDifferentConfig, repo
    gpu->deleteTestingOnlyBackendTexture(backendTex1);
    gpu->deleteTestingOnlyBackendTexture(backendTex2);
}
+
+DEF_GPUTEST(PromiseImageTextureShutdown, reporter, ctxInfo) {
+    const int kWidth = 10;
+    const int kHeight = 10;
+
+    // Different ways of killing contexts.
+    using DeathFn = std::function<void(sk_gpu_test::GrContextFactory*, GrContext*)>;
+    DeathFn destroy = [](sk_gpu_test::GrContextFactory* factory, GrContext* context) {
+        factory->destroyContexts();
+    };
+    DeathFn abandon = [](sk_gpu_test::GrContextFactory* factory, GrContext* context) {
+        context->abandonContext();
+    };
+    DeathFn releaseResourcesAndAbandon = [](sk_gpu_test::GrContextFactory* factory,
+                                            GrContext* context) {
+        context->releaseResourcesAndAbandonContext();
+    };
+
+    for (int type = 0; type < sk_gpu_test::GrContextFactory::kContextTypeCnt; ++type) {
+        auto contextType = static_cast<sk_gpu_test::GrContextFactory::ContextType>(type);
+        // These tests are difficult to get working with Vulkan. See http://skbug.com/8705
+        // and http://skbug.com/8275
+        GrBackendApi api = sk_gpu_test::GrContextFactory::ContextTypeBackend(contextType);
+        if (api == GrBackendApi::kVulkan) {
+            continue;
+        }
+        DeathFn contextKillers[] = {destroy, abandon, releaseResourcesAndAbandon};
+        for (auto contextDeath : contextKillers) {
+            sk_gpu_test::GrContextFactory factory;
+            auto ctx = factory.get(contextType);
+            if (!ctx) {
+                continue;
+            }
+            GrGpu* gpu = ctx->contextPriv().getGpu();
+
+            GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture(
+                    nullptr, kWidth, kHeight, GrColorType::kAlpha_8, false, GrMipMapped::kNo);
+            REPORTER_ASSERT(reporter, backendTex.isValid());
+
+            SkImageInfo info = SkImageInfo::Make(kWidth, kHeight, kRGBA_8888_SkColorType,
+                                                 kPremul_SkAlphaType);
+            sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(ctx, SkBudgeted::kNo, info);
+            SkCanvas* canvas = surface->getCanvas();
+
+            PromiseTextureChecker promiseChecker(backendTex, reporter, false);
+            sk_sp<SkImage> image(SkImage_Gpu::MakePromiseTexture(
+                    ctx, backendTex.getBackendFormat(), kWidth, kHeight, GrMipMapped::kNo,
+                    kTopLeft_GrSurfaceOrigin, kAlpha_8_SkColorType, kPremul_SkAlphaType,
+                    nullptr, PromiseTextureChecker::Fulfill, PromiseTextureChecker::Release,
+                    PromiseTextureChecker::Done, &promiseChecker));
+            REPORTER_ASSERT(reporter, image);
+
+            canvas->drawImage(image, 0, 0);
+            image.reset();
+            // If the surface still holds a ref to the context then the factory will not be able
+            // to destroy the context (and instead will release-all-and-abandon).
+            surface.reset();
+
+            ctx->flush();
+            contextDeath(&factory, ctx);
+
+            int expectedFulfillCnt = 1;
+            int expectedReleaseCnt = 1;
+            int expectedDoneCnt = 1;
+            REPORTER_ASSERT(reporter, check_fulfill_and_release_cnts(promiseChecker,
+                                                                     true,
+                                                                     expectedFulfillCnt,
+                                                                     expectedReleaseCnt,
+                                                                     true,
+                                                                     expectedDoneCnt,
+                                                                     reporter));
+        }
+    }
+}
+
+DEF_GPUTEST_FOR_RENDERING_CONTEXTS(PromiseImageTextureFullCache, reporter, ctxInfo) {
+    const int kWidth = 10;
+    const int kHeight = 10;
+
+    GrContext* ctx = ctxInfo.grContext();
+    GrGpu* gpu = ctx->contextPriv().getGpu();
+
+    GrBackendTexture backendTex = gpu->createTestingOnlyBackendTexture(
+            nullptr, kWidth, kHeight, GrColorType::kAlpha_8, false, GrMipMapped::kNo);
+    REPORTER_ASSERT(reporter, backendTex.isValid());
+
+    SkImageInfo info =
+            SkImageInfo::Make(kWidth, kHeight, kRGBA_8888_SkColorType, kPremul_SkAlphaType);
+    sk_sp<SkSurface> surface = SkSurface::MakeRenderTarget(ctx, SkBudgeted::kNo, info);
+    SkCanvas* canvas = surface->getCanvas();
+
+    PromiseTextureChecker promiseChecker(backendTex, reporter, false);
+    sk_sp<SkImage> image(SkImage_Gpu::MakePromiseTexture(
+            ctx, backendTex.getBackendFormat(), kWidth, kHeight, GrMipMapped::kNo,
+            kTopLeft_GrSurfaceOrigin, kAlpha_8_SkColorType, kPremul_SkAlphaType, nullptr,
+            PromiseTextureChecker::Fulfill, PromiseTextureChecker::Release,
+            PromiseTextureChecker::Done, &promiseChecker));
+    REPORTER_ASSERT(reporter, image);
+
+    // Make the cache full. This tests that we don't preemptively purge cached textures for
+    // fulfillment due to cache pressure.
+    static constexpr int kMaxResources = 10;
+    static constexpr int kMaxBytes = 100;
+    ctx->setResourceCacheLimits(kMaxResources, kMaxBytes);
+    sk_sp<GrTexture> textures[2 * kMaxResources];
+    for (int i = 0; i < 2 * kMaxResources; ++i) {
+        GrSurfaceDesc desc;
+        desc.fConfig = kRGBA_8888_GrPixelConfig;
+        desc.fWidth = desc.fHeight = 100;
+        textures[i] = ctx->contextPriv().resourceProvider()->createTexture(desc, SkBudgeted::kYes);
+        REPORTER_ASSERT(reporter, textures[i]);
+    }
+
+    // Relying on the asserts in the promiseImageChecker to ensure that fulfills and releases are
+    // properly ordered.
+    canvas->drawImage(image, 0, 0);
+    canvas->flush();
+    canvas->drawImage(image, 1, 0);
+    canvas->flush();
+    canvas->drawImage(image, 2, 0);
+    canvas->flush();
+    canvas->drawImage(image, 3, 0);
+    canvas->flush();
+    canvas->drawImage(image, 4, 0);
+    canvas->flush();
+    canvas->drawImage(image, 5, 0);
+    canvas->flush();
+    // Must call this to ensure that all callbacks are performed before the checker is destroyed.
+    gpu->testingOnly_flushGpuAndSync();
+    gpu->deleteTestingOnlyBackendTexture(backendTex);
+}
@@ -556,15 +556,28 @@ static void test_budgeting(skiatest::Reporter* reporter) {
                                      wrappedCacheable->gpuMemorySize() +
                                      unbudgeted->gpuMemorySize() ==
                                      cache->getResourceBytes());
-    REPORTER_ASSERT(reporter, 12 == cache->getPurgeableBytes());
+    REPORTER_ASSERT(reporter, 0 == cache->getPurgeableBytes());

    // Now try freeing the budgeted resources first
    wrappedCacheable = TestResource::CreateWrapped(gpu, GrWrapCacheable::kYes);
    wrappedUncacheable = TestResource::CreateWrapped(gpu, GrWrapCacheable::kNo);
    unique->unref();
-    REPORTER_ASSERT(reporter, 23 == cache->getPurgeableBytes());
+    REPORTER_ASSERT(reporter, 11 == cache->getPurgeableBytes());
+    // This will free 'unique' but not wrappedCacheable which has a key. That requires the key to be
+    // removed to be freed.
    cache->purgeAllUnlocked();
    REPORTER_ASSERT(reporter, 4 == cache->getResourceCount());

+    wrappedCacheableViaKey = cache->findAndRefUniqueResource(uniqueKey2);
+    REPORTER_ASSERT(reporter, wrappedCacheableViaKey);
+    if (wrappedCacheableViaKey) {
+        wrappedCacheableViaKey->resourcePriv().removeUniqueKey();
+        wrappedCacheable->unref();
+    }
+    // We shouldn't have to call purgeAllUnlocked as removing the key on a wrapped cacheable
+    // resource should immediately delete it.
+    REPORTER_ASSERT(reporter, 3 == cache->getResourceCount());
+
+    wrappedCacheable = TestResource::CreateWrapped(gpu, GrWrapCacheable::kYes);
    REPORTER_ASSERT(reporter, scratch->gpuMemorySize() + wrappedCacheable->gpuMemorySize() +
                                      wrappedUncacheable->gpuMemorySize() +
                                      unbudgeted->gpuMemorySize() ==
@@ -158,12 +158,26 @@ static void basic_test(GrContext* context,

    // Once instantiated, the backing resource should have the same key
    SkAssertResult(proxy->instantiate(resourceProvider));
-    const GrUniqueKey& texKey = proxy->peekSurface()->getUniqueKey();
+    const GrUniqueKey texKey = proxy->peekSurface()->getUniqueKey();
    REPORTER_ASSERT(reporter, texKey.isValid());
    REPORTER_ASSERT(reporter, key == texKey);

+    // An Unbudgeted-cacheable resource will not get purged when a proxy with the same key is
+    // deleted.
+    bool expectResourceToOutliveProxy = proxy->peekSurface()->resourcePriv().budgetedType() ==
+                                        GrBudgetedType::kUnbudgetedCacheable;
+
+    // An Unbudgeted-uncacheable resource is never kept alive if it's ref cnt reaches zero even if
+    // it has a key.
+    bool expectDeletingProxyToDeleteResource =
+            proxy->peekSurface()->resourcePriv().budgetedType() ==
+            GrBudgetedType::kUnbudgetedUncacheable;
+
    // deleting the proxy should delete it from the hash but not the cache
    proxy = nullptr;
+    if (expectDeletingProxyToDeleteResource) {
+        expectedCacheCount -= 1;
+    }
    REPORTER_ASSERT(reporter, 0 == proxyProvider->numUniqueKeyProxies_TestOnly());
    REPORTER_ASSERT(reporter, expectedCacheCount == cache->getResourceCount());

@@ -176,13 +190,27 @@ static void basic_test(GrContext* context,
    // Mega-purging it should remove it from both the hash and the cache
    proxy = nullptr;
    cache->purgeAllUnlocked();
-    expectedCacheCount--;
+    if (!expectResourceToOutliveProxy) {
+        expectedCacheCount--;
+    }
    REPORTER_ASSERT(reporter, expectedCacheCount == cache->getResourceCount());

-    // We can bring neither the texture nor proxy back from perma-death
+    // If the texture was deleted then the proxy should no longer be findable. Otherwise, it should
+    // be.
    proxy = proxyProvider->findOrCreateProxyByUniqueKey(key, kBottomLeft_GrSurfaceOrigin);
-    REPORTER_ASSERT(reporter, !proxy);
+    REPORTER_ASSERT(reporter, expectResourceToOutliveProxy ? (bool)proxy : !proxy);
    REPORTER_ASSERT(reporter, expectedCacheCount == cache->getResourceCount());

+    if (expectResourceToOutliveProxy) {
+        proxy.reset();
+        GrUniqueKeyInvalidatedMessage msg(texKey, context->uniqueID());
+        SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(msg);
+        cache->purgeAsNeeded();
+        expectedCacheCount--;
+        proxy = proxyProvider->findOrCreateProxyByUniqueKey(key, kBottomLeft_GrSurfaceOrigin);
+        REPORTER_ASSERT(reporter, !proxy);
+        REPORTER_ASSERT(reporter, expectedCacheCount == cache->getResourceCount());
+    }
}

///////////////////////////////////////////////////////////////////////////////////////////////////
@@ -219,6 +219,11 @@ sk_sp<GrTextureProxy> GrProxyProvider::testingOnly_createInstantiatedProxy(
    return this->createWrapped(std::move(tex), origin);
}

+sk_sp<GrTextureProxy> GrProxyProvider::testingOnly_createWrapped(sk_sp<GrTexture> tex,
+                                                                 GrSurfaceOrigin origin) {
+    return this->createWrapped(std::move(tex), origin);
+}
+
///////////////////////////////////////////////////////////////////////////////

#define ASSERT_SINGLE_OWNER \