[graphite] Move GrResourceKey to skgpu::ResourceKey.

We'll want to reuse this key class for Graphite resources as well.
Nothing about these keys is Ganesh-specific.

Bug: skia:12754
Change-Id: I39fe5a9968156985865bccbfc4978583120ac2e8
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/487384
Reviewed-by: Michael Ludwig <michaelludwig@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
Author: Greg Daniel
Committed: 2021-12-21 18:01:47 -05:00 by SkCQ
Commit: c1bc0205d9 (parent c2b31fb04a)
65 changed files with 498 additions and 468 deletions
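Before the per-file hunks, here is a minimal sketch (not part of the CL) of what the rename means at a call site. The Domain/Builder pattern is unchanged; only the spellings move from the Gr-prefixed globals (GrUniqueKey, GrScratchKey, GrResourceKeyHash) into the skgpu namespace, and the header moves from include/private/GrResourceKey.h to src/gpu/ResourceKey.h. The make_example_key helper and its label string are hypothetical.

#include <cstdint>
#include "src/gpu/ResourceKey.h"  // was include/private/GrResourceKey.h

// Hypothetical helper showing the post-CL spelling of the unique-key API.
static skgpu::UniqueKey make_example_key(uint32_t value) {
    // Each subsystem still reserves its own key domain once, process-wide.
    static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
    skgpu::UniqueKey key;
    // The Builder takes the key to fill, the domain, the number of 32-bit
    // data words, and an optional debug label; finish() seals the key.
    skgpu::UniqueKey::Builder builder(&key, kDomain, 1, "Example Key");
    builder[0] = value;
    builder.finish();
    return key;
}

Scratch keys follow the same pattern with skgpu::ScratchKey::GenerateResourceType() in place of GenerateDomain(), as the GrAttachment and GrTexture hunks below show.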

View File

@ -26,9 +26,9 @@ public:
this->registerWithCache(SkBudgeted::kYes);
}
static void ComputeKey(int i, int keyData32Count, GrUniqueKey* key) {
static GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey::Builder builder(key, kDomain, keyData32Count);
static void ComputeKey(int i, int keyData32Count, skgpu::UniqueKey* key) {
static skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey::Builder builder(key, kDomain, keyData32Count);
for (int j = 0; j < keyData32Count; ++j) {
builder[j] = i + j;
}
@ -42,7 +42,7 @@ private:
static void populate_cache(GrGpu* gpu, int resourceCount, int keyData32Count) {
for (int i = 0; i < resourceCount; ++i) {
GrUniqueKey key;
skgpu::UniqueKey key;
BenchResource::ComputeKey(i, keyData32Count, &key);
GrGpuResource* resource = new BenchResource(gpu);
resource->resourcePriv().setUniqueKey(key);
@ -141,7 +141,7 @@ protected:
SkASSERT(CACHE_SIZE_COUNT == cache->getResourceCount());
for (int i = 0; i < loops; ++i) {
for (int k = 0; k < CACHE_SIZE_COUNT; ++k) {
GrUniqueKey key;
skgpu::UniqueKey key;
BenchResource::ComputeKey(k, fKeyData32Count, &key);
sk_sp<GrGpuResource> resource(cache->findAndRefUniqueResource(key));
SkASSERT(resource);

View File

@ -27,7 +27,6 @@ skia_gpu_sources = [
"$_include/private/GrGLTypesPriv.h",
"$_include/private/GrImageContext.h",
"$_include/private/GrMtlTypesPriv.h",
"$_include/private/GrResourceKey.h",
"$_include/private/GrSingleOwner.h",
"$_include/private/GrTypesPriv.h",
"$_include/private/chromium/GrSlug.h",
@ -821,6 +820,8 @@ skia_shared_gpu_sources = [
"$_include/gpu/ShaderErrorHandler.h",
"$_src/gpu/BufferWriter.h",
"$_src/gpu/KeyBuilder.h",
"$_src/gpu/ResourceKey.cpp",
"$_src/gpu/ResourceKey.h",
"$_src/gpu/ShaderErrorHandler.cpp",
# tessellate

View File

@ -789,9 +789,9 @@ static std::unique_ptr<GrFragmentProcessor> create_profile_effect(GrRecordingCon
// 1 / textureRadius. This is done to avoid overflow in length().
SkMatrix texM = SkMatrix::Scale(kProfileTextureWidth, 1.f);
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey key;
GrUniqueKey::Builder builder(&key, kDomain, 1, "1-D Circular Blur");
static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey key;
skgpu::UniqueKey::Builder builder(&key, kDomain, 1, "1-D Circular Blur");
builder[0] = sigmaToCircleRRatioFixed;
builder.finish();
@ -872,9 +872,9 @@ static std::unique_ptr<GrFragmentProcessor> make_rect_integral_fp(GrRecordingCon
int width = SkGpuBlurUtils::CreateIntegralTable(sixSigma, nullptr);
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey key;
GrUniqueKey::Builder builder(&key, kDomain, 1, "Rect Blur Mask");
static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey key;
skgpu::UniqueKey::Builder builder(&key, kDomain, 1, "Rect Blur Mask");
builder[0] = width;
builder.finish();
@ -1040,13 +1040,13 @@ static std::unique_ptr<GrFragmentProcessor> make_rect_blur(GrRecordingContext* c
static constexpr auto kBlurredRRectMaskOrigin = kTopLeft_GrSurfaceOrigin;
static void make_blurred_rrect_key(GrUniqueKey* key,
static void make_blurred_rrect_key(skgpu::UniqueKey* key,
const SkRRect& rrectToDraw,
float xformedSigma) {
SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma));
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
GrUniqueKey::Builder builder(key, kDomain, 9, "RoundRect Blur Mask");
skgpu::UniqueKey::Builder builder(key, kDomain, 9, "RoundRect Blur Mask");
builder[0] = SkScalarCeilToInt(xformedSigma - 1 / 6.0f);
int index = 1;
@ -1262,7 +1262,7 @@ static std::unique_ptr<GrFragmentProcessor> find_or_create_rrect_blur_mask_fp(
const SkISize& dimensions,
float xformedSigma) {
SkASSERT(!SkGpuBlurUtils::IsEffectivelyZeroSigma(xformedSigma));
GrUniqueKey key;
skgpu::UniqueKey key;
make_blurred_rrect_key(&key, rrectToDraw, xformedSigma);
auto threadSafeCache = rContext->priv().threadSafeCache();

View File

@ -21,7 +21,7 @@
#if SK_SUPPORT_GPU
class GrProxyProvider;
#include "include/private/GrResourceKey.h"
#include "src/gpu/ResourceKey.h"
#endif
// Because a single save/restore state can have multiple clips, this class
@ -195,7 +195,7 @@ public:
* the element is destroyed because their key is based on this element's gen ID.
*/
void addResourceInvalidationMessage(GrProxyProvider* proxyProvider,
const GrUniqueKey& key) const {
const skgpu::UniqueKey& key) const {
SkASSERT(proxyProvider);
if (!fProxyProvider) {
@ -238,8 +238,8 @@ public:
uint32_t fGenID;
#if SK_SUPPORT_GPU
mutable GrProxyProvider* fProxyProvider = nullptr;
mutable SkTArray<GrUniqueKey> fKeysToInvalidate;
mutable GrProxyProvider* fProxyProvider = nullptr;
mutable SkTArray<skgpu::UniqueKey> fKeysToInvalidate;
#endif
Element(int saveCount) {
this->initCommon(saveCount, SkClipOp::kIntersect, false);

View File

@ -7,7 +7,6 @@
#include "src/gpu/GrAttachment.h"
#include "include/private/GrResourceKey.h"
#include "src/gpu/GrBackendUtils.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDataUtils.h"
@ -34,7 +33,7 @@ size_t GrAttachment::onGpuMemorySize() const {
return 0;
}
static void build_key(GrResourceKey::Builder* builder,
static void build_key(skgpu::ResourceKey::Builder* builder,
const GrCaps& caps,
const GrBackendFormat& format,
SkISize dimensions,
@ -69,10 +68,10 @@ void GrAttachment::ComputeSharedAttachmentUniqueKey(const GrCaps& caps,
GrMipmapped mipmapped,
GrProtected isProtected,
GrMemoryless memoryless,
GrUniqueKey* key) {
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
skgpu::UniqueKey* key) {
static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
GrUniqueKey::Builder builder(key, kDomain, 5);
skgpu::UniqueKey::Builder builder(key, kDomain, 5);
build_key(&builder, caps, format, dimensions, requiredUsage, sampleCnt, mipmapped, isProtected,
memoryless);
}
@ -85,15 +84,15 @@ void GrAttachment::ComputeScratchKey(const GrCaps& caps,
GrMipmapped mipmapped,
GrProtected isProtected,
GrMemoryless memoryless,
GrScratchKey* key) {
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
skgpu::ScratchKey* key) {
static const skgpu::ScratchKey::ResourceType kType = skgpu::ScratchKey::GenerateResourceType();
GrScratchKey::Builder builder(key, kType, 5);
skgpu::ScratchKey::Builder builder(key, kType, 5);
build_key(&builder, caps, format, dimensions, requiredUsage, sampleCnt, mipmapped, isProtected,
memoryless);
}
void GrAttachment::computeScratchKey(GrScratchKey* key) const {
void GrAttachment::computeScratchKey(skgpu::ScratchKey* key) const {
if (!SkToBool(fSupportedUsages & UsageFlags::kStencilAttachment)) {
auto isProtected = this->isProtected() ? GrProtected::kYes : GrProtected::kNo;
ComputeScratchKey(*this->getGpu()->caps(),

View File

@ -12,7 +12,6 @@
#include "src/gpu/GrSurface.h"
class GrRenderTarget;
class GrResourceKey;
/**
* This is a generic attachment class for our GrSurfaces. It always represents a single gpu
@ -55,7 +54,7 @@ public:
GrMipmapped mipmapped,
GrProtected isProtected,
GrMemoryless memoryless,
GrUniqueKey* key);
skgpu::UniqueKey* key);
// TODO: Once attachments start having multiple usages, we'll need to figure out how to search
// the cache for an attachment that simply contains the requested usage instead of equaling it.
@ -67,7 +66,7 @@ public:
GrMipmapped mipmapped,
GrProtected,
GrMemoryless,
GrScratchKey* key);
skgpu::ScratchKey* key);
protected:
GrAttachment(GrGpu* gpu, SkISize dimensions, UsageFlags supportedUsages, int sampleCnt,
@ -82,7 +81,7 @@ protected:
private:
size_t onGpuMemorySize() const final;
void computeScratchKey(GrScratchKey*) const final;
void computeScratchKey(skgpu::ScratchKey*) const final;
const char* getResourceType() const override {
if (fSupportedUsages == UsageFlags::kStencilAttachment) {

View File

@ -142,8 +142,8 @@ GrSurfaceProxyView GrBackendTextureImageGenerator::onGenerateTexture(
}
fRefHelper->fBorrowingContextID = dContext->directContextID();
if (!fRefHelper->fBorrowedTextureKey.isValid()) {
static const auto kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey::Builder builder(&fRefHelper->fBorrowedTextureKey, kDomain, 1);
static const auto kDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey::Builder builder(&fRefHelper->fBorrowedTextureKey, kDomain, 1);
builder[0] = this->uniqueID();
}
fBorrowingMutex.release();

View File

@ -10,9 +10,9 @@
#include "include/core/SkImageGenerator.h"
#include "include/gpu/GrBackendSurface.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/GrResourceKey.h"
#include "include/private/SkMutex.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/ResourceKey.h"
class GrSemaphore;
@ -70,7 +70,7 @@ private:
// We use this key so that we don't rewrap the GrBackendTexture in a GrTexture for each
// proxy created from this generator for a particular borrowing context.
GrUniqueKey fBorrowedTextureKey;
skgpu::UniqueKey fBorrowedTextureKey;
// There is no ref associated with this pointer. We rely on our atomic bookkeeping with the
// context ID to know when this pointer is valid and safe to use. This is used to make sure
// all uses of the wrapped texture are finished on the borrowing context before we open

View File

@ -101,7 +101,7 @@ static GrSurfaceProxyView sw_create_filtered_mask(GrRecordingContext* rContext,
const SkIRect& unclippedDevShapeBounds,
const SkIRect& clipBounds,
SkIRect* drawRect,
GrUniqueKey* key) {
skgpu::UniqueKey* key) {
SkASSERT(filter);
SkASSERT(!shape.style().applies());
@ -283,7 +283,7 @@ static bool get_shape_and_clip_bounds(skgpu::v1::SurfaceDrawContext* sdc,
// The key and clip-bounds are computed together because the caching decision can impact the
// clip-bound - since we only cache un-clipped masks the clip can be removed entirely.
// A 'false' return value indicates that the shape is known to be clipped away.
static bool compute_key_and_clip_bounds(GrUniqueKey* maskKey,
static bool compute_key_and_clip_bounds(skgpu::UniqueKey* maskKey,
SkIRect* boundsForClip,
const GrCaps* caps,
const SkMatrix& viewMatrix,
@ -326,8 +326,8 @@ static bool compute_key_and_clip_bounds(GrUniqueKey* maskKey,
}
if (useCache) {
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey::Builder builder(maskKey, kDomain, 5 + 2 + shape.unstyledKeySize(),
static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey::Builder builder(maskKey, kDomain, 5 + 2 + shape.unstyledKeySize(),
"Mask Filtered Masks");
// We require the upper left 2x2 of the matrix to match exactly for a cache hit.
@ -375,7 +375,7 @@ static GrSurfaceProxyView hw_create_filtered_mask(GrDirectContext* dContext,
const SkIRect& unclippedDevShapeBounds,
const SkIRect& clipBounds,
SkIRect* maskRect,
GrUniqueKey* key) {
skgpu::UniqueKey* key) {
if (!filter->canFilterMaskGPU(shape,
unclippedDevShapeBounds,
clipBounds,
@ -508,7 +508,7 @@ static void draw_shape_with_mask_filter(GrRecordingContext* rContext,
}
}
GrUniqueKey maskKey;
skgpu::UniqueKey maskKey;
SkIRect boundsForClip;
if (!compute_key_and_clip_bounds(&maskKey, &boundsForClip,
sdc->caps(),

View File

@ -58,10 +58,11 @@ bool GrGpuBuffer::updateData(const void* src, size_t srcSizeInBytes) {
return result;
}
void GrGpuBuffer::ComputeScratchKeyForDynamicBuffer(size_t size, GrGpuBufferType intendedType,
GrScratchKey* key) {
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
void GrGpuBuffer::ComputeScratchKeyForDynamicBuffer(size_t size,
GrGpuBufferType intendedType,
skgpu::ScratchKey* key) {
static const skgpu::ScratchKey::ResourceType kType = skgpu::ScratchKey::GenerateResourceType();
skgpu::ScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
builder[0] = SkToU32(intendedType);
builder[1] = (uint32_t)size;
if (sizeof(size_t) > 4) {
@ -69,7 +70,7 @@ void GrGpuBuffer::ComputeScratchKeyForDynamicBuffer(size_t size, GrGpuBufferType
}
}
void GrGpuBuffer::computeScratchKey(GrScratchKey* key) const {
void GrGpuBuffer::computeScratchKey(skgpu::ScratchKey* key) const {
if (SkIsPow2(fSizeInBytes) && kDynamic_GrAccessPattern == fAccessPattern) {
ComputeScratchKeyForDynamicBuffer(fSizeInBytes, fIntendedType, key);
}

View File

@ -19,7 +19,7 @@ public:
* Computes a scratch key for a GPU-side buffer with a "dynamic" access pattern. (Buffers with
* "static" and "stream" patterns are disqualified by nature from being cached and reused.)
*/
static void ComputeScratchKeyForDynamicBuffer(size_t size, GrGpuBufferType, GrScratchKey*);
static void ComputeScratchKeyForDynamicBuffer(size_t size, GrGpuBufferType, skgpu::ScratchKey*);
GrAccessPattern accessPattern() const { return fAccessPattern; }
@ -93,7 +93,7 @@ private:
size_t onGpuMemorySize() const override { return fSizeInBytes; }
const char* getResourceType() const override { return "Buffer Object"; }
void computeScratchKey(GrScratchKey* key) const override;
void computeScratchKey(skgpu::ScratchKey* key) const override;
size_t fSizeInBytes;
GrAccessPattern fAccessPattern;

View File

@ -141,7 +141,7 @@ void GrGpuResource::removeUniqueKey() {
get_resource_cache(fGpu)->resourceAccess().removeUniqueKey(this);
}
void GrGpuResource::setUniqueKey(const GrUniqueKey& key) {
void GrGpuResource::setUniqueKey(const skgpu::UniqueKey& key) {
SkASSERT(this->internalHasRef());
SkASSERT(key.isValid());

View File

@ -8,9 +8,9 @@
#ifndef GrGpuResource_DEFINED
#define GrGpuResource_DEFINED
#include "include/private/GrResourceKey.h"
#include "include/private/GrTypesPriv.h"
#include "include/private/SkNoncopyable.h"
#include "src/gpu/ResourceKey.h"
class GrGpu;
class GrResourceCache;
@ -172,7 +172,7 @@ public:
/** Returns the current unique key for the resource. It will be invalid if the resource has no
associated unique key. */
const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }
const skgpu::UniqueKey& getUniqueKey() const { return fUniqueKey; }
/**
* Internal-only helper class used for manipulations of the resource by the cache.
@ -264,7 +264,7 @@ private:
* resources and populate the scratchKey with the key.
* By default resources are not recycled as scratch.
**/
virtual void computeScratchKey(GrScratchKey*) const {}
virtual void computeScratchKey(skgpu::ScratchKey*) const {}
/**
* Removes references to objects in the underlying 3D API without freeing them.
@ -280,7 +280,7 @@ private:
virtual size_t onGpuMemorySize() const = 0;
// See comments in CacheAccess and ResourcePriv.
void setUniqueKey(const GrUniqueKey&);
void setUniqueKey(const skgpu::UniqueKey&);
void removeUniqueKey();
void notifyARefCntIsZero(LastRemovedRef removedRef) const;
void removeScratchKey();
@ -300,8 +300,8 @@ private:
GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;
static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
GrScratchKey fScratchKey;
GrUniqueKey fUniqueKey;
skgpu::ScratchKey fScratchKey;
skgpu::UniqueKey fUniqueKey;
// This is not ref'ed but abandon() or release() will be called before the GrGpu object
// is destroyed. Those calls will set this to NULL.

View File

@ -57,7 +57,7 @@ private:
}
/** Called by the cache to assign a new unique key. */
void setUniqueKey(const GrUniqueKey& key) { fResource->fUniqueKey = key; }
void setUniqueKey(const skgpu::UniqueKey& key) { fResource->fUniqueKey = key; }
/** Is the resource ref'ed */
bool hasRef() const { return fResource->hasRef(); }

View File

@ -22,7 +22,7 @@ public:
* removeUniqueKey(). If another resource is using the key then its unique key is removed and
* this resource takes over the key.
*/
void setUniqueKey(const GrUniqueKey& key) { fResource->setUniqueKey(key); }
void setUniqueKey(const skgpu::UniqueKey& key) { fResource->setUniqueKey(key); }
/** Removes the unique key from a resource. If the resource has a scratch key, it may be
preserved for recycling as scratch. */
@ -60,7 +60,7 @@ public:
* Otherwise it returns a key for which isNullScratch is true. The resource may currently be
* used as a uniquely keyed resource rather than scratch. Check isScratch().
*/
const GrScratchKey& getScratchKey() const { return fResource->fScratchKey; }
const skgpu::ScratchKey& getScratchKey() const { return fResource->fScratchKey; }
/**
* If the resource has a scratch key, the key will be removed. Since scratch keys are installed

View File

@ -11,7 +11,6 @@
#include "include/core/SkImage.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/GrImageContext.h"
#include "include/private/GrResourceKey.h"
#include "include/private/GrSingleOwner.h"
#include "include/private/SkImageInfoPriv.h"
#include "src/core/SkAutoPixmapStorage.h"
@ -50,7 +49,7 @@ GrProxyProvider::~GrProxyProvider() {
}
}
bool GrProxyProvider::assignUniqueKeyToProxy(const GrUniqueKey& key, GrTextureProxy* proxy) {
bool GrProxyProvider::assignUniqueKeyToProxy(const skgpu::UniqueKey& key, GrTextureProxy* proxy) {
ASSERT_SINGLE_OWNER
SkASSERT(key.isValid());
if (this->isAbandoned() || !proxy) {
@ -102,7 +101,7 @@ void GrProxyProvider::removeUniqueKeyFromProxy(GrTextureProxy* proxy) {
this->processInvalidUniqueKey(proxy->getUniqueKey(), proxy, InvalidateGPUResource::kYes);
}
sk_sp<GrTextureProxy> GrProxyProvider::findProxyByUniqueKey(const GrUniqueKey& key) {
sk_sp<GrTextureProxy> GrProxyProvider::findProxyByUniqueKey(const skgpu::UniqueKey& key) {
ASSERT_SINGLE_OWNER
if (this->isAbandoned()) {
@ -213,7 +212,7 @@ sk_sp<GrTextureProxy> GrProxyProvider::createWrapped(sk_sp<GrTexture> tex,
}
}
sk_sp<GrTextureProxy> GrProxyProvider::findOrCreateProxyByUniqueKey(const GrUniqueKey& key,
sk_sp<GrTextureProxy> GrProxyProvider::findOrCreateProxyByUniqueKey(const skgpu::UniqueKey& key,
UseAllocator useAllocator) {
ASSERT_SINGLE_OWNER
@ -248,10 +247,11 @@ sk_sp<GrTextureProxy> GrProxyProvider::findOrCreateProxyByUniqueKey(const GrUniq
return result;
}
GrSurfaceProxyView GrProxyProvider::findCachedProxyWithColorTypeFallback(const GrUniqueKey& key,
GrSurfaceOrigin origin,
GrColorType ct,
int sampleCnt) {
GrSurfaceProxyView GrProxyProvider::findCachedProxyWithColorTypeFallback(
const skgpu::UniqueKey& key,
GrSurfaceOrigin origin,
GrColorType ct,
int sampleCnt) {
auto proxy = this->findOrCreateProxyByUniqueKey(key);
if (!proxy) {
return {};
@ -862,12 +862,14 @@ sk_sp<GrTextureProxy> GrProxyProvider::MakeFullyLazyProxy(LazyInstantiateCallbac
}
}
void GrProxyProvider::processInvalidUniqueKey(const GrUniqueKey& key, GrTextureProxy* proxy,
void GrProxyProvider::processInvalidUniqueKey(const skgpu::UniqueKey& key,
GrTextureProxy* proxy,
InvalidateGPUResource invalidateGPUResource) {
this->processInvalidUniqueKeyImpl(key, proxy, invalidateGPUResource, RemoveTableEntry::kYes);
}
void GrProxyProvider::processInvalidUniqueKeyImpl(const GrUniqueKey& key, GrTextureProxy* proxy,
void GrProxyProvider::processInvalidUniqueKeyImpl(const skgpu::UniqueKey& key,
GrTextureProxy* proxy,
InvalidateGPUResource invalidateGPUResource,
RemoveTableEntry removeTableEntry) {
SkASSERT(key.isValid());

View File

@ -9,9 +9,9 @@
#define GrProxyProvider_DEFINED
#include "include/gpu/GrTypes.h"
#include "include/private/GrResourceKey.h"
#include "src/core/SkTDynamicHash.h"
#include "src/gpu/GrTextureProxy.h"
#include "src/gpu/ResourceKey.h"
class GrImageContext;
class GrBackendRenderTarget;
@ -34,7 +34,7 @@ public:
* Assigns a unique key to a proxy. The proxy will be findable via this key using
* findProxyByUniqueKey(). It is an error if an existing proxy already has a key.
*/
bool assignUniqueKeyToProxy(const GrUniqueKey&, GrTextureProxy*);
bool assignUniqueKeyToProxy(const skgpu::UniqueKey&, GrTextureProxy*);
/*
* Sets the unique key of the provided proxy to the unique key of the surface. The surface must
@ -51,13 +51,13 @@ public:
/*
* Finds a proxy by unique key.
*/
sk_sp<GrTextureProxy> findProxyByUniqueKey(const GrUniqueKey&);
sk_sp<GrTextureProxy> findProxyByUniqueKey(const skgpu::UniqueKey&);
/*
* Finds a proxy by unique key or creates a new one that wraps a resource matching the unique
* key.
*/
sk_sp<GrTextureProxy> findOrCreateProxyByUniqueKey(const GrUniqueKey&,
sk_sp<GrTextureProxy> findOrCreateProxyByUniqueKey(const skgpu::UniqueKey&,
UseAllocator = UseAllocator::kYes);
/**
@ -66,7 +66,7 @@ public:
* the proxy is renderable then it was created via a fallback code path so the fallback
* color type will be used to create the view.
*/
GrSurfaceProxyView findCachedProxyWithColorTypeFallback(const GrUniqueKey&,
GrSurfaceProxyView findCachedProxyWithColorTypeFallback(const skgpu::UniqueKey&,
GrSurfaceOrigin,
GrColorType,
int sampleCnt);
@ -223,7 +223,7 @@ public:
* (in which case we don't want it cluttering up the hash table) or the client has indicated
* that it will never refer to the unique key again.
*/
void processInvalidUniqueKey(const GrUniqueKey&, GrTextureProxy*, InvalidateGPUResource);
void processInvalidUniqueKey(const skgpu::UniqueKey&, GrTextureProxy*, InvalidateGPUResource);
GrDDLProvider isDDLProvider() const;
@ -278,7 +278,7 @@ private:
// processInvalidUniqueKey() with control over removing hash table entries,
// which is not safe while iterating with foreach().
enum class RemoveTableEntry { kNo, kYes };
void processInvalidUniqueKeyImpl(const GrUniqueKey&, GrTextureProxy*,
void processInvalidUniqueKeyImpl(const skgpu::UniqueKey&, GrTextureProxy*,
InvalidateGPUResource, RemoveTableEntry);
bool isAbandoned() const;
@ -296,11 +296,11 @@ private:
sk_sp<GrTextureProxy> createWrapped(sk_sp<GrTexture> tex, UseAllocator useAllocator);
struct UniquelyKeyedProxyHashTraits {
static const GrUniqueKey& GetKey(const GrTextureProxy& p) { return p.getUniqueKey(); }
static const skgpu::UniqueKey& GetKey(const GrTextureProxy& p) { return p.getUniqueKey(); }
static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }
static uint32_t Hash(const skgpu::UniqueKey& key) { return key.hash(); }
};
typedef SkTDynamicHash<GrTextureProxy, GrUniqueKey, UniquelyKeyedProxyHashTraits> UniquelyKeyedProxyHash;
typedef SkTDynamicHash<GrTextureProxy, skgpu::UniqueKey, UniquelyKeyedProxyHashTraits> UniquelyKeyedProxyHash;
// This holds the texture proxies that have unique keys. The resourceCache does not get a ref
// on these proxies but they must send a message to the resourceCache when they are deleted.

View File

@ -105,7 +105,7 @@ static bool can_proxy_use_scratch(const GrCaps& caps, GrSurfaceProxy* proxy) {
}
GrResourceAllocator::Register::Register(GrSurfaceProxy* originatingProxy,
GrScratchKey scratchKey,
skgpu::ScratchKey scratchKey,
GrResourceProvider* provider)
: fOriginatingProxy(originatingProxy)
, fScratchKey(std::move(scratchKey)) {
@ -276,13 +276,15 @@ GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSu
return *p;
}
// No need for a scratch key. These don't go in the free pool.
Register* r = fInternalAllocator.make<Register>(proxy, GrScratchKey(), resourceProvider);
Register* r = fInternalAllocator.make<Register>(proxy,
skgpu::ScratchKey(),
resourceProvider);
fUniqueKeyRegisters.set(uniqueKey, r);
return r;
}
// Then look in the free pool
GrScratchKey scratchKey;
skgpu::ScratchKey scratchKey;
proxy->priv().computeScratchKey(*fDContext->priv().caps(), &scratchKey);
auto filter = [] (const Register* r) {

View File

@ -134,21 +134,21 @@ private:
Register* findOrCreateRegisterFor(GrSurfaceProxy* proxy);
struct FreePoolTraits {
static const GrScratchKey& GetKey(const Register& r) {
static const skgpu::ScratchKey& GetKey(const Register& r) {
return r.scratchKey();
}
static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
static uint32_t Hash(const skgpu::ScratchKey& key) { return key.hash(); }
static void OnFree(Register* r) { }
};
typedef SkTMultiMap<Register, GrScratchKey, FreePoolTraits> FreePoolMultiMap;
typedef SkTMultiMap<Register, skgpu::ScratchKey, FreePoolTraits> FreePoolMultiMap;
typedef SkTHashMap<uint32_t, Interval*, GrCheapHash> IntvlHash;
struct UniqueKeyHash {
uint32_t operator()(const GrUniqueKey& key) const { return key.hash(); }
uint32_t operator()(const skgpu::UniqueKey& key) const { return key.hash(); }
};
typedef SkTHashMap<GrUniqueKey, Register*, UniqueKeyHash> UniqueKeyRegisterHash;
typedef SkTHashMap<skgpu::UniqueKey, Register*, UniqueKeyHash> UniqueKeyRegisterHash;
// Each proxy with some exceptions is assigned a register. After all assignments are made,
// another pass is performed to instantiate and assign actual surfaces to the proxies. Right
@ -157,10 +157,10 @@ private:
class Register {
public:
// It's OK to pass an invalid scratch key iff the proxy has a unique key.
Register(GrSurfaceProxy* originatingProxy, GrScratchKey, GrResourceProvider*);
Register(GrSurfaceProxy* originatingProxy, skgpu::ScratchKey, GrResourceProvider*);
const GrScratchKey& scratchKey() const { return fScratchKey; }
const GrUniqueKey& uniqueKey() const { return fOriginatingProxy->getUniqueKey(); }
const skgpu::ScratchKey& scratchKey() const { return fScratchKey; }
const skgpu::UniqueKey& uniqueKey() const { return fOriginatingProxy->getUniqueKey(); }
bool accountedForInBudget() const { return fAccountedForInBudget; }
void setAccountedForInBudget() { fAccountedForInBudget = true; }
@ -178,10 +178,10 @@ private:
SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; })
private:
GrSurfaceProxy* fOriginatingProxy;
GrScratchKey fScratchKey; // free pool wants a reference to this.
sk_sp<GrSurface> fExistingSurface; // queried from resource cache. may be null.
bool fAccountedForInBudget = false;
GrSurfaceProxy* fOriginatingProxy;
skgpu::ScratchKey fScratchKey; // free pool wants a reference to this.
sk_sp<GrSurface> fExistingSurface; // queried from resource cache. may be null.
bool fAccountedForInBudget = false;
#ifdef SK_DEBUG
uint32_t fUniqueID;

View File

@ -26,7 +26,7 @@
#include "src/gpu/GrTracing.h"
#include "src/gpu/SkGr.h"
DECLARE_SKMESSAGEBUS_MESSAGE(GrUniqueKeyInvalidatedMessage, uint32_t, true);
DECLARE_SKMESSAGEBUS_MESSAGE(skgpu::UniqueKeyInvalidatedMessage, uint32_t, true);
DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage, GrDirectContext::DirectContextID, true);
@ -34,34 +34,6 @@ DECLARE_SKMESSAGEBUS_MESSAGE(GrTextureFreedMessage, GrDirectContext::DirectConte
//////////////////////////////////////////////////////////////////////////////
GrScratchKey::ResourceType GrScratchKey::GenerateResourceType() {
static std::atomic<int32_t> nextType{INHERITED::kInvalidDomain + 1};
int32_t type = nextType.fetch_add(1, std::memory_order_relaxed);
if (type > SkTo<int32_t>(UINT16_MAX)) {
SK_ABORT("Too many Resource Types");
}
return static_cast<ResourceType>(type);
}
GrUniqueKey::Domain GrUniqueKey::GenerateDomain() {
static std::atomic<int32_t> nextDomain{INHERITED::kInvalidDomain + 1};
int32_t domain = nextDomain.fetch_add(1, std::memory_order_relaxed);
if (domain > SkTo<int32_t>(UINT16_MAX)) {
SK_ABORT("Too many GrUniqueKey Domains");
}
return static_cast<Domain>(domain);
}
uint32_t GrResourceKeyHash(const uint32_t* data, size_t size) {
return SkOpts::hash(data, size);
}
//////////////////////////////////////////////////////////////////////////////
class GrResourceCache::AutoValidate : ::SkNoncopyable {
public:
AutoValidate(GrResourceCache* cache) : fCache(cache) { cache->validate(); }
@ -290,7 +262,7 @@ public:
}
};
GrGpuResource* GrResourceCache::findAndRefScratchResource(const GrScratchKey& scratchKey) {
GrGpuResource* GrResourceCache::findAndRefScratchResource(const skgpu::ScratchKey& scratchKey) {
SkASSERT(scratchKey.isValid());
GrGpuResource* resource = fScratchMap.find(scratchKey, AvailableForScratchUse());
@ -330,7 +302,7 @@ void GrResourceCache::removeUniqueKey(GrGpuResource* resource) {
this->validate();
}
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
void GrResourceCache::changeUniqueKey(GrGpuResource* resource, const skgpu::UniqueKey& newKey) {
ASSERT_SINGLE_OWNER
SkASSERT(resource);
SkASSERT(this->isInCache(resource));
@ -522,7 +494,7 @@ void GrResourceCache::didChangeBudgetStatus(GrGpuResource* resource) {
}
void GrResourceCache::purgeAsNeeded() {
SkTArray<GrUniqueKeyInvalidatedMessage> invalidKeyMsgs;
SkTArray<skgpu::UniqueKeyInvalidatedMessage> invalidKeyMsgs;
fInvalidUniqueKeyInbox.poll(&invalidKeyMsgs);
if (invalidKeyMsgs.count()) {
SkASSERT(fProxyProvider);
@ -919,8 +891,8 @@ void GrResourceCache::validate() const {
++fLocked;
}
const GrScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
const GrUniqueKey& uniqueKey = resource->getUniqueKey();
const skgpu::ScratchKey& scratchKey = resource->resourcePriv().getScratchKey();
const skgpu::UniqueKey& uniqueKey = resource->getUniqueKey();
if (resource->cacheAccess().isUsableAsScratch()) {
SkASSERT(!uniqueKey.isValid());

View File

@ -10,7 +10,6 @@
#include "include/core/SkRefCnt.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/GrResourceKey.h"
#include "include/private/SkTArray.h"
#include "include/private/SkTHash.h"
#include "src/core/SkMessageBus.h"
@ -20,6 +19,7 @@
#include "src/gpu/GrGpuResource.h"
#include "src/gpu/GrGpuResourceCacheAccess.h"
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/ResourceKey.h"
class GrCaps;
class GrProxyProvider;
@ -123,11 +123,11 @@ public:
/**
* Find a resource that matches a scratch key.
*/
GrGpuResource* findAndRefScratchResource(const GrScratchKey& scratchKey);
GrGpuResource* findAndRefScratchResource(const skgpu::ScratchKey& scratchKey);
#ifdef SK_DEBUG
// This is not particularly fast and only used for validation, so debug only.
int countScratchEntriesForKey(const GrScratchKey& scratchKey) const {
int countScratchEntriesForKey(const skgpu::ScratchKey& scratchKey) const {
return fScratchMap.countForKey(scratchKey);
}
#endif
@ -135,7 +135,7 @@ public:
/**
* Find a resource that matches a unique key.
*/
GrGpuResource* findAndRefUniqueResource(const GrUniqueKey& key) {
GrGpuResource* findAndRefUniqueResource(const skgpu::UniqueKey& key) {
GrGpuResource* resource = fUniqueHash.find(key);
if (resource) {
this->refAndMakeResourceMRU(resource);
@ -146,7 +146,7 @@ public:
/**
* Query whether a unique key exists in the cache.
*/
bool hasUniqueKey(const GrUniqueKey& key) const {
bool hasUniqueKey(const skgpu::UniqueKey& key) const {
return SkToBool(fUniqueHash.find(key));
}
@ -260,7 +260,7 @@ private:
void insertResource(GrGpuResource*);
void removeResource(GrGpuResource*);
void notifyARefCntReachedZero(GrGpuResource*, GrGpuResource::LastRemovedRef);
void changeUniqueKey(GrGpuResource*, const GrUniqueKey&);
void changeUniqueKey(GrGpuResource*, const skgpu::UniqueKey&);
void removeUniqueKey(GrGpuResource*);
void willRemoveScratchKey(const GrGpuResource*);
void didChangeBudgetStatus(GrGpuResource*);
@ -291,21 +291,21 @@ private:
class AvailableForScratchUse;
struct ScratchMapTraits {
static const GrScratchKey& GetKey(const GrGpuResource& r) {
static const skgpu::ScratchKey& GetKey(const GrGpuResource& r) {
return r.resourcePriv().getScratchKey();
}
static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
static uint32_t Hash(const skgpu::ScratchKey& key) { return key.hash(); }
static void OnFree(GrGpuResource*) { }
};
typedef SkTMultiMap<GrGpuResource, GrScratchKey, ScratchMapTraits> ScratchMap;
typedef SkTMultiMap<GrGpuResource, skgpu::ScratchKey, ScratchMapTraits> ScratchMap;
struct UniqueHashTraits {
static const GrUniqueKey& GetKey(const GrGpuResource& r) { return r.getUniqueKey(); }
static const skgpu::UniqueKey& GetKey(const GrGpuResource& r) { return r.getUniqueKey(); }
static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }
static uint32_t Hash(const skgpu::UniqueKey& key) { return key.hash(); }
};
typedef SkTDynamicHash<GrGpuResource, GrUniqueKey, UniqueHashTraits> UniqueHash;
typedef SkTDynamicHash<GrGpuResource, skgpu::UniqueKey, UniqueHashTraits> UniqueHash;
class TextureAwaitingUnref {
public:
@ -337,7 +337,7 @@ private:
using TextureFreedMessageBus = SkMessageBus<GrTextureFreedMessage,
GrDirectContext::DirectContextID>;
typedef SkMessageBus<GrUniqueKeyInvalidatedMessage, uint32_t>::Inbox InvalidUniqueKeyInbox;
typedef SkMessageBus<skgpu::UniqueKeyInvalidatedMessage, uint32_t>::Inbox InvalidUniqueKeyInbox;
typedef SkTDPQueue<GrGpuResource*, CompareTimestamp, AccessResourceIndex> PurgeableQueue;
typedef SkTDArray<GrGpuResource*> ResourceArray;
@ -433,7 +433,7 @@ private:
/**
* Called by GrGpuResources to change their unique keys.
*/
void changeUniqueKey(GrGpuResource* resource, const GrUniqueKey& newKey) {
void changeUniqueKey(GrGpuResource* resource, const skgpu::UniqueKey& newKey) {
fCache->changeUniqueKey(resource, newKey);
}

View File

@ -8,7 +8,6 @@
#include "src/gpu/GrResourceProvider.h"
#include "include/gpu/GrBackendSemaphore.h"
#include "include/private/GrResourceKey.h"
#include "include/private/GrSingleOwner.h"
#include "src/core/SkConvertPixels.h"
#include "src/core/SkMathPriv.h"
@ -25,6 +24,7 @@
#include "src/gpu/GrResourceCache.h"
#include "src/gpu/GrSemaphore.h"
#include "src/gpu/GrTexture.h"
#include "src/gpu/ResourceKey.h"
#include "src/gpu/SkGr.h"
const int GrResourceProvider::kMinScratchTextureSize = 16;
@ -304,7 +304,7 @@ sk_sp<GrTexture> GrResourceProvider::createApproxTexture(SkISize dimensions,
isProtected);
}
sk_sp<GrTexture> GrResourceProvider::findAndRefScratchTexture(const GrScratchKey& key) {
sk_sp<GrTexture> GrResourceProvider::findAndRefScratchTexture(const skgpu::ScratchKey& key) {
ASSERT_SINGLE_OWNER
SkASSERT(!this->isAbandoned());
SkASSERT(key.isValid());
@ -333,7 +333,7 @@ sk_sp<GrTexture> GrResourceProvider::findAndRefScratchTexture(SkISize dimensions
// We could make initial clears work with scratch textures but it is a rare case so we just opt
// to fall back to making a new texture.
if (fGpu->caps()->reuseScratchTextures() || renderable == GrRenderable::kYes) {
GrScratchKey key;
skgpu::ScratchKey key;
GrTexture::ComputeScratchKey(*this->caps(), format, dimensions, renderable,
renderTargetSampleCnt, mipmapped, isProtected, &key);
return this->findAndRefScratchTexture(key);
@ -390,7 +390,7 @@ sk_sp<GrRenderTarget> GrResourceProvider::wrapVulkanSecondaryCBAsRenderTarget(
}
void GrResourceProvider::assignUniqueKeyToResource(const GrUniqueKey& key,
void GrResourceProvider::assignUniqueKeyToResource(const skgpu::UniqueKey& key,
GrGpuResource* resource) {
ASSERT_SINGLE_OWNER
if (this->isAbandoned() || !resource) {
@ -399,7 +399,7 @@ void GrResourceProvider::assignUniqueKeyToResource(const GrUniqueKey& key,
resource->resourcePriv().setUniqueKey(key);
}
sk_sp<GrGpuResource> GrResourceProvider::findResourceByUniqueKey(const GrUniqueKey& key) {
sk_sp<GrGpuResource> GrResourceProvider::findResourceByUniqueKey(const skgpu::UniqueKey& key) {
ASSERT_SINGLE_OWNER
return this->isAbandoned() ? nullptr
: sk_sp<GrGpuResource>(fCache->findAndRefUniqueResource(key));
@ -408,7 +408,7 @@ sk_sp<GrGpuResource> GrResourceProvider::findResourceByUniqueKey(const GrUniqueK
sk_sp<const GrGpuBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferType intendedType,
size_t size,
const void* staticData,
const GrUniqueKey& key) {
const skgpu::UniqueKey& key) {
if (auto buffer = this->findByUniqueKey<GrGpuBuffer>(key)) {
return std::move(buffer);
}
@ -425,7 +425,7 @@ sk_sp<const GrGpuBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferT
sk_sp<const GrGpuBuffer> GrResourceProvider::findOrMakeStaticBuffer(
GrGpuBufferType intendedType,
size_t size,
const GrUniqueKey& uniqueKey,
const skgpu::UniqueKey& uniqueKey,
InitializeBufferFn initializeBufferFn) {
if (auto buffer = this->findByUniqueKey<GrGpuBuffer>(uniqueKey)) {
return std::move(buffer);
@ -456,11 +456,12 @@ sk_sp<const GrGpuBuffer> GrResourceProvider::findOrMakeStaticBuffer(
return nullptr;
}
sk_sp<const GrGpuBuffer> GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern,
int patternSize,
int reps,
int vertCount,
const GrUniqueKey* key) {
sk_sp<const GrGpuBuffer> GrResourceProvider::createPatternedIndexBuffer(
const uint16_t* pattern,
int patternSize,
int reps,
int vertCount,
const skgpu::UniqueKey* key) {
size_t bufferSize = patternSize * reps * sizeof(uint16_t);
sk_sp<GrGpuBuffer> buffer(
@ -565,7 +566,7 @@ sk_sp<GrGpuBuffer> GrResourceProvider::createBuffer(size_t size, GrGpuBufferType
size_t mid = floorPow2 + (floorPow2 >> 1);
allocSize = (allocSize <= mid) ? mid : ceilPow2;
GrScratchKey key;
skgpu::ScratchKey key;
GrGpuBuffer::ComputeScratchKeyForDynamicBuffer(allocSize, intendedType, &key);
auto buffer =
sk_sp<GrGpuBuffer>(static_cast<GrGpuBuffer*>(this->cache()->findAndRefScratchResource(
@ -602,7 +603,7 @@ bool GrResourceProvider::attachStencilAttachment(GrRenderTarget* rt, bool useMSA
}
if (!rt->wasDestroyed() && rt->canAttemptStencilAttachment(useMSAASurface)) {
GrUniqueKey sbKey;
skgpu::UniqueKey sbKey;
#if 0
if (this->caps()->oversizedStencilSupport()) {
@ -660,7 +661,7 @@ sk_sp<GrAttachment> GrResourceProvider::getDiscardableMSAAAttachment(SkISize dim
return nullptr;
}
GrUniqueKey key;
skgpu::UniqueKey key;
GrAttachment::ComputeSharedAttachmentUniqueKey(*this->caps(),
format,
dimensions,
@ -731,7 +732,7 @@ sk_sp<GrAttachment> GrResourceProvider::refScratchMSAAAttachment(SkISize dimensi
GrMipmapped::kNo,
GrTextureType::kNone));
GrScratchKey key;
skgpu::ScratchKey key;
GrAttachment::ComputeScratchKey(*this->caps(), format, dimensions,
GrAttachment::UsageFlags::kColorAttachment, sampleCnt,
GrMipmapped::kNo, isProtected, memoryless, &key);

View File

@ -48,7 +48,7 @@ public:
*/
template <typename T = GrGpuResource>
typename std::enable_if<std::is_base_of<GrGpuResource, T>::value, sk_sp<T>>::type
findByUniqueKey(const GrUniqueKey& key) {
findByUniqueKey(const skgpu::UniqueKey& key) {
return sk_sp<T>(static_cast<T*>(this->findResourceByUniqueKey(key).release()));
}
@ -114,7 +114,7 @@ public:
* Search the cache for a scratch texture matching the provided arguments. Failing that
* it returns null. If non-null, the resulting texture is always budgeted.
*/
sk_sp<GrTexture> findAndRefScratchTexture(const GrScratchKey&);
sk_sp<GrTexture> findAndRefScratchTexture(const skgpu::ScratchKey&);
sk_sp<GrTexture> findAndRefScratchTexture(SkISize dimensions,
const GrBackendFormat&,
GrTextureType textureType,
@ -195,8 +195,10 @@ public:
* @return The buffer if successful, otherwise nullptr.
*/
using InitializeBufferFn = void(*)(skgpu::VertexWriter, size_t bufferSize);
sk_sp<const GrGpuBuffer> findOrMakeStaticBuffer(GrGpuBufferType intendedType, size_t size,
const GrUniqueKey& key, InitializeBufferFn);
sk_sp<const GrGpuBuffer> findOrMakeStaticBuffer(GrGpuBufferType intendedType,
size_t size,
const skgpu::UniqueKey& key,
InitializeBufferFn);
/**
* Either finds and refs, or creates a static buffer with the given parameters and contents.
@ -208,8 +210,10 @@ public:
*
* @return The buffer if successful, otherwise nullptr.
*/
sk_sp<const GrGpuBuffer> findOrMakeStaticBuffer(GrGpuBufferType intendedType, size_t size,
const void* staticData, const GrUniqueKey& key);
sk_sp<const GrGpuBuffer> findOrMakeStaticBuffer(GrGpuBufferType intendedType,
size_t size,
const void* staticData,
const skgpu::UniqueKey& key);
/**
* Either finds and refs, or creates an index buffer with a repeating pattern for drawing
@ -228,7 +232,7 @@ public:
int patternSize,
int reps,
int vertCount,
const GrUniqueKey& key) {
const skgpu::UniqueKey& key) {
if (auto buffer = this->findByUniqueKey<const GrGpuBuffer>(key)) {
return buffer;
}
@ -315,7 +319,7 @@ public:
* Assigns a unique key to a resource. If the key is associated with another resource that
* association is removed and replaced by this resource.
*/
void assignUniqueKeyToResource(const GrUniqueKey&, GrGpuResource*);
void assignUniqueKeyToResource(const skgpu::UniqueKey&, GrGpuResource*);
std::unique_ptr<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned = true);
@ -338,7 +342,7 @@ public:
inline const GrResourceProviderPriv priv() const; // NOLINT(readability-const-return-type)
private:
sk_sp<GrGpuResource> findResourceByUniqueKey(const GrUniqueKey&);
sk_sp<GrGpuResource> findResourceByUniqueKey(const skgpu::UniqueKey&);
/*
* Try to find an existing scratch texture that exactly matches 'desc'. If successful
@ -402,7 +406,7 @@ private:
int patternSize,
int reps,
int vertCount,
const GrUniqueKey* key);
const skgpu::UniqueKey* key);
sk_sp<const GrGpuBuffer> createNonAAQuadIndexBuffer();
sk_sp<const GrGpuBuffer> createAAQuadIndexBuffer();

View File

@ -174,7 +174,7 @@ void GrSurfaceProxy::assign(sk_sp<GrSurface> surface) {
bool GrSurfaceProxy::instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt,
GrRenderable renderable, GrMipmapped mipMapped,
const GrUniqueKey* uniqueKey) {
const skgpu::UniqueKey* uniqueKey) {
SkASSERT(!this->isLazy());
if (fTarget) {
if (uniqueKey && uniqueKey->isValid()) {
@ -205,7 +205,7 @@ void GrSurfaceProxy::deinstantiate() {
fTarget = nullptr;
}
void GrSurfaceProxy::computeScratchKey(const GrCaps& caps, GrScratchKey* key) const {
void GrSurfaceProxy::computeScratchKey(const GrCaps& caps, skgpu::ScratchKey* key) const {
SkASSERT(!this->isFullyLazy());
GrRenderable renderable = GrRenderable::kNo;
int sampleCount = 1;
@ -446,7 +446,7 @@ bool GrSurfaceProxyPriv::doLazyInstantiation(GrResourceProvider* resourceProvide
if (GrTextureProxy* texProxy = fProxy->asTextureProxy()) {
texProxy->setTargetKeySync(syncKey);
if (syncKey) {
const GrUniqueKey& key = texProxy->getUniqueKey();
const skgpu::UniqueKey& key = texProxy->getUniqueKey();
if (key.isValid()) {
if (!surface->asTexture()->getUniqueKey().isValid()) {
// If 'surface' is newly created, attach the unique key

View File

@ -231,9 +231,9 @@ public:
virtual const GrRenderTargetProxy* asRenderTargetProxy() const { return nullptr; }
/** @return The unique key for this proxy. May be invalid. */
virtual const GrUniqueKey& getUniqueKey() const {
virtual const skgpu::UniqueKey& getUniqueKey() const {
// Base class never has a valid unique key.
static const GrUniqueKey kInvalidKey;
static const skgpu::UniqueKey kInvalidKey;
return kInvalidKey;
}
@ -391,7 +391,7 @@ protected:
bool ignoredByResourceAllocator() const { return fIgnoredByResourceAllocator; }
void setIgnoredByResourceAllocator() { fIgnoredByResourceAllocator = true; }
void computeScratchKey(const GrCaps&, GrScratchKey*) const;
void computeScratchKey(const GrCaps&, skgpu::ScratchKey*) const;
virtual sk_sp<GrSurface> createSurface(GrResourceProvider*) const = 0;
void assign(sk_sp<GrSurface> surface);
@ -410,7 +410,7 @@ protected:
}
bool instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt, GrRenderable,
GrMipmapped, const GrUniqueKey*);
GrMipmapped, const skgpu::UniqueKey*);
// For deferred proxies this will be null until the proxy is instantiated.
// For wrapped proxies it will point to the wrapped resource.

View File

@ -17,7 +17,7 @@ class GrResourceProvider;
data members or virtual methods. */
class GrSurfaceProxyPriv {
public:
void computeScratchKey(const GrCaps& caps, GrScratchKey* key) const {
void computeScratchKey(const GrCaps& caps, skgpu::ScratchKey* key) const {
return fProxy->computeScratchKey(caps, key);
}

View File

@ -8,7 +8,6 @@
#include "include/core/SkMath.h"
#include "include/core/SkTypes.h"
#include "include/gpu/GrTypes.h"
#include "include/private/GrResourceKey.h"
#include "src/core/SkMipmap.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrGpu.h"
@ -86,7 +85,7 @@ bool GrTexture::StealBackendTexture(sk_sp<GrTexture> texture,
return true;
}
void GrTexture::computeScratchKey(GrScratchKey* key) const {
void GrTexture::computeScratchKey(skgpu::ScratchKey* key) const {
if (!this->getGpu()->caps()->isFormatCompressed(this->backendFormat())) {
int sampleCount = 1;
GrRenderable renderable = GrRenderable::kNo;
@ -107,8 +106,8 @@ void GrTexture::ComputeScratchKey(const GrCaps& caps,
int sampleCnt,
GrMipmapped mipMapped,
GrProtected isProtected,
GrScratchKey* key) {
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
skgpu::ScratchKey* key) {
static const skgpu::ScratchKey::ResourceType kType = skgpu::ScratchKey::GenerateResourceType();
SkASSERT(!dimensions.isEmpty());
SkASSERT(sampleCnt > 0);
SkASSERT(1 == sampleCnt || renderable == GrRenderable::kYes);
@ -120,7 +119,7 @@ void GrTexture::ComputeScratchKey(const GrCaps& caps,
uint64_t formatKey = caps.computeFormatKey(format);
GrScratchKey::Builder builder(key, kType, 5);
skgpu::ScratchKey::Builder builder(key, kType, 5);
builder[0] = dimensions.width();
builder[1] = dimensions.height();
builder[2] = formatKey & 0xFFFFFFFF;

View File

@ -61,14 +61,14 @@ public:
int sampleCnt,
GrMipmapped,
GrProtected,
GrScratchKey* key);
skgpu::ScratchKey* key);
protected:
GrTexture(GrGpu*, const SkISize&, GrProtected, GrTextureType, GrMipmapStatus);
virtual bool onStealBackendTexture(GrBackendTexture*, SkImage::BackendTextureReleaseProc*) = 0;
void computeScratchKey(GrScratchKey*) const override;
void computeScratchKey(skgpu::ScratchKey*) const override;
private:
size_t onGpuMemorySize() const override;

View File

@ -166,7 +166,7 @@ bool GrTextureProxy::ProxiesAreCompatibleAsDynamicState(const GrSurfaceProxy* fi
first->backendFormat() == second->backendFormat();
}
void GrTextureProxy::setUniqueKey(GrProxyProvider* proxyProvider, const GrUniqueKey& key) {
void GrTextureProxy::setUniqueKey(GrProxyProvider* proxyProvider, const skgpu::UniqueKey& key) {
SkASSERT(key.isValid());
SkASSERT(!fUniqueKey.isValid()); // proxies can only ever get one uniqueKey

View File

@ -69,7 +69,7 @@ public:
/**
* Return the texture proxy's unique key. It will be invalid if the proxy doesn't have one.
*/
const GrUniqueKey& getUniqueKey() const override {
const skgpu::UniqueKey& getUniqueKey() const override {
#ifdef SK_DEBUG
if (this->isInstantiated() && fUniqueKey.isValid() && fSyncTargetKey &&
fCreatingProvider == GrDDLProvider::kNo) {
@ -185,7 +185,7 @@ private:
// uniquely-keyed textureProxy is also always set on the backing GrTexture.
GrDDLProvider fCreatingProvider = GrDDLProvider::kNo;
GrUniqueKey fUniqueKey;
skgpu::UniqueKey fUniqueKey;
GrProxyProvider* fProxyProvider; // only set when fUniqueKey is valid
LazySurfaceDesc callbackDesc() const override;
@ -198,7 +198,7 @@ private:
size_t onUninstantiatedGpuMemorySize() const override;
// Methods made available via GrTextureProxy::CacheAccess
void setUniqueKey(GrProxyProvider*, const GrUniqueKey&);
void setUniqueKey(GrProxyProvider*, const skgpu::UniqueKey&);
void clearUniqueKey();
SkDEBUGCODE(void onValidateSurface(const GrSurface*) override;)

View File

@ -15,7 +15,7 @@
*/
class GrTextureProxy::CacheAccess {
private:
void setUniqueKey(GrProxyProvider* proxyProvider, const GrUniqueKey& key) {
void setUniqueKey(GrProxyProvider* proxyProvider, const skgpu::UniqueKey& key) {
fTextureProxy->setUniqueKey(proxyProvider, key);
}

View File

@ -120,7 +120,7 @@ bool GrTextureRenderTargetProxy::instantiate(GrResourceProvider* resourceProvide
return false;
}
const GrUniqueKey& key = this->getUniqueKey();
const skgpu::UniqueKey& key = this->getUniqueKey();
if (!this->instantiateImpl(resourceProvider, this->numSamples(), GrRenderable::kYes,
this->mipmapped(), key.isValid() ? &key : nullptr)) {

View File

@ -108,7 +108,7 @@ void GrThreadSafeCache::makeExistingEntryMRU(Entry* entry) {
}
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalFind(
const GrUniqueKey& key) {
const skgpu::UniqueKey& key) {
Entry* tmp = fUniquelyKeyedEntryMap.find(key);
if (tmp) {
this->makeExistingEntryMRU(tmp);
@ -119,7 +119,7 @@ std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalFind(
}
#ifdef SK_DEBUG
bool GrThreadSafeCache::has(const GrUniqueKey& key) {
bool GrThreadSafeCache::has(const skgpu::UniqueKey& key) {
SkAutoSpinlock lock{fSpinLock};
Entry* tmp = fUniquelyKeyedEntryMap.find(key);
@ -127,7 +127,7 @@ bool GrThreadSafeCache::has(const GrUniqueKey& key) {
}
#endif
GrSurfaceProxyView GrThreadSafeCache::find(const GrUniqueKey& key) {
GrSurfaceProxyView GrThreadSafeCache::find(const skgpu::UniqueKey& key) {
SkAutoSpinlock lock{fSpinLock};
GrSurfaceProxyView view;
@ -136,13 +136,13 @@ GrSurfaceProxyView GrThreadSafeCache::find(const GrUniqueKey& key) {
}
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findWithData(
const GrUniqueKey& key) {
const skgpu::UniqueKey& key) {
SkAutoSpinlock lock{fSpinLock};
return this->internalFind(key);
}
GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const GrUniqueKey& key,
GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
const GrSurfaceProxyView& view) {
Entry* entry;
@ -166,7 +166,7 @@ GrThreadSafeCache::Entry* GrThreadSafeCache::makeNewEntryMRU(Entry* entry) {
return entry;
}
GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const GrUniqueKey& key,
GrThreadSafeCache::Entry* GrThreadSafeCache::getEntry(const skgpu::UniqueKey& key,
sk_sp<VertexData> vertData) {
Entry* entry;
@ -193,7 +193,7 @@ void GrThreadSafeCache::recycleEntry(Entry* dead) {
}
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalAdd(
const GrUniqueKey& key,
const skgpu::UniqueKey& key,
const GrSurfaceProxyView& view) {
Entry* tmp = fUniquelyKeyedEntryMap.find(key);
if (!tmp) {
@ -205,7 +205,8 @@ std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::internalAdd(
return { tmp->view(), tmp->refCustomData() };
}
GrSurfaceProxyView GrThreadSafeCache::add(const GrUniqueKey& key, const GrSurfaceProxyView& view) {
GrSurfaceProxyView GrThreadSafeCache::add(const skgpu::UniqueKey& key,
const GrSurfaceProxyView& view) {
SkAutoSpinlock lock{fSpinLock};
GrSurfaceProxyView newView;
@ -214,14 +215,14 @@ GrSurfaceProxyView GrThreadSafeCache::add(const GrUniqueKey& key, const GrSurfac
}
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::addWithData(
const GrUniqueKey& key,
const skgpu::UniqueKey& key,
const GrSurfaceProxyView& view) {
SkAutoSpinlock lock{fSpinLock};
return this->internalAdd(key, view);
}
GrSurfaceProxyView GrThreadSafeCache::findOrAdd(const GrUniqueKey& key,
GrSurfaceProxyView GrThreadSafeCache::findOrAdd(const skgpu::UniqueKey& key,
const GrSurfaceProxyView& v) {
SkAutoSpinlock lock{fSpinLock};
@ -236,7 +237,7 @@ GrSurfaceProxyView GrThreadSafeCache::findOrAdd(const GrUniqueKey& key,
}
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> GrThreadSafeCache::findOrAddWithData(
const GrUniqueKey& key,
const skgpu::UniqueKey& key,
const GrSurfaceProxyView& v) {
SkAutoSpinlock lock{fSpinLock};
@ -260,8 +261,8 @@ sk_sp<GrThreadSafeCache::VertexData> GrThreadSafeCache::MakeVertexData(sk_sp<GrG
return sk_sp<VertexData>(new VertexData(std::move(buffer), vertexCount, vertexSize));
}
std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::internalFindVerts(
const GrUniqueKey& key) {
std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
GrThreadSafeCache::internalFindVerts(const skgpu::UniqueKey& key) {
Entry* tmp = fUniquelyKeyedEntryMap.find(key);
if (tmp) {
this->makeExistingEntryMRU(tmp);
@ -271,15 +272,15 @@ std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCach
return {};
}
std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::findVertsWithData(
const GrUniqueKey& key) {
std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>>
GrThreadSafeCache::findVertsWithData(const skgpu::UniqueKey& key) {
SkAutoSpinlock lock{fSpinLock};
return this->internalFindVerts(key);
}
std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::internalAddVerts(
const GrUniqueKey& key,
const skgpu::UniqueKey& key,
sk_sp<VertexData> vertData,
IsNewerBetter isNewerBetter) {
Entry* tmp = fUniquelyKeyedEntryMap.find(key);
@ -297,7 +298,7 @@ std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCach
}
std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCache::addVertsWithData(
const GrUniqueKey& key,
const skgpu::UniqueKey& key,
sk_sp<VertexData> vertData,
IsNewerBetter isNewerBetter) {
SkAutoSpinlock lock{fSpinLock};
@ -305,7 +306,7 @@ std::tuple<sk_sp<GrThreadSafeCache::VertexData>, sk_sp<SkData>> GrThreadSafeCach
return this->internalAddVerts(key, std::move(vertData), isNewerBetter);
}
void GrThreadSafeCache::remove(const GrUniqueKey& key) {
void GrThreadSafeCache::remove(const skgpu::UniqueKey& key) {
SkAutoSpinlock lock{fSpinLock};
Entry* tmp = fUniquelyKeyedEntryMap.find(key);

View File

@ -82,20 +82,21 @@ public:
// Drop uniquely held refs that were last accessed before 'purgeTime'
void dropUniqueRefsOlderThan(GrStdSteadyClock::time_point purgeTime) SK_EXCLUDES(fSpinLock);
SkDEBUGCODE(bool has(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);)
SkDEBUGCODE(bool has(const skgpu::UniqueKey&) SK_EXCLUDES(fSpinLock);)
GrSurfaceProxyView find(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);
GrSurfaceProxyView find(const skgpu::UniqueKey&) SK_EXCLUDES(fSpinLock);
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findWithData(
const GrUniqueKey&) SK_EXCLUDES(fSpinLock);
const skgpu::UniqueKey&) SK_EXCLUDES(fSpinLock);
GrSurfaceProxyView add(const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
GrSurfaceProxyView add(
const skgpu::UniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> addWithData(
const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
const skgpu::UniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
GrSurfaceProxyView findOrAdd(const GrUniqueKey&,
GrSurfaceProxyView findOrAdd(const skgpu::UniqueKey&,
const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> findOrAddWithData(
const GrUniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
const skgpu::UniqueKey&, const GrSurfaceProxyView&) SK_EXCLUDES(fSpinLock);
// To hold vertex data in the cache and have it transparently transition from cpu-side to
// gpu-side while being shared between all the threads we need a ref counted object that
@ -162,16 +163,16 @@ public:
size_t vertexSize);
std::tuple<sk_sp<VertexData>, sk_sp<SkData>> findVertsWithData(
const GrUniqueKey&) SK_EXCLUDES(fSpinLock);
const skgpu::UniqueKey&) SK_EXCLUDES(fSpinLock);
typedef bool (*IsNewerBetter)(SkData* incumbent, SkData* challenger);
std::tuple<sk_sp<VertexData>, sk_sp<SkData>> addVertsWithData(
const GrUniqueKey&,
const skgpu::UniqueKey&,
sk_sp<VertexData>,
IsNewerBetter) SK_EXCLUDES(fSpinLock);
void remove(const GrUniqueKey&) SK_EXCLUDES(fSpinLock);
void remove(const skgpu::UniqueKey&) SK_EXCLUDES(fSpinLock);
// To allow gpu-created resources to have priority, we pre-emptively place a lazy proxy
// in the thread-safe cache (with findOrAdd). The Trampoline object allows that lazy proxy to
@ -188,13 +189,13 @@ public:
SkBackingFit);
private:
struct Entry {
Entry(const GrUniqueKey& key, const GrSurfaceProxyView& view)
Entry(const skgpu::UniqueKey& key, const GrSurfaceProxyView& view)
: fKey(key)
, fView(view)
, fTag(Entry::kView) {
}
Entry(const GrUniqueKey& key, sk_sp<VertexData> vertData)
Entry(const skgpu::UniqueKey& key, sk_sp<VertexData> vertData)
: fKey(key)
, fVertData(std::move(vertData))
, fTag(Entry::kVertData) {
@ -216,7 +217,7 @@ private:
return false;
}
const GrUniqueKey& key() const {
const skgpu::UniqueKey& key() const {
SkASSERT(fTag != kEmpty);
return fKey;
}
@ -241,7 +242,7 @@ private:
return fVertData;
}
void set(const GrUniqueKey& key, const GrSurfaceProxyView& view) {
void set(const skgpu::UniqueKey& key, const GrSurfaceProxyView& view) {
SkASSERT(fTag == kEmpty);
fKey = key;
fView = view;
@ -258,7 +259,7 @@ private:
fTag = kEmpty;
}
void set(const GrUniqueKey& key, sk_sp<VertexData> vertData) {
void set(const skgpu::UniqueKey& key, sk_sp<VertexData> vertData) {
SkASSERT(fTag == kEmpty || fTag == kVertData);
fKey = key;
fVertData = vertData;
@ -270,15 +271,15 @@ private:
SK_DECLARE_INTERNAL_LLIST_INTERFACE(Entry);
// for SkTDynamicHash
static const GrUniqueKey& GetKey(const Entry& e) {
static const skgpu::UniqueKey& GetKey(const Entry& e) {
SkASSERT(e.fTag != kEmpty);
return e.fKey;
}
static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }
static uint32_t Hash(const skgpu::UniqueKey& key) { return key.hash(); }
private:
// Note: the unique key is stored here bc it is never attached to a proxy or a GrTexture
GrUniqueKey fKey;
skgpu::UniqueKey fKey;
union {
GrSurfaceProxyView fView;
sk_sp<VertexData> fVertData;
@ -294,27 +295,24 @@ private:
void makeExistingEntryMRU(Entry*) SK_REQUIRES(fSpinLock);
Entry* makeNewEntryMRU(Entry*) SK_REQUIRES(fSpinLock);
Entry* getEntry(const GrUniqueKey&, const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
Entry* getEntry(const GrUniqueKey&, sk_sp<VertexData>) SK_REQUIRES(fSpinLock);
Entry* getEntry(const skgpu::UniqueKey&, const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
Entry* getEntry(const skgpu::UniqueKey&, sk_sp<VertexData>) SK_REQUIRES(fSpinLock);
void recycleEntry(Entry*) SK_REQUIRES(fSpinLock);
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalFind(
const GrUniqueKey&) SK_REQUIRES(fSpinLock);
const skgpu::UniqueKey&) SK_REQUIRES(fSpinLock);
std::tuple<GrSurfaceProxyView, sk_sp<SkData>> internalAdd(
const GrUniqueKey&,
const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
const skgpu::UniqueKey&, const GrSurfaceProxyView&) SK_REQUIRES(fSpinLock);
std::tuple<sk_sp<VertexData>, sk_sp<SkData>> internalFindVerts(
const GrUniqueKey&) SK_REQUIRES(fSpinLock);
const skgpu::UniqueKey&) SK_REQUIRES(fSpinLock);
std::tuple<sk_sp<VertexData>, sk_sp<SkData>> internalAddVerts(
const GrUniqueKey&,
sk_sp<VertexData>,
IsNewerBetter) SK_REQUIRES(fSpinLock);
const skgpu::UniqueKey&, sk_sp<VertexData>, IsNewerBetter) SK_REQUIRES(fSpinLock);
mutable SkSpinlock fSpinLock;
SkTDynamicHash<Entry, GrUniqueKey> fUniquelyKeyedEntryMap SK_GUARDED_BY(fSpinLock);
SkTDynamicHash<Entry, skgpu::UniqueKey> fUniquelyKeyedEntryMap SK_GUARDED_BY(fSpinLock);
// The head of this list is the MRU
SkTInternalLList<Entry> fUniquelyKeyedEntryList SK_GUARDED_BY(fSpinLock);

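For orientation only (this sketch is editorial, not part of the change), the call pattern for the thread-safe cache interface above with the renamed key type looks roughly like the following; rContext is an assumed GrRecordingContext* and makeView() is a hypothetical helper that builds the view on a cache miss:

skgpu::UniqueKey key;
{
    static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
    skgpu::UniqueKey::Builder builder(&key, kDomain, 1, "Example");
    builder[0] = 42;               // arbitrary key data for the sketch
    builder.finish();
}
auto threadSafeCache = rContext->priv().threadSafeCache();
GrSurfaceProxyView view = threadSafeCache->find(key);
if (!view) {
    // findOrAdd() resolves the race with other threads: if another thread inserted
    // an entry for this key first, its view is returned instead of ours.
    view = threadSafeCache->findOrAdd(key, makeView(rContext));
}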
src/gpu/ResourceKey.cpp (new file, +40 lines)
@ -0,0 +1,40 @@
/*
* Copyright 2021 Google LLC
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "src/core/SkOpts.h"
#include "src/gpu/ResourceKey.h"
namespace skgpu {
ScratchKey::ResourceType ScratchKey::GenerateResourceType() {
static std::atomic<int32_t> nextType{ResourceKey::kInvalidDomain + 1};
int32_t type = nextType.fetch_add(1, std::memory_order_relaxed);
if (type > SkTo<int32_t>(UINT16_MAX)) {
SK_ABORT("Too many Resource Types");
}
return static_cast<ResourceType>(type);
}
UniqueKey::Domain UniqueKey::GenerateDomain() {
static std::atomic<int32_t> nextDomain{ResourceKey::kInvalidDomain + 1};
int32_t domain = nextDomain.fetch_add(1, std::memory_order_relaxed);
if (domain > SkTo<int32_t>(UINT16_MAX)) {
SK_ABORT("Too many skgpu::UniqueKey Domains");
}
return static_cast<Domain>(domain);
}
uint32_t ResourceKeyHash(const uint32_t* data, size_t size) {
return SkOpts::hash(data, size);
}
} // namespace skgpu

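GenerateDomain() and GenerateResourceType() hand out ids from an atomic counter and abort once the 16-bit space is exhausted, so each call site generates its domain exactly once and caches it in a function-local static. A minimal sketch of that pattern (make_example_key and paramHash are illustrative names, not part of the change):

static skgpu::UniqueKey make_example_key(uint32_t paramHash) {
    // One Domain per call site, created on first use and reused for every key built here.
    static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
    skgpu::UniqueKey key;
    skgpu::UniqueKey::Builder builder(&key, kDomain, 1, "Example");
    builder[0] = paramHash;        // whatever uniquely identifies the cached resource
    builder.finish();
    return key;
}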
include/private/GrResourceKey.h → src/gpu/ResourceKey.h (renamed)
@ -5,25 +5,28 @@
* found in the LICENSE file.
*/
#ifndef GrResourceKey_DEFINED
#define GrResourceKey_DEFINED
#ifndef skgpu_ResourceKey_DEFINED
#define skgpu_ResourceKey_DEFINED
#include "include/core/SkData.h"
#include "include/core/SkString.h"
#include "include/gpu/GrTypes.h"
#include "include/private/SkOnce.h"
#include "include/private/SkTemplates.h"
#include "include/private/SkTo.h"
#include <new>
uint32_t GrResourceKeyHash(const uint32_t* data, size_t size);
class TestResource;
namespace skgpu {
uint32_t ResourceKeyHash(const uint32_t* data, size_t size);
/**
* Base class for all GrGpuResource cache keys. There are two types of cache keys. Refer to the
* Base class for all gpu Resource cache keys. There are two types of cache keys. Refer to the
* comments for each key type below.
*/
class GrResourceKey {
class ResourceKey {
public:
uint32_t hash() const {
this->validate();
@ -46,7 +49,7 @@ public:
return;
}
uint32_t* hash = &fKey->fKey[kHash_MetaDataIdx];
*hash = GrResourceKeyHash(hash + 1, fKey->internalSize() - sizeof(uint32_t));
*hash = ResourceKeyHash(hash + 1, fKey->internalSize() - sizeof(uint32_t));
fKey->validate();
fKey = nullptr;
}
@ -59,7 +62,7 @@ public:
}
protected:
Builder(GrResourceKey* key, uint32_t domain, int data32Count) : fKey(key) {
Builder(ResourceKey* key, uint32_t domain, int data32Count) : fKey(key) {
size_t count = SkToSizeT(data32Count);
SkASSERT(domain != kInvalidDomain);
key->fKey.reset(kMetaDataCnt + count);
@ -70,13 +73,13 @@ public:
}
private:
GrResourceKey* fKey;
ResourceKey* fKey;
};
protected:
static const uint32_t kInvalidDomain = 0;
GrResourceKey() { this->reset(); }
ResourceKey() { this->reset(); }
/** Reset to an invalid key. */
void reset() {
@ -85,14 +88,14 @@ protected:
fKey[kDomainAndSize_MetaDataIdx] = kInvalidDomain;
}
bool operator==(const GrResourceKey& that) const {
bool operator==(const ResourceKey& that) const {
// Both keys should be sized to at least contain the meta data. The metadata contains each
// key's length. So the second memcmp should only run if the keys have the same length.
return 0 == memcmp(fKey.get(), that.fKey.get(), kMetaDataCnt*sizeof(uint32_t)) &&
0 == memcmp(&fKey[kMetaDataCnt], &that.fKey[kMetaDataCnt], this->dataSize());
}
GrResourceKey& operator=(const GrResourceKey& that) {
ResourceKey& operator=(const ResourceKey& that) {
if (this != &that) {
if (!that.isValid()) {
this->reset();
@ -152,12 +155,12 @@ private:
void validate() const {
SkASSERT(this->isValid());
SkASSERT(fKey[kHash_MetaDataIdx] ==
GrResourceKeyHash(&fKey[kHash_MetaDataIdx] + 1,
this->internalSize() - sizeof(uint32_t)));
ResourceKeyHash(&fKey[kHash_MetaDataIdx] + 1,
this->internalSize() - sizeof(uint32_t)));
SkASSERT(SkIsAlign4(this->internalSize()));
}
friend class TestResource; // For unit test to access kMetaDataCnt.
friend class ::TestResource; // For unit test to access kMetaDataCnt.
// bmp textures require 5 uint32_t values.
SkAutoSTMalloc<kMetaDataCnt + 5, uint32_t> fKey;
@ -184,10 +187,7 @@ private:
* consume_blur(texture[0]);
* texture[0]->unref(); // texture 0 can now be recycled for the next request with scratchKey
*/
class GrScratchKey : public GrResourceKey {
private:
using INHERITED = GrResourceKey;
class ScratchKey : public ResourceKey {
public:
/** Uniquely identifies the type of resource that is cached as scratch. */
typedef uint32_t ResourceType;
@ -196,29 +196,29 @@ public:
static ResourceType GenerateResourceType();
/** Creates an invalid scratch key. It must be initialized using a Builder object before use. */
GrScratchKey() {}
ScratchKey() {}
GrScratchKey(const GrScratchKey& that) { *this = that; }
ScratchKey(const ScratchKey& that) { *this = that; }
/** reset() returns the key to the invalid state. */
using INHERITED::reset;
using ResourceKey::reset;
using INHERITED::isValid;
using ResourceKey::isValid;
ResourceType resourceType() const { return this->domain(); }
GrScratchKey& operator=(const GrScratchKey& that) {
this->INHERITED::operator=(that);
ScratchKey& operator=(const ScratchKey& that) {
this->ResourceKey::operator=(that);
return *this;
}
bool operator==(const GrScratchKey& that) const { return this->INHERITED::operator==(that); }
bool operator!=(const GrScratchKey& that) const { return !(*this == that); }
bool operator==(const ScratchKey& that) const { return this->ResourceKey::operator==(that); }
bool operator!=(const ScratchKey& that) const { return !(*this == that); }
class Builder : public INHERITED::Builder {
class Builder : public ResourceKey::Builder {
public:
Builder(GrScratchKey* key, ResourceType type, int data32Count)
: INHERITED::Builder(key, type, data32Count) {}
Builder(ScratchKey* key, ResourceType type, int data32Count)
: ResourceKey::Builder(key, type, data32Count) {}
};
};
@ -236,34 +236,31 @@ public:
* Unique keys preempt scratch keys. While a resource has a unique key it is inaccessible via its
* scratch key. It can become scratch again if the unique key is removed.
*/
class GrUniqueKey : public GrResourceKey {
private:
using INHERITED = GrResourceKey;
class UniqueKey : public ResourceKey {
public:
typedef uint32_t Domain;
/** Generate a Domain for unique keys. */
static Domain GenerateDomain();
/** Creates an invalid unique key. It must be initialized using a Builder object before use. */
GrUniqueKey() : fTag(nullptr) {}
UniqueKey() : fTag(nullptr) {}
GrUniqueKey(const GrUniqueKey& that) { *this = that; }
UniqueKey(const UniqueKey& that) { *this = that; }
/** reset() returns the key to the invalid state. */
using INHERITED::reset;
using ResourceKey::reset;
using INHERITED::isValid;
using ResourceKey::isValid;
GrUniqueKey& operator=(const GrUniqueKey& that) {
this->INHERITED::operator=(that);
UniqueKey& operator=(const UniqueKey& that) {
this->ResourceKey::operator=(that);
this->setCustomData(sk_ref_sp(that.getCustomData()));
fTag = that.fTag;
return *this;
}
bool operator==(const GrUniqueKey& that) const { return this->INHERITED::operator==(that); }
bool operator!=(const GrUniqueKey& that) const { return !(*this == that); }
bool operator==(const UniqueKey& that) const { return this->ResourceKey::operator==(that); }
bool operator!=(const UniqueKey& that) const { return !(*this == that); }
void setCustomData(sk_sp<SkData> data) { fData = std::move(data); }
SkData* getCustomData() const { return fData.get(); }
@ -274,21 +271,23 @@ public:
#ifdef SK_DEBUG
void dump(const char* label) const {
SkDebugf("%s tag: %s\n", label, fTag ? fTag : "None");
this->INHERITED::dump();
this->ResourceKey::dump();
}
#endif
class Builder : public INHERITED::Builder {
class Builder : public ResourceKey::Builder {
public:
Builder(GrUniqueKey* key, Domain type, int data32Count, const char* tag = nullptr)
: INHERITED::Builder(key, type, data32Count) {
Builder(UniqueKey* key, Domain type, int data32Count, const char* tag = nullptr)
: ResourceKey::Builder(key, type, data32Count) {
key->fTag = tag;
}
/** Used to build a key that wraps another key and adds additional data. */
Builder(GrUniqueKey* key, const GrUniqueKey& innerKey, Domain domain, int extraData32Cnt,
Builder(UniqueKey* key, const UniqueKey& innerKey, Domain domain, int extraData32Cnt,
const char* tag = nullptr)
: INHERITED::Builder(key, domain, Data32CntForInnerKey(innerKey) + extraData32Cnt) {
: ResourceKey::Builder(key,
domain,
Data32CntForInnerKey(innerKey) + extraData32Cnt) {
SkASSERT(&innerKey != key);
// add the inner key to the end of the key so that op[] can be indexed normally.
uint32_t* innerKeyData = &this->operator[](extraData32Cnt);
@ -299,7 +298,7 @@ public:
}
private:
static int Data32CntForInnerKey(const GrUniqueKey& innerKey) {
static int Data32CntForInnerKey(const UniqueKey& innerKey) {
// key data + domain
return SkToInt((innerKey.dataSize() >> 2) + 1);
}
@ -311,52 +310,56 @@ private:
};
/**
* It is common to need a frequently reused GrUniqueKey where the only requirement is that the key
* It is common to need a frequently reused UniqueKey where the only requirement is that the key
* is unique. These macros create such a key in a thread safe manner so the key can be truly global
* and only constructed once.
*/
/** Place outside of function/class definitions. */
#define GR_DECLARE_STATIC_UNIQUE_KEY(name) static SkOnce name##_once
#define SKGPU_DECLARE_STATIC_UNIQUE_KEY(name) static SkOnce name##_once
/** Place inside function where the key is used. */
#define GR_DEFINE_STATIC_UNIQUE_KEY(name) \
static SkAlignedSTStorage<1, GrUniqueKey> name##_storage; \
name##_once(gr_init_static_unique_key_once, &name##_storage); \
static const GrUniqueKey& name = *reinterpret_cast<GrUniqueKey*>(name##_storage.get())
#define SKGPU_DEFINE_STATIC_UNIQUE_KEY(name) \
static SkAlignedSTStorage<1, skgpu::UniqueKey> name##_storage; \
name##_once(skgpu::skgpu_init_static_unique_key_once, &name##_storage); \
static const skgpu::UniqueKey& name = \
*reinterpret_cast<skgpu::UniqueKey*>(name##_storage.get())
static inline void gr_init_static_unique_key_once(SkAlignedSTStorage<1, GrUniqueKey>* keyStorage) {
GrUniqueKey* key = new (keyStorage->get()) GrUniqueKey;
GrUniqueKey::Builder builder(key, GrUniqueKey::GenerateDomain(), 0);
static inline void skgpu_init_static_unique_key_once(SkAlignedSTStorage<1, UniqueKey>* keyStorage) {
UniqueKey* key = new (keyStorage->get()) UniqueKey;
UniqueKey::Builder builder(key, UniqueKey::GenerateDomain(), 0);
}
// The cache listens for these messages to purge junk resources proactively.
class GrUniqueKeyInvalidatedMessage {
class UniqueKeyInvalidatedMessage {
public:
GrUniqueKeyInvalidatedMessage() = default;
GrUniqueKeyInvalidatedMessage(const GrUniqueKey& key, uint32_t contextUniqueID,
bool inThreadSafeCache = false)
UniqueKeyInvalidatedMessage() = default;
UniqueKeyInvalidatedMessage(const UniqueKey& key,
uint32_t contextUniqueID,
bool inThreadSafeCache = false)
: fKey(key), fContextID(contextUniqueID), fInThreadSafeCache(inThreadSafeCache) {
SkASSERT(SK_InvalidUniqueID != contextUniqueID);
}
GrUniqueKeyInvalidatedMessage(const GrUniqueKeyInvalidatedMessage&) = default;
UniqueKeyInvalidatedMessage(const UniqueKeyInvalidatedMessage&) = default;
GrUniqueKeyInvalidatedMessage& operator=(const GrUniqueKeyInvalidatedMessage&) = default;
UniqueKeyInvalidatedMessage& operator=(const UniqueKeyInvalidatedMessage&) = default;
const GrUniqueKey& key() const { return fKey; }
const UniqueKey& key() const { return fKey; }
uint32_t contextID() const { return fContextID; }
bool inThreadSafeCache() const { return fInThreadSafeCache; }
private:
GrUniqueKey fKey;
UniqueKey fKey;
uint32_t fContextID = SK_InvalidUniqueID;
bool fInThreadSafeCache = false;
};
static inline bool SkShouldPostMessageToBus(const GrUniqueKeyInvalidatedMessage& msg,
static inline bool SkShouldPostMessageToBus(const UniqueKeyInvalidatedMessage& msg,
uint32_t msgBusUniqueID) {
return msg.contextID() == msgBusUniqueID;
}
#endif
} // namespace skgpu
#endif // skgpu_ResourceKey_DEFINED

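The static-key macros keep their old semantics under the new SKGPU_ prefix: declare at file scope, define inside the function that uses the key. A sketch of typical usage (the buffer contents and helper name are placeholders, mirroring the call sites updated below):

SKGPU_DECLARE_STATIC_UNIQUE_KEY(gExampleVertexBufferKey);   // file scope, once per key

static sk_sp<const GrGpuBuffer> get_example_vertex_buffer(GrResourceProvider* rp) {
    static constexpr SkPoint kExampleData[4] = {{0,0}, {0,1}, {1,0}, {1,1}};
    SKGPU_DEFINE_STATIC_UNIQUE_KEY(gExampleVertexBufferKey); // lazily builds the key once
    return rp->findOrMakeStaticBuffer(GrGpuBufferType::kVertex, sizeof(kExampleData),
                                      kExampleData, gExampleVertexBufferKey);
}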
@ -48,12 +48,12 @@
#include "src/image/SkImage_Base.h"
#include "src/shaders/SkShaderBase.h"
void GrMakeKeyFromImageID(GrUniqueKey* key, uint32_t imageID, const SkIRect& imageBounds) {
void GrMakeKeyFromImageID(skgpu::UniqueKey* key, uint32_t imageID, const SkIRect& imageBounds) {
SkASSERT(key);
SkASSERT(imageID);
SkASSERT(!imageBounds.isEmpty());
static const GrUniqueKey::Domain kImageIDDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey::Builder builder(key, kImageIDDomain, 5, "Image");
static const skgpu::UniqueKey::Domain kImageIDDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey::Builder builder(key, kImageIDDomain, 5, "Image");
builder[0] = imageID;
builder[1] = imageBounds.fLeft;
builder[2] = imageBounds.fTop;
@ -63,18 +63,19 @@ void GrMakeKeyFromImageID(GrUniqueKey* key, uint32_t imageID, const SkIRect& ima
////////////////////////////////////////////////////////////////////////////////
sk_sp<SkIDChangeListener> GrMakeUniqueKeyInvalidationListener(GrUniqueKey* key,
sk_sp<SkIDChangeListener> GrMakeUniqueKeyInvalidationListener(skgpu::UniqueKey* key,
uint32_t contextID) {
class Listener : public SkIDChangeListener {
public:
Listener(const GrUniqueKey& key, uint32_t contextUniqueID) : fMsg(key, contextUniqueID) {}
Listener(const skgpu::UniqueKey& key, uint32_t contextUniqueID)
: fMsg(key, contextUniqueID) {}
void changed() override {
SkMessageBus<GrUniqueKeyInvalidatedMessage, uint32_t>::Post(fMsg);
SkMessageBus<skgpu::UniqueKeyInvalidatedMessage, uint32_t>::Post(fMsg);
}
private:
GrUniqueKeyInvalidatedMessage fMsg;
skgpu::UniqueKeyInvalidatedMessage fMsg;
};
auto listener = sk_make_sp<Listener>(*key, contextID);
@ -176,7 +177,7 @@ GrMakeCachedBitmapProxyView(GrRecordingContext* rContext,
GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
const GrCaps* caps = rContext->priv().caps();
GrUniqueKey key;
skgpu::UniqueKey key;
SkIPoint origin = bitmap.pixelRefOrigin();
SkIRect subset = SkIRect::MakePtSize(origin, bitmap.dimensions());
GrMakeKeyFromImageID(&key, bitmap.pixelRef()->getGenerationID(), subset);

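Taken together, the two helpers above pair up roughly as in this sketch (editorial, not from the change itself; contextUniqueID is assumed to be the owning context's unique id):

skgpu::UniqueKey key;
SkIPoint origin = bitmap.pixelRefOrigin();
SkIRect subset = SkIRect::MakePtSize(origin, bitmap.dimensions());
GrMakeKeyFromImageID(&key, bitmap.pixelRef()->getGenerationID(), subset);

// Drop the cached entry if the bitmap's generation ID is ever invalidated.
auto listener = GrMakeUniqueKeyInvalidationListener(&key, contextUniqueID);
bitmap.pixelRef()->addGenIDChangeListener(std::move(listener));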
@ -30,7 +30,6 @@ class GrPaint;
class GrRecordingContext;
class GrResourceProvider;
class GrTextureProxy;
class GrUniqueKey;
class SkBitmap;
class SkData;
class SkMatrix;
@ -40,6 +39,10 @@ class SkPixelRef;
class SkPixmap;
struct SkIRect;
namespace skgpu {
class UniqueKey;
}
////////////////////////////////////////////////////////////////////////////////
// Color type conversions
@ -187,15 +190,16 @@ GrMakeUncachedBitmapProxyView(GrRecordingContext*,
* - SkImage
* - SkImageGenerator
*/
void GrMakeKeyFromImageID(GrUniqueKey* key, uint32_t imageID, const SkIRect& imageBounds);
void GrMakeKeyFromImageID(skgpu::UniqueKey* key, uint32_t imageID, const SkIRect& imageBounds);
/**
* Makes a SkIDChangeListener from a GrUniqueKey. The key will be invalidated in the resource
* Makes a SkIDChangeListener from a skgpu::UniqueKey. The key will be invalidated in the resource
* cache if the ID becomes invalid. This also modifies the key so that it will cause the listener
* to be deregistered if the key is destroyed (to prevent unbounded listener growth when resources
* are purged before listeners trigger).
*/
sk_sp<SkIDChangeListener> GrMakeUniqueKeyInvalidationListener(GrUniqueKey*, uint32_t contextID);
sk_sp<SkIDChangeListener> GrMakeUniqueKeyInvalidationListener(skgpu::UniqueKey*,
uint32_t contextID);
static inline bool GrValidCubicResampler(SkCubicResampler cubic) {
return cubic.B >= 0 && cubic.C >= 0;

@ -86,10 +86,10 @@ GrMatrixConvolutionEffect::KernelWrapper::Make(GrRecordingContext* rContext,
// TODO: Pick cache or dont-cache based on observed perf.
static constexpr bool kCacheKernelTexture = true;
GrUniqueKey key;
skgpu::UniqueKey key;
if (kCacheKernelTexture) {
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey::Builder builder(&key, kDomain, length, "Matrix Convolution Kernel");
static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey::Builder builder(&key, kDomain, length, "Matrix Convolution Kernel");
// Texture cache key is the exact content of the kernel.
static_assert(sizeof(float) == 4);
for (int i = 0; i < length; i++) {

@ -212,7 +212,9 @@ private:
}
// This avoids an inherits via dominance warning on MSVC.
void computeScratchKey(GrScratchKey* key) const override { GrTexture::computeScratchKey(key); }
void computeScratchKey(skgpu::ScratchKey* key) const override {
GrTexture::computeScratchKey(key);
}
};
#endif

@ -75,10 +75,10 @@ static const uint16_t kQuadIdxBufPattern[] = {
static const int kIdxsPerQuad = SK_ARRAY_COUNT(kQuadIdxBufPattern);
static const int kQuadNumVertices = 5;
static const int kQuadsNumInIdxBuffer = 256;
GR_DECLARE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
sk_sp<const GrBuffer> get_quads_index_buffer(GrResourceProvider* resourceProvider) {
GR_DEFINE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
return resourceProvider->findOrCreatePatternedIndexBuffer(
kQuadIdxBufPattern, kIdxsPerQuad, kQuadsNumInIdxBuffer, kQuadNumVertices,
gQuadsIndexBufferKey);
@ -109,10 +109,10 @@ static const int kIdxsPerLineSeg = SK_ARRAY_COUNT(kLineSegIdxBufPattern);
static const int kLineSegNumVertices = 6;
static const int kLineSegsNumInIdxBuffer = 256;
GR_DECLARE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
sk_sp<const GrBuffer> get_lines_index_buffer(GrResourceProvider* resourceProvider) {
GR_DEFINE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
return resourceProvider->findOrCreatePatternedIndexBuffer(
kLineSegIdxBufPattern, kIdxsPerLineSeg, kLineSegsNumInIdxBuffer, kLineSegNumVertices,
gLinesIndexBufferKey);

@ -181,7 +181,7 @@ void DrawAtlasPathOp::onPrePrepare(GrRecordingContext* rContext,
rContext->priv().recordProgramInfo(fProgram);
}
GR_DECLARE_STATIC_UNIQUE_KEY(gUnitQuadBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gUnitQuadBufferKey);
void DrawAtlasPathOp::onPrepare(GrOpFlushState* flushState) {
if (!fProgram) {
@ -207,7 +207,7 @@ void DrawAtlasPathOp::onPrepare(GrOpFlushState* flushState) {
if (!flushState->caps().shaderCaps()->vertexIDSupport()) {
constexpr static SkPoint kUnitQuad[4] = {{0,0}, {0,1}, {1,0}, {1,1}};
GR_DEFINE_STATIC_UNIQUE_KEY(gUnitQuadBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gUnitQuadBufferKey);
fVertexBufferIfNoIDSupport = flushState->resourceProvider()->findOrMakeStaticBuffer(
GrGpuBufferType::kVertex, sizeof(kUnitQuad), kUnitQuad, gUnitQuadBufferKey);

@ -470,7 +470,7 @@ static constexpr CoverageVertex kVertexData[] = {
{{{0,0,0,1}}, {{-1,+1}}, {{0,-kOctoOffset}}, {{-1,+1}}, 0, 0},
{{{0,0,0,1}}, {{-1,+1}}, {{+kOctoOffset,0}}, {{-1,+1}}, 0, 0}};
GR_DECLARE_STATIC_UNIQUE_KEY(gVertexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gVertexBufferKey);
static constexpr uint16_t kIndexData[] = {
// Inset octagon (solid coverage).
@ -511,7 +511,7 @@ static constexpr uint16_t kIndexData[] = {
39, 36, 38,
36, 38, 37};
GR_DECLARE_STATIC_UNIQUE_KEY(gIndexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gIndexBufferKey);
void FillRRectOpImpl::onPrepareDraws(GrMeshDrawTarget* target) {
if (!fProgramInfo) {
@ -549,13 +549,13 @@ void FillRRectOpImpl::onPrepareDraws(GrMeshDrawTarget* target) {
SkASSERT(instanceWrter == end);
}
GR_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey);
fIndexBuffer = target->resourceProvider()->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
sizeof(kIndexData),
kIndexData, gIndexBufferKey);
GR_DEFINE_STATIC_UNIQUE_KEY(gVertexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gVertexBufferKey);
fVertexBuffer = target->resourceProvider()->findOrMakeStaticBuffer(GrGpuBufferType::kVertex,
sizeof(kVertexData),

@ -2846,12 +2846,12 @@ private:
static const int kNumRRectsInIndexBuffer = 256;
GR_DECLARE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
GR_DECLARE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
static sk_sp<const GrBuffer> get_rrect_index_buffer(RRectType type,
GrResourceProvider* resourceProvider) {
GR_DEFINE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
GR_DEFINE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
switch (type) {
case kFill_RRectType:
return resourceProvider->findOrCreatePatternedIndexBuffer(

@ -398,7 +398,7 @@ void PathInnerTriangulateOp::onPrePrepare(GrRecordingContext* context,
}
}
GR_DECLARE_STATIC_UNIQUE_KEY(gHullVertexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gHullVertexBufferKey);
void PathInnerTriangulateOp::onPrepare(GrOpFlushState* flushState) {
const GrCaps& caps = flushState->caps();
@ -460,7 +460,7 @@ void PathInnerTriangulateOp::onPrepare(GrOpFlushState* flushState) {
if (!caps.shaderCaps()->vertexIDSupport()) {
constexpr static float kStripOrderIDs[4] = {0, 1, 3, 2};
GR_DEFINE_STATIC_UNIQUE_KEY(gHullVertexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gHullVertexBufferKey);
fHullVertexBufferIfNoIDSupport = flushState->resourceProvider()->findOrMakeStaticBuffer(
GrGpuBufferType::kVertex, sizeof(kStripOrderIDs), kStripOrderIDs,

@ -221,7 +221,7 @@ void PathStencilCoverOp::onPrePrepare(GrRecordingContext* context,
}
}
GR_DECLARE_STATIC_UNIQUE_KEY(gUnitQuadBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gUnitQuadBufferKey);
void PathStencilCoverOp::onPrepare(GrOpFlushState* flushState) {
if (!fTessellator) {
@ -307,7 +307,7 @@ void PathStencilCoverOp::onPrepare(GrOpFlushState* flushState) {
if (!flushState->caps().shaderCaps()->vertexIDSupport()) {
constexpr static SkPoint kUnitQuad[4] = {{0,0}, {0,1}, {1,0}, {1,1}};
GR_DEFINE_STATIC_UNIQUE_KEY(gUnitQuadBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gUnitQuadBufferKey);
fBBoxVertexBufferIfNoIDSupport = flushState->resourceProvider()->findOrMakeStaticBuffer(
GrGpuBufferType::kVertex, sizeof(kUnitQuad), kUnitQuad, gUnitQuadBufferKey);

@ -666,9 +666,9 @@ private:
namespace skgpu::v1::ShadowRRectOp {
static GrSurfaceProxyView create_falloff_texture(GrRecordingContext* rContext) {
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey key;
GrUniqueKey::Builder builder(&key, kDomain, 0, "Shadow Gaussian Falloff");
static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey key;
skgpu::UniqueKey::Builder builder(&key, kDomain, 0, "Shadow Gaussian Falloff");
builder.finish();
auto threadSafeCache = rContext->priv().threadSafeCache();

@ -278,15 +278,15 @@ bool SoftwarePathRenderer::onDrawPath(const DrawPathArgs& args) {
}
}
GrUniqueKey maskKey;
skgpu::UniqueKey maskKey;
if (useCache) {
// We require the upper left 2x2 of the matrix to match exactly for a cache hit.
SkScalar sx = args.fViewMatrix->get(SkMatrix::kMScaleX);
SkScalar sy = args.fViewMatrix->get(SkMatrix::kMScaleY);
SkScalar kx = args.fViewMatrix->get(SkMatrix::kMSkewX);
SkScalar ky = args.fViewMatrix->get(SkMatrix::kMSkewY);
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey::Builder builder(&maskKey, kDomain, 7 + args.fShape->unstyledKeySize(),
static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey::Builder builder(&maskKey, kDomain, 7 + args.fShape->unstyledKeySize(),
"SW Path Mask");
builder[0] = boundsForMask->width();
builder[1] = boundsForMask->height();

@ -8,7 +8,6 @@
#include "src/gpu/ops/StrokeRectOp.h"
#include "include/core/SkStrokeRec.h"
#include "include/private/GrResourceKey.h"
#include "include/utils/SkRandom.h"
#include "src/core/SkMatrixPriv.h"
#include "src/gpu/BufferWriter.h"
@ -19,6 +18,7 @@
#include "src/gpu/GrOpFlushState.h"
#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/ResourceKey.h"
#include "src/gpu/geometry/GrQuad.h"
#include "src/gpu/ops/FillRectOp.h"
#include "src/gpu/ops/GrMeshDrawOp.h"
@ -277,8 +277,8 @@ private:
// AA Stroking
///////////////////////////////////////////////////////////////////////////////////////////////////
GR_DECLARE_STATIC_UNIQUE_KEY(gMiterIndexBufferKey);
GR_DECLARE_STATIC_UNIQUE_KEY(gBevelIndexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gMiterIndexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gBevelIndexBufferKey);
bool stroke_dev_half_size_supported(SkVector devHalfStrokeSize) {
// Since the horizontal and vertical strokes share internal corners, the coverage value at that
@ -695,7 +695,7 @@ sk_sp<const GrGpuBuffer> AAStrokeRectOp::GetIndexBuffer(GrResourceProvider* reso
};
// clang-format on
static_assert(SK_ARRAY_COUNT(gMiterIndices) == kMiterIndexCnt);
GR_DEFINE_STATIC_UNIQUE_KEY(gMiterIndexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gMiterIndexBufferKey);
return resourceProvider->findOrCreatePatternedIndexBuffer(
gMiterIndices, kMiterIndexCnt, kNumMiterRectsInIndexBuffer, kMiterVertexCnt,
gMiterIndexBufferKey);
@ -760,7 +760,7 @@ sk_sp<const GrGpuBuffer> AAStrokeRectOp::GetIndexBuffer(GrResourceProvider* reso
// clang-format on
static_assert(SK_ARRAY_COUNT(gBevelIndices) == kBevelIndexCnt);
GR_DEFINE_STATIC_UNIQUE_KEY(gBevelIndexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gBevelIndexBufferKey);
return resourceProvider->findOrCreatePatternedIndexBuffer(
gBevelIndices, kBevelIndexCnt, kNumBevelRectsInIndexBuffer, kBevelVertexCnt,
gBevelIndexBufferKey);

@ -83,13 +83,15 @@ bool is_newer_better(SkData* incumbent, SkData* challenger) {
// When the SkPathRef genID changes, invalidate a corresponding GrResource described by key.
class UniqueKeyInvalidator : public SkIDChangeListener {
public:
UniqueKeyInvalidator(const GrUniqueKey& key, uint32_t contextUniqueID)
UniqueKeyInvalidator(const skgpu::UniqueKey& key, uint32_t contextUniqueID)
: fMsg(key, contextUniqueID, /* inThreadSafeCache */ true) {}
private:
GrUniqueKeyInvalidatedMessage fMsg;
skgpu::UniqueKeyInvalidatedMessage fMsg;
void changed() override { SkMessageBus<GrUniqueKeyInvalidatedMessage, uint32_t>::Post(fMsg); }
void changed() override {
SkMessageBus<skgpu::UniqueKeyInvalidatedMessage, uint32_t>::Post(fMsg);
}
};
class StaticVertexAllocator : public GrEagerVertexAllocator {
@ -230,17 +232,17 @@ private:
return path;
}
static void CreateKey(GrUniqueKey* key,
static void CreateKey(skgpu::UniqueKey* key,
const GrStyledShape& shape,
const SkIRect& devClipBounds) {
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
bool inverseFill = shape.inverseFilled();
static constexpr int kClipBoundsCnt = sizeof(devClipBounds) / sizeof(uint32_t);
int shapeKeyDataCnt = shape.unstyledKeySize();
SkASSERT(shapeKeyDataCnt >= 0);
GrUniqueKey::Builder builder(key, kDomain, shapeKeyDataCnt + kClipBoundsCnt, "Path");
skgpu::UniqueKey::Builder builder(key, kDomain, shapeKeyDataCnt + kClipBoundsCnt, "Path");
shape.writeUnstyledKey(&builder[0]);
// For inverse fills, the tessellation is dependent on clip bounds.
if (inverseFill) {
@ -280,7 +282,7 @@ private:
GrResourceProvider* rp = target->resourceProvider();
auto threadSafeCache = target->threadSafeCache();
GrUniqueKey key;
skgpu::UniqueKey key;
CreateKey(&key, fShape, fDevClipBounds);
SkScalar tol = GrPathUtils::scaleToleranceToSrc(GrPathUtils::kDefaultTolerance,
@ -439,7 +441,7 @@ private:
auto threadSafeViewCache = rContext->priv().threadSafeCache();
GrUniqueKey key;
skgpu::UniqueKey key;
CreateKey(&key, fShape, fDevClipBounds);
SkScalar tol = GrPathUtils::scaleToleranceToSrc(GrPathUtils::kDefaultTolerance,

@ -219,20 +219,20 @@ void PathCurveTessellator::WriteFixedIndexBufferBaseIndex(VertexWriter vertexWri
#if SK_GPU_V1
GR_DECLARE_STATIC_UNIQUE_KEY(gFixedVertexBufferKey);
GR_DECLARE_STATIC_UNIQUE_KEY(gFixedIndexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gFixedVertexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gFixedIndexBufferKey);
void PathCurveTessellator::prepareFixedCountBuffers(GrMeshDrawTarget* target) {
GrResourceProvider* rp = target->resourceProvider();
GR_DEFINE_STATIC_UNIQUE_KEY(gFixedVertexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gFixedVertexBufferKey);
fFixedVertexBuffer = rp->findOrMakeStaticBuffer(GrGpuBufferType::kVertex,
FixedVertexBufferSize(kMaxFixedResolveLevel),
gFixedVertexBufferKey,
WriteFixedVertexBuffer);
GR_DEFINE_STATIC_UNIQUE_KEY(gFixedIndexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gFixedIndexBufferKey);
fFixedIndexBuffer = rp->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
FixedIndexBufferSize(kMaxFixedResolveLevel),

@ -281,20 +281,20 @@ void PathWedgeTessellator::WriteFixedIndexBuffer(VertexWriter vertexWriter, size
#if SK_GPU_V1
GR_DECLARE_STATIC_UNIQUE_KEY(gFixedVertexBufferKey);
GR_DECLARE_STATIC_UNIQUE_KEY(gFixedIndexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gFixedVertexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gFixedIndexBufferKey);
void PathWedgeTessellator::prepareFixedCountBuffers(GrMeshDrawTarget* target) {
GrResourceProvider* rp = target->resourceProvider();
GR_DEFINE_STATIC_UNIQUE_KEY(gFixedVertexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gFixedVertexBufferKey);
fFixedVertexBuffer = rp->findOrMakeStaticBuffer(GrGpuBufferType::kVertex,
FixedVertexBufferSize(kMaxFixedResolveLevel),
gFixedVertexBufferKey,
WriteFixedVertexBuffer);
GR_DEFINE_STATIC_UNIQUE_KEY(gFixedIndexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gFixedIndexBufferKey);
fFixedIndexBuffer = rp->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,
FixedIndexBufferSize(kMaxFixedResolveLevel),

@ -362,7 +362,7 @@ void StrokeFixedCountTessellator::InitializeVertexIDFallbackBuffer(VertexWriter
#if SK_GPU_V1
GR_DECLARE_STATIC_UNIQUE_KEY(gVertexIDFallbackBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gVertexIDFallbackBufferKey);
int StrokeFixedCountTessellator::prepare(GrMeshDrawTarget* target,
const SkMatrix& shaderMatrix,
@ -387,7 +387,7 @@ int StrokeFixedCountTessellator::prepare(GrMeshDrawTarget* target,
constexpr static int kMaxVerticesInFallbackBuffer = 2048;
fFixedEdgeCount = std::min(fFixedEdgeCount, kMaxVerticesInFallbackBuffer/2);
GR_DEFINE_STATIC_UNIQUE_KEY(gVertexIDFallbackBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gVertexIDFallbackBufferKey);
fVertexBufferIfNoIDSupport = target->resourceProvider()->findOrMakeStaticBuffer(
GrGpuBufferType::kVertex,

@ -742,12 +742,12 @@ ClipStack::ClipState ClipStack::RawElement::clipType() const {
ClipStack::Mask::Mask(const SaveRecord& current, const SkIRect& drawBounds)
: fBounds(drawBounds)
, fGenID(current.genID()) {
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
static const UniqueKey::Domain kDomain = UniqueKey::GenerateDomain();
// The gen ID should not be invalid, empty, or wide open, since those do not require masks
SkASSERT(fGenID != kInvalidGenID && fGenID != kEmptyGenID && fGenID != kWideOpenGenID);
GrUniqueKey::Builder builder(&fKey, kDomain, 5, "clip_mask");
UniqueKey::Builder builder(&fKey, kDomain, 5, "clip_mask");
builder[0] = fGenID;
builder[1] = drawBounds.fLeft;
builder[2] = drawBounds.fRight;

@ -11,10 +11,10 @@
#include "include/core/SkClipOp.h"
#include "include/core/SkMatrix.h"
#include "include/core/SkShader.h"
#include "include/private/GrResourceKey.h"
#include "src/core/SkTBlockList.h"
#include "src/gpu/GrClip.h"
#include "src/gpu/GrSurfaceProxyView.h"
#include "src/gpu/ResourceKey.h"
#include "src/gpu/geometry/GrShape.h"
class GrAppliedClip;
@ -80,8 +80,8 @@ public:
SkIRect getConservativeBounds() const override;
#if GR_TEST_UTILS
GrUniqueKey testingOnly_getLastSWMaskKey() const {
return fMasks.empty() ? GrUniqueKey() : fMasks.back().key();
UniqueKey testingOnly_getLastSWMaskKey() const {
return fMasks.empty() ? UniqueKey() : fMasks.back().key();
}
#endif
@ -176,16 +176,16 @@ private:
SkASSERT(!fKey.isValid());
}
const GrUniqueKey& key() const { return fKey; }
const SkIRect& bounds() const { return fBounds; }
uint32_t genID() const { return fGenID; }
const UniqueKey& key() const { return fKey; }
const SkIRect& bounds() const { return fBounds; }
uint32_t genID() const { return fGenID; }
bool appliesToDraw(const SaveRecord& current, const SkIRect& drawBounds) const;
void invalidate(GrProxyProvider* proxyProvider);
SkDEBUGCODE(const SaveRecord* owner() const { return fOwner; })
private:
GrUniqueKey fKey;
UniqueKey fKey;
// The gen ID of the save record and the query bounds uniquely define the set of elements
// that would go into a mask. If the save record adds new elements, its gen ID would change.
// If the draw had different bounds it would select a different set of masked elements.

@ -422,14 +422,14 @@ GrSurfaceProxyView SkImage_Base::FindOrMakeCachedMipmappedView(GrRecordingContex
}
GrProxyProvider* proxyProvider = rContext->priv().proxyProvider();
GrUniqueKey baseKey;
skgpu::UniqueKey baseKey;
GrMakeKeyFromImageID(&baseKey, imageUniqueID, SkIRect::MakeSize(view.dimensions()));
SkASSERT(baseKey.isValid());
GrUniqueKey mipmappedKey;
static const GrUniqueKey::Domain kMipmappedDomain = GrUniqueKey::GenerateDomain();
skgpu::UniqueKey mipmappedKey;
static const skgpu::UniqueKey::Domain kMipmappedDomain = skgpu::UniqueKey::GenerateDomain();
{ // No extra values beyond the domain are required. Must name the var to please
// clang-tidy.
GrUniqueKey::Builder b(&mipmappedKey, baseKey, kMipmappedDomain, 0);
skgpu::UniqueKey::Builder b(&mipmappedKey, baseKey, kMipmappedDomain, 0);
}
SkASSERT(mipmappedKey.isValid());
if (sk_sp<GrTextureProxy> cachedMippedView =

@ -18,7 +18,6 @@
#if SK_SUPPORT_GPU
#include "include/gpu/GrDirectContext.h"
#include "include/gpu/GrRecordingContext.h"
#include "include/private/GrResourceKey.h"
#include "src/core/SkResourceCache.h"
#include "src/core/SkYUVPlanesCache.h"
#include "src/gpu/GrCaps.h"
@ -29,6 +28,7 @@
#include "src/gpu/GrRecordingContextPriv.h"
#include "src/gpu/GrSamplerState.h"
#include "src/gpu/GrYUVATextureProxies.h"
#include "src/gpu/ResourceKey.h"
#include "src/gpu/SkGr.h"
#include "src/gpu/SurfaceFillContext.h"
#include "src/gpu/effects/GrYUVtoRGBEffect.h"
@ -426,7 +426,7 @@ GrSurfaceProxyView SkImage_Lazy::lockTextureProxyView(GrRecordingContext* rConte
enum { kLockTexturePathCount = kRGBA_LockTexturePath + 1 };
GrUniqueKey key;
skgpu::UniqueKey key;
if (texGenPolicy == GrImageTexGenPolicy::kDraw) {
GrMakeKeyFromImageID(&key, this->uniqueID(), SkIRect::MakeSize(this->dimensions()));
}

@ -387,9 +387,9 @@ std::unique_ptr<GrFragmentProcessor> SkPictureShader::asFragmentProcessor(
info.imageInfo = info.imageInfo.makeColorType(kRGBA_8888_SkColorType);
}
static const GrUniqueKey::Domain kDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey key;
GrUniqueKey::Builder builder(&key, kDomain, 10, "Picture Shader Image");
static const skgpu::UniqueKey::Domain kDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey key;
skgpu::UniqueKey::Builder builder(&key, kDomain, 10, "Picture Shader Image");
builder[0] = dstCS->toXYZD50Hash();
builder[1] = dstCS->transferFnHash();
builder[2] = static_cast<uint32_t>(dstColorType);

@ -21,7 +21,6 @@
#include "include/core/SkSurface.h"
#include "include/core/SkTypes.h"
#include "include/effects/SkGradientShader.h"
#include "include/private/GrResourceKey.h"
#include "include/private/SkTemplates.h"
#include "include/utils/SkRandom.h"
#include "src/core/SkClipStack.h"

@ -2085,15 +2085,15 @@ DEF_GPUTEST_FOR_CONTEXTS(ClipStack_SWMask,
};
auto generateMask = [&](SkRect drawBounds) {
GrUniqueKey priorKey = cs->testingOnly_getLastSWMaskKey();
skgpu::UniqueKey priorKey = cs->testingOnly_getLastSWMaskKey();
drawRect(drawBounds);
GrUniqueKey newKey = cs->testingOnly_getLastSWMaskKey();
skgpu::UniqueKey newKey = cs->testingOnly_getLastSWMaskKey();
REPORTER_ASSERT(r, priorKey != newKey, "Did not generate a new SW mask key as expected");
return newKey;
};
auto verifyKeys = [&](const std::vector<GrUniqueKey>& expectedKeys,
const std::vector<GrUniqueKey>& releasedKeys) {
auto verifyKeys = [&](const std::vector<skgpu::UniqueKey>& expectedKeys,
const std::vector<skgpu::UniqueKey>& releasedKeys) {
context->flush();
GrProxyProvider* proxyProvider = context->priv().proxyProvider();
@ -2122,20 +2122,20 @@ DEF_GPUTEST_FOR_CONTEXTS(ClipStack_SWMask,
// Creates a mask for a complex clip
cs->save();
addMaskRequiringClip(5.f, 5.f, 20.f);
GrUniqueKey keyADepth1 = generateMask({0.f, 0.f, 20.f, 20.f});
GrUniqueKey keyBDepth1 = generateMask({10.f, 10.f, 30.f, 30.f});
skgpu::UniqueKey keyADepth1 = generateMask({0.f, 0.f, 20.f, 20.f});
skgpu::UniqueKey keyBDepth1 = generateMask({10.f, 10.f, 30.f, 30.f});
verifyKeys({keyADepth1, keyBDepth1}, {});
// Creates a new mask for a new save record, but doesn't delete the old records
cs->save();
addMaskRequiringClip(6.f, 6.f, 15.f);
GrUniqueKey keyADepth2 = generateMask({0.f, 0.f, 20.f, 20.f});
GrUniqueKey keyBDepth2 = generateMask({10.f, 10.f, 30.f, 30.f});
skgpu::UniqueKey keyADepth2 = generateMask({0.f, 0.f, 20.f, 20.f});
skgpu::UniqueKey keyBDepth2 = generateMask({10.f, 10.f, 30.f, 30.f});
verifyKeys({keyADepth1, keyBDepth1, keyADepth2, keyBDepth2}, {});
// Release after modifying the current record (even if we don't draw anything)
addMaskRequiringClip(4.f, 4.f, 15.f);
GrUniqueKey keyCDepth2 = generateMask({4.f, 4.f, 16.f, 20.f});
skgpu::UniqueKey keyCDepth2 = generateMask({4.f, 4.f, 16.f, 20.f});
verifyKeys({keyADepth1, keyBDepth1, keyCDepth2}, {keyADepth2, keyBDepth2});
// Release after restoring an older record

@ -12,7 +12,6 @@
#include <vector>
#include "include/core/SkBitmap.h"
#include "include/gpu/GrDirectContext.h"
#include "include/private/GrResourceKey.h"
#include "src/gpu/GrCaps.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrGeometryProcessor.h"
@ -23,6 +22,7 @@
#include "src/gpu/GrProgramInfo.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/KeyBuilder.h"
#include "src/gpu/ResourceKey.h"
#include "src/gpu/glsl/GrGLSLFragmentShaderBuilder.h"
#include "src/gpu/glsl/GrGLSLVarying.h"
#include "src/gpu/glsl/GrGLSLVertexGeoBuilder.h"
@ -34,7 +34,7 @@
#define WRITE_PNG_CONTEXT_TYPE kANGLE_D3D11_ES3_ContextType
#endif
GR_DECLARE_STATIC_UNIQUE_KEY(gIndexBufferKey);
SKGPU_DECLARE_STATIC_UNIQUE_KEY(gIndexBufferKey);
static constexpr int kBoxSize = 2;
static constexpr int kBoxCountY = 8;
@ -545,7 +545,7 @@ sk_sp<const GrBuffer> DrawMeshHelper::makeVertexBuffer(const T* data, int count)
}
sk_sp<const GrBuffer> DrawMeshHelper::getIndexBuffer() {
GR_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey);
SKGPU_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey);
return fState->resourceProvider()->findOrCreatePatternedIndexBuffer(
kIndexPattern, 6, kIndexPatternRepeatCount, 4, gIndexBufferKey);
}

@ -53,9 +53,9 @@ static std::unique_ptr<skgpu::v1::SurfaceDrawContext> new_SDC(GrRecordingContext
SkBudgeted::kYes);
}
static void create_view_key(GrUniqueKey* key, int wh, int id) {
static const GrUniqueKey::Domain kViewDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey::Builder builder(key, kViewDomain, 1);
static void create_view_key(skgpu::UniqueKey* key, int wh, int id) {
static const skgpu::UniqueKey::Domain kViewDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey::Builder builder(key, kViewDomain, 1);
builder[0] = wh;
builder.finish();
@ -64,9 +64,9 @@ static void create_view_key(GrUniqueKey* key, int wh, int id) {
}
}
static void create_vert_key(GrUniqueKey* key, int wh, int id) {
static const GrUniqueKey::Domain kVertDomain = GrUniqueKey::GenerateDomain();
GrUniqueKey::Builder builder(key, kVertDomain, 1);
static void create_vert_key(skgpu::UniqueKey* key, int wh, int id) {
static const skgpu::UniqueKey::Domain kVertDomain = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey::Builder builder(key, kVertDomain, 1);
builder[0] = wh;
builder.finish();
@ -215,7 +215,7 @@ public:
return false;
}
GrUniqueKey key;
skgpu::UniqueKey key;
create_view_key(&key, wh, kNoID);
auto threadSafeCache = this->threadSafeCache();
@ -301,7 +301,7 @@ public:
return false;
}
GrUniqueKey key;
skgpu::UniqueKey key;
create_vert_key(&key, wh, kNoID);
auto threadSafeCache = this->threadSafeCache();
@ -500,7 +500,7 @@ private:
++fStats->fNumLazyCreations;
}
GrUniqueKey key;
skgpu::UniqueKey key;
create_vert_key(&key, fWH, fID);
// We can "fail the lookup" to simulate a threaded race condition
@ -673,7 +673,7 @@ GrSurfaceProxyView TestHelper::AccessCachedView(GrRecordingContext* rContext,
int wh,
bool failLookup, bool failFillingIn, int id,
Stats* stats) {
GrUniqueKey key;
skgpu::UniqueKey key;
create_view_key(&key, wh, id);
if (GrDirectContext* dContext = rContext->asDirectContext()) {
@ -1422,7 +1422,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeCache14, reporter, ctxInfo) {
static void test_15(GrDirectContext* dContext, skiatest::Reporter* reporter,
TestHelper::addAccessFP addAccess,
TestHelper::checkFP check,
void (*create_key)(GrUniqueKey*, int wh, int id)) {
void (*create_key)(skgpu::UniqueKey*, int wh, int id)) {
TestHelper helper(dContext);
@ -1433,13 +1433,13 @@ static void test_15(GrDirectContext* dContext, skiatest::Reporter* reporter,
REPORTER_ASSERT(reporter, helper.numCacheEntries() == 1);
GrUniqueKey key;
skgpu::UniqueKey key;
(*create_key)(&key, kImageWH, kNoID);
GrUniqueKeyInvalidatedMessage msg(key, dContext->priv().contextID(),
skgpu::UniqueKeyInvalidatedMessage msg(key, dContext->priv().contextID(),
/* inThreadSafeCache */ true);
SkMessageBus<GrUniqueKeyInvalidatedMessage, uint32_t>::Post(msg);
SkMessageBus<skgpu::UniqueKeyInvalidatedMessage, uint32_t>::Post(msg);
// This purge call is needed to process the invalidation messages
dContext->purgeUnlockedResources(/* scratchResourcesOnly */ true);
@ -1478,7 +1478,7 @@ static bool newer_is_always_better(SkData* /* incumbent */, SkData* /* challenge
};
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrThreadSafeCache16Verts, reporter, ctxInfo) {
GrUniqueKey key;
skgpu::UniqueKey key;
create_vert_key(&key, kImageWH, kNoID);
TestHelper helper(ctxInfo.directContext(), newer_is_always_better);

@ -36,7 +36,7 @@ struct ProxyParams {
kInstantiated
};
Kind fKind;
GrUniqueKey fUniqueKey = GrUniqueKey();
skgpu::UniqueKey fUniqueKey = skgpu::UniqueKey();
// TODO: do we care about mipmapping
};

@ -270,16 +270,16 @@ public:
fToDelete = std::move(resource);
}
static void ComputeScratchKey(SimulatedProperty property, GrScratchKey* key) {
static GrScratchKey::ResourceType t = GrScratchKey::GenerateResourceType();
GrScratchKey::Builder builder(key, t, kScratchKeyFieldCnt);
static void ComputeScratchKey(SimulatedProperty property, skgpu::ScratchKey* key) {
static skgpu::ScratchKey::ResourceType t = skgpu::ScratchKey::GenerateResourceType();
skgpu::ScratchKey::Builder builder(key, t, kScratchKeyFieldCnt);
for (int i = 0; i < kScratchKeyFieldCnt; ++i) {
builder[i] = static_cast<uint32_t>(i + property);
}
}
static size_t ExpectedScratchKeySize() {
return sizeof(uint32_t) * (kScratchKeyFieldCnt + GrScratchKey::kMetaDataCnt);
return sizeof(uint32_t) * (kScratchKeyFieldCnt + skgpu::ScratchKey::kMetaDataCnt);
}
private:
static const int kScratchKeyFieldCnt = 6;
@ -306,7 +306,7 @@ private:
this->registerWithCacheWrapped(cacheable);
}
void computeScratchKey(GrScratchKey* key) const override {
void computeScratchKey(skgpu::ScratchKey* key) const override {
if (fIsScratch) {
ComputeScratchKey(fProperty, key);
}
@ -394,9 +394,9 @@ static void test_no_key(skiatest::Reporter* reporter) {
// Each integer passed as a template param creates a new domain.
template <int>
static void make_unique_key(GrUniqueKey* key, int data, const char* tag = nullptr) {
static GrUniqueKey::Domain d = GrUniqueKey::GenerateDomain();
GrUniqueKey::Builder builder(key, d, 1, tag);
static void make_unique_key(skgpu::UniqueKey* key, int data, const char* tag = nullptr) {
static skgpu::UniqueKey::Domain d = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey::Builder builder(key, d, 1, tag);
builder[0] = data;
}
@ -409,7 +409,7 @@ static void test_purge_unlocked(skiatest::Reporter* reporter) {
TestResource* a = TestResource::CreateScratch(gpu, SkBudgeted::kYes,
TestResource::kA_SimulatedProperty, 11);
GrUniqueKey uniqueKey;
skgpu::UniqueKey uniqueKey;
make_unique_key<0>(&uniqueKey, 0);
TestResource* b = TestResource::CreateScratch(gpu, SkBudgeted::kYes,
@ -419,7 +419,7 @@ static void test_purge_unlocked(skiatest::Reporter* reporter) {
TestResource* c = TestResource::CreateScratch(gpu, SkBudgeted::kYes,
TestResource::kA_SimulatedProperty, 13);
GrUniqueKey uniqueKey2;
skgpu::UniqueKey uniqueKey2;
make_unique_key<0>(&uniqueKey2, 1);
TestResource* d = TestResource::CreateScratch(gpu, SkBudgeted::kYes,
@ -536,7 +536,7 @@ static void test_budgeting(skiatest::Reporter* reporter) {
GrResourceCache* cache = mock.cache();
GrGpu* gpu = mock.gpu();
GrUniqueKey uniqueKey;
skgpu::UniqueKey uniqueKey;
make_unique_key<0>(&uniqueKey, 0);
// Create a scratch, a unique, and a wrapped resource
@ -550,9 +550,9 @@ static void test_budgeting(skiatest::Reporter* reporter) {
TestResource* unbudgeted = new TestResource(gpu, SkBudgeted::kNo, 14);
// Make sure we can add a unique key to the wrapped resources
GrUniqueKey uniqueKey2;
skgpu::UniqueKey uniqueKey2;
make_unique_key<0>(&uniqueKey2, 1);
GrUniqueKey uniqueKey3;
skgpu::UniqueKey uniqueKey3;
make_unique_key<0>(&uniqueKey3, 2);
wrappedCacheable->resourcePriv().setUniqueKey(uniqueKey2);
wrappedUncacheable->resourcePriv().setUniqueKey(uniqueKey3);
@ -662,7 +662,7 @@ static void test_unbudgeted(skiatest::Reporter* reporter) {
GrResourceCache* cache = mock.cache();
GrGpu* gpu = mock.gpu();
GrUniqueKey uniqueKey;
skgpu::UniqueKey uniqueKey;
make_unique_key<0>(&uniqueKey, 0);
TestResource* scratch;
@ -736,7 +736,7 @@ void test_unbudgeted_to_scratch(skiatest::Reporter* reporter);
TestResource* resource =
TestResource::CreateScratch(gpu, SkBudgeted::kNo, TestResource::kA_SimulatedProperty);
GrScratchKey key;
skgpu::ScratchKey key;
TestResource::ComputeScratchKey(TestResource::kA_SimulatedProperty, &key);
size_t size = resource->gpuMemorySize();
@ -807,12 +807,12 @@ static void test_duplicate_scratch_key(skiatest::Reporter* reporter) {
TestResource* b = TestResource::CreateScratch(gpu,
SkBudgeted::kYes,
TestResource::kB_SimulatedProperty, 12);
GrScratchKey scratchKey1;
skgpu::ScratchKey scratchKey1;
TestResource::ComputeScratchKey(TestResource::kA_SimulatedProperty, &scratchKey1);
// Check for negative case consistency. (leaks upon test failure.)
REPORTER_ASSERT(reporter, !cache->findAndRefScratchResource(scratchKey1));
GrScratchKey scratchKey;
skgpu::ScratchKey scratchKey;
TestResource::ComputeScratchKey(TestResource::kB_SimulatedProperty, &scratchKey);
// Scratch resources are registered with GrResourceCache just by existing. There are 2.
@ -855,7 +855,7 @@ static void test_remove_scratch_key(skiatest::Reporter* reporter) {
a->unref();
b->unref();
GrScratchKey scratchKey;
skgpu::ScratchKey scratchKey;
// Ensure that scratch key lookup is correct for negative case.
TestResource::ComputeScratchKey(TestResource::kA_SimulatedProperty, &scratchKey);
// (following leaks upon test failure).
@ -913,11 +913,11 @@ static void test_scratch_key_consistency(skiatest::Reporter* reporter) {
a->unref();
b->unref();
GrScratchKey scratchKey;
skgpu::ScratchKey scratchKey;
// Ensure that scratch key comparison and assignment is consistent.
GrScratchKey scratchKey1;
skgpu::ScratchKey scratchKey1;
TestResource::ComputeScratchKey(TestResource::kA_SimulatedProperty, &scratchKey1);
GrScratchKey scratchKey2;
skgpu::ScratchKey scratchKey2;
TestResource::ComputeScratchKey(TestResource::kB_SimulatedProperty, &scratchKey2);
REPORTER_ASSERT(reporter, scratchKey1.size() == TestResource::ExpectedScratchKeySize());
REPORTER_ASSERT(reporter, scratchKey1 != scratchKey2);
@ -964,7 +964,7 @@ static void test_duplicate_unique_key(skiatest::Reporter* reporter) {
GrResourceCache* cache = mock.cache();
GrGpu* gpu = mock.gpu();
GrUniqueKey key;
skgpu::UniqueKey key;
make_unique_key<0>(&key, 0);
// Create two resources that we will attempt to register with the same unique key.
@ -1004,7 +1004,7 @@ static void test_duplicate_unique_key(skiatest::Reporter* reporter) {
// Also make b be unreffed when replacement occurs.
b->unref();
TestResource* c = new TestResource(gpu, SkBudgeted::kYes, 13);
GrUniqueKey differentKey;
skgpu::UniqueKey differentKey;
make_unique_key<0>(&differentKey, 1);
c->resourcePriv().setUniqueKey(differentKey);
REPORTER_ASSERT(reporter, 2 == cache->getResourceCount());
@ -1037,7 +1037,7 @@ static void test_duplicate_unique_key(skiatest::Reporter* reporter) {
REPORTER_ASSERT(reporter, 0 == TestResource::NumAlive());
{
GrUniqueKey key2;
skgpu::UniqueKey key2;
make_unique_key<0>(&key2, 0);
sk_sp<TestResource> d(new TestResource(gpu));
int foo = 4132;
@ -1045,7 +1045,7 @@ static void test_duplicate_unique_key(skiatest::Reporter* reporter) {
d->resourcePriv().setUniqueKey(key2);
}
GrUniqueKey key3;
skgpu::UniqueKey key3;
make_unique_key<0>(&key3, 0);
sk_sp<GrGpuResource> d2(cache->findAndRefUniqueResource(key3));
REPORTER_ASSERT(reporter, *(int*) d2->getUniqueKey().getCustomData()->data() == 4132);
@ -1057,7 +1057,7 @@ static void test_purge_invalidated(skiatest::Reporter* reporter) {
GrResourceCache* cache = mock.cache();
GrGpu* gpu = mock.gpu();
GrUniqueKey key1, key2, key3;
skgpu::UniqueKey key1, key2, key3;
make_unique_key<0>(&key1, 1);
make_unique_key<0>(&key2, 2);
make_unique_key<0>(&key3, 3);
@ -1079,8 +1079,8 @@ static void test_purge_invalidated(skiatest::Reporter* reporter) {
REPORTER_ASSERT(reporter, cache->hasUniqueKey(key3));
REPORTER_ASSERT(reporter, 3 == TestResource::NumAlive());
typedef GrUniqueKeyInvalidatedMessage Msg;
typedef SkMessageBus<GrUniqueKeyInvalidatedMessage, uint32_t> Bus;
typedef skgpu::UniqueKeyInvalidatedMessage Msg;
typedef SkMessageBus<Msg, uint32_t> Bus;
// Invalidate two of the three, they should be purged and no longer accessible via their keys.
Bus::Post(Msg(key1, dContext->priv().contextID()));
@ -1104,7 +1104,7 @@ static void test_purge_invalidated(skiatest::Reporter* reporter) {
REPORTER_ASSERT(reporter, 1 == TestResource::NumAlive());
// Make sure we actually get to c via it's scratch key, before we say goodbye.
GrScratchKey scratchKey;
skgpu::ScratchKey scratchKey;
TestResource::ComputeScratchKey(TestResource::kA_SimulatedProperty, &scratchKey);
GrGpuResource* scratch = cache->findAndRefScratchResource(scratchKey);
REPORTER_ASSERT(reporter, scratch == c);
@ -1125,7 +1125,7 @@ static void test_cache_chained_purge(skiatest::Reporter* reporter) {
GrResourceCache* cache = mock.cache();
GrGpu* gpu = mock.gpu();
GrUniqueKey key1, key2;
skgpu::UniqueKey key1, key2;
make_unique_key<0>(&key1, 1);
make_unique_key<0>(&key2, 2);
@ -1182,7 +1182,7 @@ static void test_timestamp_wrap(skiatest::Reporter* reporter) {
// Add kCount resources, holding onto resources at random so we have a mix of purgeable and
// unpurgeable resources.
for (int j = 0; j < kCount; ++j) {
GrUniqueKey key;
skgpu::UniqueKey key;
make_unique_key<0>(&key, j);
TestResource* r = new TestResource(gpu);
@ -1202,7 +1202,7 @@ static void test_timestamp_wrap(skiatest::Reporter* reporter) {
// Verify that the correct resources were purged.
int currShouldPurgeIdx = 0;
for (int j = 0; j < kCount; ++j) {
GrUniqueKey key;
skgpu::UniqueKey key;
make_unique_key<0>(&key, j);
GrGpuResource* res = cache->findAndRefUniqueResource(key);
if (currShouldPurgeIdx < shouldPurgeIdxs.count() &&
@ -1245,7 +1245,7 @@ static void test_time_purge(skiatest::Reporter* reporter) {
// Insert resources and get time points between each addition.
for (int i = 0; i < cnt; ++i) {
TestResource* r = new TestResource(gpu);
GrUniqueKey k;
skgpu::UniqueKey k;
make_unique_key<1>(&k, i);
r->resourcePriv().setUniqueKey(k);
r->unref();
@ -1258,7 +1258,7 @@ static void test_time_purge(skiatest::Reporter* reporter) {
cache->purgeResourcesNotUsedSince(timeStamps[i]);
REPORTER_ASSERT(reporter, cnt - i - 1 == cache->getResourceCount());
for (int j = 0; j < i; ++j) {
GrUniqueKey k;
skgpu::UniqueKey k;
make_unique_key<1>(&k, j);
GrGpuResource* r = cache->findAndRefUniqueResource(k);
REPORTER_ASSERT(reporter, !SkToBool(r));
@ -1276,7 +1276,7 @@ static void test_time_purge(skiatest::Reporter* reporter) {
std::unique_ptr<GrGpuResource* []> refedResources(new GrGpuResource*[cnt / 2]);
for (int i = 0; i < cnt; ++i) {
TestResource* r = new TestResource(gpu);
GrUniqueKey k;
skgpu::UniqueKey k;
make_unique_key<1>(&k, i);
r->resourcePriv().setUniqueKey(k);
// Leave a ref on every other resource, beginning with the first.
@ -1316,7 +1316,7 @@ static void test_time_purge(skiatest::Reporter* reporter) {
TestResource* r = isScratch ? TestResource::CreateScratch(gpu, budgeted, property)
: new TestResource(gpu, budgeted, property);
if (!isScratch) {
GrUniqueKey k;
skgpu::UniqueKey k;
make_unique_key<1>(&k, i);
r->resourcePriv().setUniqueKey(k);
}
@ -1341,7 +1341,7 @@ static void test_time_purge(skiatest::Reporter* reporter) {
dContext->flushAndSubmit();
for (int i = 0; i < 10; ++i) {
TestResource* r = new TestResource(gpu);
GrUniqueKey k;
skgpu::UniqueKey k;
make_unique_key<1>(&k, i);
r->resourcePriv().setUniqueKey(k);
r->unref();
@ -1372,7 +1372,7 @@ static void test_partial_purge(skiatest::Reporter* reporter) {
for (int testCase = 0; testCase < kEndTests_TestCase; testCase++) {
GrUniqueKey key1, key2, key3;
skgpu::UniqueKey key1, key2, key3;
make_unique_key<0>(&key1, 1);
make_unique_key<0>(&key2, 2);
make_unique_key<0>(&key3, 3);
@ -1459,7 +1459,7 @@ static void test_partial_purge(skiatest::Reporter* reporter) {
}
static void test_custom_data(skiatest::Reporter* reporter) {
GrUniqueKey key1, key2;
skgpu::UniqueKey key1, key2;
make_unique_key<0>(&key1, 1);
make_unique_key<0>(&key2, 2);
int foo = 4132;
@ -1468,7 +1468,7 @@ static void test_custom_data(skiatest::Reporter* reporter) {
REPORTER_ASSERT(reporter, key2.getCustomData() == nullptr);
// Test that copying a key also takes a ref on its custom data.
GrUniqueKey key3 = key1;
skgpu::UniqueKey key3 = key1;
REPORTER_ASSERT(reporter, *(int*) key3.getCustomData()->data() == 4132);
}
@ -1495,7 +1495,7 @@ static void test_abandoned(skiatest::Reporter* reporter) {
resource->resourcePriv().makeBudgeted();
resource->resourcePriv().makeUnbudgeted();
resource->resourcePriv().removeScratchKey();
GrUniqueKey key;
skgpu::UniqueKey key;
make_unique_key<0>(&key, 1);
resource->resourcePriv().setUniqueKey(key);
resource->resourcePriv().removeUniqueKey();
@ -1521,7 +1521,7 @@ static void test_tags(skiatest::Reporter* reporter) {
for (int i = 0; i < kNumResources; ++i, ++currTagCnt) {
sk_sp<GrGpuResource> resource(new TestResource(gpu));
GrUniqueKey key;
skgpu::UniqueKey key;
if (currTagCnt == tagIdx) {
tagIdx += 1;
currTagCnt = 0;

@ -80,12 +80,12 @@ static sk_sp<GrTextureProxy> wrapped(skiatest::Reporter* reporter, GrRecordingCo
static sk_sp<GrTextureProxy> wrapped_with_key(skiatest::Reporter* reporter, GrRecordingContext*,
GrProxyProvider* proxyProvider, SkBackingFit fit) {
static GrUniqueKey::Domain d = GrUniqueKey::GenerateDomain();
static skgpu::UniqueKey::Domain d = skgpu::UniqueKey::GenerateDomain();
static int kUniqueKeyData = 0;
GrUniqueKey key;
skgpu::UniqueKey key;
GrUniqueKey::Builder builder(&key, d, 1, nullptr);
skgpu::UniqueKey::Builder builder(&key, d, 1, nullptr);
builder[0] = kUniqueKeyData++;
builder.finish();
@ -131,7 +131,7 @@ static void basic_test(GrDirectContext* dContext,
int startCacheCount = cache->getResourceCount();
GrUniqueKey key;
skgpu::UniqueKey key;
if (proxy->getUniqueKey().isValid()) {
key = proxy->getUniqueKey();
} else {
@ -157,7 +157,7 @@ static void basic_test(GrDirectContext* dContext,
// Once instantiated, the backing resource should have the same key
SkAssertResult(proxy->instantiate(resourceProvider));
const GrUniqueKey texKey = proxy->peekSurface()->getUniqueKey();
const skgpu::UniqueKey texKey = proxy->peekSurface()->getUniqueKey();
REPORTER_ASSERT(reporter, texKey.isValid());
REPORTER_ASSERT(reporter, key == texKey);
@ -202,8 +202,8 @@ static void basic_test(GrDirectContext* dContext,
if (expectResourceToOutliveProxy) {
proxy.reset();
GrUniqueKeyInvalidatedMessage msg(texKey, dContext->priv().contextID());
SkMessageBus<GrUniqueKeyInvalidatedMessage, uint32_t>::Post(msg);
skgpu::UniqueKeyInvalidatedMessage msg(texKey, dContext->priv().contextID());
SkMessageBus<skgpu::UniqueKeyInvalidatedMessage, uint32_t>::Post(msg);
cache->purgeAsNeeded();
expectedCacheCount -= cacheEntriesPerProxy;
proxy = proxyProvider->findOrCreateProxyByUniqueKey(key);
@ -291,9 +291,9 @@ static void invalidation_and_instantiation_test(GrDirectContext* dContext,
GrResourceCache* cache = dContext->priv().getResourceCache();
REPORTER_ASSERT(reporter, 0 == cache->getResourceCount());
static GrUniqueKey::Domain d = GrUniqueKey::GenerateDomain();
GrUniqueKey key;
GrUniqueKey::Builder builder(&key, d, 1, nullptr);
static skgpu::UniqueKey::Domain d = skgpu::UniqueKey::GenerateDomain();
skgpu::UniqueKey key;
skgpu::UniqueKey::Builder builder(&key, d, 1, nullptr);
builder[0] = 0;
builder.finish();
@ -303,8 +303,8 @@ static void invalidation_and_instantiation_test(GrDirectContext* dContext,
SkAssertResult(proxyProvider->assignUniqueKeyToProxy(key, proxy.get()));
// Send an invalidation message, which will be sitting in the cache's inbox
SkMessageBus<GrUniqueKeyInvalidatedMessage, uint32_t>::Post(
GrUniqueKeyInvalidatedMessage(key, dContext->priv().contextID()));
SkMessageBus<skgpu::UniqueKeyInvalidatedMessage, uint32_t>::Post(
skgpu::UniqueKeyInvalidatedMessage(key, dContext->priv().contextID()));
REPORTER_ASSERT(reporter, 1 == proxyProvider->numUniqueKeyProxies_TestOnly());
REPORTER_ASSERT(reporter, 0 == cache->getResourceCount());