diff --git a/src/gpu/GrPathRendererChain.cpp b/src/gpu/GrPathRendererChain.cpp
index 93741da308..60c935ab08 100644
--- a/src/gpu/GrPathRendererChain.cpp
+++ b/src/gpu/GrPathRendererChain.cpp
@@ -37,8 +37,7 @@ GrPathRendererChain::GrPathRendererChain(GrContext* context, const Options& opti
     if (options.fGpuPathRenderers & GpuPathRenderers::kCoverageCounting) {
         using AllowCaching = GrCoverageCountingPathRenderer::AllowCaching;
         if (auto ccpr = GrCoverageCountingPathRenderer::CreateIfSupported(
-                                caps, AllowCaching(options.fAllowPathMaskCaching),
-                                context->uniqueID())) {
+                                caps, AllowCaching(options.fAllowPathMaskCaching))) {
             fCoverageCountingPathRenderer = ccpr.get();
             context->contextPriv().addOnFlushCallbackObject(fCoverageCountingPathRenderer);
             fChain.push_back(std::move(ccpr));
diff --git a/src/gpu/ccpr/GrCCAtlas.cpp b/src/gpu/ccpr/GrCCAtlas.cpp
index 4ef632a000..a566707889 100644
--- a/src/gpu/ccpr/GrCCAtlas.cpp
+++ b/src/gpu/ccpr/GrCCAtlas.cpp
@@ -166,10 +166,11 @@ const GrUniqueKey& GrCCAtlas::getOrAssignUniqueKey(GrOnFlushResourceProvider* on
     return fUniqueKey;
 }
 
-sk_sp<GrCCAtlas::CachedAtlasInfo> GrCCAtlas::refOrMakeCachedAtlasInfo() {
+sk_sp<GrCCAtlas::CachedAtlasInfo> GrCCAtlas::refOrMakeCachedAtlasInfo(uint32_t contextUniqueID) {
     if (!fCachedAtlasInfo) {
-        fCachedAtlasInfo = sk_make_sp<CachedAtlasInfo>();
+        fCachedAtlasInfo = sk_make_sp<CachedAtlasInfo>(contextUniqueID);
     }
+    SkASSERT(fCachedAtlasInfo->fContextUniqueID == contextUniqueID);
     return fCachedAtlasInfo;
 }
 
diff --git a/src/gpu/ccpr/GrCCAtlas.h b/src/gpu/ccpr/GrCCAtlas.h
index 641289596a..4a762bcb9a 100644
--- a/src/gpu/ccpr/GrCCAtlas.h
+++ b/src/gpu/ccpr/GrCCAtlas.h
@@ -74,11 +74,13 @@ public:
     // potentially be reused (i.e., those which still represent an extant path). When the percentage
     // of useful pixels drops below 50%, the entire texture is purged from the resource cache.
     struct CachedAtlasInfo : public GrNonAtomicRef<CachedAtlasInfo> {
+        CachedAtlasInfo(uint32_t contextUniqueID) : fContextUniqueID(contextUniqueID) {}
+        const uint32_t fContextUniqueID;
         int fNumPathPixels = 0;
         int fNumInvalidatedPathPixels = 0;
         bool fIsPurgedFromResourceCache = false;
     };
-    sk_sp<CachedAtlasInfo> refOrMakeCachedAtlasInfo();
+    sk_sp<CachedAtlasInfo> refOrMakeCachedAtlasInfo(uint32_t contextUniqueID);
 
     // Instantiates our texture proxy for the atlas and returns a pre-cleared GrRenderTargetContext
     // that the caller may use to render the content. After this call, it is no longer valid to call
diff --git a/src/gpu/ccpr/GrCCDrawPathsOp.cpp b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
index 1dae08587c..4b3780dedf 100644
--- a/src/gpu/ccpr/GrCCDrawPathsOp.cpp
+++ b/src/gpu/ccpr/GrCCDrawPathsOp.cpp
@@ -345,8 +345,9 @@ void GrCCDrawPathsOp::setupResources(GrOnFlushResourceProvider* onFlushRP,
             SkIVector newOffset;
             GrCCAtlas* atlas =
                     resources->copyPathToCachedAtlas(*cacheEntry, doEvenOddFill, &newOffset);
-            cacheEntry->updateToCachedAtlas(atlas->getOrAssignUniqueKey(onFlushRP), newOffset,
-                                            atlas->refOrMakeCachedAtlasInfo());
+            cacheEntry->updateToCachedAtlas(
+                    atlas->getOrAssignUniqueKey(onFlushRP), newOffset,
+                    atlas->refOrMakeCachedAtlasInfo(onFlushRP->contextUniqueID()));
             this->recordInstance(atlas->textureProxy(), resources->nextPathInstanceIdx());
             resources->appendDrawPathInstance().set(*cacheEntry, draw.fCachedMaskShift,
                                                     draw.fColor);
diff --git a/src/gpu/ccpr/GrCCPathCache.cpp b/src/gpu/ccpr/GrCCPathCache.cpp
index db85641ad7..6d37b4fca5 100644
--- a/src/gpu/ccpr/GrCCPathCache.cpp
+++ b/src/gpu/ccpr/GrCCPathCache.cpp
@@ -12,6 +12,16 @@
 
 DECLARE_SKMESSAGEBUS_MESSAGE(sk_sp<GrCCPathCacheEntry>);
 
+static inline uint32_t next_path_cache_id() {
+    static std::atomic<uint32_t> gNextID(1);
+    for (;;) {
+        uint32_t id = gNextID.fetch_add(+1, std::memory_order_acquire);
+        if (SK_InvalidUniqueID != id) {
+            return id;
+        }
+    }
+}
+
 static inline bool SkShouldPostMessageToBus(
         const sk_sp<GrCCPathCacheEntry>& entry, uint32_t msgBusUniqueID) {
     return entry->pathCacheUniqueID() == msgBusUniqueID;
@@ -20,6 +30,7 @@ static inline bool SkShouldPostMessageToBus(
 // The maximum number of cache entries we allow in our own cache.
 static constexpr int kMaxCacheCount = 1 << 16;
 
+
 GrCCPathCache::MaskTransform::MaskTransform(const SkMatrix& m, SkIVector* shift)
         : fMatrix2x2{m.getScaleX(), m.getSkewX(), m.getSkewY(), m.getScaleY()} {
     SkASSERT(!m.hasPerspective());
@@ -128,6 +139,11 @@ inline uint32_t GrCCPathCache::HashNode::Hash(HashKey key) {
     return GrResourceKeyHash(&key.fData[1], key.fData[0]);
 }
 
+
+GrCCPathCache::GrCCPathCache()
+        : fInvalidatedEntriesInbox(next_path_cache_id()) {
+}
+
 #ifdef SK_DEBUG
 GrCCPathCache::~GrCCPathCache() {
     // Ensure the hash table and LRU list are still coherent.
@@ -248,9 +264,8 @@ void GrCCPathCacheEntry::invalidateAtlas() {
         fCachedAtlasInfo->fNumInvalidatedPathPixels >= fCachedAtlasInfo->fNumPathPixels / 2) {
         // Too many invalidated pixels: purge the atlas texture from the resource cache.
         // The GrContext and CCPR path cache both share the same unique ID.
-        uint32_t contextUniqueID = fPathCacheUniqueID;
         SkMessageBus<GrUniqueKeyInvalidatedMessage>::Post(
-                GrUniqueKeyInvalidatedMessage(fAtlasKey, contextUniqueID));
+                GrUniqueKeyInvalidatedMessage(fAtlasKey, fCachedAtlasInfo->fContextUniqueID));
         fCachedAtlasInfo->fIsPurgedFromResourceCache = true;
     }
 }
diff --git a/src/gpu/ccpr/GrCCPathCache.h b/src/gpu/ccpr/GrCCPathCache.h
index e8ce928b9b..54a835a128 100644
--- a/src/gpu/ccpr/GrCCPathCache.h
+++ b/src/gpu/ccpr/GrCCPathCache.h
@@ -24,7 +24,7 @@ class GrShape;
  */
 class GrCCPathCache {
 public:
-    GrCCPathCache(uint32_t contextUniqueID) : fInvalidatedEntriesInbox(contextUniqueID) {}
+    GrCCPathCache();
     SkDEBUGCODE(~GrCCPathCache();)
 
     // Stores the components of a transformation that affect a path mask (i.e. everything but
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
index 50eb1eb16a..55065e6136 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.cpp
@@ -30,16 +30,15 @@ bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
 }
 
 sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
-        const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
+        const GrCaps& caps, AllowCaching allowCaching) {
     return sk_sp<GrCoverageCountingPathRenderer>((IsSupported(caps))
-            ? new GrCoverageCountingPathRenderer(allowCaching, contextUniqueID)
+            ? new GrCoverageCountingPathRenderer(allowCaching)
             : nullptr);
 }
 
-GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching,
-                                                               uint32_t contextUniqueID) {
+GrCoverageCountingPathRenderer::GrCoverageCountingPathRenderer(AllowCaching allowCaching) {
     if (AllowCaching::kYes == allowCaching) {
-        fPathCache = skstd::make_unique<GrCCPathCache>(contextUniqueID);
+        fPathCache = skstd::make_unique<GrCCPathCache>();
     }
 }
 
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
index 1457e9b38d..19e42a922c 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer.h
@@ -34,8 +34,7 @@ public:
         kYes = true
     };
 
-    static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching,
-                                                                   uint32_t contextUniqueID);
+    static sk_sp<GrCoverageCountingPathRenderer> CreateIfSupported(const GrCaps&, AllowCaching);
 
     using PendingPathsMap = std::map<uint32_t, sk_sp<GrCCPerOpListPaths>>;
 
@@ -83,7 +82,7 @@ public:
                                       float* inflationRadius = nullptr);
 
 private:
-    GrCoverageCountingPathRenderer(AllowCaching, uint32_t contextUniqueID);
+    GrCoverageCountingPathRenderer(AllowCaching);
 
     // GrPathRenderer overrides.
     StencilSupport onGetStencilSupport(const GrShape&) const override {
diff --git a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
index 5de32aaddb..ebc12da1c4 100644
--- a/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
+++ b/src/gpu/ccpr/GrCoverageCountingPathRenderer_none.cpp
@@ -12,7 +12,7 @@ bool GrCoverageCountingPathRenderer::IsSupported(const GrCaps& caps) {
 }
 
 sk_sp<GrCoverageCountingPathRenderer> GrCoverageCountingPathRenderer::CreateIfSupported(
-        const GrCaps& caps, AllowCaching allowCaching, uint32_t contextUniqueID) {
+        const GrCaps& caps, AllowCaching allowCaching) {
     return nullptr;
 }
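
A note on the ID-allocation pattern introduced in GrCCPathCache.cpp above: rather than borrowing the GrContext's unique ID, the path cache now mints its own process-unique ID for its SkMessageBus inbox, while CachedAtlasInfo carries the context's ID explicitly for resource-cache invalidation. Below is a minimal standalone sketch of the same allocation pattern, assuming only standard C++11; SK_InvalidUniqueID is Skia's reserved zero sentinel, and kInvalidID/next_unique_id are illustrative names, not Skia API.

#include <atomic>
#include <cstdint>

// Stands in for Skia's SK_InvalidUniqueID ("no ID") sentinel.
static constexpr uint32_t kInvalidID = 0;

static uint32_t next_unique_id() {
    // Start at 1 so the first ID handed out is already past the sentinel.
    static std::atomic<uint32_t> gNextID(1);
    for (;;) {
        uint32_t id = gNextID.fetch_add(1, std::memory_order_relaxed);
        if (id != kInvalidID) {
            return id;  // common case: one atomic increment, no looping
        }
        // Only reachable once 2^32 allocations wrap the counter back to 0;
        // looping past the sentinel hands out the next value instead.
    }
}

The atomic fetch_add makes concurrent callers safe without a mutex. The Skia version above uses memory_order_acquire; a relaxed order is also sufficient for a pure ID counter, since no other memory is published along with the ID.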