Revert "Add explicit GrSurfaceProxy flag to skip explicit resource allocation"
This reverts commit 7bb47f2a2e.
Reason for revert: possibly breaking the Chrome roll
Original change's description:
> Add explicit GrSurfaceProxy flag to skip explicit resource allocation
>
> This approach eliminates a lot of edge cases where ops (e.g., the SmallPathOp) are treating their proxies in a special manner (i.e., just holding a raw ref and never adding pendingIO). Given that the atlas managers are managing the lifetime of the proxies there is no reason for the GrResourceAllocator to be aware of them.
>
> Pulled out of:
>
> https://skia-review.googlesource.com/c/skia/+/208227 (Implement alternate method for determining recycle-ability of allocated GrSurfaces)
>
> Change-Id: Ia6bec5e8f5d5bc63e86ae011bcc3f8e061c066b2
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/209400
> Reviewed-by: Brian Salomon <bsalomon@google.com>
> Commit-Queue: Robert Phillips <robertphillips@google.com>
TBR=bsalomon@google.com,robertphillips@google.com
# Not skipping CQ checks because original CL landed > 1 day ago.
Change-Id: Id65bd176f56aa91ff76ec1979aef6206b7665d63
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/209600
Reviewed-by: Mike Klein <mtklein@google.com>
Commit-Queue: Mike Klein <mtklein@google.com>
This commit is contained in:
parent 08cb24621a
commit 6350cb0f4b
@ -183,8 +183,9 @@ protected:
|
||||
}
|
||||
}
|
||||
|
||||
int32_t internalGetProxyRefCnt() const { return fRefCnt; }
|
||||
int32_t internalGetTotalRefs() const { return fRefCnt + fPendingReads + fPendingWrites; }
|
||||
int32_t internalGetProxyRefCnt() const {
|
||||
return fRefCnt;
|
||||
}
|
||||
|
||||
// For deferred proxies this will be null. For wrapped proxies it will point to the
|
||||
// wrapped resource.
|
||||
@ -492,11 +493,9 @@ protected:
|
||||
friend class GrSurfaceProxyPriv;
|
||||
|
||||
// Methods made available via GrSurfaceProxyPriv
|
||||
bool ignoredByResourceAllocator() const { return fIgnoredByResourceAllocator; }
|
||||
void setIgnoredByResourceAllocator() { fIgnoredByResourceAllocator = true; }
|
||||
|
||||
int32_t getProxyRefCnt() const { return this->internalGetProxyRefCnt(); }
|
||||
int32_t getTotalRefs() const { return this->internalGetTotalRefs(); }
|
||||
int32_t getProxyRefCnt() const {
|
||||
return this->internalGetProxyRefCnt();
|
||||
}
|
||||
|
||||
void computeScratchKey(GrScratchKey*) const;
|
||||
|
||||
@ -552,7 +551,6 @@ private:
|
||||
virtual size_t onUninstantiatedGpuMemorySize() const = 0;
|
||||
|
||||
bool fNeedsClear;
|
||||
bool fIgnoredByResourceAllocator = false;
|
||||
|
||||
// This entry is lazily evaluated so, when the proxy wraps a resource, the resource
|
||||
// will be called but, when the proxy is deferred, it will compute the answer itself.
|
||||
|
@ -536,8 +536,6 @@ bool GrDrawOpAtlas::createPages(GrProxyProvider* proxyProvider) {
|
||||
return false;
|
||||
}
|
||||
|
||||
fProxies[i]->priv().setIgnoredByResourceAllocator();
|
||||
|
||||
// set up allocated plots
|
||||
fPages[i].fPlotArray.reset(new sk_sp<Plot>[ numPlotsX * numPlotsY ]);
|
||||
|
||||
|
@ -64,7 +64,6 @@ sk_sp<GrTextureProxy> GrOnFlushResourceProvider::findOrCreateProxyByUniqueKey(
|
||||
}
|
||||
|
||||
bool GrOnFlushResourceProvider::instatiateProxy(GrSurfaceProxy* proxy) {
|
||||
SkASSERT(proxy->priv().ignoredByResourceAllocator());
|
||||
SkASSERT(proxy->priv().requiresNoPendingIO());
|
||||
|
||||
// TODO: this class should probably just get a GrDirectContext
|
||||
|
@ -57,12 +57,6 @@ GrResourceAllocator::~GrResourceAllocator() {
|
||||
|
||||
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end
|
||||
SkDEBUGCODE(, bool isDirectDstRead)) {
|
||||
if (proxy->canSkipResourceAllocator()) {
|
||||
return;
|
||||
}
|
||||
|
||||
SkASSERT(!proxy->priv().ignoredByResourceAllocator());
|
||||
|
||||
SkASSERT(start <= end);
|
||||
SkASSERT(!fAssigned); // We shouldn't be adding any intervals after (or during) assignment
|
||||
|
||||
@ -347,22 +341,17 @@ bool GrResourceAllocator::assign(int* startIndex, int* stopIndex, AssignError* o
|
||||
SkASSERT(outError);
|
||||
*outError = AssignError::kNoError;
|
||||
|
||||
SkASSERT(fNumOpLists == fEndOfOpListOpIndices.count());
|
||||
|
||||
fIntvlHash.reset(); // we don't need the interval hash anymore
|
||||
|
||||
if (fCurOpListIndex >= fEndOfOpListOpIndices.count()) {
|
||||
return false; // nothing to render
|
||||
if (fIntvlList.empty()) {
|
||||
return false; // nothing to render
|
||||
}
|
||||
|
||||
SkASSERT(fCurOpListIndex < fNumOpLists);
|
||||
SkASSERT(fNumOpLists == fEndOfOpListOpIndices.count());
|
||||
|
||||
*startIndex = fCurOpListIndex;
|
||||
*stopIndex = fEndOfOpListOpIndices.count();
|
||||
|
||||
if (fIntvlList.empty()) {
|
||||
fCurOpListIndex = fEndOfOpListOpIndices.count();
|
||||
return true; // no resources to assign
|
||||
}
|
||||
|
||||
#if GR_ALLOCATION_SPEW
|
||||
SkDebugf("assigning opLists %d through %d out of %d numOpLists\n",
|
||||
*startIndex, *stopIndex, fNumOpLists);
|
||||
|
@ -189,11 +189,6 @@ sk_sp<GrSurface> GrSurfaceProxy::createSurfaceImpl(GrResourceProvider* resourceP
|
||||
}
|
||||
|
||||
bool GrSurfaceProxy::canSkipResourceAllocator() const {
|
||||
if (this->ignoredByResourceAllocator()) {
|
||||
// Usually an atlas or onFlush proxy
|
||||
return true;
|
||||
}
|
||||
|
||||
auto peek = this->peekSurface();
|
||||
if (!peek) {
|
||||
return false;
|
||||
|
@ -23,8 +23,6 @@ public:
|
||||
// depends on the read and write refs (So this method can validly return 0).
|
||||
int32_t getProxyRefCnt() const { return fProxy->getProxyRefCnt(); }
|
||||
|
||||
int32_t getTotalRefs() const { return fProxy->getTotalRefs(); }
|
||||
|
||||
void computeScratchKey(GrScratchKey* key) const { return fProxy->computeScratchKey(key); }
|
||||
|
||||
// Create a GrSurface-derived class that meets the requirements (i.e, desc, renderability)
|
||||
@ -60,9 +58,6 @@ public:
|
||||
static bool SK_WARN_UNUSED_RESULT AttachStencilIfNeeded(GrResourceProvider*, GrSurface*,
|
||||
bool needsStencil);
|
||||
|
||||
bool ignoredByResourceAllocator() const { return fProxy->ignoredByResourceAllocator(); }
|
||||
void setIgnoredByResourceAllocator() { fProxy->setIgnoredByResourceAllocator(); }
|
||||
|
||||
private:
|
||||
explicit GrSurfaceProxyPriv(GrSurfaceProxy* proxy) : fProxy(proxy) {}
|
||||
GrSurfaceProxyPriv(const GrSurfaceProxyPriv&) {} // unimpl
|
||||
|
@ -97,8 +97,6 @@ GrCCAtlas::GrCCAtlas(CoverageType coverageType, const Specs& specs, const GrCaps
|
||||
return GrSurfaceProxy::LazyInstantiationResult(fBackingTexture);
|
||||
},
|
||||
format, GrProxyProvider::Renderable::kYes, kTextureOrigin, pixelConfig, caps);
|
||||
|
||||
fTextureProxy->priv().setIgnoredByResourceAllocator();
|
||||
}
|
||||
|
||||
GrCCAtlas::~GrCCAtlas() {
|
||||
|
@ -196,6 +196,9 @@ public:
|
||||
const char* name() const override { return "TextureOp"; }
|
||||
|
||||
void visitProxies(const VisitProxyFunc& func, VisitorType visitor) const override {
|
||||
if (visitor == VisitorType::kAllocatorGather && fCanSkipAllocatorGather) {
|
||||
return;
|
||||
}
|
||||
for (unsigned p = 0; p < fProxyCnt; ++p) {
|
||||
func(fProxies[p].fProxy);
|
||||
}
|
||||
@ -297,6 +300,8 @@ private:
|
||||
auto bounds = dstQuad.bounds(dstQuadType);
|
||||
this->setBounds(bounds, HasAABloat(aaType == GrAAType::kCoverage), IsZeroArea::kNo);
|
||||
fDomain = static_cast<unsigned>(domain);
|
||||
fCanSkipAllocatorGather =
|
||||
static_cast<unsigned>(fProxies[0].fProxy->canSkipResourceAllocator());
|
||||
}
|
||||
TextureOp(const GrRenderTargetContext::TextureSetEntry set[], int cnt,
|
||||
GrSamplerState::Filter filter, GrAAType aaType,
|
||||
@ -310,6 +315,7 @@ private:
|
||||
SkRect bounds = SkRectPriv::MakeLargestInverted();
|
||||
GrAAType overallAAType = GrAAType::kNone; // aa type maximally compatible with all dst rects
|
||||
bool mustFilter = false;
|
||||
fCanSkipAllocatorGather = static_cast<unsigned>(true);
|
||||
// Most dst rects are transformed by the same view matrix, so their quad types start
|
||||
// identical, unless an entry provides a dstClip or additional transform that changes it.
|
||||
// The quad list will automatically adapt to that.
|
||||
@ -321,6 +327,9 @@ private:
|
||||
fProxies[p].fQuadCnt = 1;
|
||||
SkASSERT(fProxies[p].fProxy->textureType() == fProxies[0].fProxy->textureType());
|
||||
SkASSERT(fProxies[p].fProxy->config() == fProxies[0].fProxy->config());
|
||||
if (!fProxies[p].fProxy->canSkipResourceAllocator()) {
|
||||
fCanSkipAllocatorGather = static_cast<unsigned>(false);
|
||||
}
|
||||
|
||||
SkMatrix ctm = viewMatrix;
|
||||
if (set[p].fPreViewMatrix) {
|
||||
@ -668,7 +677,8 @@ private:
|
||||
GR_STATIC_ASSERT(GrQuadPerEdgeAA::kColorTypeCount <= 4);
|
||||
// Used to track whether fProxy is ref'ed or has a pending IO after finalize() is called.
|
||||
unsigned fFinalized : 1;
|
||||
unsigned fProxyCnt : 32 - 8;
|
||||
unsigned fCanSkipAllocatorGather : 1;
|
||||
unsigned fProxyCnt : 32 - 9;
|
||||
Proxy fProxies[1];
|
||||
|
||||
static_assert(kGrQuadTypeCount <= 4, "GrQuadType does not fit in 2 bits");
|
||||
|
@ -323,8 +323,6 @@ public:
|
||||
kBottomLeft_GrSurfaceOrigin,
|
||||
kRGBA_8888_GrPixelConfig,
|
||||
*proxyProvider->caps());
|
||||
|
||||
fAtlasProxy->priv().setIgnoredByResourceAllocator();
|
||||
return fAtlasProxy;
|
||||
}
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user