Revert "Do register allocation in GrResourceAllocator"

This reverts commit c6f78ff55d.

Reason for revert: Broke Chrome roll and MSAN

Original change's description:
> Do register allocation in GrResourceAllocator
>
> This lets us plan out the allocation of resources without
> actually committing to the resulting plan. In the future,
> the user will be able to do the register allocation, then
> query the estimated memory cost, and either commit to
> that allocation or try a different order of operations.
>
> Bug: skia:10877
> Change-Id: I34f92b01986dc2a0dd72e85d42283fc438c5fc82
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/386097
> Commit-Queue: Adlai Holler <adlai@google.com>
> Reviewed-by: Robert Phillips <robertphillips@google.com>

TBR=robertphillips@google.com,adlai@google.com

Change-Id: I7492c12b8188ed22c3cd80fd4068da402d8d3543
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: skia:10877
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/386856
Reviewed-by: Adlai Holler <adlai@google.com>
Commit-Queue: Adlai Holler <adlai@google.com>
This commit is contained in:
Adlai Holler 2021-03-18 20:41:08 +00:00 committed by Skia Commit-Bot
parent f96645dd9e
commit 9f358825f9
3 changed files with 104 additions and 149 deletions

View File

@@ -98,63 +98,10 @@ void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy, unsigned int start,
fIntvlHash.set(proxyID, newIntvl);
}
bool GrResourceAllocator::Register::isRecyclable(const GrCaps& caps,
GrSurfaceProxy* proxy,
int knownUseCount) const {
if (!caps.reuseScratchTextures() && !proxy->asRenderTargetProxy()) {
// Tragically, scratch texture reuse is totally disabled in this case.
return false;
}
if (!this->scratchKey().isValid()) {
return false; // no scratch key, no free pool
}
if (this->uniqueKey().isValid()) {
return false; // rely on the resource cache to hold onto uniquely-keyed surfaces.
}
// If all the refs on the proxy are known to the resource allocator then no one
bool GrResourceAllocator::Interval::isSurfaceRecyclable() const {
// All the refs on the proxy are known to the resource allocator thus no one
// should be holding onto it outside of Ganesh.
return !proxy->refCntGreaterThan(knownUseCount);
}
bool GrResourceAllocator::Register::instantiateSurface(GrSurfaceProxy* proxy,
GrResourceProvider* resourceProvider) {
SkASSERT(!proxy->peekSurface());
sk_sp<GrSurface> surface;
if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
SkASSERT(uniqueKey == fOriginatingProxy->getUniqueKey());
// First try to reattach to a cached surface if the proxy is uniquely keyed
surface = resourceProvider->findByUniqueKey<GrSurface>(uniqueKey);
}
if (!surface) {
if (proxy == fOriginatingProxy) {
surface = proxy->priv().createSurface(resourceProvider);
} else {
surface = sk_ref_sp(fOriginatingProxy->peekSurface());
}
}
if (!surface) {
return false;
}
// Make surface budgeted if this proxy is budgeted.
if (SkBudgeted::kYes == proxy->isBudgeted() &&
GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
// This gets the job done but isn't quite correct. It would be better to try to
// match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
surface->resourcePriv().makeBudgeted();
}
// Propagate the proxy unique key to the surface if we have one.
if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
if (!surface->getUniqueKey().isValid()) {
resourceProvider->assignUniqueKeyToResource(uniqueKey, surface.get());
}
SkASSERT(surface->getUniqueKey() == uniqueKey);
}
proxy->priv().assign(std::move(surface));
return true;
return !fProxy->refCntGreaterThan(fUses);
}
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
@@ -247,49 +194,78 @@ void GrResourceAllocator::IntervalList::validate() const {
}
#endif
// 'surface' can be reused. Add it back to the free pool.
void GrResourceAllocator::recycleRegister(Register* r) {
const GrScratchKey &key = r->scratchKey();
if (!key.isValid()) {
return; // can't do it w/o a valid scratch key
}
GrSurface* surface = r->surface();
if (surface->getUniqueKey().isValid()) {
// If the surface has a unique key we throw it back into the resource cache.
// If things get really tight 'findRegisterFor' may pull it back out but there is
// no need to have it in tight rotation.
return;
}
#if GR_ALLOCATION_SPEW
SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
// TODO: fix this insertion so we get a more LRU-ish behavior
fFreePool.insert(key, r);
}
// First try to reuse one of the recently allocated/used registers in the free pool.
GrResourceAllocator::Register* GrResourceAllocator::findOrCreateRegisterFor(GrSurfaceProxy* proxy) {
// Handle uniquely keyed proxies
// If we can't find a usable one, try to instantiate a surface and wrap it in a new one.
GrResourceAllocator::Register* GrResourceAllocator::findRegisterFor(const GrSurfaceProxy* proxy) {
if (const auto& uniqueKey = proxy->getUniqueKey(); uniqueKey.isValid()) {
if (auto p = fUniqueKeyRegisters.find(uniqueKey)) {
return *p;
// First try to reattach to a cached surface if the proxy is uniquely keyed
if (sk_sp<GrSurface> surface = fResourceProvider->findByUniqueKey<GrSurface>(uniqueKey)) {
// TODO: Find the register if we've encountered this unique key before.
return fInternalAllocator.make<Register>(std::move(surface));
}
// No need for a scratch key. These don't go in the free pool.
Register* r = fInternalAllocator.make<Register>(proxy, GrScratchKey());
fUniqueKeyRegisters.set(uniqueKey, r);
return r;
}
// Then look in the free pool
GrScratchKey scratchKey;
proxy->priv().computeScratchKey(*fResourceProvider->caps(), &scratchKey);
GrScratchKey key;
proxy->priv().computeScratchKey(*fResourceProvider->caps(), &key);
auto filter = [] (const Register* r) {
return true;
};
if (Register* r = fFreePool.findAndRemove(scratchKey, filter)) {
if (Register* r = fFreePool.findAndRemove(key, filter)) {
GrSurface* surface = r->surface();
if (SkBudgeted::kYes == proxy->isBudgeted() &&
GrBudgetedType::kBudgeted != surface->resourcePriv().budgetedType()) {
// This gets the job done but isn't quite correct. It would be better to try to
// match budgeted proxies w/ budgeted surfaces and unbudgeted w/ unbudgeted.
surface->resourcePriv().makeBudgeted();
}
SkASSERT(!surface->getUniqueKey().isValid());
return r;
}
return fInternalAllocator.make<Register>(proxy, std::move(scratchKey));
if (sk_sp<GrSurface> surf = proxy->priv().createSurface(fResourceProvider)) {
return fInternalAllocator.make<Register>(std::move(surf));
}
return nullptr;
}
// Remove any intervals that end before the current index. Add their registers
// Remove any intervals that end before the current index. Return their GrSurfaces
// to the free pool if possible.
void GrResourceAllocator::expire(unsigned int curIndex) {
while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->end() < curIndex) {
Interval* intvl = fActiveIntvls.popHead();
SkASSERT(!intvl->next());
Register* r = intvl->getRegister();
if (r && r->isRecyclable(*fResourceProvider->caps(), intvl->proxy(), intvl->uses())) {
#if GR_ALLOCATION_SPEW
SkDebugf("putting register %d back into pool\n", r->uniqueID());
#endif
// TODO: fix this insertion so we get a more LRU-ish behavior
fFreePool.insert(r->scratchKey(), r);
if (Register* r = intvl->getRegister()) {
if (intvl->isSurfaceRecyclable()) {
this->recycleRegister(r);
}
}
fFinishedIntvls.insertByIncreasingEnd(intvl);
}
}
@@ -310,48 +286,47 @@ bool GrResourceAllocator::assign() {
while (Interval* cur = fIntvlList.popHead()) {
this->expire(cur->start());
// Already-instantiated proxies and lazy proxies don't use registers.
// No need to compute scratch keys (or CANT, in the case of fully-lazy).
if (cur->proxy()->isInstantiated() || cur->proxy()->isLazy()) {
if (cur->proxy()->isInstantiated()) {
fActiveIntvls.insertByIncreasingEnd(cur);
continue;
}
Register* r = this->findOrCreateRegisterFor(cur->proxy());
if (cur->proxy()->isLazy()) {
if (!cur->proxy()->priv().doLazyInstantiation(fResourceProvider)) {
fFailedInstantiation = true;
}
} else if (Register* r = this->findRegisterFor(cur->proxy())) {
sk_sp<GrSurface> surface = r->refSurface();
// propagate the proxy unique key to the surface if we have one.
if (const auto& uniqueKey = cur->proxy()->getUniqueKey(); uniqueKey.isValid()) {
if (!surface->getUniqueKey().isValid()) {
fResourceProvider->assignUniqueKeyToResource(uniqueKey, surface.get());
}
SkASSERT(surface->getUniqueKey() == uniqueKey);
}
#if GR_ALLOCATION_SPEW
SkDebugf("Assigning register %d to %d\n",
r->uniqueID(),
cur->proxy()->uniqueID().asUInt());
SkDebugf("Assigning %d to %d\n",
surface->uniqueID().asUInt(),
cur->proxy()->uniqueID().asUInt());
#endif
SkASSERT(!cur->proxy()->peekSurface());
cur->setRegister(r);
SkASSERT(!cur->proxy()->peekSurface());
cur->setRegister(r);
// TODO: surface creation and assignment should happen later
cur->proxy()->priv().assign(std::move(surface));
} else {
SkASSERT(!cur->proxy()->isInstantiated());
fFailedInstantiation = true;
}
fActiveIntvls.insertByIncreasingEnd(cur);
}
// expire all the remaining intervals to drain the active interval list
this->expire(std::numeric_limits<unsigned int>::max());
// TODO: Return here and give the caller a chance to estimate memory cost and bail before
// instantiating anything.
// Instantiate surfaces
while (Interval* cur = fFinishedIntvls.popHead()) {
if (fFailedInstantiation) {
break;
}
if (cur->proxy()->isInstantiated()) {
continue;
}
if (cur->proxy()->isLazy()) {
fFailedInstantiation = !cur->proxy()->priv().doLazyInstantiation(fResourceProvider);
continue;
}
Register* r = cur->getRegister();
SkASSERT(r);
fFailedInstantiation = !r->instantiateSurface(cur->proxy(), fResourceProvider);
}
return !fFailedInstantiation;
}

View File

@@ -13,7 +13,7 @@
#include "src/gpu/GrGpuResourcePriv.h"
#include "src/gpu/GrHashMapWithCache.h"
#include "src/gpu/GrSurface.h"
#include "src/gpu/GrSurfaceProxyPriv.h"
#include "src/gpu/GrSurfaceProxy.h"
#include "src/core/SkArenaAlloc.h"
#include "src/core/SkTMultiMap.h"
@@ -108,7 +108,7 @@ private:
// These two methods wrap the interactions with the free pool
void recycleRegister(Register* r);
Register* findOrCreateRegisterFor(GrSurfaceProxy* proxy);
Register* findRegisterFor(const GrSurfaceProxy* proxy);
struct FreePoolTraits {
static const GrScratchKey& GetKey(const Register& r) {
@@ -120,42 +120,31 @@ private:
};
typedef SkTMultiMap<Register, GrScratchKey, FreePoolTraits> FreePoolMultiMap;
typedef SkTHashMap<uint32_t, Interval*, GrCheapHash> IntvlHash;
typedef SkTHashMap<GrUniqueKey, Register*> UniqueKeyRegisterHash;
typedef SkTHashMap<uint32_t, Interval*, GrCheapHash> IntvlHash;
// Each proxy with some exceptions is assigned a register. After all assignments are made,
// another pass is performed to instantiate and assign actual surfaces to the proxies. Right
// now these are performed in one call, but in the future they will be separable and the user
// will be able to query re: memory cost before committing to surface creation.
// Right now this is just a wrapper around an actual SkSurface.
// In the future this will be a placeholder for an SkSurface that will be
// created later, after the user of the resource allocator commits to
// a specific series of intervals.
class Register {
public:
// It's OK to pass an invalid scratch key iff the proxy has a unique key.
Register(GrSurfaceProxy* originatingProxy, GrScratchKey scratchKey)
: fOriginatingProxy(originatingProxy)
, fScratchKey(std::move(scratchKey)) {
SkASSERT(originatingProxy);
SkASSERT(!originatingProxy->isInstantiated());
SkASSERT(!originatingProxy->isLazy());
SkASSERT(this->scratchKey().isValid() ^ this->uniqueKey().isValid());
Register(sk_sp<GrSurface> s) : fSurface(std::move(s)) {
SkASSERT(fSurface);
SkDEBUGCODE(fUniqueID = CreateUniqueID();)
}
~Register() = default;
const GrScratchKey& scratchKey() const { return fScratchKey; }
const GrUniqueKey& uniqueKey() const { return fOriginatingProxy->getUniqueKey(); }
const GrScratchKey& scratchKey() const {
return fSurface->resourcePriv().getScratchKey();
}
// Can this register be used by other proxies after this one?
bool isRecyclable(const GrCaps&, GrSurfaceProxy* proxy, int knownUseCount) const;
// Resolve the register allocation to an actual GrSurface. 'fOriginatingProxy'
// is used to cache the allocation when a given register is used by multiple
// proxies.
bool instantiateSurface(GrSurfaceProxy*, GrResourceProvider*);
GrSurface* surface() const { return fSurface.get(); }
sk_sp<GrSurface> refSurface() const { return fSurface; }
SkDEBUGCODE(uint32_t uniqueID() const { return fUniqueID; })
private:
GrSurfaceProxy* fOriginatingProxy;
GrScratchKey fScratchKey; // free pool wants a reference to this.
sk_sp<GrSurface> fSurface;
#ifdef SK_DEBUG
uint32_t fUniqueID;
@@ -167,9 +156,9 @@ private:
class Interval {
public:
Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
: fProxy(proxy)
, fStart(start)
, fEnd(end) {
: fProxy(proxy)
, fStart(start)
, fEnd(end) {
SkASSERT(proxy);
SkDEBUGCODE(fUniqueID = CreateUniqueID());
#if GR_TRACK_INTERVAL_CREATION
@@ -191,6 +180,8 @@ private:
Register* getRegister() const { return fRegister; }
void setRegister(Register* r) { fRegister = r; }
bool isSurfaceRecyclable() const;
void addUse() { fUses++; }
int uses() const { return fUses; }
@@ -255,9 +246,6 @@ private:
IntervalList fIntvlList; // All the intervals sorted by increasing start
IntervalList fActiveIntvls; // List of live intervals during assignment
// (sorted by increasing end)
IntervalList fFinishedIntvls; // All the completed intervals
// (sorted by increasing end)
UniqueKeyRegisterHash fUniqueKeyRegisters;
unsigned int fNumOps = 0;
SkDEBUGCODE(bool fAssigned = false;)

View File

@@ -139,13 +139,10 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
{{64, kNotRT, kRGBA, kA, 1, kNotB}, {64, kNotRT, kRGBA, kA, 1, kNotB}, kDontShare},
};
for (size_t i = 0; i < SK_ARRAY_COUNT(gOverlappingTests); i++) {
auto test = gOverlappingTests[i];
for (auto test : gOverlappingTests) {
sk_sp<GrSurfaceProxy> p1 = make_deferred(proxyProvider, caps, test.fP1);
sk_sp<GrSurfaceProxy> p2 = make_deferred(proxyProvider, caps, test.fP2);
reporter->push(SkStringPrintf("case %d", SkToInt(i)));
overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2), test.fExpectation);
reporter->pop();
}
auto beFormat = caps->getDefaultBackendFormat(GrColorType::kRGBA_8888, GrRenderable::kYes);
@@ -187,8 +184,7 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
{{64, kRT, kRGBA, kA, 1, kNotB}, {64, kRT, kRGBA, kA, 1, kNotB}, kShare},
};
for (size_t i = 0; i < SK_ARRAY_COUNT(gNonOverlappingTests); i++) {
auto test = gNonOverlappingTests[i];
for (auto test : gNonOverlappingTests) {
sk_sp<GrSurfaceProxy> p1 = make_deferred(proxyProvider, caps, test.fP1);
sk_sp<GrSurfaceProxy> p2 = make_deferred(proxyProvider, caps, test.fP2);
@@ -196,10 +192,8 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
continue; // creation can fail (i.e., for msaa4 on iOS)
}
reporter->push(SkStringPrintf("case %d", SkToInt(i)));
non_overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2),
test.fExpectation);
reporter->pop();
}
{
@@ -210,10 +204,8 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
sk_sp<GrSurfaceProxy> p1 = make_backend(direct, t[0].fP1);
sk_sp<GrSurfaceProxy> p2 = make_deferred(proxyProvider, caps, t[0].fP2);
reporter->push(SkString("wrapped case"));
non_overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2),
t[0].fExpectation);
reporter->pop();
}
}