Add GrResourceAllocator class + unit test

Change-Id: I2700e8cb4213479b680519ba67f078cc3fb71376
Reviewed-on: https://skia-review.googlesource.com/23661
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
Robert Phillips 2017-07-18 14:49:38 -04:00 committed by Skia Commit-Bot
parent 11f898f154
commit 5af44defbd
14 changed files with 393 additions and 13 deletions

View File

@ -161,6 +161,8 @@ skia_gpu_sources = [
"$_src/gpu/GrPathRenderingRenderTargetContext.h",
"$_src/gpu/GrRenderTargetOpList.cpp",
"$_src/gpu/GrRenderTargetOpList.h",
"$_src/gpu/GrResourceAllocator.cpp",
"$_src/gpu/GrResourceAllocator.h",
"$_src/gpu/GrResourceCache.cpp",
"$_src/gpu/GrResourceCache.h",
"$_src/gpu/GrResourceHandle.h",

View File

@ -187,6 +187,7 @@ tests_sources = [
"$_tests/RefDictTest.cpp",
"$_tests/RegionTest.cpp",
"$_tests/RenderTargetContextTest.cpp",
"$_tests/ResourceAllocatorTest.cpp",
"$_tests/ResourceCacheTest.cpp",
"$_tests/RoundRectTest.cpp",
"$_tests/RRectInPathTest.cpp",

View File

@ -23,7 +23,7 @@ public:
const GrRenderTargetProxy* asRenderTargetProxy() const override { return this; }
// Actually instantiate the backing rendertarget, if necessary.
bool instantiate(GrResourceProvider* resourceProvider) override;
bool instantiate(GrResourceProvider*) override;
GrFSAAType fsaaType() const {
if (!fSampleCnt) {
@ -68,6 +68,8 @@ protected:
// Wrapped version
GrRenderTargetProxy(sk_sp<GrSurface>);
sk_sp<GrSurface> createSurface(GrResourceProvider*) const override;
private:
size_t onUninstantiatedGpuMemorySize() const override;

View File

@ -329,6 +329,7 @@ public:
bool isWrapped_ForTesting() const;
SkDEBUGCODE(bool isInstantiated() const { return SkToBool(fTarget); })
SkDEBUGCODE(void validate(GrContext*) const;)
// Provides access to functions that aren't part of the public API.
@ -367,6 +368,13 @@ protected:
return this->internalHasPendingWrite();
}
virtual sk_sp<GrSurface> createSurface(GrResourceProvider*) const = 0;
void assign(sk_sp<GrSurface> surface);
sk_sp<GrSurface> createSurfaceImpl(GrResourceProvider*, int sampleCnt,
GrSurfaceFlags flags, bool isMipMapped,
SkDestinationSurfaceColorMode mipColorMode) const;
bool instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt,
GrSurfaceFlags flags, bool isMipMapped,
SkDestinationSurfaceColorMode mipColorMode);

View File

@ -49,6 +49,8 @@ protected:
SkDestinationSurfaceColorMode mipColorMode() const { return fMipColorMode; }
sk_sp<GrSurface> createSurface(GrResourceProvider*) const override;
private:
bool fIsMipMapped;
SkDestinationSurfaceColorMode fMipColorMode;

View File

@ -61,6 +61,22 @@ bool GrRenderTargetProxy::instantiate(GrResourceProvider* resourceProvider) {
return true;
}
sk_sp<GrSurface> GrRenderTargetProxy::createSurface(GrResourceProvider* resourceProvider) const {
static constexpr GrSurfaceFlags kFlags = kRenderTarget_GrSurfaceFlag;
sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, fSampleCnt, kFlags,
/* isMipMapped = */ false,
SkDestinationSurfaceColorMode::kLegacy);
if (!surface) {
return nullptr;
}
SkASSERT(surface->asRenderTarget());
// Check that our a priori computation matched the ultimate reality
SkASSERT(fRenderTargetFlags == surface->asRenderTarget()->renderTargetPriv().flags());
return surface;
}
int GrRenderTargetProxy::worstCaseWidth() const {
if (fTarget) {
return fTarget->width();

View File

@ -0,0 +1,115 @@
/*
* Copyright 2017 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrResourceAllocator.h"
#include "GrSurfaceProxy.h"
#include "GrSurfaceProxyPriv.h"
void GrResourceAllocator::addInterval(GrSurfaceProxy* proxy,
unsigned int start, unsigned int end) {
SkASSERT(start <= end);
SkASSERT(!fAssigned); // We shouldn't be adding any intervals after (or during) assignment
if (Interval* intvl = fIntvlHash.find(proxy->uniqueID().asUInt())) {
// Revise the interval for an existing use
SkASSERT(intvl->fEnd < start);
intvl->fEnd = end;
return;
}
// TODO: given the usage pattern an arena allocation scheme would work well here
Interval* newIntvl = new Interval(proxy, start, end);
fIntvlList.insertByIncreasingStart(newIntvl);
fIntvlHash.add(newIntvl);
}
GrResourceAllocator::Interval* GrResourceAllocator::IntervalList::popHead() {
Interval* temp = fHead;
if (temp) {
fHead = temp->fNext;
}
return temp;
}
// TODO: fuse this with insertByIncreasingEnd
void GrResourceAllocator::IntervalList::insertByIncreasingStart(Interval* intvl) {
if (!fHead) {
intvl->fNext = nullptr;
fHead = intvl;
} else if (intvl->fStart <= fHead->fStart) {
intvl->fNext = fHead;
fHead = intvl;
} else {
Interval* prev = fHead;
Interval* next = prev->fNext;
for (; next && intvl->fStart > next->fStart; prev = next, next = next->fNext) {
}
intvl->fNext = next;
prev->fNext = intvl;
}
}
// TODO: fuse this with insertByIncreasingStart
void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
if (!fHead) {
intvl->fNext = nullptr;
fHead = intvl;
} else if (intvl->fEnd <= fHead->fEnd) {
intvl->fNext = fHead;
fHead = intvl;
} else {
Interval* prev = fHead;
Interval* next = prev->fNext;
for (; next && intvl->fEnd > next->fEnd; prev = next, next = next->fNext) {
}
intvl->fNext = next;
prev->fNext = intvl;
}
}
// 'surface' can be reused. Add it back to the free pool.
void GrResourceAllocator::freeUpSurface(GrSurface* surface) {
// TODO: add free pool
}
// First try to reuse one of the recently allocated/used GrSurfaces in the free pool.
// If we can't find a useable one, create a new one.
// TODO: handle being overbudget
sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(GrSurfaceProxy* proxy) {
// TODO: add free pool
// Try to grab one from the resource cache
return proxy->priv().createSurface(fResourceProvider);
}
// Remove any intervals that end before the current index. Return their GrSurfaces
// to the free pool.
void GrResourceAllocator::expire(unsigned int curIndex) {
while (!fActiveIntvls.empty() && fActiveIntvls.peekHead()->fEnd < curIndex) {
Interval* temp = fActiveIntvls.popHead();
this->freeUpSurface(temp->fProxy->priv().peekSurface());
delete temp;
}
}
void GrResourceAllocator::assign() {
fIntvlHash.reset(); // we don't need this anymore
SkDEBUGCODE(fAssigned = true;)
while (Interval* cur = fIntvlList.popHead()) {
this->expire(cur->fStart);
// TODO: add over budget handling here?
sk_sp<GrSurface> surface = this->findSurfaceFor(cur->fProxy);
if (surface) {
cur->fProxy->priv().assign(std::move(surface));
}
// TODO: handle resource allocation failure upstack
fActiveIntvls.insertByIncreasingEnd(cur);
}
}
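
The assign() loop above is an interval sweep in the spirit of linear-scan register allocation: intervals are visited in order of increasing start, anything whose interval has already ended is expired (eventually back to the free pool) first, and only the proxies that truly overlap hold a GrSurface at the same time. Below is a self-contained sketch of the same sweep on plain data, assuming nothing from Skia; the Interval struct, the three-interval input, and the peak-usage counter are purely illustrative.

#include <algorithm>
#include <cstdio>
#include <queue>
#include <vector>

struct Interval { int id; int start; int end; };  // usage interval, end is inclusive

int main() {
    // Mirrors fIntvlList: intervals kept sorted by increasing start.
    std::vector<Interval> intervals = {{0, 0, 4}, {1, 1, 2}, {2, 5, 6}};
    std::sort(intervals.begin(), intervals.end(),
              [](const Interval& a, const Interval& b) { return a.start < b.start; });

    // Mirrors fActiveIntvls: live intervals ordered by increasing end.
    auto laterEnd = [](const Interval& a, const Interval& b) { return a.end > b.end; };
    std::priority_queue<Interval, std::vector<Interval>, decltype(laterEnd)> active(laterEnd);

    int inUse = 0, peak = 0;
    for (const Interval& cur : intervals) {
        // "expire": intervals that ended before 'cur' starts would return their
        // surfaces to the free pool here.
        while (!active.empty() && active.top().end < cur.start) {
            active.pop();
            --inUse;
        }
        ++inUse;                     // "findSurfaceFor" + "assign" for the new interval
        peak = std::max(peak, inUse);
        active.push(cur);
    }
    printf("peak simultaneous surfaces: %d\n", peak);  // prints 2 for this input
    return 0;
}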

View File

@ -0,0 +1,127 @@
/*
* Copyright 2017 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrResourceAllocator_DEFINED
#define GrResourceAllocator_DEFINED
#include "GrSurfaceProxy.h"
#include "SkTDynamicHash.h"
class GrResourceProvider;
/*
* The ResourceAllocator explicitly distributes GPU resources at flush time. It operates by
* being given the usage intervals of the various proxies. It keeps these intervals in a singly
* linked list sorted by increasing start index. (It also maintains a hash table from proxyID
* to interval to find proxy reuse). When it comes time to allocate the resources it
* traverses the sorted list and:
*     - removes intervals from the active list that have completed (returning their GrSurfaces
*       to the free pool)
*     - allocates a new resource (preferably from the free pool) for the new interval
*     - adds the new interval to the active list (that is sorted by increasing end index)
*
* Note: the op indices (used in the usage intervals) come from the order of the ops in
* their opLists after the opList DAG has been linearized.
*/
class GrResourceAllocator {
public:
GrResourceAllocator(GrResourceProvider* resourceProvider)
: fResourceProvider(resourceProvider) {
}
unsigned int curOp() const { return fNumOps; }
void incOps() { fNumOps++; }
unsigned int numOps() const { return fNumOps; }
// Add a usage interval from start to end inclusive. This is usually used for renderTargets.
// If an interval already exists for this proxy it will be expanded to include the new range.
void addInterval(GrSurfaceProxy*, unsigned int start, unsigned int end);
// Add an interval that spans just the current op. Usually this is for texture uses.
// If an interval already exists for this proxy it will be expanded to include the new operation.
void addInterval(GrSurfaceProxy* proxy) {
this->addInterval(proxy, fNumOps, fNumOps);
}
void assign();
private:
class Interval;
// Remove dead intervals from the active list
void expire(unsigned int curIndex);
// These two methods wrap the interactions with the free pool
void freeUpSurface(GrSurface* surface);
sk_sp<GrSurface> findSurfaceFor(GrSurfaceProxy* proxy);
struct UniqueHashTraits {
static const GrUniqueKey& GetKey(const GrSurface& s) { return s.getUniqueKey(); }
static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }
};
typedef SkTDynamicHash<GrSurface, GrUniqueKey, UniqueHashTraits> UniqueHash;
typedef SkTDynamicHash<Interval, unsigned int> IntvlHash;
class Interval {
public:
Interval(GrSurfaceProxy* proxy, unsigned int start, unsigned int end)
: fProxy(proxy)
, fProxyID(proxy->uniqueID().asUInt())
, fStart(start)
, fEnd(end)
, fNext(nullptr) {
SkASSERT(proxy);
}
// for SkTDynamicHash
static const uint32_t& GetKey(const Interval& intvl) {
return intvl.fProxyID;
}
static uint32_t Hash(const uint32_t& key) { return key; }
GrSurfaceProxy* fProxy;
uint32_t fProxyID; // This is here because DynamicHash requires a reference to the key
unsigned int fStart;
unsigned int fEnd;
Interval* fNext;
};
class IntervalList {
public:
IntervalList() = default;
~IntervalList() {
while (fHead) {
Interval* temp = fHead;
fHead = temp->fNext;
delete temp;
}
}
bool empty() const { return !SkToBool(fHead); }
const Interval* peekHead() const { return fHead; }
Interval* popHead();
void insertByIncreasingStart(Interval*);
void insertByIncreasingEnd(Interval*);
private:
Interval* fHead = nullptr;
};
GrResourceProvider* fResourceProvider;
UniqueHash fFreePool; // Recently created/used GrSurfaces
IntvlHash fIntvlHash; // All the intervals, hashed by proxyID
IntervalList fIntvlList; // All the intervals sorted by increasing start
IntervalList fActiveIntvls; // List of live intervals during assignment
// (sorted by increasing end)
unsigned int fNumOps = 0;
SkDEBUGCODE(bool fAssigned = false;)
};
#endif // GrResourceAllocator_DEFINED
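
A minimal sketch of how a caller might drive this interface at flush time. The flush-time wiring is not part of this change, so the helper below and its two-op scenario are hypothetical; only the GrResourceAllocator calls themselves come from the header above.

void assignResourcesAtFlush(GrResourceProvider* resourceProvider,
                            GrSurfaceProxy* dst, GrSurfaceProxy* tex0, GrSurfaceProxy* tex1) {
    GrResourceAllocator alloc(resourceProvider);

    // Op 0 samples tex0 and draws into dst.
    alloc.addInterval(tex0);                  // spans just the current op
    alloc.incOps();

    // Op 1 samples tex1 and draws into dst.
    alloc.addInterval(tex1);
    alloc.incOps();

    // The render target is live across both ops (inclusive range).
    alloc.addInterval(dst, 0, alloc.curOp() - 1);

    // Expire finished intervals, find/create backing GrSurfaces, and assign them
    // to the proxies before the opLists are executed.
    alloc.assign();
}

With the free pool still a TODO, each proxy gets a freshly created surface; once it lands, tex0 and tex1 (assuming compatible descs) could end up sharing one GrSurface since their intervals do not overlap, while dst always needs its own.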

View File

@ -40,12 +40,10 @@ GrSurfaceProxy::~GrSurfaceProxy() {
SkASSERT(!fLastOpList);
}
bool GrSurfaceProxy::instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt,
GrSurfaceFlags flags, bool isMipMapped,
SkDestinationSurfaceColorMode mipColorMode) {
if (fTarget) {
return true;
}
sk_sp<GrSurface> GrSurfaceProxy::createSurfaceImpl(
GrResourceProvider* resourceProvider, int sampleCnt,
GrSurfaceFlags flags, bool isMipMapped,
SkDestinationSurfaceColorMode mipColorMode) const {
GrSurfaceDesc desc;
desc.fConfig = fConfig;
desc.fWidth = fWidth;
@ -58,16 +56,22 @@ bool GrSurfaceProxy::instantiateImpl(GrResourceProvider* resourceProvider, int s
desc.fFlags |= kPerformInitialClear_GrSurfaceFlag;
}
sk_sp<GrSurface> surface;
if (SkBackingFit::kApprox == fFit) {
fTarget = resourceProvider->createApproxTexture(desc, fFlags).release();
surface.reset(resourceProvider->createApproxTexture(desc, fFlags).release());
} else {
fTarget = resourceProvider->createTexture(desc, fBudgeted, fFlags).release();
surface.reset(resourceProvider->createTexture(desc, fBudgeted, fFlags).release());
}
if (!fTarget) {
return false;
if (surface) {
surface->asTexture()->texturePriv().setMipColorMode(mipColorMode);
}
fTarget->asTexture()->texturePriv().setMipColorMode(mipColorMode);
return surface;
}
void GrSurfaceProxy::assign(sk_sp<GrSurface> surface) {
SkASSERT(!fTarget && surface);
fTarget = surface.release();
this->INHERITED::transferRefs();
#ifdef SK_DEBUG
@ -75,7 +79,22 @@ bool GrSurfaceProxy::instantiateImpl(GrResourceProvider* resourceProvider, int s
SkASSERT(fTarget->gpuMemorySize() <= this->getRawGpuMemorySize_debugOnly());
}
#endif
}
bool GrSurfaceProxy::instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt,
GrSurfaceFlags flags, bool isMipMapped,
SkDestinationSurfaceColorMode mipColorMode) {
if (fTarget) {
return true;
}
sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, sampleCnt, flags,
isMipMapped, mipColorMode);
if (!surface) {
return false;
}
this->assign(std::move(surface));
return true;
}
@ -226,7 +245,6 @@ sk_sp<GrTextureProxy> GrSurfaceProxy::MakeDeferredMipMap(
return GrSurfaceProxy::MakeWrapped(std::move(tex));
}
sk_sp<GrTextureProxy> GrSurfaceProxy::MakeWrappedBackend(GrContext* context,
GrBackendTexture& backendTex,
GrSurfaceOrigin origin) {

View File

@ -43,6 +43,15 @@ public:
// future when the proxy is actually used/instantiated.
bool hasPendingWrite() const { return fProxy->hasPendingWrite(); }
// Create a GrSurface-derived class that meets the requirements (i.e., desc, renderability)
// of the GrSurfaceProxy.
sk_sp<GrSurface> createSurface(GrResourceProvider* resourceProvider) const {
return fProxy->createSurface(resourceProvider);
}
// Assign this proxy the provided GrSurface as its backing surface
void assign(sk_sp<GrSurface> surface) { fProxy->assign(std::move(surface)); }
// Don't abuse this call!!!!!!!
bool isExact() const { return SkBackingFit::kExact == fProxy->fFit; }

View File

@ -33,6 +33,17 @@ bool GrTextureProxy::instantiate(GrResourceProvider* resourceProvider) {
return true;
}
sk_sp<GrSurface> GrTextureProxy::createSurface(GrResourceProvider* resourceProvider) const {
sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, 0, kNone_GrSurfaceFlags,
fIsMipMapped, fMipColorMode);
if (!surface) {
return nullptr;
}
SkASSERT(surface->asTexture());
return surface;
}
void GrTextureProxy::setMipColorMode(SkDestinationSurfaceColorMode colorMode) {
SkASSERT(!fTarget || fTarget->asTexture());

View File

@ -56,3 +56,20 @@ bool GrTextureRenderTargetProxy::instantiate(GrResourceProvider* resourceProvide
return true;
}
sk_sp<GrSurface> GrTextureRenderTargetProxy::createSurface(
GrResourceProvider* resourceProvider) const {
static constexpr GrSurfaceFlags kFlags = kRenderTarget_GrSurfaceFlag;
sk_sp<GrSurface> surface = this->createSurfaceImpl(resourceProvider, this->numStencilSamples(),
kFlags, this->isMipMapped(),
this->mipColorMode());
if (!surface) {
return nullptr;
}
SkASSERT(surface->asRenderTarget());
SkASSERT(surface->asTexture());
return surface;
}

View File

@ -33,6 +33,7 @@ private:
GrTextureRenderTargetProxy(sk_sp<GrSurface>);
bool instantiate(GrResourceProvider*) override;
sk_sp<GrSurface> createSurface(GrResourceProvider*) const override;
size_t onUninstantiatedGpuMemorySize() const override;
};

View File

@ -0,0 +1,51 @@
/*
* Copyright 2017 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
// Include here to ensure SK_SUPPORT_GPU is set correctly before it is examined.
#include "SkTypes.h"
#if SK_SUPPORT_GPU
#include "Test.h"
#include "GrResourceAllocator.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTextureProxy.h"
// Basic test that two proxies with overlapping intervals and compatible descriptors are
// assigned different GrSurfaces.
static void overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider) {
GrSurfaceDesc desc;
desc.fConfig = kRGBA_8888_GrPixelConfig;
desc.fWidth = 64;
desc.fHeight = 64;
sk_sp<GrSurfaceProxy> p1 = GrSurfaceProxy::MakeDeferred(resourceProvider, desc,
SkBackingFit::kApprox,
SkBudgeted::kNo);
sk_sp<GrSurfaceProxy> p2 = GrSurfaceProxy::MakeDeferred(resourceProvider, desc,
SkBackingFit::kApprox,
SkBudgeted::kNo);
GrResourceAllocator alloc(resourceProvider);
alloc.addInterval(p1.get(), 0, 4);
alloc.addInterval(p2.get(), 1, 2);
alloc.assign();
REPORTER_ASSERT(reporter, p1->priv().peekSurface());
REPORTER_ASSERT(reporter, p2->priv().peekSurface());
REPORTER_ASSERT(reporter, p1->underlyingUniqueID() != p2->underlyingUniqueID());
}
DEF_GPUTEST_FOR_ALL_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
GrResourceProvider* resourceProvider = ctxInfo.grContext()->resourceProvider();
overlap_test(reporter, resourceProvider);
}
#endif
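
A hypothetical follow-on test, not part of this change: once the free-pool TODOs in GrResourceAllocator::freeUpSurface and findSurfaceFor are filled in, two compatible proxies with non-overlapping intervals should be able to share a single GrSurface. The sketch mirrors overlap_test above; the final equality assert is left commented out because, as committed, each proxy still receives its own surface.

static void non_overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider) {
    GrSurfaceDesc desc;
    desc.fConfig = kRGBA_8888_GrPixelConfig;
    desc.fWidth = 64;
    desc.fHeight = 64;

    sk_sp<GrSurfaceProxy> p1 = GrSurfaceProxy::MakeDeferred(resourceProvider, desc,
                                                            SkBackingFit::kApprox,
                                                            SkBudgeted::kNo);
    sk_sp<GrSurfaceProxy> p2 = GrSurfaceProxy::MakeDeferred(resourceProvider, desc,
                                                            SkBackingFit::kApprox,
                                                            SkBudgeted::kNo);

    GrResourceAllocator alloc(resourceProvider);

    alloc.addInterval(p1.get(), 0, 2);
    alloc.addInterval(p2.get(), 3, 5);   // begins only after p1's interval has expired
    alloc.assign();

    REPORTER_ASSERT(reporter, p1->priv().peekSurface());
    REPORTER_ASSERT(reporter, p2->priv().peekSurface());
    // With the free pool in place this would become:
    // REPORTER_ASSERT(reporter, p1->underlyingUniqueID() == p2->underlyingUniqueID());
}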