Add initial version of GrResourceAllocator's free pool

Change-Id: Ibd60303ffb1d3ea814dad0cee3a521f94da63ca8
Reviewed-on: https://skia-review.googlesource.com/24262
Reviewed-by: Brian Salomon <bsalomon@google.com>
Commit-Queue: Robert Phillips <robertphillips@google.com>
Robert Phillips 2017-07-20 15:09:23 -04:00 committed by Skia Commit-Bot
parent 8b8b2244c3
commit 6ec9a4ffe4
7 changed files with 241 additions and 27 deletions
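
For orientation before the per-file diffs: GrResourceAllocator now keeps recently freed GrSurfaces in a pool keyed by their scratch key. When a proxy's interval expires its surface goes back into the pool, and a later interval checks the pool before asking the resource provider for a brand new surface. A minimal, self-contained sketch of that find-or-create pattern follows; FreePool, Surface, and ScratchKey are illustrative stand-ins, not Skia's API.

    #include <memory>
    #include <string>
    #include <unordered_map>

    // Illustrative stand-ins for GrScratchKey / GrSurface.
    using ScratchKey = std::string;
    struct Surface { ScratchKey key; };

    class FreePool {
    public:
        // Called when an interval ends: the surface becomes reusable.
        void recycle(std::shared_ptr<Surface> s) {
            ScratchKey key = s->key;   // copy the key before moving the surface
            fPool.emplace(std::move(key), std::move(s));
        }

        // Called when an interval starts: reuse a compatible surface if one is
        // available, otherwise "create" a new one.
        std::shared_ptr<Surface> findOrCreate(const ScratchKey& key) {
            auto it = fPool.find(key);
            if (it != fPool.end()) {
                std::shared_ptr<Surface> s = std::move(it->second);
                fPool.erase(it);
                return s;                                      // reuse
            }
            return std::make_shared<Surface>(Surface{key});    // create
        }

    private:
        // A multimap because several live surfaces can share one scratch key.
        std::unordered_multimap<ScratchKey, std::shared_ptr<Surface>> fPool;
    };

In the actual change the pool is an SkTMultiMap<GrSurface, GrScratchKey, FreePoolTraits>, and the scratch key folds in config, binned dimensions, renderability, sample count, and mipmap status (see GrSurfaceProxy::computeScratchKey below), which is what determines whether two proxies count as compatible.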

View File

@@ -329,7 +329,6 @@ public:
bool isWrapped_ForTesting() const;
SkDEBUGCODE(bool isInstantiated() const { return SkToBool(fTarget); })
SkDEBUGCODE(void validate(GrContext*) const;)
// Provides access to functions that aren't part of the public API.
@@ -368,6 +367,8 @@ protected:
return this->internalHasPendingWrite();
}
void computeScratchKey(GrScratchKey*) const;
virtual sk_sp<GrSurface> createSurface(GrResourceProvider*) const = 0;
void assign(sk_sp<GrSurface> surface);

View File

@@ -30,8 +30,14 @@ public:
SkTMultiMap() : fCount(0) {}
~SkTMultiMap() {
SkASSERT(fCount == 0);
SkASSERT(fHash.count() == 0);
typename SkTDynamicHash<ValueList, Key>::Iter iter(&fHash);
for ( ; !iter.done(); ++iter) {
ValueList* next;
for (ValueList* cur = &(*iter); cur; cur = next) {
next = cur->fNext;
delete cur;
}
}
}
void insert(const Key& key, T* value) {
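
A side effect of pooling: an SkTMultiMap can now be torn down while it still holds entries, so the destructor above walks every hash entry and deletes the chained ValueList nodes. A rough stand-alone sketch of that delete-each-chain pattern, assuming a plain std::unordered_map of hand-rolled nodes in place of SkTDynamicHash:

    #include <unordered_map>

    // Values that share a key are chained together, roughly like
    // SkTMultiMap's internal ValueList.
    struct Node {
        int   fValue;
        Node* fNext;
    };

    class TinyMultiMap {
    public:
        ~TinyMultiMap() {
            // Free every chain that is still present at destruction time.
            for (auto& entry : fHash) {
                Node* next = nullptr;
                for (Node* cur = entry.second; cur; cur = next) {
                    next = cur->fNext;   // read the link before deleting the node
                    delete cur;
                }
            }
        }

        void insert(int key, int value) {
            auto it = fHash.find(key);
            Node* head = (it == fHash.end()) ? nullptr : it->second;
            fHash[key] = new Node{value, head};   // push onto the chain
        }

    private:
        std::unordered_map<int, Node*> fHash;   // key -> head of chain
    };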

View File

@@ -75,16 +75,31 @@ void GrResourceAllocator::IntervalList::insertByIncreasingEnd(Interval* intvl) {
// 'surface' can be reused. Add it back to the free pool.
void GrResourceAllocator::freeUpSurface(GrSurface* surface) {
// TODO: add free pool
const GrScratchKey &key = surface->resourcePriv().getScratchKey();
if (!key.isValid()) {
return; // can't do it w/o a valid scratch key
}
// TODO: fix this insertion so we get a more LRU-ish behavior
fFreePool.insert(key, surface);
}
// First try to reuse one of the recently allocated/used GrSurfaces in the free pool.
// If we can't find a useable one, create a new one.
// TODO: handle being overbudget
sk_sp<GrSurface> GrResourceAllocator::findSurfaceFor(GrSurfaceProxy* proxy) {
// TODO: add free pool
// First look in the free pool
GrScratchKey key;
// Try to grab one from the resource cache
proxy->priv().computeScratchKey(&key);
GrSurface* surface = fFreePool.find(key);
if (surface) {
return sk_ref_sp(surface);
}
// Failing that, try to grab a new one from the resource cache
return proxy->priv().createSurface(fResourceProvider);
}
@@ -104,6 +119,12 @@ void GrResourceAllocator::assign() {
while (Interval* cur = fIntvlList.popHead()) {
this->expire(cur->fStart);
if (cur->fProxy->priv().isInstantiated()) {
fActiveIntvls.insertByIncreasingEnd(cur);
continue;
}
// TODO: add over budget handling here?
sk_sp<GrSurface> surface = this->findSurfaceFor(cur->fProxy);
if (surface) {
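
The assign() loop above visits intervals in increasing start order: it first expires any active intervals that ended before the new interval begins (returning their surfaces to the free pool via freeUpSurface) and only then calls findSurfaceFor. A condensed sketch of why that ordering lets non-overlapping intervals share a backing store while overlapping ones get distinct surfaces; the types are illustrative and scratch-key matching is left out:

    #include <cassert>
    #include <memory>
    #include <vector>

    struct Surface { int id; };

    struct Interval {
        int start, end;
        std::shared_ptr<Surface> assigned;
    };

    int main() {
        // Two non-overlapping intervals, already sorted by increasing start.
        std::vector<Interval> intervals = { {0, 2, nullptr}, {3, 5, nullptr} };
        std::vector<Interval*> active;                     // intervals still in flight
        std::vector<std::shared_ptr<Surface>> freePool;    // expired surfaces
        int nextId = 0;

        for (Interval& cur : intervals) {
            // Expire: anything that ended before 'cur' starts goes back to the pool.
            for (auto it = active.begin(); it != active.end();) {
                if ((*it)->end < cur.start) {
                    freePool.push_back((*it)->assigned);
                    it = active.erase(it);
                } else {
                    ++it;
                }
            }
            // Reuse from the pool if possible, otherwise "create" a new surface.
            if (!freePool.empty()) {
                cur.assigned = freePool.back();
                freePool.pop_back();
            } else {
                cur.assigned = std::make_shared<Surface>(Surface{nextId++});
            }
            active.push_back(&cur);
        }

        // {0,2} and {3,5} don't overlap, so the second interval reused the first
        // interval's surface; overlapping intervals would each get their own.
        assert(intervals[0].assigned == intervals[1].assigned);
        return 0;
    }

This is the property the new overlap_test/non_overlap_test pair below checks through underlyingUniqueID(); the real allocator additionally requires matching scratch keys before handing a pooled surface back out.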

View File

@@ -8,8 +8,11 @@
#ifndef GrResourceAllocator_DEFINED
#define GrResourceAllocator_DEFINED
#include "GrGpuResourcePriv.h"
#include "GrSurface.h"
#include "GrSurfaceProxy.h"
#include "SkTDynamicHash.h"
#include "SkTMultiMap.h"
class GrResourceProvider;
@@ -60,12 +63,15 @@ private:
void freeUpSurface(GrSurface* surface);
sk_sp<GrSurface> findSurfaceFor(GrSurfaceProxy* proxy);
struct UniqueHashTraits {
static const GrUniqueKey& GetKey(const GrSurface& s) { return s.getUniqueKey(); }
struct FreePoolTraits {
static const GrScratchKey& GetKey(const GrSurface& s) {
return s.resourcePriv().getScratchKey();
}
static uint32_t Hash(const GrUniqueKey& key) { return key.hash(); }
static uint32_t Hash(const GrScratchKey& key) { return key.hash(); }
};
typedef SkTDynamicHash<GrSurface, GrUniqueKey, UniqueHashTraits> UniqueHash;
typedef SkTMultiMap<GrSurface, GrScratchKey, FreePoolTraits> FreePoolMultiMap;
typedef SkTDynamicHash<Interval, unsigned int> IntvlHash;
class Interval {
@@ -114,7 +120,7 @@ private:
};
GrResourceProvider* fResourceProvider;
UniqueHash fFreePool; // Recently created/used GrSurfaces
FreePoolMultiMap fFreePool; // Recently created/used GrSurfaces
IntvlHash fIntvlHash; // All the intervals, hashed by proxyID
IntervalList fIntvlList; // All the intervals sorted by increasing start
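
The header swaps the pool from a unique-key SkTDynamicHash to an SkTMultiMap keyed by scratch key. FreePoolTraits follows the usual traits convention: a static GetKey() extracts the key stored inside the value and a static Hash() hashes it, so the container never stores keys separately. A stripped-down illustration of that convention with a toy container; ToySurface, ToyTraits, and ToyMultiMap are made-up names, not Skia's implementation:

    #include <cstdint>
    #include <functional>
    #include <string>
    #include <unordered_map>

    struct ToySurface {
        std::string fScratchKey;   // the key lives inside the value
        int         fWidth = 0;
    };

    // Same shape as FreePoolTraits: the container asks the traits how to
    // extract and hash the key.
    struct ToyTraits {
        static const std::string& GetKey(const ToySurface& s) { return s.fScratchKey; }
        static uint32_t Hash(const std::string& key) {
            return static_cast<uint32_t>(std::hash<std::string>{}(key));
        }
    };

    template <typename T, typename Key, typename Traits>
    class ToyMultiMap {
    public:
        void insert(T* value) {
            fMap.emplace(Traits::Hash(Traits::GetKey(*value)), value);
        }
        T* find(const Key& key) {
            auto range = fMap.equal_range(Traits::Hash(key));
            for (auto it = range.first; it != range.second; ++it) {
                if (Traits::GetKey(*it->second) == key) {   // guard against hash collisions
                    return it->second;
                }
            }
            return nullptr;
        }
    private:
        // Multimap semantics: several values may carry the same key.
        std::unordered_multimap<uint32_t, T*> fMap;
    };

    // Usage: ToyMultiMap<ToySurface, std::string, ToyTraits> freePool;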

View File

@@ -98,6 +98,32 @@ bool GrSurfaceProxy::instantiateImpl(GrResourceProvider* resourceProvider, int s
return true;
}
void GrSurfaceProxy::computeScratchKey(GrScratchKey* key) const {
const GrRenderTargetProxy* rtp = this->asRenderTargetProxy();
int sampleCount = 0;
if (rtp) {
sampleCount = rtp->numStencilSamples();
}
const GrTextureProxy* tp = this->asTextureProxy();
bool hasMipMaps = false;
if (tp) {
hasMipMaps = tp->isMipMapped();
}
int width = this->width();
int height = this->height();
if (SkBackingFit::kApprox == fFit) {
// bin by pow2 with a reasonable min
width = SkTMax(GrResourceProvider::kMinScratchTextureSize, GrNextPow2(width));
height = SkTMax(GrResourceProvider::kMinScratchTextureSize, GrNextPow2(height));
}
GrTexturePriv::ComputeScratchKey(this->config(), width, height,
this->origin(), SkToBool(rtp), sampleCount,
hasMipMaps, key);
}
void GrSurfaceProxy::setLastOpList(GrOpList* opList) {
#ifdef SK_DEBUG
if (fLastOpList) {
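
The new GrSurfaceProxy::computeScratchKey() above is what decides which proxies can end up sharing a pooled surface: for SkBackingFit::kApprox proxies the dimensions are binned to the next power of two with a floor of kMinScratchTextureSize, so a 50x50 and a 64x64 approx-fit proxy produce the same key while 127 and 255 do not. A tiny worked sketch of just that binning step; the pow2 helper and the floor value of 16 are assumptions standing in for GrNextPow2 and kMinScratchTextureSize:

    #include <algorithm>
    #include <cstdio>

    // Round n up to the next power of two (returns n if it already is one),
    // standing in for GrNextPow2.
    static int next_pow2(int n) {
        int p = 1;
        while (p < n) {
            p <<= 1;
        }
        return p;
    }

    int main() {
        const int kMinScratch = 16;   // assumed stand-in for kMinScratchTextureSize
        for (int dim : {50, 64, 127, 255}) {
            int binned = std::max(kMinScratch, next_pow2(dim));
            std::printf("%3d -> %3d\n", dim, binned);
        }
        // Prints 50 -> 64, 64 -> 64, 127 -> 128, 255 -> 256: the 64/50 approx-fit
        // test case below can share, while the 255/127 case cannot.
        return 0;
    }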

View File

@@ -15,6 +15,8 @@
data members or virtual methods. */
class GrSurfaceProxyPriv {
public:
bool isInstantiated() const { return SkToBool(fProxy->fTarget); }
// This should only be called after a successful call to instantiate
GrSurface* peekSurface() const {
SkASSERT(fProxy->fTarget);
@@ -43,6 +45,8 @@ public:
// future when the proxy is actually used/instantiated.
bool hasPendingWrite() const { return fProxy->hasPendingWrite(); }
void computeScratchKey(GrScratchKey* key) const { return fProxy->computeScratchKey(key); }
// Create a GrSurface-derived class that meets the requirements (i.e., desc, renderability)
// of the GrSurfaceProxy.
sk_sp<GrSurface> createSurface(GrResourceProvider* resourceProvider) const {

View File

@@ -11,25 +11,63 @@
#if SK_SUPPORT_GPU
#include "Test.h"
#include "GrContextPriv.h"
#include "GrGpu.h"
#include "GrResourceAllocator.h"
#include "GrResourceProvider.h"
#include "GrSurfaceProxyPriv.h"
#include "GrTest.h"
#include "GrTexture.h"
#include "GrTextureProxy.h"
struct ProxyParams {
int fSize;
bool fIsRT;
GrPixelConfig fConfig;
SkBackingFit fFit;
int fSampleCnt;
GrSurfaceOrigin fOrigin;
// TODO: do we care about mipmapping
};
static sk_sp<GrSurfaceProxy> make_deferred(GrResourceProvider* resourceProvider,
const ProxyParams& p) {
GrSurfaceDesc desc;
desc.fFlags = p.fIsRT ? kRenderTarget_GrSurfaceFlag : kNone_GrSurfaceFlags;
desc.fOrigin = p.fOrigin;
desc.fWidth = p.fSize;
desc.fHeight = p.fSize;
desc.fConfig = p.fConfig;
desc.fSampleCnt = p.fSampleCnt;
return GrSurfaceProxy::MakeDeferred(resourceProvider, desc, p.fFit, SkBudgeted::kNo);
}
static sk_sp<GrSurfaceProxy> make_backend(GrContext* context, const ProxyParams& p,
GrBackendObject* backendTexHandle) {
*backendTexHandle = context->getGpu()->createTestingOnlyBackendTexture(
nullptr, p.fSize, p.fSize, p.fConfig);
GrBackendTexture backendTex = GrTest::CreateBackendTexture(context->contextPriv().getBackend(),
p.fSize,
p.fSize,
p.fConfig,
*backendTexHandle);
sk_sp<GrSurface> tex = context->resourceProvider()->wrapBackendTexture(
backendTex, p.fOrigin,
kBorrow_GrWrapOwnership);
return GrSurfaceProxy::MakeWrapped(std::move(tex));
}
static void cleanup_backend(GrContext* context, GrBackendObject* backendTexHandle) {
context->getGpu()->deleteTestingOnlyBackendTexture(*backendTexHandle);
}
// Basic test that two proxies with overlapping intervals and compatible descriptors are
// assigned different GrSurfaces.
static void overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider) {
GrSurfaceDesc desc;
desc.fConfig = kRGBA_8888_GrPixelConfig;
desc.fWidth = 64;
desc.fHeight = 64;
sk_sp<GrSurfaceProxy> p1 = GrSurfaceProxy::MakeDeferred(resourceProvider, desc,
SkBackingFit::kApprox,
SkBudgeted::kNo);
sk_sp<GrSurfaceProxy> p2 = GrSurfaceProxy::MakeDeferred(resourceProvider, desc,
SkBackingFit::kApprox,
SkBudgeted::kNo);
static void overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider,
sk_sp<GrSurfaceProxy> p1, sk_sp<GrSurfaceProxy> p2,
bool expectedResult) {
GrResourceAllocator alloc(resourceProvider);
alloc.addInterval(p1.get(), 0, 4);
@@ -39,13 +77,125 @@ static void overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resou
REPORTER_ASSERT(reporter, p1->priv().peekSurface());
REPORTER_ASSERT(reporter, p2->priv().peekSurface());
REPORTER_ASSERT(reporter, p1->underlyingUniqueID() != p2->underlyingUniqueID());
bool doTheBackingStoresMatch = p1->underlyingUniqueID() == p2->underlyingUniqueID();
REPORTER_ASSERT(reporter, expectedResult == doTheBackingStoresMatch);
SkASSERT(expectedResult == doTheBackingStoresMatch);
}
DEF_GPUTEST_FOR_ALL_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
// Test various cases when two proxies do not have overlapping intervals.
// This mainly acts as a test of the ResourceAllocator's free pool.
static void non_overlap_test(skiatest::Reporter* reporter, GrResourceProvider* resourceProvider,
sk_sp<GrSurfaceProxy> p1, sk_sp<GrSurfaceProxy> p2,
bool expectedResult) {
GrResourceAllocator alloc(resourceProvider);
alloc.addInterval(p1.get(), 0, 2);
alloc.addInterval(p2.get(), 3, 5);
alloc.assign();
REPORTER_ASSERT(reporter, p1->priv().peekSurface());
REPORTER_ASSERT(reporter, p2->priv().peekSurface());
bool doTheBackingStoresMatch = p1->underlyingUniqueID() == p2->underlyingUniqueID();
REPORTER_ASSERT(reporter, expectedResult == doTheBackingStoresMatch);
SkASSERT(expectedResult == doTheBackingStoresMatch);
}
DEF_GPUTEST_FOR_RENDERING_CONTEXTS(ResourceAllocatorTest, reporter, ctxInfo) {
GrResourceProvider* resourceProvider = ctxInfo.grContext()->resourceProvider();
overlap_test(reporter, resourceProvider);
struct TestCase {
ProxyParams fP1;
ProxyParams fP2;
bool fExpectation;
};
constexpr bool kRT = true;
constexpr bool kNotRT = false;
constexpr bool kShare = true;
constexpr bool kDontShare = false;
// On some platforms, non-RT GrSurfaces are never recycled.
bool kConditionallyShare = resourceProvider->caps()->reuseScratchTextures();
const GrPixelConfig kRGBA = kRGBA_8888_GrPixelConfig;
const GrPixelConfig kBGRA = kBGRA_8888_GrPixelConfig;
const SkBackingFit kE = SkBackingFit::kExact;
const SkBackingFit kA = SkBackingFit::kApprox;
const GrSurfaceOrigin kTL = kTopLeft_GrSurfaceOrigin;
const GrSurfaceOrigin kBL = kBottomLeft_GrSurfaceOrigin;
//--------------------------------------------------------------------------------------------
TestCase gOverlappingTests[] = {
//----------------------------------------------------------------------------------------
// Two proxies with overlapping intervals and compatible descriptors should never share
// RT version
{ { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kDontShare },
// non-RT version
{ { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL }, kDontShare },
};
for (auto test : gOverlappingTests) {
sk_sp<GrSurfaceProxy> p1 = make_deferred(resourceProvider, test.fP1);
sk_sp<GrSurfaceProxy> p2 = make_deferred(resourceProvider, test.fP2);
overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2), test.fExpectation);
}
int k2 = ctxInfo.grContext()->caps()->getSampleCount(2, kRGBA);
int k4 = ctxInfo.grContext()->caps()->getSampleCount(4, kRGBA);
//--------------------------------------------------------------------------------------------
TestCase gNonOverlappingTests[] = {
//----------------------------------------------------------------------------------------
// Two non-overlapping intervals w/ compatible proxies should share
// both same size & approx
{ { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kShare },
{ { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL }, kConditionallyShare },
// different sizes but still approx
{ { 64, kRT, kRGBA, kA, 0, kTL }, { 50, kRT, kRGBA, kA, 0, kTL }, kShare },
{ { 64, kNotRT, kRGBA, kA, 0, kTL }, { 50, kNotRT, kRGBA, kA, 0, kTL }, kConditionallyShare },
// same sizes but exact
{ { 64, kRT, kRGBA, kE, 0, kTL }, { 64, kRT, kRGBA, kE, 0, kTL }, kShare },
{ { 64, kNotRT, kRGBA, kE, 0, kTL }, { 64, kNotRT, kRGBA, kE, 0, kTL }, kConditionallyShare },
//----------------------------------------------------------------------------------------
// Two non-overlapping intervals w/ different exact sizes should not share
{ { 56, kRT, kRGBA, kE, 0, kTL }, { 54, kRT, kRGBA, kE, 0, kTL }, kDontShare },
// Two non-overlapping intervals w/ _very different_ approx sizes should not share
{ { 255, kRT, kRGBA, kA, 0, kTL }, { 127, kRT, kRGBA, kA, 0, kTL }, kDontShare },
// Two non-overlapping intervals w/ different MSAA sample counts should not share
// (expectation is k2 == k4, since the platform may map both requested counts to the same value)
{ { 64, kRT, kRGBA, kA, k2, kTL }, { 64, kRT, kRGBA, kA, k4, kTL }, k2 == k4 },
// Two non-overlapping intervals w/ different configs should not share
{ { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kBGRA, kA, 0, kTL }, kDontShare },
// Two non-overlapping intervals w/ different RT classifications should never share
{ { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kNotRT, kRGBA, kA, 0, kTL }, kDontShare },
{ { 64, kNotRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kTL }, kDontShare },
// Two non-overlapping intervals w/ different origins should not share
// TODO: rm this test case
{ { 64, kRT, kRGBA, kA, 0, kTL }, { 64, kRT, kRGBA, kA, 0, kBL }, kDontShare },
};
for (auto test : gNonOverlappingTests) {
sk_sp<GrSurfaceProxy> p1 = make_deferred(resourceProvider, test.fP1);
sk_sp<GrSurfaceProxy> p2 = make_deferred(resourceProvider, test.fP2);
non_overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2),
test.fExpectation);
}
{
// Wrapped backend textures should never be reused
TestCase t[1] = {
{ { 64, kNotRT, kRGBA, kE, 0, kTL }, { 64, kNotRT, kRGBA, kE, 0, kTL }, kDontShare }
};
GrBackendObject backEndObj;
sk_sp<GrSurfaceProxy> p1 = make_backend(ctxInfo.grContext(), t[0].fP1, &backEndObj);
sk_sp<GrSurfaceProxy> p2 = make_deferred(resourceProvider, t[0].fP2);
non_overlap_test(reporter, resourceProvider, std::move(p1), std::move(p2),
t[0].fExpectation);
cleanup_backend(ctxInfo.grContext(), &backEndObj);
}
}
#endif