Refactor GrBufferAllocPools to use resource cache

Committed: https://skia.googlesource.com/skia/+/e935f1a0e2351373c33600b8388492ce1218014a

Review URL: https://codereview.chromium.org/1139753002
This commit is contained in:
robertphillips 2015-06-24 06:54:10 -07:00 committed by Commit bot
parent 8bcc7a00fe
commit 1b8e1b5c49
13 changed files with 165 additions and 144 deletions

View File

@ -185,6 +185,7 @@ public:
// recycled in the texture cache. This is to prevent ghosting by drivers
// (in particular for deferred architectures).
bool reuseScratchTextures() const { return fReuseScratchTextures; }
// Analogous to reuseScratchTextures: when false, scratch geometry buffers are
// not recycled through the resource cache (see GrGpu::create*Buffer, which
// strips the scratch key in that case).
bool reuseScratchBuffers() const { return fReuseScratchBuffers; }
int maxRenderTargetSize() const { return fMaxRenderTargetSize; }
int maxTextureSize() const { return fMaxTextureSize; }
@ -229,10 +230,12 @@ protected:
bool fStencilWrapOpsSupport : 1;
bool fDiscardRenderTargetSupport : 1;
bool fReuseScratchTextures : 1;
bool fReuseScratchBuffers : 1;
bool fGpuTracingSupport : 1;
bool fCompressedTexSubImageSupport : 1;
bool fOversizedStencilSupport : 1;
bool fTextureBarrierSupport : 1;
// Driver workaround
bool fUseDrawInsteadOfClear : 1;
bool fUseDrawInsteadOfPartialRenderTargetWrite : 1;

View File

@ -158,12 +158,12 @@ protected:
GrGpu* gpu() { return fGpu; }
const GrGpu* gpu() const { return fGpu; }
private:
bool isAbandoned() const {
SkASSERT(SkToBool(fGpu) == SkToBool(fCache));
return !SkToBool(fCache);
}
private:
GrResourceCache* fCache;
GrGpu* fGpu;
};

View File

@ -10,27 +10,16 @@
#include "GrBatchAtlas.h"
#include "GrPipeline.h"
// The vertex/index pools are now direct members that obtain their buffers from
// the resource cache, so the old preallocation size constants and the explicit
// heap construction of the pools (which called .reset(SkNEW_ARGS(...)) on what
// are no longer SkAutoTDelete members) are gone.
GrBatchTarget::GrBatchTarget(GrGpu* gpu)
    : fGpu(gpu)
    , fVertexPool(gpu)
    , fIndexPool(gpu)
    , fFlushBuffer(kFlushBufferInitialSizeInBytes)
    , fIter(fFlushBuffer)
    , fNumberOfDraws(0)
    , fCurrentToken(0)
    , fLastFlushedToken(0)
    , fInlineUpdatesIndex(0) {
}
void GrBatchTarget::flushNext(int n) {
@ -65,11 +54,11 @@ void GrBatchTarget::flushNext(int n) {
// Forwards to the member vertex pool. The stale pre-refactor line
// (fVertexPool->makeSpace) made the updated call unreachable and used pointer
// syntax the direct member no longer supports.
void* GrBatchTarget::makeVertSpace(size_t vertexSize, int vertexCount,
                                   const GrVertexBuffer** buffer, int* startVertex) {
    return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
}
// Forwards to the member index pool; removes the stale fIndexPool-> return
// that shadowed the refactored call.
uint16_t* GrBatchTarget::makeIndexSpace(int indexCount,
                                        const GrIndexBuffer** buffer, int* startIndex) {
    return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
}

View File

@ -115,26 +115,26 @@ public:
const GrIndexBuffer** buffer, int* startIndex);
// A helper for draws which overallocate and then return data to the pool
// Returns over-allocated index space to the pool (stale fIndexPool-> duplicate removed).
void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
// Returns over-allocated vertex space to the pool (stale fVertexPool-> duplicate removed).
void putBackVertices(size_t vertices, size_t vertexStride) {
    fVertexPool.putBack(vertices * vertexStride);
}
// Resets both pools, releasing their buffer blocks back to the cache
// (stale fVertexPool->/fIndexPool-> duplicates removed).
void reset() {
    fVertexPool.reset();
    fIndexPool.reset();
}
private:
// Unmaps any mapped blocks in both pools before flushing
// (stale pointer-syntax duplicates removed).
void unmapVertexAndIndexBuffers() {
    fVertexPool.unmap();
    fIndexPool.unmap();
}
GrGpu* fGpu;
// The pools are owned directly now; the old SkAutoTDelete declarations were
// left in and redeclare the same member names.
GrVertexBufferAllocPool fVertexPool;
GrIndexBufferAllocPool fIndexPool;
typedef void* TBufferAlign; // This wouldn't be enough align if a command used long double.

View File

@ -9,8 +9,10 @@
#include "GrBufferAllocPool.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrResourceProvider.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"
@ -22,6 +24,9 @@
static void VALIDATE(bool = false) {}
#endif
static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)
@ -37,9 +42,8 @@ do {
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
BufferType bufferType,
size_t blockSize,
int preallocBufferCnt)
: fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
size_t blockSize)
: fBlocks(8) {
fGpu = SkRef(gpu);
@ -49,19 +53,10 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
fBytesInUse = 0;
fPreallocBuffersInUse = 0;
fPreallocBufferStartIdx = 0;
for (int i = 0; i < preallocBufferCnt; ++i) {
GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
if (buffer) {
*fPreallocBuffers.append() = buffer;
}
}
fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
}
GrBufferAllocPool::~GrBufferAllocPool() {
VALIDATE();
void GrBufferAllocPool::deleteBlocks() {
if (fBlocks.count()) {
GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
if (buffer->isMapped()) {
@ -71,34 +66,22 @@ GrBufferAllocPool::~GrBufferAllocPool() {
while (!fBlocks.empty()) {
this->destroyBlock();
}
fPreallocBuffers.unrefAll();
SkASSERT(!fBufferPtr);
}
// Unmaps/destroys all remaining buffer blocks and drops the ref taken on the
// GrGpu (SkRef(gpu)) in the constructor.
GrBufferAllocPool::~GrBufferAllocPool() {
// Debug-only sanity check of pool state (no-op in release builds).
VALIDATE();
this->deleteBlocks();
fGpu->unref();
}
// Releases all blocks and shrinks the CPU staging buffer back to the minimum.
// The stale pre-refactor bookkeeping for fPreallocBuffers/fPreallocBuffersInUse/
// fPreallocBufferStartIdx referenced members removed by this refactor, and the
// explicit unmap of the back block is now handled inside deleteBlocks().
void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();
    // we may have created a large cpu mirror of a large VB. Reset the size
    // to match our minimum.
    fCpuData.reset(fMinBlockSize);
    VALIDATE();
}
@ -170,8 +153,7 @@ void* GrBufferAllocPool::makeSpace(size_t size,
if (fBufferPtr) {
BufferBlock& back = fBlocks.back();
size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
size_t pad = GrSizeAlignUpPad(usedBytes,
alignment);
size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
if ((size + pad) <= back.fBytesFree) {
memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
usedBytes += pad;
@ -209,12 +191,6 @@ void* GrBufferAllocPool::makeSpace(size_t size,
void GrBufferAllocPool::putBack(size_t bytes) {
VALIDATE();
// if the putBack unwinds all the preallocated buffers then we will
// advance the starting index. As blocks are destroyed fPreallocBuffersInUse
// will be decremented. I will reach zero if all blocks using preallocated
// buffers are released.
int preallocBuffersInUse = fPreallocBuffersInUse;
while (bytes) {
// caller shouldn't try to put back more than they've taken
SkASSERT(!fBlocks.empty());
@ -236,11 +212,7 @@ void GrBufferAllocPool::putBack(size_t bytes) {
break;
}
}
if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
preallocBuffersInUse) %
fPreallocBuffers.count();
}
VALIDATE();
}
@ -253,24 +225,13 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
BufferBlock& block = fBlocks.push_back();
if (size == fMinBlockSize &&
fPreallocBuffersInUse < fPreallocBuffers.count()) {
uint32_t nextBuffer = (fPreallocBuffersInUse +
fPreallocBufferStartIdx) %
fPreallocBuffers.count();
block.fBuffer = fPreallocBuffers[nextBuffer];
block.fBuffer->ref();
++fPreallocBuffersInUse;
} else {
block.fBuffer = this->createBuffer(size);
if (NULL == block.fBuffer) {
fBlocks.pop_back();
return false;
}
block.fBuffer = this->getBuffer(size);
if (NULL == block.fBuffer) {
fBlocks.pop_back();
return false;
}
block.fBytesFree = size;
block.fBytesFree = block.fBuffer->gpuMemorySize();
if (fBufferPtr) {
SkASSERT(fBlocks.count() > 1);
BufferBlock& prev = fBlocks.fromBack(1);
@ -297,7 +258,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
}
if (NULL == fBufferPtr) {
fBufferPtr = fCpuData.reset(size);
fBufferPtr = fCpuData.reset(block.fBytesFree);
}
VALIDATE(true);
@ -309,15 +270,7 @@ void GrBufferAllocPool::destroyBlock() {
SkASSERT(!fBlocks.empty());
BufferBlock& block = fBlocks.back();
if (fPreallocBuffersInUse > 0) {
uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
fPreallocBufferStartIdx +
(fPreallocBuffers.count() - 1)) %
fPreallocBuffers.count();
if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
--fPreallocBuffersInUse;
}
}
SkASSERT(!block.fBuffer->isMapped());
block.fBuffer->unref();
fBlocks.pop_back();
@ -345,24 +298,22 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
VALIDATE(true);
}
/**
 * Obtains a dynamic geometry buffer of at least |size| bytes through the
 * resource provider so it can be recycled via the scratch resource cache.
 * The stale createBuffer() signature and the fGpu->create*Buffer returns
 * (which made the resource-provider path unreachable) are removed.
 */
GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
    GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
    if (kIndex_BufferType == fBufferType) {
        return rp->getIndexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
    } else {
        SkASSERT(kVertex_BufferType == fBufferType);
        return rp->getVertexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
    }
}
////////////////////////////////////////////////////////////////////////////////
// Vertex pool now always uses MIN_VERTEX_BUFFER_SIZE blocks; the old
// (bufferSize, preallocBufferCnt) signature lines are stale diff residue.
GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
    : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
}
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
@ -389,13 +340,8 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
////////////////////////////////////////////////////////////////////////////////
// Index pool now always uses MIN_INDEX_BUFFER_SIZE blocks; the old
// (bufferSize, preallocBufferCnt) signature lines are stale diff residue.
GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
    : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
}
void* GrIndexBufferAllocPool::makeSpace(int indexCount,

View File

@ -64,16 +64,12 @@ protected:
* @param bufferSize The minimum size of created buffers.
* This value will be clamped to some
* reasonable minimum.
* @param preallocBufferCnt The pool will allocate this number of
* buffers at bufferSize and keep them until it
* is destroyed.
*/
GrBufferAllocPool(GrGpu* gpu,
BufferType bufferType,
size_t bufferSize = 0,
int preallocBufferCnt = 0);
size_t bufferSize = 0);
virtual ~GrBufferAllocPool();
virtual ~GrBufferAllocPool();
/**
* Returns a block of memory to hold data. A buffer designated to hold the
@ -99,7 +95,7 @@ protected:
const GrGeometryBuffer** buffer,
size_t* offset);
// Obtains a buffer via the resource provider (renamed from createBuffer();
// the old declaration was a stale duplicate).
GrGeometryBuffer* getBuffer(size_t size);
private:
struct BufferBlock {
@ -109,6 +105,7 @@ private:
bool createBlock(size_t requestSize);
void destroyBlock();
void deleteBlocks();
void flushCpuData(const BufferBlock& block, size_t flushSize);
#ifdef SK_DEBUG
void validate(bool unusedBlockAllowed = false) const;
@ -117,15 +114,10 @@ private:
size_t fBytesInUse;
GrGpu* fGpu;
SkTDArray<GrGeometryBuffer*> fPreallocBuffers;
size_t fMinBlockSize;
BufferType fBufferType;
SkTArray<BufferBlock> fBlocks;
int fPreallocBuffersInUse;
// We attempt to cycle through the preallocated buffers rather than
// always starting from the first.
int fPreallocBufferStartIdx;
SkAutoMalloc fCpuData;
void* fBufferPtr;
size_t fGeometryBufferMapThreshold;
@ -142,13 +134,8 @@ public:
* Constructor
*
* @param gpu The GrGpu used to create the vertex buffers.
* @param bufferSize The minimum size of created VBs. This value
* will be clamped to some reasonable minimum.
* @param preallocBufferCnt The pool will allocate this number of VBs at
* bufferSize and keep them until it is
* destroyed.
*/
GrVertexBufferAllocPool(GrGpu* gpu, size_t bufferSize = 0, int preallocBufferCnt = 0);
GrVertexBufferAllocPool(GrGpu* gpu);
/**
* Returns a block of memory to hold vertices. A buffer designated to hold
@ -191,15 +178,8 @@ public:
* Constructor
*
* @param gpu The GrGpu used to create the index buffers.
* @param bufferSize The minimum size of created IBs. This value
* will be clamped to some reasonable minimum.
* @param preallocBufferCnt The pool will allocate this number of VBs at
* bufferSize and keep them until it is
* destroyed.
*/
GrIndexBufferAllocPool(GrGpu* gpu,
size_t bufferSize = 0,
int preallocBufferCnt = 0);
GrIndexBufferAllocPool(GrGpu* gpu);
/**
* Returns a block of memory to hold indices. A buffer designated to hold

View File

@ -86,6 +86,7 @@ GrCaps::GrCaps(const GrContextOptions& options) {
fStencilWrapOpsSupport = false;
fDiscardRenderTargetSupport = false;
fReuseScratchTextures = true;
fReuseScratchBuffers = true;
fGpuTracingSupport = false;
fCompressedTexSubImageSupport = false;
fOversizedStencilSupport = false;
@ -146,6 +147,7 @@ SkString GrCaps::dump() const {
r.appendf("Stencil Wrap Ops Support : %s\n", gNY[fStencilWrapOpsSupport]);
r.appendf("Discard Render Target Support : %s\n", gNY[fDiscardRenderTargetSupport]);
r.appendf("Reuse Scratch Textures : %s\n", gNY[fReuseScratchTextures]);
r.appendf("Reuse Scratch Buffers : %s\n", gNY[fReuseScratchBuffers]);
r.appendf("Gpu Tracing Support : %s\n", gNY[fGpuTracingSupport]);
r.appendf("Compressed Update Support : %s\n", gNY[fCompressedTexSubImageSupport]);
r.appendf("Oversized Stencil Support : %s\n", gNY[fOversizedStencilSupport]);

View File

@ -199,12 +199,20 @@ GrRenderTarget* GrGpu::wrapBackendRenderTarget(const GrBackendRenderTargetDesc&
// Creates a vertex buffer; when the caps forbid recycling scratch buffers the
// scratch key is stripped so the resource cache never re-offers it. Removes the
// stale pre-refactor early return (which made the key-stripping unreachable)
// and guards against a failed allocation before dereferencing.
GrVertexBuffer* GrGpu::createVertexBuffer(size_t size, bool dynamic) {
    this->handleDirtyContext();
    GrVertexBuffer* vb = this->onCreateVertexBuffer(size, dynamic);
    if (vb && !this->caps()->reuseScratchBuffers()) {
        vb->resourcePriv().removeScratchKey();
    }
    return vb;
}
// Creates an index buffer; mirrors createVertexBuffer(). Removes the stale
// early return and adds a null check before resourcePriv() is dereferenced.
GrIndexBuffer* GrGpu::createIndexBuffer(size_t size, bool dynamic) {
    this->handleDirtyContext();
    GrIndexBuffer* ib = this->onCreateIndexBuffer(size, dynamic);
    if (ib && !this->caps()->reuseScratchBuffers()) {
        ib->resourcePriv().removeScratchKey();
    }
    return ib;
}
void GrGpu::clear(const SkIRect* rect,

View File

@ -13,8 +13,18 @@
#include "GrGeometryBuffer.h"
class GrIndexBuffer : public GrGeometryBuffer {
public:
// Fills in |key| with a scratch key made from the buffer's size and dynamic
// flag, so equivalently-configured index buffers can be found and recycled
// through the resource cache.
static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
// Resource type is allocated once, shared by all index-buffer scratch keys.
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
GrScratchKey::Builder builder(key, kType, 2);
builder[0] = SkToUInt(size);
builder[1] = dynamic ? 1 : 0;
}
/**
* Retrieves the maximum number of quads that could be rendered
* from the index buffer (using kTriangles_GrPrimitiveType).
@ -25,7 +35,12 @@ public:
}
protected:
// Registers a scratch key at construction so this buffer is recyclable via the
// resource cache. The stale one-line `{}` body from before the refactor was a
// duplicate constructor definition.
GrIndexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
    : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
    GrScratchKey key;
    ComputeScratchKey(gpuMemorySize, dynamic, &key);
    this->setScratchKey(key);
}
private:
typedef GrGeometryBuffer INHERITED;
};

View File

@ -28,7 +28,7 @@ const GrIndexBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16
const GrUniqueKey& key) {
size_t bufferSize = patternSize * reps * sizeof(uint16_t);
GrIndexBuffer* buffer = this->gpu()->createIndexBuffer(bufferSize, /* dynamic = */ false);
GrIndexBuffer* buffer = this->getIndexBuffer(bufferSize, /* dynamic = */ false, true);
if (!buffer) {
return NULL;
}
@ -83,3 +83,58 @@ GrPathRange* GrResourceProvider::createGlyphs(const SkTypeface* tf, const SkDesc
return this->gpu()->pathRendering()->createGlyphs(tf, desc, stroke);
}
/**
 * Returns an index buffer of at least |size| bytes, or NULL if the provider is
 * abandoned or creation fails. Dynamic requests are binned by power of two and
 * first satisfied from the scratch resource cache; |calledDuringFlush| demands
 * a buffer with no pending IO.
 */
GrIndexBuffer* GrResourceProvider::getIndexBuffer(size_t size, bool dynamic,
                                                  bool calledDuringFlush) {
    if (this->isAbandoned()) {
        return NULL;
    }

    if (dynamic) {
        // bin by pow2 with a reasonable min
        static const uint32_t MIN_SIZE = 1 << 12;
        size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));

        GrScratchKey key;
        GrIndexBuffer::ComputeScratchKey(size, dynamic, &key);
        uint32_t scratchFlags = calledDuringFlush
                ? (uint32_t) GrResourceCache::kRequireNoPendingIO_ScratchFlag
                : (uint32_t) GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        GrGpuResource* cached = this->cache()->findAndRefScratchResource(key, scratchFlags);
        if (cached) {
            return static_cast<GrIndexBuffer*>(cached);
        }
    }
    return this->gpu()->createIndexBuffer(size, dynamic);
}
/**
 * Returns a vertex buffer of at least |size| bytes, or NULL if the provider is
 * abandoned or creation fails. Dynamic requests are binned by power of two and
 * first satisfied from the scratch resource cache; |calledDuringFlush| demands
 * a buffer with no pending IO.
 */
GrVertexBuffer* GrResourceProvider::getVertexBuffer(size_t size, bool dynamic,
                                                    bool calledDuringFlush) {
    if (this->isAbandoned()) {
        return NULL;
    }

    if (dynamic) {
        // bin by pow2 with a reasonable min
        static const uint32_t MIN_SIZE = 1 << 15;
        size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));

        GrScratchKey key;
        GrVertexBuffer::ComputeScratchKey(size, dynamic, &key);
        uint32_t scratchFlags = calledDuringFlush
                ? (uint32_t) GrResourceCache::kRequireNoPendingIO_ScratchFlag
                : (uint32_t) GrResourceCache::kPreferNoPendingIO_ScratchFlag;
        GrGpuResource* cached = this->cache()->findAndRefScratchResource(key, scratchFlags);
        if (cached) {
            return static_cast<GrVertexBuffer*>(cached);
        }
    }
    return this->gpu()->createVertexBuffer(size, dynamic);
}

View File

@ -86,6 +86,9 @@ public:
using GrTextureProvider::findAndRefResourceByUniqueKey;
using GrTextureProvider::abandon;
GrIndexBuffer* getIndexBuffer(size_t size, bool dynamic, bool calledDuringFlush);
GrVertexBuffer* getVertexBuffer(size_t size, bool dynamic, bool calledDuringFlush);
private:
const GrIndexBuffer* createInstancedIndexBuffer(const uint16_t* pattern,
int patternSize,

View File

@ -14,9 +14,24 @@
#include "GrGeometryBuffer.h"
class GrVertexBuffer : public GrGeometryBuffer {
public:
    // Fills in |key| with a scratch key made from the buffer's size and dynamic
    // flag, so equivalently-configured vertex buffers can be found and recycled
    // through the resource cache.
    static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
        static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
        GrScratchKey::Builder builder(key, kType, 2);
        builder[0] = SkToUInt(size);
        builder[1] = dynamic ? 1 : 0;
    }

protected:
    // Registers a scratch key at construction so this buffer is recyclable.
    // The stale one-line `{}` body from before the refactor was a duplicate
    // constructor definition and has been removed.
    GrVertexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
        : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
        GrScratchKey key;
        ComputeScratchKey(gpuMemorySize, dynamic, &key);
        this->setScratchKey(key);
    }

private:
    typedef GrGeometryBuffer INHERITED;
};

View File

@ -414,6 +414,11 @@ void GrGLCaps::init(const GrContextOptions& contextOptions,
fReuseScratchTextures = kARM_GrGLVendor != ctxInfo.vendor() &&
kQualcomm_GrGLVendor != ctxInfo.vendor();
#if 0
fReuseScratchBuffers = kARM_GrGLVendor != ctxInfo.vendor() &&
kQualcomm_GrGLVendor != ctxInfo.vendor();
#endif
if (GrGLCaps::kES_IMG_MsToTexture_MSFBOType == fMSFBOType) {
GR_GL_GetIntegerv(gli, GR_GL_MAX_SAMPLES_IMG, &fMaxSampleCount);
} else if (GrGLCaps::kNone_MSFBOType != fMSFBOType) {