Revert of Refactor GrBufferAllocPools to use resource cache (patchset #15 id:280001 of https://codereview.chromium.org/1139753002/)
Reason for revert:
Will reland after Chromium branch

Original issue's description:
> Refactor GrBufferAllocPools to use resource cache
>
> Committed: https://skia.googlesource.com/skia/+/e935f1a0e2351373c33600b8388492ce1218014a

TBR=bsalomon@google.com,joshualitt@google.com
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true

Review URL: https://codereview.chromium.org/1204773003
This commit is contained in: parent e582a5a891, commit b607767703
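At a glance: the reverted refactor had GrBufferAllocPool draw its backing GrGeometryBuffers from the resource cache (via GrResourceProvider and per-size scratch keys), while this revert restores the older scheme in which each pool preallocates a small ring of buffers up front and cycles through them. The sketch below contrasts the two acquisition strategies in miniature; it is an illustration only, and every name in it (Buffer, acquireFromCache, acquireFromRing) is invented rather than taken from Skia. The real code paths are in the GrBufferAllocPool.cpp hunks further down (getBuffer() versus the fPreallocBuffers logic in createBlock()).

// Simplified, self-contained illustration of the two buffer-acquisition
// strategies this commit toggles between. All names are hypothetical;
// only the overall shape mirrors the Skia code in the diff below.
#include <cstddef>
#include <map>
#include <vector>

struct Buffer { size_t size; };

// Refactored path (being reverted): look up a recyclable "scratch" buffer
// in a cache keyed by size; create one only on a miss.
Buffer* acquireFromCache(std::multimap<size_t, Buffer*>& cache, size_t size) {
    auto it = cache.find(size);
    if (it != cache.end()) {
        Buffer* hit = it->second;
        cache.erase(it);        // the caller now owns the recycled buffer
        return hit;
    }
    return new Buffer{size};    // cache miss: allocate a fresh buffer
}

// Restored path: cycle through a small ring of buffers that the pool
// preallocated at construction time.
Buffer* acquireFromRing(std::vector<Buffer*>& ring, int& inUse, int startIdx) {
    if (inUse < (int)ring.size()) {
        int next = (inUse + startIdx) % (int)ring.size();
        ++inUse;
        return ring[next];
    }
    return new Buffer{0};       // ring exhausted: fall back to a fresh buffer
}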
@@ -185,7 +185,6 @@ public:
     // recycled in the texture cache. This is to prevent ghosting by drivers
     // (in particular for deferred architectures).
     bool reuseScratchTextures() const { return fReuseScratchTextures; }
-    bool reuseScratchBuffers() const { return fReuseScratchBuffers; }

     int maxRenderTargetSize() const { return fMaxRenderTargetSize; }
     int maxTextureSize() const { return fMaxTextureSize; }
@@ -230,12 +229,10 @@ protected:
     bool fStencilWrapOpsSupport : 1;
     bool fDiscardRenderTargetSupport : 1;
     bool fReuseScratchTextures : 1;
-    bool fReuseScratchBuffers : 1;
     bool fGpuTracingSupport : 1;
     bool fCompressedTexSubImageSupport : 1;
     bool fOversizedStencilSupport : 1;
     bool fTextureBarrierSupport : 1;

     // Driver workaround
     bool fUseDrawInsteadOfClear : 1;
     bool fUseDrawInsteadOfPartialRenderTargetWrite : 1;
@@ -158,12 +158,12 @@ protected:
     GrGpu* gpu() { return fGpu; }
     const GrGpu* gpu() const { return fGpu; }

+private:
     bool isAbandoned() const {
         SkASSERT(SkToBool(fGpu) == SkToBool(fCache));
         return !SkToBool(fCache);
     }

-private:
     GrResourceCache* fCache;
     GrGpu* fGpu;
 };
@@ -10,16 +10,27 @@
 #include "GrBatchAtlas.h"
 #include "GrPipeline.h"

+static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
+static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
+
+static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
+static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
+
 GrBatchTarget::GrBatchTarget(GrGpu* gpu)
     : fGpu(gpu)
-    , fVertexPool(gpu)
-    , fIndexPool(gpu)
     , fFlushBuffer(kFlushBufferInitialSizeInBytes)
     , fIter(fFlushBuffer)
     , fNumberOfDraws(0)
     , fCurrentToken(0)
     , fLastFlushedToken(0)
     , fInlineUpdatesIndex(0) {
+
+    fVertexPool.reset(SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu,
+                                                           DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
+                                                           DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS)));
+    fIndexPool.reset(SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu,
+                                                         DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
+                                                         DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS)));
 }

 void GrBatchTarget::flushNext(int n) {
@@ -54,11 +65,11 @@ void GrBatchTarget::flushNext(int n) {

 void* GrBatchTarget::makeVertSpace(size_t vertexSize, int vertexCount,
                                    const GrVertexBuffer** buffer, int* startVertex) {
-    return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
+    return fVertexPool->makeSpace(vertexSize, vertexCount, buffer, startVertex);
 }

 uint16_t* GrBatchTarget::makeIndexSpace(int indexCount,
                                         const GrIndexBuffer** buffer, int* startIndex) {
-    return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
+    return reinterpret_cast<uint16_t*>(fIndexPool->makeSpace(indexCount, buffer, startIndex));
 }
@@ -115,26 +115,26 @@ public:
                          const GrIndexBuffer** buffer, int* startIndex);

     // A helper for draws which overallocate and then return data to the pool
-    void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }
+    void putBackIndices(size_t indices) { fIndexPool->putBack(indices * sizeof(uint16_t)); }

     void putBackVertices(size_t vertices, size_t vertexStride) {
-        fVertexPool.putBack(vertices * vertexStride);
+        fVertexPool->putBack(vertices * vertexStride);
     }

     void reset() {
-        fVertexPool.reset();
-        fIndexPool.reset();
+        fVertexPool->reset();
+        fIndexPool->reset();
     }

 private:
     void unmapVertexAndIndexBuffers() {
-        fVertexPool.unmap();
-        fIndexPool.unmap();
+        fVertexPool->unmap();
+        fIndexPool->unmap();
     }

     GrGpu* fGpu;
-    GrVertexBufferAllocPool fVertexPool;
-    GrIndexBufferAllocPool fIndexPool;
+    SkAutoTDelete<GrVertexBufferAllocPool> fVertexPool;
+    SkAutoTDelete<GrIndexBufferAllocPool> fIndexPool;

     typedef void* TBufferAlign; // This wouldn't be enough align if a command used long double.
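The GrBatchTarget hunks above swap fVertexPool and fIndexPool from by-value members back to heap objects owned through SkAutoTDelete, which is why every call site changes from `.` to `->`. As a rough sketch of what such an owning pointer does (roughly std::unique_ptr; this is an assumption-laden stand-in, not the real SkTemplates.h definition):

// Minimal sketch of an SkAutoTDelete-like owner, assuming only that it
// deletes its payload on destruction and exposes reset()/operator->.
template <typename T>
class AutoTDelete {
public:
    explicit AutoTDelete(T* obj = nullptr) : fObj(obj) {}
    ~AutoTDelete() { delete fObj; }
    void reset(T* obj) { delete fObj; fObj = obj; }  // take ownership of obj
    T* operator->() const { return fObj; }           // member access goes through ->
private:
    T* fObj;
    AutoTDelete(const AutoTDelete&) = delete;
    AutoTDelete& operator=(const AutoTDelete&) = delete;
};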
@@ -9,10 +9,8 @@

 #include "GrBufferAllocPool.h"
 #include "GrCaps.h"
-#include "GrContext.h"
 #include "GrGpu.h"
 #include "GrIndexBuffer.h"
-#include "GrResourceProvider.h"
 #include "GrTypes.h"
 #include "GrVertexBuffer.h"
@@ -24,9 +22,6 @@
 static void VALIDATE(bool = false) {}
 #endif

-static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
-static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
-
 // page size
 #define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)
@@ -42,8 +37,9 @@ do {

 GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                      BufferType bufferType,
-                                     size_t blockSize)
-    : fBlocks(8) {
+                                     size_t blockSize,
+                                     int preallocBufferCnt)
+    : fBlocks(SkTMax(8, 2*preallocBufferCnt)) {

     fGpu = SkRef(gpu);
@@ -53,10 +49,19 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,

     fBytesInUse = 0;
+
+    fPreallocBuffersInUse = 0;
+    fPreallocBufferStartIdx = 0;
+    for (int i = 0; i < preallocBufferCnt; ++i) {
+        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
+        if (buffer) {
+            *fPreallocBuffers.append() = buffer;
+        }
+    }
     fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
 }

-void GrBufferAllocPool::deleteBlocks() {
+GrBufferAllocPool::~GrBufferAllocPool() {
     VALIDATE();
     if (fBlocks.count()) {
         GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
         if (buffer->isMapped()) {
@@ -66,22 +71,34 @@ void GrBufferAllocPool::deleteBlocks() {
     while (!fBlocks.empty()) {
         this->destroyBlock();
     }
-    SkASSERT(!fBufferPtr);
-}
-
-GrBufferAllocPool::~GrBufferAllocPool() {
-    VALIDATE();
-    this->deleteBlocks();
+    fPreallocBuffers.unrefAll();
     fGpu->unref();
 }

 void GrBufferAllocPool::reset() {
     VALIDATE();
     fBytesInUse = 0;
-    this->deleteBlocks();
+    if (fBlocks.count()) {
+        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
+        if (buffer->isMapped()) {
+            UNMAP_BUFFER(fBlocks.back());
+        }
+    }
+    // fPreallocBuffersInUse will be decremented down to zero in the while loop
+    int preallocBuffersInUse = fPreallocBuffersInUse;
+    while (!fBlocks.empty()) {
+        this->destroyBlock();
+    }
+    if (fPreallocBuffers.count()) {
+        // must set this after above loop.
+        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
+                                   preallocBuffersInUse) %
+                                  fPreallocBuffers.count();
+    }
     // we may have created a large cpu mirror of a large VB. Reset the size
-    // to match our minimum.
+    // to match our pre-allocated VBs.
     fCpuData.reset(fMinBlockSize);
+    SkASSERT(0 == fPreallocBuffersInUse);
     VALIDATE();
 }
@@ -153,7 +170,8 @@ void* GrBufferAllocPool::makeSpace(size_t size,
     if (fBufferPtr) {
         BufferBlock& back = fBlocks.back();
         size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
-        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
+        size_t pad = GrSizeAlignUpPad(usedBytes,
+                                      alignment);
         if ((size + pad) <= back.fBytesFree) {
             memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
             usedBytes += pad;
@@ -191,6 +209,12 @@ void* GrBufferAllocPool::makeSpace(size_t size,
 void GrBufferAllocPool::putBack(size_t bytes) {
     VALIDATE();

+    // if the putBack unwinds all the preallocated buffers then we will
+    // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
+    // will be decremented. It will reach zero if all blocks using preallocated
+    // buffers are released.
+    int preallocBuffersInUse = fPreallocBuffersInUse;
+
     while (bytes) {
         // caller shouldn't try to put back more than they've taken
         SkASSERT(!fBlocks.empty());
@@ -212,7 +236,11 @@ void GrBufferAllocPool::putBack(size_t bytes) {
             break;
         }
     }

+    if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
+        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
+                                   preallocBuffersInUse) %
+                                  fPreallocBuffers.count();
+    }
     VALIDATE();
 }
@@ -225,13 +253,24 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {

     BufferBlock& block = fBlocks.push_back();

-    block.fBuffer = this->getBuffer(size);
-    if (NULL == block.fBuffer) {
-        fBlocks.pop_back();
-        return false;
+    if (size == fMinBlockSize &&
+        fPreallocBuffersInUse < fPreallocBuffers.count()) {
+
+        uint32_t nextBuffer = (fPreallocBuffersInUse +
+                               fPreallocBufferStartIdx) %
+                              fPreallocBuffers.count();
+        block.fBuffer = fPreallocBuffers[nextBuffer];
+        block.fBuffer->ref();
+        ++fPreallocBuffersInUse;
+    } else {
+        block.fBuffer = this->createBuffer(size);
+        if (NULL == block.fBuffer) {
+            fBlocks.pop_back();
+            return false;
+        }
     }

-    block.fBytesFree = block.fBuffer->gpuMemorySize();
+    block.fBytesFree = size;
     if (fBufferPtr) {
         SkASSERT(fBlocks.count() > 1);
         BufferBlock& prev = fBlocks.fromBack(1);
@@ -258,7 +297,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
     }

     if (NULL == fBufferPtr) {
-        fBufferPtr = fCpuData.reset(block.fBytesFree);
+        fBufferPtr = fCpuData.reset(size);
     }

     VALIDATE(true);
@@ -270,7 +309,15 @@ void GrBufferAllocPool::destroyBlock() {
     SkASSERT(!fBlocks.empty());

     BufferBlock& block = fBlocks.back();

+    if (fPreallocBuffersInUse > 0) {
+        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
+                                       fPreallocBufferStartIdx +
+                                       (fPreallocBuffers.count() - 1)) %
+                                      fPreallocBuffers.count();
+        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
+            --fPreallocBuffersInUse;
+        }
+    }
     SkASSERT(!block.fBuffer->isMapped());
     block.fBuffer->unref();
     fBlocks.pop_back();
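The modular arithmetic restored in createBlock() and destroyBlock() above cycles through the preallocated buffers starting at fPreallocBufferStartIdx. A standalone check of that index math, with the field names shortened and a ring of `count` buffers assumed (illustration only, not Skia code):

#include <cassert>

// Index of the next preallocated buffer to hand out, as in createBlock().
int nextPrealloc(int inUse, int startIdx, int count) {
    return (inUse + startIdx) % count;
}

// Index of the most recently handed-out buffer, as in destroyBlock():
// adding (count - 1) instead of subtracting 1 avoids a negative operand.
int lastPrealloc(int inUse, int startIdx, int count) {
    return (inUse + startIdx + (count - 1)) % count;
}

int main() {
    // With 4 buffers and a start index of 3, hand-out order wraps: 3, 0, 1, 2.
    assert(nextPrealloc(0, 3, 4) == 3);
    assert(nextPrealloc(1, 3, 4) == 0);
    // After two buffers are handed out, the most recent one was index 0.
    assert(lastPrealloc(2, 3, 4) == 0);
    return 0;
}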
@@ -298,22 +345,24 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
     VALIDATE(true);
 }

-GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
-
-    GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
-
+GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
     if (kIndex_BufferType == fBufferType) {
-        return rp->getIndexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
+        return fGpu->createIndexBuffer(size, true);
     } else {
         SkASSERT(kVertex_BufferType == fBufferType);
-        return rp->getVertexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
+        return fGpu->createVertexBuffer(size, true);
     }
 }

 ////////////////////////////////////////////////////////////////////////////////

-GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
+GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
+                                                 size_t bufferSize,
+                                                 int preallocBufferCnt)
+    : GrBufferAllocPool(gpu,
+                        kVertex_BufferType,
+                        bufferSize,
+                        preallocBufferCnt) {
 }

 void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
@@ -340,8 +389,13 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,

 ////////////////////////////////////////////////////////////////////////////////

-GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
-    : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
+GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
+                                               size_t bufferSize,
+                                               int preallocBufferCnt)
+    : GrBufferAllocPool(gpu,
+                        kIndex_BufferType,
+                        bufferSize,
+                        preallocBufferCnt) {
 }

 void* GrIndexBufferAllocPool::makeSpace(int indexCount,
@@ -64,12 +64,16 @@ protected:
      * @param bufferSize The minimum size of created buffers.
      *                   This value will be clamped to some
      *                   reasonable minimum.
+     * @param preallocBufferCnt The pool will allocate this number of
+     *                          buffers at bufferSize and keep them until it
+     *                          is destroyed.
      */
     GrBufferAllocPool(GrGpu* gpu,
                       BufferType bufferType,
-                      size_t bufferSize = 0);
+                      size_t bufferSize = 0,
+                      int preallocBufferCnt = 0);

-    virtual ~GrBufferAllocPool();
+    virtual ~GrBufferAllocPool();

     /**
      * Returns a block of memory to hold data. A buffer designated to hold the
@@ -95,7 +99,7 @@ protected:
                     const GrGeometryBuffer** buffer,
                     size_t* offset);

-    GrGeometryBuffer* getBuffer(size_t size);
+    GrGeometryBuffer* createBuffer(size_t size);

 private:
     struct BufferBlock {
@@ -105,7 +109,6 @@ private:

     bool createBlock(size_t requestSize);
     void destroyBlock();
-    void deleteBlocks();
     void flushCpuData(const BufferBlock& block, size_t flushSize);
 #ifdef SK_DEBUG
     void validate(bool unusedBlockAllowed = false) const;
@@ -114,10 +117,15 @@ private:
     size_t fBytesInUse;

     GrGpu* fGpu;
+    SkTDArray<GrGeometryBuffer*> fPreallocBuffers;
     size_t fMinBlockSize;
     BufferType fBufferType;

     SkTArray<BufferBlock> fBlocks;
+    int fPreallocBuffersInUse;
+    // We attempt to cycle through the preallocated buffers rather than
+    // always starting from the first.
+    int fPreallocBufferStartIdx;
     SkAutoMalloc fCpuData;
     void* fBufferPtr;
     size_t fGeometryBufferMapThreshold;
@@ -134,8 +142,13 @@ public:
      * Constructor
      *
      * @param gpu The GrGpu used to create the vertex buffers.
+     * @param bufferSize The minimum size of created VBs. This value
+     *                   will be clamped to some reasonable minimum.
+     * @param preallocBufferCnt The pool will allocate this number of VBs at
+     *                          bufferSize and keep them until it is
+     *                          destroyed.
      */
-    GrVertexBufferAllocPool(GrGpu* gpu);
+    GrVertexBufferAllocPool(GrGpu* gpu, size_t bufferSize = 0, int preallocBufferCnt = 0);

     /**
      * Returns a block of memory to hold vertices. A buffer designated to hold
@@ -178,8 +191,15 @@ public:
      * Constructor
      *
      * @param gpu The GrGpu used to create the index buffers.
+     * @param bufferSize The minimum size of created IBs. This value
+     *                   will be clamped to some reasonable minimum.
+     * @param preallocBufferCnt The pool will allocate this number of VBs at
+     *                          bufferSize and keep them until it is
+     *                          destroyed.
      */
-    GrIndexBufferAllocPool(GrGpu* gpu);
+    GrIndexBufferAllocPool(GrGpu* gpu,
+                           size_t bufferSize = 0,
+                           int preallocBufferCnt = 0);

     /**
      * Returns a block of memory to hold indices. A buffer designated to hold
@@ -86,7 +86,6 @@ GrCaps::GrCaps(const GrContextOptions& options) {
     fStencilWrapOpsSupport = false;
     fDiscardRenderTargetSupport = false;
     fReuseScratchTextures = true;
-    fReuseScratchBuffers = true;
     fGpuTracingSupport = false;
     fCompressedTexSubImageSupport = false;
     fOversizedStencilSupport = false;
@@ -147,7 +146,6 @@ SkString GrCaps::dump() const {
     r.appendf("Stencil Wrap Ops Support : %s\n", gNY[fStencilWrapOpsSupport]);
     r.appendf("Discard Render Target Support : %s\n", gNY[fDiscardRenderTargetSupport]);
     r.appendf("Reuse Scratch Textures : %s\n", gNY[fReuseScratchTextures]);
-    r.appendf("Reuse Scratch Buffers : %s\n", gNY[fReuseScratchBuffers]);
     r.appendf("Gpu Tracing Support : %s\n", gNY[fGpuTracingSupport]);
     r.appendf("Compressed Update Support : %s\n", gNY[fCompressedTexSubImageSupport]);
     r.appendf("Oversized Stencil Support : %s\n", gNY[fOversizedStencilSupport]);
@@ -199,20 +199,12 @@ GrRenderTarget* GrGpu::wrapBackendRenderTarget(const GrBackendRenderTargetDesc&

 GrVertexBuffer* GrGpu::createVertexBuffer(size_t size, bool dynamic) {
     this->handleDirtyContext();
-    GrVertexBuffer* vb = this->onCreateVertexBuffer(size, dynamic);
-    if (!this->caps()->reuseScratchBuffers()) {
-        vb->resourcePriv().removeScratchKey();
-    }
-    return vb;
+    return this->onCreateVertexBuffer(size, dynamic);
 }

 GrIndexBuffer* GrGpu::createIndexBuffer(size_t size, bool dynamic) {
     this->handleDirtyContext();
-    GrIndexBuffer* ib = this->onCreateIndexBuffer(size, dynamic);
-    if (!this->caps()->reuseScratchBuffers()) {
-        ib->resourcePriv().removeScratchKey();
-    }
-    return ib;
+    return this->onCreateIndexBuffer(size, dynamic);
 }

 void GrGpu::clear(const SkIRect* rect,
@@ -13,18 +13,8 @@

 #include "GrGeometryBuffer.h"


 class GrIndexBuffer : public GrGeometryBuffer {
 public:
-    static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
-        static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
-
-        GrScratchKey::Builder builder(key, kType, 2);
-
-        builder[0] = SkToUInt(size);
-        builder[1] = dynamic ? 1 : 0;
-    }
-
     /**
      * Retrieves the maximum number of quads that could be rendered
      * from the index buffer (using kTriangles_GrPrimitiveType).

@@ -35,12 +25,7 @@ public:
     }
 protected:
     GrIndexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
-        : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
-        GrScratchKey key;
-        ComputeScratchKey(gpuMemorySize, dynamic, &key);
-        this->setScratchKey(key);
-    }
-
+        : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {}
 private:
     typedef GrGeometryBuffer INHERITED;
 };
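For context on what the hunks above delete: ComputeScratchKey() packed exactly two words, the buffer size and a dynamic flag, into a GrScratchKey, so any cached buffer agreeing on that pair could satisfy a request. A rough stand-in for that keying scheme, using a std::multimap in place of GrResourceCache (illustration only; findAndRefScratch is an invented name):

#include <cstddef>
#include <map>
#include <utility>

struct GeometryBuffer { size_t size; bool dynamic; };

// Stand-in for a scratch key: buffers are interchangeable iff they agree
// on size and the dynamic flag, the two words the removed
// ComputeScratchKey() wrote into its GrScratchKey::Builder.
using ScratchKey = std::pair<size_t, bool>;

std::multimap<ScratchKey, GeometryBuffer*> gScratchCache;

GeometryBuffer* findAndRefScratch(size_t size, bool dynamic) {
    auto it = gScratchCache.find({size, dynamic});
    if (it == gScratchCache.end()) {
        return nullptr;              // no recyclable buffer; caller allocates
    }
    GeometryBuffer* buffer = it->second;
    gScratchCache.erase(it);         // remove from cache while in use
    return buffer;
}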
@@ -28,7 +28,7 @@ const GrIndexBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16
                                                                     const GrUniqueKey& key) {
     size_t bufferSize = patternSize * reps * sizeof(uint16_t);

-    GrIndexBuffer* buffer = this->getIndexBuffer(bufferSize, /* dynamic = */ false, true);
+    GrIndexBuffer* buffer = this->gpu()->createIndexBuffer(bufferSize, /* dynamic = */ false);
     if (!buffer) {
         return NULL;
     }
@@ -83,58 +83,3 @@ GrPathRange* GrResourceProvider::createGlyphs(const SkTypeface* tf, const SkDesc
     return this->gpu()->pathRendering()->createGlyphs(tf, desc, stroke);
 }
-
-GrIndexBuffer* GrResourceProvider::getIndexBuffer(size_t size, bool dynamic,
-                                                  bool calledDuringFlush) {
-    if (this->isAbandoned()) {
-        return NULL;
-    }
-
-    if (dynamic) {
-        // bin by pow2 with a reasonable min
-        static const uint32_t MIN_SIZE = 1 << 12;
-        size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
-
-        GrScratchKey key;
-        GrIndexBuffer::ComputeScratchKey(size, dynamic, &key);
-        uint32_t scratchFlags = 0;
-        if (calledDuringFlush) {
-            scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
-        } else {
-            scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
-        }
-        GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, scratchFlags);
-        if (resource) {
-            return static_cast<GrIndexBuffer*>(resource);
-        }
-    }
-
-    return this->gpu()->createIndexBuffer(size, dynamic);
-}
-
-GrVertexBuffer* GrResourceProvider::getVertexBuffer(size_t size, bool dynamic,
-                                                    bool calledDuringFlush) {
-    if (this->isAbandoned()) {
-        return NULL;
-    }
-
-    if (dynamic) {
-        // bin by pow2 with a reasonable min
-        static const uint32_t MIN_SIZE = 1 << 15;
-        size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
-
-        GrScratchKey key;
-        GrVertexBuffer::ComputeScratchKey(size, dynamic, &key);
-        uint32_t scratchFlags = 0;
-        if (calledDuringFlush) {
-            scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
-        } else {
-            scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
-        }
-        GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, scratchFlags);
-        if (resource) {
-            return static_cast<GrVertexBuffer*>(resource);
-        }
-    }
-
-    return this->gpu()->createVertexBuffer(size, dynamic);
-}
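The deleted getIndexBuffer()/getVertexBuffer() rounded every dynamic request up to a power of two with a floor, so that many nearby sizes collapse onto one scratch key and cache hits become likely. A self-contained sketch of that binning, with GrNextPow2 replaced by a plain loop (binBufferSize is an invented name):

#include <cassert>
#include <cstdint>

// Round up to the next power of two, then clamp to a minimum bin size,
// mirroring "size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)))" above.
uint32_t binBufferSize(uint32_t size, uint32_t minSize) {
    uint32_t pow2 = 1;
    while (pow2 < size) {
        pow2 <<= 1;
    }
    return pow2 < minSize ? minSize : pow2;
}

int main() {
    const uint32_t kMinIndexBufferBin = 1 << 12;              // 4096, as in the diff
    assert(binBufferSize(100,  kMinIndexBufferBin) == 4096);  // clamped up to the floor
    assert(binBufferSize(5000, kMinIndexBufferBin) == 8192);  // next power of two
    return 0;
}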
@@ -86,9 +86,6 @@ public:
     using GrTextureProvider::findAndRefResourceByUniqueKey;
     using GrTextureProvider::abandon;

-    GrIndexBuffer* getIndexBuffer(size_t size, bool dynamic, bool calledDuringFlush);
-    GrVertexBuffer* getVertexBuffer(size_t size, bool dynamic, bool calledDuringFlush);
-
 private:
     const GrIndexBuffer* createInstancedIndexBuffer(const uint16_t* pattern,
                                                     int patternSize,
@@ -14,24 +14,9 @@
 #include "GrGeometryBuffer.h"

 class GrVertexBuffer : public GrGeometryBuffer {
 public:
-    static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
-        static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
-
-        GrScratchKey::Builder builder(key, kType, 2);
-
-        builder[0] = SkToUInt(size);
-        builder[1] = dynamic ? 1 : 0;
-    }
-
 protected:
     GrVertexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
-        : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
-        GrScratchKey key;
-        ComputeScratchKey(gpuMemorySize, dynamic, &key);
-        this->setScratchKey(key);
-    }
-
+        : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {}
 private:
     typedef GrGeometryBuffer INHERITED;
 };
@@ -414,11 +414,6 @@ void GrGLCaps::init(const GrContextOptions& contextOptions,
     fReuseScratchTextures = kARM_GrGLVendor != ctxInfo.vendor() &&
                             kQualcomm_GrGLVendor != ctxInfo.vendor();

-#if 0
-    fReuseScratchBuffers = kARM_GrGLVendor != ctxInfo.vendor() &&
-                           kQualcomm_GrGLVendor != ctxInfo.vendor();
-#endif
-
     if (GrGLCaps::kES_IMG_MsToTexture_MSFBOType == fMSFBOType) {
         GR_GL_GetIntegerv(gli, GR_GL_MAX_SAMPLES_IMG, &fMaxSampleCount);
     } else if (GrGLCaps::kNone_MSFBOType != fMSFBOType) {