Refactor GrBufferAllocPools to use resource cache
Review URL: https://codereview.chromium.org/1139753002
parent 1bf714fb45
commit c5f1c5414f
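
For orientation before the per-file hunks: the alloc pools no longer pre-allocate and own a fixed set of buffers; they ask GrResourceProvider for each block, and the provider consults the resource cache by scratch key before creating anything new. Below is a condensed sketch of that lookup, assembled from the GrBufferAllocPool.cpp and GrResourceProvider.cpp hunks further down; it omits the index-buffer twin, the isAbandoned() check, and the kPreferNoPendingIO path, so read it as a summary rather than the literal code.

    // Condensed sketch (not verbatim; see the full hunks below).
    // GrBufferAllocPool::getBuffer() now forwards to the resource provider:
    //     rp->getVertexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
    // and the provider tries the cache before asking the GPU for a new buffer:
    GrVertexBuffer* GrResourceProvider::getVertexBuffer(size_t size, bool dynamic,
                                                        bool calledDuringFlush) {
        // Bin dynamic requests by power of two (with a floor) so that similar
        // sizes share one scratch key and can recycle each other's buffers.
        size = SkTMax((uint32_t)(1 << 15), GrNextPow2(SkToUInt(size)));
        GrScratchKey key;
        GrVertexBuffer::ComputeScratchKey(size, dynamic, &key);
        GrGpuResource* hit = this->cache()->findAndRefScratchResource(
                key, GrResourceCache::kRequireNoPendingIO_ScratchFlag);
        if (hit) {
            return static_cast<GrVertexBuffer*>(hit);   // recycled from the cache
        }
        return this->gpu()->createVertexBuffer(size, dynamic);   // cache miss
    }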
@@ -160,12 +160,12 @@ protected:
    GrGpu* gpu() { return fGpu; }
    const GrGpu* gpu() const { return fGpu; }

-private:
    bool isAbandoned() const {
        SkASSERT(SkToBool(fGpu) == SkToBool(fCache));
        return !SkToBool(fCache);
    }

+private:
    GrResourceCache* fCache;
    GrGpu* fGpu;
};
@@ -10,27 +10,16 @@
#include "GrBatchAtlas.h"
#include "GrPipeline.h"

-static const size_t DRAW_BUFFER_VBPOOL_BUFFER_SIZE = 1 << 15;
-static const int DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS = 4;
-
-static const size_t DRAW_BUFFER_IBPOOL_BUFFER_SIZE = 1 << 11;
-static const int DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS = 4;
-
GrBatchTarget::GrBatchTarget(GrGpu* gpu)
    : fGpu(gpu)
+    , fVertexPool(gpu)
+    , fIndexPool(gpu)
    , fFlushBuffer(kFlushBufferInitialSizeInBytes)
    , fIter(fFlushBuffer)
    , fNumberOfDraws(0)
    , fCurrentToken(0)
    , fLastFlushedToken(0)
    , fInlineUpdatesIndex(0) {
-
-    fVertexPool.reset(SkNEW_ARGS(GrVertexBufferAllocPool, (fGpu,
-                                                           DRAW_BUFFER_VBPOOL_BUFFER_SIZE,
-                                                           DRAW_BUFFER_VBPOOL_PREALLOC_BUFFERS)));
-    fIndexPool.reset(SkNEW_ARGS(GrIndexBufferAllocPool, (fGpu,
-                                                         DRAW_BUFFER_IBPOOL_BUFFER_SIZE,
-                                                         DRAW_BUFFER_IBPOOL_PREALLOC_BUFFERS)));
}

void GrBatchTarget::flushNext(int n) {

@@ -65,11 +54,11 @@ void GrBatchTarget::flushNext(int n) {

void* GrBatchTarget::makeVertSpace(size_t vertexSize, int vertexCount,
                                   const GrVertexBuffer** buffer, int* startVertex) {
-    return fVertexPool->makeSpace(vertexSize, vertexCount, buffer, startVertex);
+    return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
}

uint16_t* GrBatchTarget::makeIndexSpace(int indexCount,
                                        const GrIndexBuffer** buffer, int* startIndex) {
-    return reinterpret_cast<uint16_t*>(fIndexPool->makeSpace(indexCount, buffer, startIndex));
+    return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
}
@@ -120,26 +120,26 @@ public:
                         const GrIndexBuffer** buffer, int* startIndex);

    // A helper for draws which overallocate and then return data to the pool
-    void putBackIndices(size_t indices) { fIndexPool->putBack(indices * sizeof(uint16_t)); }
+    void putBackIndices(size_t indices) { fIndexPool.putBack(indices * sizeof(uint16_t)); }

    void putBackVertices(size_t vertices, size_t vertexStride) {
-        fVertexPool->putBack(vertices * vertexStride);
+        fVertexPool.putBack(vertices * vertexStride);
    }

    void reset() {
-        fVertexPool->reset();
-        fIndexPool->reset();
+        fVertexPool.reset();
+        fIndexPool.reset();
    }

private:
    void unmapVertexAndIndexBuffers() {
-        fVertexPool->unmap();
-        fIndexPool->unmap();
+        fVertexPool.unmap();
+        fIndexPool.unmap();
    }

    GrGpu* fGpu;
-    SkAutoTDelete<GrVertexBufferAllocPool> fVertexPool;
-    SkAutoTDelete<GrIndexBufferAllocPool> fIndexPool;
+    GrVertexBufferAllocPool fVertexPool;
+    GrIndexBufferAllocPool fIndexPool;

    typedef void* TBufferAlign; // This wouldn't be enough align if a command used long double.
@@ -11,6 +11,7 @@
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
+#include "GrResourceProvider.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"

@@ -22,6 +23,9 @@
static void VALIDATE(bool = false) {}
#endif

+static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
+static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;
+
// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)

@@ -37,9 +41,8 @@ do {
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     BufferType bufferType,
-                                     size_t blockSize,
-                                     int preallocBufferCnt)
-    : fBlocks(SkTMax(8, 2*preallocBufferCnt)) {
+                                     size_t blockSize)
+    : fBlocks(8) {

    fGpu = SkRef(gpu);

@@ -48,19 +51,9 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
    fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fBytesInUse = 0;
-
-    fPreallocBuffersInUse = 0;
-    fPreallocBufferStartIdx = 0;
-    for (int i = 0; i < preallocBufferCnt; ++i) {
-        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
-        if (buffer) {
-            *fPreallocBuffers.append() = buffer;
-        }
-    }
}

-GrBufferAllocPool::~GrBufferAllocPool() {
-    VALIDATE();
+void GrBufferAllocPool::deleteBlocks() {
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isMapped()) {

@@ -70,34 +63,22 @@ GrBufferAllocPool::~GrBufferAllocPool() {
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
-    fPreallocBuffers.unrefAll();
+    SkASSERT(!fBufferPtr);
+}
+
+GrBufferAllocPool::~GrBufferAllocPool() {
+    VALIDATE();
+    this->deleteBlocks();
    fGpu->unref();
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
-    if (fBlocks.count()) {
-        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
-        if (buffer->isMapped()) {
-            UNMAP_BUFFER(fBlocks.back());
-        }
-    }
-    // fPreallocBuffersInUse will be decremented down to zero in the while loop
-    int preallocBuffersInUse = fPreallocBuffersInUse;
-    while (!fBlocks.empty()) {
-        this->destroyBlock();
-    }
-    if (fPreallocBuffers.count()) {
-        // must set this after above loop.
-        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
-                                   preallocBuffersInUse) %
-                                  fPreallocBuffers.count();
-    }
+    this->deleteBlocks();
    // we may have created a large cpu mirror of a large VB. Reset the size
-    // to match our pre-allocated VBs.
+    // to match our minimum.
    fCpuData.reset(fMinBlockSize);
-    SkASSERT(0 == fPreallocBuffersInUse);
    VALIDATE();
}

@@ -169,8 +150,7 @@ void* GrBufferAllocPool::makeSpace(size_t size,
    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
-        size_t pad = GrSizeAlignUpPad(usedBytes,
-                                      alignment);
+        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
        if ((size + pad) <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;

@@ -208,12 +188,6 @@ void* GrBufferAllocPool::makeSpace(size_t size,
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

-    // if the putBack unwinds all the preallocated buffers then we will
-    // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
-    // will be decremented. I will reach zero if all blocks using preallocated
-    // buffers are released.
-    int preallocBuffersInUse = fPreallocBuffersInUse;
-
    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());

@@ -235,11 +209,7 @@ void GrBufferAllocPool::putBack(size_t bytes) {
            break;
        }
    }
-    if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
-        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
-                                   preallocBuffersInUse) %
-                                  fPreallocBuffers.count();
-    }

    VALIDATE();
}

@@ -252,24 +222,13 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
    BufferBlock& block = fBlocks.push_back();

-    if (size == fMinBlockSize &&
-        fPreallocBuffersInUse < fPreallocBuffers.count()) {
-
-        uint32_t nextBuffer = (fPreallocBuffersInUse +
-                               fPreallocBufferStartIdx) %
-                              fPreallocBuffers.count();
-        block.fBuffer = fPreallocBuffers[nextBuffer];
-        block.fBuffer->ref();
-        ++fPreallocBuffersInUse;
-    } else {
-        block.fBuffer = this->createBuffer(size);
-        if (NULL == block.fBuffer) {
-            fBlocks.pop_back();
-            return false;
-        }
+    block.fBuffer = this->getBuffer(size);
+    if (NULL == block.fBuffer) {
+        fBlocks.pop_back();
+        return false;
    }

-    block.fBytesFree = size;
+    block.fBytesFree = block.fBuffer->gpuMemorySize();
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);

@@ -288,7 +247,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
    // threshold.
    bool attemptMap = block.fBuffer->isCPUBacked();
    if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
-        attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD;
+        attemptMap = block.fBytesFree > GR_GEOM_BUFFER_MAP_THRESHOLD;
    }

    if (attemptMap) {

@@ -296,7 +255,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
    }

    if (NULL == fBufferPtr) {
-        fBufferPtr = fCpuData.reset(size);
+        fBufferPtr = fCpuData.reset(block.fBytesFree);
    }

    VALIDATE(true);

@@ -308,15 +267,7 @@ void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();
-    if (fPreallocBuffersInUse > 0) {
-        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
-                                       fPreallocBufferStartIdx +
-                                       (fPreallocBuffers.count() - 1)) %
-                                      fPreallocBuffers.count();
-        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
-            --fPreallocBuffersInUse;
-        }
-    }

    SkASSERT(!block.fBuffer->isMapped());
    block.fBuffer->unref();
    fBlocks.pop_back();

@@ -344,24 +295,22 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
    VALIDATE(true);
}

-GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
+GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
+
+    GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
+
    if (kIndex_BufferType == fBufferType) {
-        return fGpu->createIndexBuffer(size, true);
+        return rp->getIndexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
    } else {
        SkASSERT(kVertex_BufferType == fBufferType);
-        return fGpu->createVertexBuffer(size, true);
+        return rp->getVertexBuffer(size, /* dynamic = */ true, /* duringFlush = */ true);
    }
}

////////////////////////////////////////////////////////////////////////////////

-GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
-                                                 size_t bufferSize,
-                                                 int preallocBufferCnt)
-    : GrBufferAllocPool(gpu,
-                        kVertex_BufferType,
-                        bufferSize,
-                        preallocBufferCnt) {
+GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
+    : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
}

void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,

@@ -388,13 +337,8 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,

////////////////////////////////////////////////////////////////////////////////

-GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
-                                               size_t bufferSize,
-                                               int preallocBufferCnt)
-    : GrBufferAllocPool(gpu,
-                        kIndex_BufferType,
-                        bufferSize,
-                        preallocBufferCnt) {
+GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
+    : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
}

void* GrIndexBufferAllocPool::makeSpace(int indexCount,
@@ -64,16 +64,12 @@ protected:
     * @param bufferSize        The minimum size of created buffers.
     *                          This value will be clamped to some
     *                          reasonable minimum.
-     * @param preallocBufferCnt The pool will allocate this number of
-     *                          buffers at bufferSize and keep them until it
-     *                          is destroyed.
     */
    GrBufferAllocPool(GrGpu* gpu,
                      BufferType bufferType,
-                      size_t bufferSize = 0,
-                      int preallocBufferCnt = 0);
+                      size_t bufferSize = 0);

-    virtual ~GrBufferAllocPool();
+    virtual ~GrBufferAllocPool();

    /**
     * Returns a block of memory to hold data. A buffer designated to hold the

@@ -99,7 +95,7 @@ protected:
                    const GrGeometryBuffer** buffer,
                    size_t* offset);

-    GrGeometryBuffer* createBuffer(size_t size);
+    GrGeometryBuffer* getBuffer(size_t size);

private:
    struct BufferBlock {

@@ -109,6 +105,7 @@ private:

    bool createBlock(size_t requestSize);
    void destroyBlock();
+    void deleteBlocks();
    void flushCpuData(const BufferBlock& block, size_t flushSize);
#ifdef SK_DEBUG
    void validate(bool unusedBlockAllowed = false) const;

@@ -117,15 +114,10 @@ private:
    size_t fBytesInUse;

    GrGpu* fGpu;
-    SkTDArray<GrGeometryBuffer*> fPreallocBuffers;
    size_t fMinBlockSize;
    BufferType fBufferType;

    SkTArray<BufferBlock> fBlocks;
-    int fPreallocBuffersInUse;
-    // We attempt to cycle through the preallocated buffers rather than
-    // always starting from the first.
-    int fPreallocBufferStartIdx;
    SkAutoMalloc fCpuData;
    void* fBufferPtr;
};

@@ -141,13 +133,8 @@ public:
     * Constructor
     *
     * @param gpu The GrGpu used to create the vertex buffers.
-     * @param bufferSize The minimum size of created VBs. This value
-     *                   will be clamped to some reasonable minimum.
-     * @param preallocBufferCnt The pool will allocate this number of VBs at
-     *                          bufferSize and keep them until it is
-     *                          destroyed.
     */
-    GrVertexBufferAllocPool(GrGpu* gpu, size_t bufferSize = 0, int preallocBufferCnt = 0);
+    GrVertexBufferAllocPool(GrGpu* gpu);

    /**
     * Returns a block of memory to hold vertices. A buffer designated to hold

@@ -190,15 +177,8 @@ public:
     * Constructor
     *
     * @param gpu The GrGpu used to create the index buffers.
-     * @param bufferSize The minimum size of created IBs. This value
-     *                   will be clamped to some reasonable minimum.
-     * @param preallocBufferCnt The pool will allocate this number of VBs at
-     *                          bufferSize and keep them until it is
-     *                          destroyed.
     */
-    GrIndexBufferAllocPool(GrGpu* gpu,
-                           size_t bufferSize = 0,
-                           int preallocBufferCnt = 0);
+    GrIndexBufferAllocPool(GrGpu* gpu);

    /**
     * Returns a block of memory to hold indices. A buffer designated to hold
@@ -13,8 +13,18 @@

#include "GrGeometryBuffer.h"

class GrIndexBuffer : public GrGeometryBuffer {
public:
+    static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
+        static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
+
+        GrScratchKey::Builder builder(key, kType, 2);
+
+        builder[0] = SkToUInt(size);
+        builder[1] = dynamic ? 1 : 0;
+    }
+
    /**
     * Retrieves the maximum number of quads that could be rendered
     * from the index buffer (using kTriangles_GrPrimitiveType).

@@ -25,7 +35,12 @@ public:
    }
protected:
    GrIndexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
-        : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {}
+        : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
+        GrScratchKey key;
+        ComputeScratchKey(gpuMemorySize, dynamic, &key);
+        this->setScratchKey(key);
+    }

private:
    typedef GrGeometryBuffer INHERITED;
};
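
The reason the scratch key is now set in the constructor, rather than only being computed at lookup time, is that it makes every dynamic index buffer the backend creates discoverable by the getIndexBuffer() lookup shown later in this commit. A minimal sketch of why the round trip works, assuming GrScratchKey exposes the equality comparison of its GrResourceKey base (which is what the cache relies on):

    // Two keys built from the same (size, dynamic) pair are identical, so
    // findAndRefScratchResource() can hand back a previously created buffer
    // instead of allocating a new one.
    GrScratchKey a, b;
    GrIndexBuffer::ComputeScratchKey(1 << 12, /* dynamic = */ true, &a);
    GrIndexBuffer::ComputeScratchKey(1 << 12, /* dynamic = */ true, &b);
    SkASSERT(a == b);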
@@ -26,7 +26,7 @@ const GrIndexBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16
                                                                    const GrUniqueKey& key) {
    size_t bufferSize = patternSize * reps * sizeof(uint16_t);

-    GrIndexBuffer* buffer = this->gpu()->createIndexBuffer(bufferSize, /* dynamic = */ false);
+    GrIndexBuffer* buffer = this->getIndexBuffer(bufferSize, /* dynamic = */ false, true);
    if (!buffer) {
        return NULL;
    }

@@ -63,3 +63,58 @@ const GrIndexBuffer* GrResourceProvider::createQuadIndexBuffer() {
    return this->createInstancedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey);
}
+
+GrIndexBuffer* GrResourceProvider::getIndexBuffer(size_t size, bool dynamic,
+                                                  bool calledDuringFlush) {
+    if (this->isAbandoned()) {
+        return NULL;
+    }
+
+    if (dynamic) {
+        // bin by pow2 with a reasonable min
+        static const uint32_t MIN_SIZE = 1 << 12;
+        size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
+
+        GrScratchKey key;
+        GrIndexBuffer::ComputeScratchKey(size, dynamic, &key);
+        uint32_t scratchFlags = 0;
+        if (calledDuringFlush) {
+            scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
+        } else {
+            scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
+        }
+        GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, scratchFlags);
+        if (resource) {
+            return static_cast<GrIndexBuffer*>(resource);
+        }
+    }
+
+    return this->gpu()->createIndexBuffer(size, dynamic);
+}
+
+GrVertexBuffer* GrResourceProvider::getVertexBuffer(size_t size, bool dynamic,
+                                                    bool calledDuringFlush) {
+    if (this->isAbandoned()) {
+        return NULL;
+    }
+
+    if (dynamic) {
+        // bin by pow2 with a reasonable min
+        static const uint32_t MIN_SIZE = 1 << 15;
+        size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
+
+        GrScratchKey key;
+        GrVertexBuffer::ComputeScratchKey(size, dynamic, &key);
+        uint32_t scratchFlags = 0;
+        if (calledDuringFlush) {
+            scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
+        } else {
+            scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
+        }
+        GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, scratchFlags);
+        if (resource) {
+            return static_cast<GrVertexBuffer*>(resource);
+        }
+    }
+
+    return this->gpu()->createVertexBuffer(size, dynamic);
+}
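
To make the "bin by pow2 with a reasonable min" step above concrete, here is a small illustration; the helper name and the request sizes are hypothetical, while SkTMax, GrNextPow2 and SkToUInt are the same utilities the hunk uses:

    // Hypothetical helper mirroring the index-buffer binning in getIndexBuffer().
    static size_t binDynamicIndexBufferSize(size_t size) {
        static const uint32_t MIN_SIZE = 1 << 12;   // floor for index buffers
        return SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
    }
    // binDynamicIndexBufferSize(5000) == 8192 -> a later 6500-byte request bins to
    //                                            the same scratch key and can recycle it
    // binDynamicIndexBufferSize(100)  == 4096 -> clamped up to the 1 << 12 floor
    // Vertex buffers use the same scheme with a 1 << 15 floor.

Because requests are binned before the key is built, dynamic buffers of slightly different sizes land on the same cache entry instead of fragmenting the cache.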
@@ -71,6 +71,9 @@ public:
    using GrTextureProvider::findAndRefResourceByUniqueKey;
    using GrTextureProvider::abandon;

+    GrIndexBuffer* getIndexBuffer(size_t size, bool dynamic, bool calledDuringFlush);
+    GrVertexBuffer* getVertexBuffer(size_t size, bool dynamic, bool calledDuringFlush);
+
private:
    const GrIndexBuffer* createInstancedIndexBuffer(const uint16_t* pattern,
                                                    int patternSize,
@@ -14,9 +14,24 @@

#include "GrGeometryBuffer.h"

class GrVertexBuffer : public GrGeometryBuffer {
+public:
+    static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
+        static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
+
+        GrScratchKey::Builder builder(key, kType, 2);
+
+        builder[0] = SkToUInt(size);
+        builder[1] = dynamic ? 1 : 0;
+    }
+
protected:
    GrVertexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
-        : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {}
+        : INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
+        GrScratchKey key;
+        ComputeScratchKey(gpuMemorySize, dynamic, &key);
+        this->setScratchKey(key);
+    }

private:
    typedef GrGeometryBuffer INHERITED;
};