Use calloc to allocate data that will be uploaded to vertex/index buffers in Chrome

BUG=chromium:454267
BUG=chromium:522315

Review URL: https://codereview.chromium.org/1300123002
Authored by bsalomon on 2015-08-19 08:26:51 -07:00; committed by Commit bot
parent 93ab254b7e
commit 7dea7b7df1
6 changed files with 52 additions and 22 deletions
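
The gist of the change: when the GL driver is Chromium's command buffer, any CPU-side staging memory that may later be uploaded to a vertex or index buffer is allocated zero-initialized (calloc) rather than uninitialized (malloc), because that environment reports errors for uploads of uninitialized memory even when no draw call reads it. The sketch below shows the allocation policy in isolation using standard C++; Caps and allocUploadStorage are hypothetical names for this illustration only, while the diff itself expresses the same switch through GrCaps::mustClearUploadedBufferData(), sk_calloc / sk_calloc_throw, and sk_malloc_throw / sk_malloc_flags.

#include <cstdlib>

// Hypothetical stand-in for the caps bit added in this CL; Skia keeps it on
// GrCaps and only sets it for the Chromium (command buffer) GL driver.
struct Caps {
    bool mustClearUploadedBufferData = false;
};

// Allocate CPU-side staging storage for a buffer upload. If the environment
// objects to uninitialized uploads, calloc zero-fills the block; otherwise a
// plain malloc avoids the extra clear.
static void* allocUploadStorage(const Caps& caps, size_t bytes) {
    if (0 == bytes) {
        return nullptr;
    }
    return caps.mustClearUploadedBufferData ? std::calloc(bytes, 1)
                                            : std::malloc(bytes);
}

int main() {
    Caps chromiumLikeCaps;
    chromiumLikeCaps.mustClearUploadedBufferData = true;

    void* staging = allocUploadStorage(chromiumLikeCaps, 1 << 16);
    // ... write vertex/index data into the used range, upload it, then free ...
    std::free(staging);
    return 0;
}

Consulting the flag once, at the point where the CPU block is (re)allocated, keeps the malloc fast path untouched on drivers that do not need the workaround; that is the approach the diff takes in GrBufferAllocPool::resetCpuData() and in GrGLBufferImpl's constructor.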

View File

@@ -218,6 +218,10 @@ public:
     bool fullClearIsFree() const { return fFullClearIsFree; }
+    /** True in environments that will issue errors if memory uploaded to buffers
+        is not initialized (even if not read by draw calls). */
+    bool mustClearUploadedBufferData() const { return fMustClearUploadedBufferData; }
 protected:
     /** Subclasses must call this at the end of their constructors in order to apply caps
         overrides requested by the client. Note that overrides will only reduce the caps never
@@ -239,6 +243,7 @@ protected:
     bool fTextureBarrierSupport : 1;
     bool fSupportsInstancedDraws : 1;
     bool fFullClearIsFree : 1;
+    bool fMustClearUploadedBufferData : 1;
     // Driver workaround
     bool fUseDrawInsteadOfClear : 1;

View File

@@ -46,9 +46,9 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
     : fBlocks(8) {
     fGpu = SkRef(gpu);
+    fCpuData = nullptr;
     fBufferType = bufferType;
-    fBufferPtr = NULL;
+    fBufferPtr = nullptr;
     fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);
     fBytesInUse = 0;
@@ -72,6 +72,7 @@ void GrBufferAllocPool::deleteBlocks() {
 GrBufferAllocPool::~GrBufferAllocPool() {
     VALIDATE();
     this->deleteBlocks();
+    sk_free(fCpuData);
     fGpu->unref();
 }
@@ -79,9 +80,10 @@ void GrBufferAllocPool::reset() {
     VALIDATE();
     fBytesInUse = 0;
     this->deleteBlocks();
-    // we may have created a large cpu mirror of a large VB. Reset the size
-    // to match our minimum.
-    fCpuData.reset(fMinBlockSize);
+    // we may have created a large cpu mirror of a large VB. Reset the size to match our minimum.
+    this->resetCpuData(fMinBlockSize);
     VALIDATE();
 }
@@ -96,7 +98,7 @@ void GrBufferAllocPool::unmap() {
             size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
             this->flushCpuData(fBlocks.back(), flushSize);
         }
-        fBufferPtr = NULL;
+        fBufferPtr = nullptr;
     }
     VALIDATE();
 }
@@ -110,7 +112,7 @@ void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
             GrGeometryBuffer* buf = fBlocks.back().fBuffer;
             SkASSERT(buf->mapPtr() == fBufferPtr);
         } else {
-            SkASSERT(fCpuData.get() == fBufferPtr);
+            SkASSERT(fCpuData == fBufferPtr);
         }
     } else {
         SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
@@ -175,7 +177,7 @@ void* GrBufferAllocPool::makeSpace(size_t size,
     // size.
     if (!this->createBlock(size)) {
-        return NULL;
+        return nullptr;
     }
     SkASSERT(fBufferPtr);
@@ -226,7 +228,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
     BufferBlock& block = fBlocks.push_back();
     block.fBuffer = this->getBuffer(size);
-    if (NULL == block.fBuffer) {
+    if (!block.fBuffer) {
         fBlocks.pop_back();
         return false;
     }
@@ -240,10 +242,10 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
         } else {
             this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
         }
-        fBufferPtr = NULL;
+        fBufferPtr = nullptr;
     }
-    SkASSERT(NULL == fBufferPtr);
+    SkASSERT(!fBufferPtr);
     // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
     // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
@@ -257,8 +259,8 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
         fBufferPtr = block.fBuffer->map();
     }
-    if (NULL == fBufferPtr) {
-        fBufferPtr = fCpuData.reset(block.fBytesFree);
+    if (!fBufferPtr) {
+        fBufferPtr = this->resetCpuData(block.fBytesFree);
     }
     VALIDATE(true);
@@ -274,14 +276,29 @@ void GrBufferAllocPool::destroyBlock() {
     SkASSERT(!block.fBuffer->isMapped());
     block.fBuffer->unref();
     fBlocks.pop_back();
-    fBufferPtr = NULL;
+    fBufferPtr = nullptr;
 }
+void* GrBufferAllocPool::resetCpuData(size_t newSize) {
+    sk_free(fCpuData);
+    if (newSize) {
+        if (fGpu->caps()->mustClearUploadedBufferData()) {
+            fCpuData = sk_calloc(newSize);
+        } else {
+            fCpuData = sk_malloc_throw(newSize);
+        }
+    } else {
+        fCpuData = nullptr;
+    }
+    return fCpuData;
+}
 void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
     GrGeometryBuffer* buffer = block.fBuffer;
     SkASSERT(buffer);
     SkASSERT(!buffer->isMapped());
-    SkASSERT(fCpuData.get() == fBufferPtr);
+    SkASSERT(fCpuData == fBufferPtr);
     SkASSERT(flushSize <= buffer->gpuMemorySize());
     VALIDATE(true);
@@ -329,7 +346,7 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
     SkASSERT(startVertex);
     size_t offset = 0; // assign to suppress warning
-    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
+    const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                      vertexSize,
                                      &geomBuffer,
@@ -356,7 +373,7 @@ void* GrIndexBufferAllocPool::makeSpace(int indexCount,
     SkASSERT(startIndex);
     size_t offset = 0; // assign to suppress warning
-    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
+    const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
     void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                      sizeof(uint16_t),
                                      &geomBuffer,
@@ -367,5 +384,3 @@ void* GrIndexBufferAllocPool::makeSpace(int indexCount,
     *startIndex = static_cast<int>(offset / sizeof(uint16_t));
     return ptr;
 }

View File

@@ -107,10 +107,10 @@ private:
     void destroyBlock();
     void deleteBlocks();
     void flushCpuData(const BufferBlock& block, size_t flushSize);
+    void* resetCpuData(size_t newSize);
 #ifdef SK_DEBUG
     void validate(bool unusedBlockAllowed = false) const;
 #endif
     size_t fBytesInUse;
     GrGpu* fGpu;
@@ -118,7 +118,7 @@ private:
     BufferType fBufferType;
     SkTArray<BufferBlock> fBlocks;
-    SkAutoMalloc fCpuData;
+    void* fCpuData;
     void* fBufferPtr;
     size_t fGeometryBufferMapThreshold;
 };

View File

@@ -93,6 +93,7 @@ GrCaps::GrCaps(const GrContextOptions& options) {
     fTextureBarrierSupport = false;
     fSupportsInstancedDraws = false;
     fFullClearIsFree = false;
+    fMustClearUploadedBufferData = false;
     fUseDrawInsteadOfClear = false;
@@ -156,6 +157,7 @@ SkString GrCaps::dump() const {
     r.appendf("Texture Barrier Support : %s\n", gNY[fTextureBarrierSupport]);
     r.appendf("Supports instanced draws : %s\n", gNY[fSupportsInstancedDraws]);
     r.appendf("Full screen clear is free : %s\n", gNY[fFullClearIsFree]);
+    r.appendf("Must clear buffer memory : %s\n", gNY[fMustClearUploadedBufferData]);
     r.appendf("Draw Instead of Clear [workaround] : %s\n", gNY[fUseDrawInsteadOfClear]);
     r.appendf("Draw Instead of TexSubImage [workaround] : %s\n",
               gNY[fUseDrawInsteadOfPartialRenderTargetWrite]);

View File

@@ -25,7 +25,11 @@ GrGLBufferImpl::GrGLBufferImpl(GrGLGpu* gpu, const Desc& desc, GrGLenum bufferTy
     , fBufferType(bufferType)
     , fMapPtr(NULL) {
     if (0 == desc.fID) {
-        fCPUData = sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW);
+        if (gpu->caps()->mustClearUploadedBufferData()) {
+            fCPUData = sk_calloc_throw(desc.fSizeInBytes);
+        } else {
+            fCPUData = sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW);
+        }
         fGLSizeInBytes = 0;
     } else {
         fCPUData = NULL;

View File

@@ -471,6 +471,10 @@ void GrGLCaps::init(const GrContextOptions& contextOptions,
         fUseDrawInsteadOfPartialRenderTargetWrite = true;
     }
+    if (kChromium_GrGLDriver == ctxInfo.driver()) {
+        fMustClearUploadedBufferData = true;
+    }
     if (kGL_GrGLStandard == standard) {
         // ARB allows mixed size FBO attachments, EXT does not.
         if (ctxInfo.version() >= GR_GL_VER(3, 0) ||