Move GrGLBufferImpl's GL calls behind GrGLGpu

BUG=skia:

Review URL: https://codereview.chromium.org/1417313003
This commit is contained in:
joshualitt 2015-10-23 09:08:08 -07:00 committed by Commit bot
parent 648c696438
commit 93316b92e5
5 changed files with 143 additions and 111 deletions

View File

@ -411,6 +411,12 @@ protected:
*preference = SkTMax(*preference, elevation);
}
// Performs a deferred context reset: if any reset bits have been recorded,
// resetContext() is invoked before the caller touches GL state.
void handleDirtyContext() {
    if (!fResetBits) {
        return;
    }
    this->resetContext();
}
Stats fStats;
SkAutoTDelete<GrPathRendering> fPathRendering;
// Subclass must initialize this in its constructor.
@ -486,12 +492,6 @@ private:
++fResetTimestamp;
}
// Lazily flushes pending context resets: calls resetContext() iff any reset
// bits are outstanding. (This is the pre-move private copy being removed by
// this diff; the method is relocated to the protected section above.)
void handleDirtyContext() {
if (fResetBits) {
this->resetContext();
}
}
ResetTimestamp fResetTimestamp;
uint32_t fResetBits;
// The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.

View File

@ -16,10 +16,6 @@
#define VALIDATE() do {} while(false)
#endif
// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
GrGLBufferImpl::GrGLBufferImpl(GrGLGpu* gpu, const Desc& desc, GrGLenum bufferType)
: fDesc(desc)
, fBufferType(bufferType)
@ -46,13 +42,7 @@ void GrGLBufferImpl::release(GrGLGpu* gpu) {
sk_free(fCPUData);
fCPUData = nullptr;
} else if (fDesc.fID) {
GL_CALL(gpu, DeleteBuffers(1, &fDesc.fID));
if (GR_GL_ARRAY_BUFFER == fBufferType) {
gpu->notifyVertexBufferDelete(fDesc.fID);
} else {
SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
gpu->notifyIndexBufferDelete(fDesc.fID);
}
gpu->releaseBuffer(fDesc.fID, fBufferType);
fDesc.fID = 0;
fGLSizeInBytes = 0;
}
@ -69,69 +59,15 @@ void GrGLBufferImpl::abandon() {
VALIDATE();
}
// Binds this buffer's GL object via the gpu's state-tracked bind paths.
// Vertex buffers go through bindVertexBuffer(); everything else must be an
// index buffer and is bound together with the default vertex array.
// (Removed by this diff in favor of GrGLGpu::bindBuffer.)
void GrGLBufferImpl::bind(GrGLGpu* gpu) const {
VALIDATE();
if (GR_GL_ARRAY_BUFFER == fBufferType) {
gpu->bindVertexBuffer(fDesc.fID);
} else {
SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
gpu->bindIndexBufferAndDefaultVertexArray(fDesc.fID);
}
VALIDATE();
}
void* GrGLBufferImpl::map(GrGLGpu* gpu) {
VALIDATE();
SkASSERT(!this->isMapped());
if (0 == fDesc.fID) {
fMapPtr = fCPUData;
} else {
switch (gpu->glCaps().mapBufferType()) {
case GrGLCaps::kNone_MapBufferType:
VALIDATE();
return nullptr;
case GrGLCaps::kMapBuffer_MapBufferType:
this->bind(gpu);
// Let driver know it can discard the old data
if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fDesc.fSizeInBytes != fGLSizeInBytes) {
fGLSizeInBytes = fDesc.fSizeInBytes;
GL_CALL(gpu,
BufferData(fBufferType, fGLSizeInBytes, nullptr,
fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
GR_GL_CALL_RET(gpu->glInterface(), fMapPtr,
MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
break;
case GrGLCaps::kMapBufferRange_MapBufferType: {
this->bind(gpu);
// Make sure the GL buffer size agrees with fDesc before mapping.
if (fDesc.fSizeInBytes != fGLSizeInBytes) {
fGLSizeInBytes = fDesc.fSizeInBytes;
GL_CALL(gpu,
BufferData(fBufferType, fGLSizeInBytes, nullptr,
fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
static const GrGLbitfield kAccess = GR_GL_MAP_INVALIDATE_BUFFER_BIT |
GR_GL_MAP_WRITE_BIT;
GR_GL_CALL_RET(gpu->glInterface(),
fMapPtr,
MapBufferRange(fBufferType, 0, fGLSizeInBytes, kAccess));
break;
}
case GrGLCaps::kChromium_MapBufferType:
this->bind(gpu);
// Make sure the GL buffer size agrees with fDesc before mapping.
if (fDesc.fSizeInBytes != fGLSizeInBytes) {
fGLSizeInBytes = fDesc.fSizeInBytes;
GL_CALL(gpu,
BufferData(fBufferType, fGLSizeInBytes, nullptr,
fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
GR_GL_CALL_RET(gpu->glInterface(),
fMapPtr,
MapBufferSubData(fBufferType, 0, fGLSizeInBytes, GR_GL_WRITE_ONLY));
break;
}
fMapPtr = gpu->mapBuffer(fDesc.fID, fBufferType, fDesc.fDynamic, fGLSizeInBytes,
fDesc.fSizeInBytes);
fGLSizeInBytes = fDesc.fSizeInBytes;
}
VALIDATE();
return fMapPtr;
@ -141,20 +77,7 @@ void GrGLBufferImpl::unmap(GrGLGpu* gpu) {
VALIDATE();
SkASSERT(this->isMapped());
if (0 != fDesc.fID) {
switch (gpu->glCaps().mapBufferType()) {
case GrGLCaps::kNone_MapBufferType:
SkDEBUGFAIL("Shouldn't get here.");
return;
case GrGLCaps::kMapBuffer_MapBufferType: // fall through
case GrGLCaps::kMapBufferRange_MapBufferType:
this->bind(gpu);
GL_CALL(gpu, UnmapBuffer(fBufferType));
break;
case GrGLCaps::kChromium_MapBufferType:
this->bind(gpu);
GR_GL_CALL(gpu->glInterface(), UnmapBufferSubData(fMapPtr));
break;
}
gpu->unmapBuffer(fDesc.fID, fBufferType, fMapPtr);
}
fMapPtr = nullptr;
}
@ -174,30 +97,14 @@ bool GrGLBufferImpl::updateData(GrGLGpu* gpu, const void* src, size_t srcSizeInB
memcpy(fCPUData, src, srcSizeInBytes);
return true;
}
this->bind(gpu);
GrGLenum usage = fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW;
gpu->bufferData(fDesc.fID, fBufferType, fDesc.fDynamic, fDesc.fSizeInBytes, src,
srcSizeInBytes);
#if GR_GL_USE_BUFFER_DATA_NULL_HINT
if (fDesc.fSizeInBytes == srcSizeInBytes) {
GL_CALL(gpu, BufferData(fBufferType, (GrGLsizeiptr) srcSizeInBytes, src, usage));
} else {
// Before we call glBufferSubData we give the driver a hint using
// glBufferData with nullptr. This makes the old buffer contents
// inaccessible to future draws. The GPU may still be processing
// draws that reference the old contents. With this hint it can
// assign a different allocation for the new contents to avoid
// flushing the gpu past draws consuming the old contents.
fGLSizeInBytes = fDesc.fSizeInBytes;
GL_CALL(gpu, BufferData(fBufferType, fGLSizeInBytes, nullptr, usage));
GL_CALL(gpu, BufferSubData(fBufferType, 0, (GrGLsizeiptr) srcSizeInBytes, src));
}
fGLSizeInBytes = fDesc.fSizeInBytes;
#else
// Note that we're cheating on the size here. Currently no methods
// allow a partial update that preserves contents of non-updated
// portions of the buffer (map() does a glBufferData(..size, nullptr..))
fGLSizeInBytes = srcSizeInBytes;
GL_CALL(gpu, BufferData(fBufferType, fGLSizeInBytes, src, usage));
#endif
VALIDATE();
return true;
}
@ -206,6 +113,6 @@ void GrGLBufferImpl::validate() const {
// The following assert isn't valid when the buffer has been abandoned:
// SkASSERT((0 == fDesc.fID) == (fCPUData));
SkASSERT(nullptr == fCPUData || 0 == fGLSizeInBytes);
SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes == fDesc.fSizeInBytes);
SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes <= fDesc.fSizeInBytes);
SkASSERT(nullptr == fCPUData || nullptr == fMapPtr || fCPUData == fMapPtr);
}

View File

@ -37,8 +37,6 @@ public:
GrGLuint bufferID() const { return fDesc.fID; }
size_t baseOffset() const { return reinterpret_cast<size_t>(fCPUData); }
void bind(GrGLGpu* gpu) const;
void* map(GrGLGpu* gpu);
void unmap(GrGLGpu* gpu);
bool isMapped() const;

View File

@ -1561,6 +1561,119 @@ void GrGLGpu::buildProgramDesc(GrProgramDesc* desc,
}
}
// Binds buffer object `id` to the given target, routing through the gpu's
// cached binding state. `type` must be GR_GL_ARRAY_BUFFER or
// GR_GL_ELEMENT_ARRAY_BUFFER; index buffers are bound together with the
// default vertex array.
void GrGLGpu::bindBuffer(GrGLuint id, GrGLenum type) {
    this->handleDirtyContext();
    switch (type) {
        case GR_GL_ARRAY_BUFFER:
            this->bindVertexBuffer(id);
            break;
        default:
            SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == type);
            this->bindIndexBufferAndDefaultVertexArray(id);
            break;
    }
}
// Deletes GL buffer object `id` and tells the gpu's binding cache that the
// id is gone so stale bindings are not reused.
void GrGLGpu::releaseBuffer(GrGLuint id, GrGLenum type) {
    this->handleDirtyContext();
    GL_CALL(DeleteBuffers(1, &id));
    const bool isVertexBuffer = (GR_GL_ARRAY_BUFFER == type);
    if (!isVertexBuffer) {
        SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == type);
        this->notifyIndexBufferDelete(id);
    } else {
        this->notifyVertexBufferDelete(id);
    }
}
// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
// Maps buffer `id` of the given target for write access and returns the
// mapped pointer, or nullptr when the driver exposes no mapping mechanism
// (kNone_MapBufferType). `currentSize` is the size GL currently has allocated
// for the buffer, `requestedSize` is the size the caller wants mapped; both
// are in bytes. When the sizes disagree the buffer is orphaned/resized with
// BufferData(nullptr) before mapping.
void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, bool dynamic, size_t currentSize,
size_t requestedSize) {
void* mapPtr = nullptr;
// Handling dirty context is done in the bindBuffer call
switch (this->glCaps().mapBufferType()) {
case GrGLCaps::kNone_MapBufferType:
// No mapping support; caller gets nullptr and must fall back.
break;
case GrGLCaps::kMapBuffer_MapBufferType:
this->bindBuffer(id, type);
// Let driver know it can discard the old data
if (GR_GL_USE_BUFFER_DATA_NULL_HINT || currentSize != requestedSize) {
GL_CALL(BufferData(type, requestedSize, nullptr,
dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
GL_CALL_RET(mapPtr, MapBuffer(type, GR_GL_WRITE_ONLY));
break;
case GrGLCaps::kMapBufferRange_MapBufferType: {
this->bindBuffer(id, type);
// Make sure the GL buffer size agrees with the requested size before mapping.
if (currentSize != requestedSize) {
GL_CALL(BufferData(type, requestedSize, nullptr,
dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
// Invalidate so the driver need not preserve (or sync on) old contents.
static const GrGLbitfield kAccess = GR_GL_MAP_INVALIDATE_BUFFER_BIT |
GR_GL_MAP_WRITE_BIT;
GL_CALL_RET(mapPtr, MapBufferRange(type, 0, requestedSize, kAccess));
break;
}
case GrGLCaps::kChromium_MapBufferType:
this->bindBuffer(id, type);
// Make sure the GL buffer size agrees with the requested size before mapping.
if (currentSize != requestedSize) {
GL_CALL(BufferData(type, requestedSize, nullptr,
dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
GL_CALL_RET(mapPtr, MapBufferSubData(type, 0, requestedSize, GR_GL_WRITE_ONLY));
break;
}
return mapPtr;
}
// Uploads `srcSizeInBytes` bytes from `src` into buffer `id`. `currentSize`
// is the buffer's current GL allocation in bytes and must be >= the upload
// size. Under GR_GL_USE_BUFFER_DATA_NULL_HINT a partial update orphans the
// old storage first; otherwise the buffer is simply re-specified at the
// upload size (see the cheating-on-size note below).
void GrGLGpu::bufferData(GrGLuint id, GrGLenum type, bool dynamic, size_t currentSize,
const void* src, size_t srcSizeInBytes) {
SkASSERT(srcSizeInBytes <= currentSize);
// bindbuffer handles dirty context
this->bindBuffer(id, type);
GrGLenum usage = dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW;
#if GR_GL_USE_BUFFER_DATA_NULL_HINT
if (currentSize == srcSizeInBytes) {
GL_CALL(BufferData(type, (GrGLsizeiptr) srcSizeInBytes, src, usage));
} else {
// Before we call glBufferSubData we give the driver a hint using
// glBufferData with nullptr. This makes the old buffer contents
// inaccessible to future draws. The GPU may still be processing
// draws that reference the old contents. With this hint it can
// assign a different allocation for the new contents to avoid
// flushing the gpu past draws consuming the old contents.
// TODO I think we actually want to try calling bufferData here
GL_CALL(BufferData(type, currentSize, nullptr, usage));
GL_CALL(BufferSubData(type, 0, (GrGLsizeiptr) srcSizeInBytes, src));
}
#else
// Note that we're cheating on the size here. Currently no methods
// allow a partial update that preserves contents of non-updated
// portions of the buffer (map() does a glBufferData(..size, nullptr..))
GL_CALL(BufferData(type, srcSizeInBytes, src, usage));
#endif
}
// Unmaps a buffer previously mapped with mapBuffer(). `mapPtr` is only
// consulted on the Chromium map-sub path; the other paths unmap by target.
// Must not be called when the caps report no mapping support.
void GrGLGpu::unmapBuffer(GrGLuint id, GrGLenum type, void* mapPtr) {
    // bindBuffer takes care of handleDirtyContext for us.
    const auto mapType = this->glCaps().mapBufferType();
    if (GrGLCaps::kNone_MapBufferType == mapType) {
        SkDEBUGFAIL("Shouldn't get here.");
        return;
    }
    this->bindBuffer(id, type);
    if (GrGLCaps::kChromium_MapBufferType == mapType) {
        GL_CALL(UnmapBufferSubData(mapPtr));
    } else {
        SkASSERT(GrGLCaps::kMapBuffer_MapBufferType == mapType ||
                 GrGLCaps::kMapBufferRange_MapBufferType == mapType);
        GL_CALL(UnmapBuffer(type));
    }
}
void GrGLGpu::disableScissor() {
if (kNo_TriState != fHWScissorSettings.fEnabled) {
GL_CALL(Disable(GR_GL_SCISSOR_TEST));

View File

@ -95,6 +95,20 @@ public:
const GrPrimitiveProcessor&,
const GrPipeline&) const override;
// Buffer-object API used by GrGLBufferImpl so that all GL calls stay behind
// GrGLGpu (and go through its state cache / dirty-context handling).
// id and type (GL_ARRAY_BUFFER or GL_ELEMENT_ARRAY_BUFFER) of buffer to bind
void bindBuffer(GrGLuint id, GrGLenum type);
// Deletes the buffer object and updates the cached binding state.
void releaseBuffer(GrGLuint id, GrGLenum type);
// sizes are in bytes
// Maps the buffer for write access; returns nullptr if mapping is unsupported.
void* mapBuffer(GrGLuint id, GrGLenum type, bool dynamic, size_t currentSize,
size_t requestedSize);
// Unmaps a buffer mapped by mapBuffer(); mapPtr is the pointer it returned.
void unmapBuffer(GrGLuint id, GrGLenum type, void* mapPtr);
// Uploads srcSizeInBytes bytes from src; srcSizeInBytes must be <= currentSize.
void bufferData(GrGLuint id, GrGLenum type, bool dynamic, size_t currentSize,
const void* src, size_t srcSizeInBytes);
const GrGLContext* glContextForTesting() const override {
return &this->glContext();
}