Revert of Add transfer buffer support. (patchset #9 id:160001 of https://codereview.chromium.org/1490473003/ )
Reason for revert: speculative revert for deps roll failures

Original issue's description:
> Add transfer buffer support.
>
> BUG=skia:4604
>
> Committed: https://skia.googlesource.com/skia/+/fa498fe12239988578465d0dee69f0d5645bb361

TBR=bsalomon@google.com,jvanverth@google.com
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=skia:4604

Review URL: https://codereview.chromium.org/1496843003
parent 404816ee3f
commit 53c5d5fb79
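For context, the reverted change added a GrTransferBuffer resource and a GrGpu::createTransferBuffer() entry point: a caller creates the buffer, maps it, copies data in, and unmaps it before the transfer is issued. The sketch below reconstructs that usage from the declarations visible in the diff; it is not part of the commit, and the helper name, the unref() ownership assumption, and the memcpy payload are illustrative only.

// Illustrative sketch only, not part of this commit. Assumes "GrGpu.h",
// "GrTransferBuffer.h", and <cstring> are included, and that the caller
// owns a reference on the returned buffer (GrGpuResource is ref counted).
static bool upload_via_transfer_buffer(GrGpu* gpu, const void* src, size_t size) {
    GrTransferBuffer* xfer = gpu->createTransferBuffer(size, GrGpu::kCpuToGpu_TransferType);
    if (!xfer) {
        return false;                  // backend could not create the buffer
    }
    bool copied = false;
    if (void* dst = xfer->map()) {     // map() invalidates any previous contents
        memcpy(dst, src, size);        // stage the CPU-side data
        xfer->unmap();                 // every map() must be matched by an unmap()
        copied = true;
    }
    xfer->unref();                     // drop the caller's reference
    return copied;
}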
@@ -188,7 +188,6 @@
'<(skia_src_path)/gpu/GrTextureProvider.cpp',
'<(skia_src_path)/gpu/GrTexturePriv.h',
'<(skia_src_path)/gpu/GrTextureAccess.cpp',
'<(skia_src_path)/gpu/GrTransferBuffer.h',
'<(skia_src_path)/gpu/GrTRecorder.h',
'<(skia_src_path)/gpu/GrVertexBuffer.h',
'<(skia_src_path)/gpu/GrVertices.h',
@@ -336,8 +335,6 @@
'<(skia_src_path)/gpu/gl/GrGLTexture.h',
'<(skia_src_path)/gpu/gl/GrGLTextureRenderTarget.cpp',
'<(skia_src_path)/gpu/gl/GrGLTextureRenderTarget.h',
'<(skia_src_path)/gpu/gl/GrGLTransferBuffer.cpp',
'<(skia_src_path)/gpu/gl/GrGLTransferBuffer.h',
'<(skia_src_path)/gpu/gl/GrGLUtil.cpp',
'<(skia_src_path)/gpu/gl/GrGLUtil.h',
'<(skia_src_path)/gpu/gl/GrGLVaryingHandler.cpp',
@@ -20,7 +20,6 @@
#include "GrRenderTargetPriv.h"
#include "GrStencilAttachment.h"
#include "GrSurfacePriv.h"
#include "GrTransferBuffer.h"
#include "GrVertexBuffer.h"
#include "GrVertices.h"

@@ -198,12 +197,6 @@ GrIndexBuffer* GrGpu::createIndexBuffer(size_t size, bool dynamic) {
return ib;
}

GrTransferBuffer* GrGpu::createTransferBuffer(size_t size, TransferType type) {
this->handleDirtyContext();
GrTransferBuffer* tb = this->onCreateTransferBuffer(size, type);
return tb;
}

void GrGpu::clear(const SkIRect& rect,
GrColor color,
GrRenderTarget* renderTarget) {
@@ -31,7 +31,6 @@ class GrRenderTarget;
class GrStencilAttachment;
class GrSurface;
class GrTexture;
class GrTransferBuffer;
class GrVertexBuffer;
class GrVertices;

@@ -129,22 +128,6 @@ public:
*/
GrIndexBuffer* createIndexBuffer(size_t size, bool dynamic);

enum TransferType {
kCpuToGpu_TransferType,
kGpuToCpu_TransferType
};

/**
* Creates a transfer buffer.
*
* @param size size in bytes of the index buffer
* @param toGpu true if used to transfer from the cpu to the gpu
* otherwise to be used to transfer from the gpu to the cpu
*
* @return The transfer buffer if successful, otherwise nullptr.
*/
GrTransferBuffer* createTransferBuffer(size_t size, TransferType type);

/**
* Resolves MSAA.
*/
@@ -472,7 +455,6 @@ private:
GrWrapOwnership) = 0;
virtual GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) = 0;
virtual GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) = 0;
virtual GrTransferBuffer* onCreateTransferBuffer(size_t size, TransferType type) = 0;

// overridden by backend-specific derived class to perform the clear.
virtual void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) = 0;
@@ -311,8 +311,6 @@ private:

GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) override { return nullptr; }

GrTransferBuffer* onCreateTransferBuffer(size_t, TransferType) override { return nullptr; }

void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) override {}

void onClearStencilClip(GrRenderTarget*, const SkIRect& rect, bool insideClip) override {}
@@ -1,76 +0,0 @@

/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/


#ifndef GrTransferBuffer_DEFINED
#define GrTransferBuffer_DEFINED

#include "GrGpuResource.h"

class GrTransferBuffer : public GrGpuResource {
public:
/**
* Maps the buffer to be written by the CPU.
*
* The previous content of the buffer is invalidated. It is an error
* to transfer to or from the buffer while it is mapped. It is an error to
* call map on an already mapped buffer. Must be matched by an unmap() call.
* Currently only one map at a time is supported (no nesting of map/unmap).
*
* Note that buffer mapping does not go through GrContext and therefore is
* not serialized with other operations.
*
* @return a pointer to the data or nullptr if the map fails.
*/
void* map() { return (fMapPtr = this->onMap()); }

/**
* Unmaps the buffer.
*
* The pointer returned by the previous map call will no longer be valid.
*/
void unmap() {
SkASSERT(fMapPtr);
this->onUnmap();
fMapPtr = nullptr;
}

/**
* Returns the same ptr that map() returned at time of map or nullptr if the
* is not mapped.
*
* @return ptr to mapped buffer data or nullptr if buffer is not mapped.
*/
void* mapPtr() const { return fMapPtr; }

/**
Queries whether the buffer has been mapped.

@return true if the buffer is mapped, false otherwise.
*/
bool isMapped() const { return SkToBool(fMapPtr); }

protected:
GrTransferBuffer(GrGpu* gpu, size_t gpuMemorySize)
: INHERITED(gpu, kUncached_LifeCycle)
, fGpuMemorySize(gpuMemorySize) {
}

private:
virtual size_t onGpuMemorySize() const { return fGpuMemorySize; }

virtual void* onMap() = 0;
virtual void onUnmap() = 0;

void* fMapPtr;
size_t fGpuMemorySize;

typedef GrGpuResource INHERITED;
};

#endif
@@ -65,7 +65,7 @@ void* GrGLBufferImpl::map(GrGLGpu* gpu) {
if (0 == fDesc.fID) {
fMapPtr = fCPUData;
} else {
fMapPtr = gpu->mapBuffer(fDesc.fID, fBufferType, fDesc.fUsage, fGLSizeInBytes,
fMapPtr = gpu->mapBuffer(fDesc.fID, fBufferType, fDesc.fDynamic, fGLSizeInBytes,
fDesc.fSizeInBytes);
fGLSizeInBytes = fDesc.fSizeInBytes;
}
@@ -89,7 +89,6 @@ bool GrGLBufferImpl::isMapped() const {

bool GrGLBufferImpl::updateData(GrGLGpu* gpu, const void* src, size_t srcSizeInBytes) {
SkASSERT(!this->isMapped());
SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
VALIDATE();
if (srcSizeInBytes > fDesc.fSizeInBytes) {
return false;
@@ -98,7 +97,7 @@ bool GrGLBufferImpl::updateData(GrGLGpu* gpu, const void* src, size_t srcSizeInB
memcpy(fCPUData, src, srcSizeInBytes);
return true;
}
gpu->bufferData(fDesc.fID, fBufferType, fDesc.fUsage, fDesc.fSizeInBytes, src,
gpu->bufferData(fDesc.fID, fBufferType, fDesc.fDynamic, fDesc.fSizeInBytes, src,
srcSizeInBytes);
#if GR_GL_USE_BUFFER_DATA_NULL_HINT
fGLSizeInBytes = fDesc.fSizeInBytes;
@@ -110,8 +109,7 @@ bool GrGLBufferImpl::updateData(GrGLGpu* gpu, const void* src, size_t srcSizeInB
}

void GrGLBufferImpl::validate() const {
SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType ||
GR_GL_PIXEL_PACK_BUFFER == fBufferType || GR_GL_PIXEL_UNPACK_BUFFER == fBufferType);
SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
// The following assert isn't valid when the buffer has been abandoned:
// SkASSERT((0 == fDesc.fID) == (fCPUData));
SkASSERT(nullptr == fCPUData || 0 == fGLSizeInBytes);
@@ -19,20 +19,10 @@ class GrGLGpu;
*/
class GrGLBufferImpl : SkNoncopyable {
public:
enum Usage {
kStaticDraw_Usage = 0,
kDynamicDraw_Usage,
kStreamDraw_Usage,
kStreamRead_Usage,

kLast_Usage = kStreamRead_Usage
};
static const int kUsageCount = kLast_Usage + 1;

struct Desc {
GrGLuint fID; // set to 0 to indicate buffer is CPU-backed and not a VBO.
size_t fSizeInBytes;
Usage fUsage;
bool fDynamic;
};

GrGLBufferImpl(GrGLGpu*, const Desc&, GrGLenum bufferType);
@@ -110,11 +110,8 @@
#define GR_GL_ELEMENT_ARRAY_BUFFER 0x8893
#define GR_GL_ARRAY_BUFFER_BINDING 0x8894
#define GR_GL_ELEMENT_ARRAY_BUFFER_BINDING 0x8895
#define GR_GL_PIXEL_PACK_BUFFER 0x88EB
#define GR_GL_PIXEL_UNPACK_BUFFER 0x88EC

#define GR_GL_STREAM_DRAW 0x88E0
#define GR_GL_STREAM_READ 0x88E1
#define GR_GL_STATIC_DRAW 0x88E4
#define GR_GL_DYNAMIC_DRAW 0x88E8
@@ -1399,16 +1399,12 @@ GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRen

////////////////////////////////////////////////////////////////////////////////

// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW

GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
GrGLVertexBuffer::Desc desc;
desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
desc.fDynamic = dynamic;
desc.fSizeInBytes = size;

if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
desc.fID = 0;
GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc);
return vertexBuffer;
@@ -1422,7 +1418,7 @@ GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
BufferData(GR_GL_ARRAY_BUFFER,
(GrGLsizeiptr) desc.fSizeInBytes,
nullptr, // data ptr
dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
GL_CALL(DeleteBuffers(1, &desc.fID));
this->notifyVertexBufferDelete(desc.fID);
@@ -1437,10 +1433,10 @@ GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) {

GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
GrGLIndexBuffer::Desc desc;
desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
desc.fDynamic = dynamic;
desc.fSizeInBytes = size;

if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
desc.fID = 0;
GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc);
return indexBuffer;
@@ -1454,7 +1450,7 @@ GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
(GrGLsizeiptr) desc.fSizeInBytes,
nullptr, // data ptr
dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
GL_CALL(DeleteBuffers(1, &desc.fID));
this->notifyIndexBufferDelete(desc.fID);
@@ -1467,35 +1463,6 @@ GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
}
}

GrTransferBuffer* GrGLGpu::onCreateTransferBuffer(size_t size, TransferType type) {
GrGLTransferBuffer::Desc desc;
bool toGpu = (kCpuToGpu_TransferType == type);
desc.fUsage = toGpu ? GrGLBufferImpl::kStreamDraw_Usage : GrGLBufferImpl::kStreamRead_Usage;

desc.fSizeInBytes = size;

// TODO: check caps to see if we can create a PBO, and which kind
GL_CALL(GenBuffers(1, &desc.fID));
if (desc.fID) {
CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
// make sure driver can allocate memory for this buffer
GrGLenum type = toGpu ? GR_GL_PIXEL_UNPACK_BUFFER : GR_GL_PIXEL_PACK_BUFFER;
GL_ALLOC_CALL(this->glInterface(),
BufferData(type,
(GrGLsizeiptr) desc.fSizeInBytes,
nullptr, // data ptr
(toGpu ? GR_GL_STREAM_DRAW : GR_GL_STREAM_READ)));
if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
GL_CALL(DeleteBuffers(1, &desc.fID));
return nullptr;
}
GrTransferBuffer* transferBuffer = new GrGLTransferBuffer(this, desc, type);
return transferBuffer;
}

return nullptr;
}

void GrGLGpu::flushScissor(const GrScissorState& scissorState,
const GrGLIRect& rtViewport,
GrSurfaceOrigin rtOrigin) {
@@ -1636,7 +1603,8 @@ void GrGLGpu::bindBuffer(GrGLuint id, GrGLenum type) {
this->handleDirtyContext();
if (GR_GL_ARRAY_BUFFER == type) {
this->bindVertexBuffer(id);
} else if (GR_GL_ELEMENT_ARRAY_BUFFER == type) {
} else {
SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == type);
this->bindIndexBufferAndDefaultVertexArray(id);
}
}
@@ -1646,27 +1614,19 @@ void GrGLGpu::releaseBuffer(GrGLuint id, GrGLenum type) {
GL_CALL(DeleteBuffers(1, &id));
if (GR_GL_ARRAY_BUFFER == type) {
this->notifyVertexBufferDelete(id);
} else if (GR_GL_ELEMENT_ARRAY_BUFFER == type) {
} else {
SkASSERT(GR_GL_ELEMENT_ARRAY_BUFFER == type);
this->notifyIndexBufferDelete(id);
}
}

static GrGLenum get_gl_usage(GrGLBufferImpl::Usage usage) {
static const GrGLenum grToGL[] = {
GR_GL_STATIC_DRAW, // GrGLBufferImpl::kStaticDraw_Usage
DYNAMIC_USAGE_PARAM, // GrGLBufferImpl::kDynamicDraw_Usage
GR_GL_STREAM_DRAW, // GrGLBufferImpl::kStreamDraw_Usage
GR_GL_STREAM_READ, // GrGLBufferImpl::kStreamRead_Usage
};
static_assert(SK_ARRAY_COUNT(grToGL) == GrGLBufferImpl::kUsageCount, "array_size_mismatch");
// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW

return grToGL[usage];
}

void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
size_t currentSize, size_t requestedSize) {
void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, bool dynamic, size_t currentSize,
size_t requestedSize) {
void* mapPtr = nullptr;
GrGLenum glUsage = get_gl_usage(usage);
// Handling dirty context is done in the bindBuffer call
switch (this->glCaps().mapBufferType()) {
case GrGLCaps::kNone_MapBufferType:
@@ -1675,7 +1635,8 @@ void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage
this->bindBuffer(id, type);
// Let driver know it can discard the old data
if (GR_GL_USE_BUFFER_DATA_NULL_HINT || currentSize != requestedSize) {
GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
GL_CALL(BufferData(type, requestedSize, nullptr,
dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
GL_CALL_RET(mapPtr, MapBuffer(type, GR_GL_WRITE_ONLY));
break;
@@ -1683,7 +1644,8 @@ void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage
this->bindBuffer(id, type);
// Make sure the GL buffer size agrees with fDesc before mapping.
if (currentSize != requestedSize) {
GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
GL_CALL(BufferData(type, requestedSize, nullptr,
dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
static const GrGLbitfield kAccess = GR_GL_MAP_INVALIDATE_BUFFER_BIT |
GR_GL_MAP_WRITE_BIT;
@@ -1694,7 +1656,8 @@ void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage
this->bindBuffer(id, type);
// Make sure the GL buffer size agrees with fDesc before mapping.
if (currentSize != requestedSize) {
GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
GL_CALL(BufferData(type, requestedSize, nullptr,
dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
GL_CALL_RET(mapPtr, MapBufferSubData(type, 0, requestedSize, GR_GL_WRITE_ONLY));
break;
@@ -1702,16 +1665,16 @@ void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage
return mapPtr;
}

void GrGLGpu::bufferData(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
size_t currentSize, const void* src, size_t srcSizeInBytes) {
void GrGLGpu::bufferData(GrGLuint id, GrGLenum type, bool dynamic, size_t currentSize,
const void* src, size_t srcSizeInBytes) {
SkASSERT(srcSizeInBytes <= currentSize);
// bindbuffer handles dirty context
this->bindBuffer(id, type);
GrGLenum glUsage = get_gl_usage(usage);
GrGLenum usage = dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW;

#if GR_GL_USE_BUFFER_DATA_NULL_HINT
if (currentSize == srcSizeInBytes) {
GL_CALL(BufferData(type, (GrGLsizeiptr) srcSizeInBytes, src, glUsage));
GL_CALL(BufferData(type, (GrGLsizeiptr) srcSizeInBytes, src, usage));
} else {
// Before we call glBufferSubData we give the driver a hint using
// glBufferData with nullptr. This makes the old buffer contents
@@ -1720,7 +1683,7 @@ void GrGLGpu::bufferData(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage
// assign a different allocation for the new contents to avoid
// flushing the gpu past draws consuming the old contents.
// TODO I think we actually want to try calling bufferData here
GL_CALL(BufferData(type, currentSize, nullptr, glUsage));
GL_CALL(BufferData(type, currentSize, nullptr, usage));
GL_CALL(BufferSubData(type, 0, (GrGLsizeiptr) srcSizeInBytes, src));
}
#else
@@ -16,7 +16,6 @@
#include "GrGLRenderTarget.h"
#include "GrGLStencilAttachment.h"
#include "GrGLTexture.h"
#include "GrGLTransferBuffer.h"
#include "GrGLVertexArray.h"
#include "GrGLVertexBuffer.h"
#include "GrGpu.h"
@@ -102,12 +101,12 @@ public:
void releaseBuffer(GrGLuint id, GrGLenum type);

// sizes are in bytes
void* mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage, size_t currentSize,
void* mapBuffer(GrGLuint id, GrGLenum type, bool dynamic, size_t currentSize,
size_t requestedSize);

void unmapBuffer(GrGLuint id, GrGLenum type, void* mapPtr);

void bufferData(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage, size_t currentSize,
void bufferData(GrGLuint id, GrGLenum type, bool dynamic, size_t currentSize,
const void* src, size_t srcSizeInBytes);

const GrGLContext* glContextForTesting() const override {
@@ -146,7 +145,6 @@ private:
const void* srcData) override;
GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) override;
GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) override;
GrTransferBuffer* onCreateTransferBuffer(size_t size, TransferType type) override;
GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) override;
GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
GrWrapOwnership) override;
@@ -10,8 +10,7 @@
#include "SkTraceMemoryDump.h"

GrGLIndexBuffer::GrGLIndexBuffer(GrGLGpu* gpu, const Desc& desc)
: INHERITED(gpu, desc.fSizeInBytes, GrGLBufferImpl::kDynamicDraw_Usage == desc.fUsage,
0 == desc.fID)
: INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, 0 == desc.fID)
, fImpl(gpu, desc, GR_GL_ELEMENT_ARRAY_BUFFER) {
this->registerWithCache();
}
@@ -1,51 +0,0 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/

#include "GrGLTransferBuffer.h"
#include "GrGLGpu.h"
#include "SkTraceMemoryDump.h"

GrGLTransferBuffer::GrGLTransferBuffer(GrGLGpu* gpu, const Desc& desc, GrGLenum type)
: INHERITED(gpu, desc.fSizeInBytes)
, fImpl(gpu, desc, type) {
this->registerWithCache();
}

void GrGLTransferBuffer::onRelease() {
if (!this->wasDestroyed()) {
fImpl.release(this->getGpuGL());
}

INHERITED::onRelease();
}

void GrGLTransferBuffer::onAbandon() {
fImpl.abandon();
INHERITED::onAbandon();
}

void* GrGLTransferBuffer::onMap() {
if (!this->wasDestroyed()) {
return fImpl.map(this->getGpuGL());
} else {
return nullptr;
}
}

void GrGLTransferBuffer::onUnmap() {
if (!this->wasDestroyed()) {
fImpl.unmap(this->getGpuGL());
}
}

void GrGLTransferBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
const SkString& dumpName) const {
SkString buffer_id;
buffer_id.appendU32(this->bufferID());
traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
buffer_id.c_str());
}
@@ -1,47 +0,0 @@
/*
* Copyright 2015 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/

#ifndef GrGLTransferBuffer_DEFINED
#define GrGLTransferBuffer_DEFINED

#include "GrTransferBuffer.h"
#include "GrGLBufferImpl.h"
#include "gl/GrGLInterface.h"

class GrGLGpu;

class GrGLTransferBuffer : public GrTransferBuffer {

public:
typedef GrGLBufferImpl::Desc Desc;

GrGLTransferBuffer(GrGLGpu* gpu, const Desc& desc, GrGLenum type);

GrGLuint bufferID() const { return fImpl.bufferID(); }
size_t baseOffset() const { return fImpl.baseOffset(); }

protected:
void onAbandon() override;
void onRelease() override;
void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
const SkString& dumpName) const override;

private:
void* onMap() override;
void onUnmap() override;

GrGLGpu* getGpuGL() const {
SkASSERT(!this->wasDestroyed());
return (GrGLGpu*)(this->getGpu());
}

GrGLBufferImpl fImpl;

typedef GrTransferBuffer INHERITED;
};

#endif
@@ -10,8 +10,7 @@
#include "SkTraceMemoryDump.h"

GrGLVertexBuffer::GrGLVertexBuffer(GrGLGpu* gpu, const Desc& desc)
: INHERITED(gpu, desc.fSizeInBytes, GrGLBufferImpl::kDynamicDraw_Usage == desc.fUsage,
0 == desc.fID)
: INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, 0 == desc.fID)
, fImpl(gpu, desc, GR_GL_ARRAY_BUFFER) {
this->registerWithCache();
}
@@ -122,8 +122,6 @@ public:
BufferManager fBufferManager;
GrGLuint fCurrArrayBuffer;
GrGLuint fCurrElementArrayBuffer;
GrGLuint fCurrPixelPackBuffer;
GrGLuint fCurrPixelUnpackBuffer;
GrGLuint fCurrProgramID;
GrGLuint fCurrShaderID;

@@ -131,8 +129,6 @@ public:
ContextState()
: fCurrArrayBuffer(0)
, fCurrElementArrayBuffer(0)
, fCurrPixelPackBuffer(0)
, fCurrPixelUnpackBuffer(0)
, fCurrProgramID(0)
, fCurrShaderID(0) {}

@@ -176,12 +172,6 @@ GrGLvoid GR_GL_FUNCTION_TYPE nullGLBufferData(GrGLenum target,
case GR_GL_ELEMENT_ARRAY_BUFFER:
id = state->fCurrElementArrayBuffer;
break;
case GR_GL_PIXEL_PACK_BUFFER:
id = state->fCurrPixelPackBuffer;
break;
case GR_GL_PIXEL_UNPACK_BUFFER:
id = state->fCurrPixelUnpackBuffer;
break;
default:
SkFAIL("Unexpected target to nullGLBufferData");
break;
@@ -225,12 +215,6 @@ GrGLvoid GR_GL_FUNCTION_TYPE nullGLBindBuffer(GrGLenum target, GrGLuint buffer)
case GR_GL_ELEMENT_ARRAY_BUFFER:
state->fCurrElementArrayBuffer = buffer;
break;
case GR_GL_PIXEL_PACK_BUFFER:
state->fCurrPixelPackBuffer = buffer;
break;
case GR_GL_PIXEL_UNPACK_BUFFER:
state->fCurrPixelUnpackBuffer = buffer;
break;
}
}

@@ -244,12 +228,6 @@ GrGLvoid GR_GL_FUNCTION_TYPE nullGLDeleteBuffers(GrGLsizei n, const GrGLuint* id
if (ids[i] == state->fCurrElementArrayBuffer) {
state->fCurrElementArrayBuffer = 0;
}
if (ids[i] == state->fCurrPixelPackBuffer) {
state->fCurrPixelPackBuffer = 0;
}
if (ids[i] == state->fCurrPixelUnpackBuffer) {
state->fCurrPixelUnpackBuffer = 0;
}

BufferObj* buffer = state->fBufferManager.lookUp(ids[i]);
state->fBufferManager.free(buffer);
@@ -267,12 +245,6 @@ GrGLvoid* GR_GL_FUNCTION_TYPE nullGLMapBufferRange(GrGLenum target, GrGLintptr o
case GR_GL_ELEMENT_ARRAY_BUFFER:
id = state->fCurrElementArrayBuffer;
break;
case GR_GL_PIXEL_PACK_BUFFER:
id = state->fCurrPixelPackBuffer;
break;
case GR_GL_PIXEL_UNPACK_BUFFER:
id = state->fCurrPixelUnpackBuffer;
break;
}

if (id > 0) {
@@ -295,12 +267,6 @@ GrGLvoid* GR_GL_FUNCTION_TYPE nullGLMapBuffer(GrGLenum target, GrGLenum access)
case GR_GL_ELEMENT_ARRAY_BUFFER:
id = state->fCurrElementArrayBuffer;
break;
case GR_GL_PIXEL_PACK_BUFFER:
id = state->fCurrPixelPackBuffer;
break;
case GR_GL_PIXEL_UNPACK_BUFFER:
id = state->fCurrPixelUnpackBuffer;
break;
}

if (id > 0) {
@@ -329,12 +295,6 @@ GrGLboolean GR_GL_FUNCTION_TYPE nullGLUnmapBuffer(GrGLenum target) {
case GR_GL_ELEMENT_ARRAY_BUFFER:
id = state->fCurrElementArrayBuffer;
break;
case GR_GL_PIXEL_PACK_BUFFER:
id = state->fCurrPixelPackBuffer;
break;
case GR_GL_PIXEL_UNPACK_BUFFER:
id = state->fCurrPixelUnpackBuffer;
break;
}
if (id > 0) {
BufferObj* buffer = state->fBufferManager.lookUp(id);
@@ -360,12 +320,6 @@ GrGLvoid GR_GL_FUNCTION_TYPE nullGLGetBufferParameteriv(GrGLenum target, GrGLenu
case GR_GL_ELEMENT_ARRAY_BUFFER:
id = state->fCurrElementArrayBuffer;
break;
case GR_GL_PIXEL_PACK_BUFFER:
id = state->fCurrPixelPackBuffer;
break;
case GR_GL_PIXEL_UNPACK_BUFFER:
id = state->fCurrPixelUnpackBuffer;
break;
}
if (id > 0) {
BufferObj* buffer = state->fBufferManager.lookUp(id);