Rename from "(un)lock" to "(un)map" for geometry buffers.

This better reflects OpenGL terminology and is less overloaded ("lock" is used w.r.t. the resource cache).
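
For illustration, a typical call site changes as follows (a minimal sketch; buffer, vertices, and byteCount are hypothetical):

    // Before this change:
    void* ptr = buffer->lock();
    if (NULL != ptr) {
        memcpy(ptr, vertices, byteCount);
        buffer->unlock();
    }

    // After this change (same semantics, OpenGL-style names):
    void* ptr = buffer->map();
    if (NULL != ptr) {
        memcpy(ptr, vertices, byteCount);
        buffer->unmap();
    }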

R=robertphillips@google.com

Author: bsalomon@google.com

Review URL: https://codereview.chromium.org/275493004

git-svn-id: http://skia.googlecode.com/svn/trunk@14628 2bbb7eff-a529-9590-31e7-b0007b416f81
Commit: 8341eb76fb (parent: d4db657bf5)
Committed by: commit-bot@chromium.org, 2014-05-07 20:51:05 +00:00
17 changed files with 138 additions and 138 deletions

View File

@ -197,13 +197,13 @@ typedef unsigned __int64 uint64_t;
#endif
/**
* GR_GEOM_BUFFER_LOCK_THRESHOLD gives a threshold (in bytes) for when Gr should
* lock a GrGeometryBuffer to update its contents. It will use lock() if the
* GR_GEOM_BUFFER_MAP_THRESHOLD gives a threshold (in bytes) for when Gr should
* map a GrGeometryBuffer to update its contents. It will use map() if the
* size of the updated region is greater than the threshold. Otherwise it will
* use updateData().
*/
#if !defined(GR_GEOM_BUFFER_LOCK_THRESHOLD)
#define GR_GEOM_BUFFER_LOCK_THRESHOLD (1 << 15)
#if !defined(GR_GEOM_BUFFER_MAP_THRESHOLD)
#define GR_GEOM_BUFFER_MAP_THRESHOLD (1 << 15)
#endif
/**
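
In outline, the threshold is consulted like this (a simplified sketch; buffer, src, and updateSize are hypothetical, and map() may still return NULL):

    if (updateSize > GR_GEOM_BUFFER_MAP_THRESHOLD) {
        // Large update: map the buffer and write into it directly.
        void* dst = buffer->map();
        if (NULL != dst) {
            memcpy(dst, src, updateSize);
            buffer->unmap();
        }
    } else {
        // Small update: let the backend copy the data.
        buffer->updateData(src, updateSize);
    }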

View File

@ -13,11 +13,11 @@
#endif
/**
* This gives a threshold in bytes of when to lock a GrGeometryBuffer vs using
* This gives a threshold in bytes of when to map a GrGeometryBuffer vs using
* updateData. (Note that depending on the underlying 3D API the update functions
* may always be implemented using a lock)
* may always be implemented using a map)
*/
//#define GR_GEOM_BUFFER_LOCK_THRESHOLD (1<<15)
//#define GR_GEOM_BUFFER_MAP_THRESHOLD (1<<15)
/**
* This gives a threshold in megabytes for the maximum size of the texture cache

View File

@ -50,7 +50,7 @@ static const size_t kLineSegIdxSBufize = kIdxsPerLineSeg *
kNumLineSegsInIdxBuffer;
static bool push_quad_index_data(GrIndexBuffer* qIdxBuffer) {
uint16_t* data = (uint16_t*) qIdxBuffer->lock();
uint16_t* data = (uint16_t*) qIdxBuffer->map();
bool tempData = NULL == data;
if (tempData) {
data = SkNEW_ARRAY(uint16_t, kNumQuadsInIdxBuffer * kIdxsPerQuad);
@ -86,13 +86,13 @@ static bool push_quad_index_data(GrIndexBuffer* qIdxBuffer) {
delete[] data;
return ret;
} else {
qIdxBuffer->unlock();
qIdxBuffer->unmap();
return true;
}
}
static bool push_line_index_data(GrIndexBuffer* lIdxBuffer) {
uint16_t* data = (uint16_t*) lIdxBuffer->lock();
uint16_t* data = (uint16_t*) lIdxBuffer->map();
bool tempData = NULL == data;
if (tempData) {
data = SkNEW_ARRAY(uint16_t, kNumLineSegsInIdxBuffer * kIdxsPerLineSeg);
@ -139,7 +139,7 @@ static bool push_line_index_data(GrIndexBuffer* lIdxBuffer) {
delete[] data;
return ret;
} else {
lIdxBuffer->unlock();
lIdxBuffer->unmap();
return true;
}
}
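
Both helpers follow the same map-or-fallback pattern. In outline (a sketch; count and writeIndices() are hypothetical stand-ins for the buffer-specific constants and fill loops the hunks elide):

    static bool push_index_data(GrIndexBuffer* buffer, int count) {
        uint16_t* data = (uint16_t*) buffer->map();
        bool tempData = NULL == data;               // map() unsupported or failed
        if (tempData) {
            data = SkNEW_ARRAY(uint16_t, count);    // fall back to a CPU-side array
        }
        writeIndices(data, count);                  // hypothetical: fill the indices
        if (tempData) {
            // Upload the temporary array and free it.
            bool ret = buffer->updateData(data, count * sizeof(uint16_t));
            delete[] data;
            return ret;
        } else {
            buffer->unmap();                        // writes went straight to the buffer
            return true;
        }
    }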

View File

@ -311,7 +311,7 @@ GrIndexBuffer* GrAARectRenderer::aaFillRectIndexBuffer(GrGpu* gpu) {
if (NULL == fAAFillRectIndexBuffer) {
fAAFillRectIndexBuffer = gpu->createIndexBuffer(kAAFillRectIndexBufferSize, false);
if (NULL != fAAFillRectIndexBuffer) {
uint16_t* data = (uint16_t*) fAAFillRectIndexBuffer->lock();
uint16_t* data = (uint16_t*) fAAFillRectIndexBuffer->map();
bool useTempData = (NULL == data);
if (useTempData) {
data = SkNEW_ARRAY(uint16_t, kNumAAFillRectsInIndexBuffer * kIndicesPerAAFillRect);
@ -331,7 +331,7 @@ GrIndexBuffer* GrAARectRenderer::aaFillRectIndexBuffer(GrGpu* gpu) {
}
SkDELETE_ARRAY(data);
} else {
fAAFillRectIndexBuffer->unlock();
fAAFillRectIndexBuffer->unmap();
}
}
}

View File

@ -56,8 +56,8 @@ GrBufferAllocPool::~GrBufferAllocPool() {
VALIDATE();
if (fBlocks.count()) {
GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
if (buffer->isLocked()) {
buffer->unlock();
if (buffer->isMapped()) {
buffer->unmap();
}
}
while (!fBlocks.empty()) {
@ -79,8 +79,8 @@ void GrBufferAllocPool::reset() {
fBytesInUse = 0;
if (fBlocks.count()) {
GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
if (buffer->isLocked()) {
buffer->unlock();
if (buffer->isMapped()) {
buffer->unmap();
}
}
// fPreallocBuffersInUse will be decremented down to zero in the while loop
@ -101,16 +101,16 @@ void GrBufferAllocPool::reset() {
VALIDATE();
}
void GrBufferAllocPool::unlock() {
void GrBufferAllocPool::unmap() {
VALIDATE();
if (NULL != fBufferPtr) {
BufferBlock& block = fBlocks.back();
if (block.fBuffer->isLocked()) {
block.fBuffer->unlock();
if (block.fBuffer->isMapped()) {
block.fBuffer->unmap();
} else {
size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
flushCpuData(fBlocks.back().fBuffer, flushSize);
this->flushCpuData(fBlocks.back().fBuffer, flushSize);
}
fBufferPtr = NULL;
}
@ -121,18 +121,18 @@ void GrBufferAllocPool::unlock() {
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
if (NULL != fBufferPtr) {
SkASSERT(!fBlocks.empty());
if (fBlocks.back().fBuffer->isLocked()) {
if (fBlocks.back().fBuffer->isMapped()) {
GrGeometryBuffer* buf = fBlocks.back().fBuffer;
SkASSERT(buf->lockPtr() == fBufferPtr);
SkASSERT(buf->mapPtr() == fBufferPtr);
} else {
SkASSERT(fCpuData.get() == fBufferPtr);
}
} else {
SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isLocked());
SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
}
size_t bytesInUse = 0;
for (int i = 0; i < fBlocks.count() - 1; ++i) {
SkASSERT(!fBlocks[i].fBuffer->isLocked());
SkASSERT(!fBlocks[i].fBuffer->isMapped());
}
for (int i = 0; i < fBlocks.count(); ++i) {
size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
@ -236,9 +236,9 @@ void GrBufferAllocPool::putBack(size_t bytes) {
bytes -= bytesUsed;
fBytesInUse -= bytesUsed;
// if we locked a vb to satisfy the make space and we're releasing
// beyond it, then unlock it.
if (block.fBuffer->isLocked()) {
block.fBuffer->unlock();
// beyond it, then unmap it.
if (block.fBuffer->isMapped()) {
block.fBuffer->unmap();
}
this->destroyBlock();
} else {
@ -286,8 +286,8 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
if (NULL != fBufferPtr) {
SkASSERT(fBlocks.count() > 1);
BufferBlock& prev = fBlocks.fromBack(1);
if (prev.fBuffer->isLocked()) {
prev.fBuffer->unlock();
if (prev.fBuffer->isMapped()) {
prev.fBuffer->unmap();
} else {
flushCpuData(prev.fBuffer,
prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
@ -297,22 +297,22 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
SkASSERT(NULL == fBufferPtr);
// If the buffer is CPU-backed we lock it because it is free to do so and saves a copy.
// Otherwise when buffer locking is supported:
// a) If the frequently reset hint is set we only lock when the requested size meets a
// If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
// Otherwise when buffer mapping is supported:
// a) If the frequently reset hint is set we only map when the requested size meets a
// threshold (since we don't expect to see much more vertex data)
// b) If the hint is not set we lock if the buffer size is greater than the threshold.
bool attemptLock = block.fBuffer->isCPUBacked();
if (!attemptLock && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
// b) If the hint is not set we map if the buffer size is greater than the threshold.
bool attemptMap = block.fBuffer->isCPUBacked();
if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
if (fFrequentResetHint) {
attemptLock = requestSize > GR_GEOM_BUFFER_LOCK_THRESHOLD;
attemptMap = requestSize > GR_GEOM_BUFFER_MAP_THRESHOLD;
} else {
attemptLock = size > GR_GEOM_BUFFER_LOCK_THRESHOLD;
attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD;
}
}
if (attemptLock) {
fBufferPtr = block.fBuffer->lock();
if (attemptMap) {
fBufferPtr = block.fBuffer->map();
}
if (NULL == fBufferPtr) {
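
Condensed, the mapping decision above reads as follows (an annotated sketch of the same logic; size is the block size computed earlier in createBlock()):

    bool attemptMap = block.fBuffer->isCPUBacked();    // CPU-backed: mapping is free
    if (!attemptMap &&
        GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
        attemptMap = fFrequentResetHint
                   ? requestSize > GR_GEOM_BUFFER_MAP_THRESHOLD // expect little more data
                   : size > GR_GEOM_BUFFER_MAP_THRESHOLD;
    }
    if (attemptMap) {
        fBufferPtr = block.fBuffer->map();             // NULL falls through to fCpuData
    }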
@ -337,7 +337,7 @@ void GrBufferAllocPool::destroyBlock() {
--fPreallocBuffersInUse;
}
}
SkASSERT(!block.fBuffer->isLocked());
SkASSERT(!block.fBuffer->isMapped());
block.fBuffer->unref();
fBlocks.pop_back();
fBufferPtr = NULL;
@ -346,17 +346,17 @@ void GrBufferAllocPool::destroyBlock() {
void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer,
size_t flushSize) {
SkASSERT(NULL != buffer);
SkASSERT(!buffer->isLocked());
SkASSERT(!buffer->isMapped());
SkASSERT(fCpuData.get() == fBufferPtr);
SkASSERT(flushSize <= buffer->gpuMemorySize());
VALIDATE(true);
if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
flushSize > GR_GEOM_BUFFER_LOCK_THRESHOLD) {
void* data = buffer->lock();
flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) {
void* data = buffer->map();
if (NULL != data) {
memcpy(data, fBufferPtr, flushSize);
buffer->unlock();
buffer->unmap();
return;
}
}
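
The hunk elides flushCpuData()'s final fallback; assuming it ends with an updateData() call, the whole helper reads roughly:

    void GrBufferAllocPool::flushCpuData(GrGeometryBuffer* buffer, size_t flushSize) {
        // Prefer mapping for large flushes when the backend supports it.
        if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
            flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) {
            void* data = buffer->map();
            if (NULL != data) {
                memcpy(data, fBufferPtr, flushSize);
                buffer->unmap();
                return;
            }
        }
        buffer->updateData(fBufferPtr, flushSize);     // assumed fallback (not shown)
    }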

View File

@ -20,7 +20,7 @@ class GrGpu;
*
* The pool allows a client to make space for geometry and then put back excess
* space if it over allocated. When a client is ready to draw from the pool
* it calls unlock on the pool to ensure buffers are ready for drawing. The pool
* it calls unmap on the pool to ensure buffers are ready for drawing. The pool
* can be reset after drawing is completed to recycle space.
*
* At creation time a minimum per-buffer size can be specified. Additionally,
@ -30,10 +30,10 @@ class GrGpu;
class GrBufferAllocPool : SkNoncopyable {
public:
/**
* Ensures all buffers are unlocked and have all data written to them.
* Ensures all buffers are unmapped and have all data written to them.
* Call before drawing using buffers from the pool.
*/
void unlock();
void unmap();
/**
* Invalidates all the data in the pool, unrefs non-preallocated buffers.
@ -77,7 +77,7 @@ protected:
* @param gpu The GrGpu used to create the buffers.
* @param bufferType The type of buffers to create.
* @param frequentResetHint A hint that indicates that the pool
* should expect frequent unlock() calls
* should expect frequent unmap() calls
* (as opposed to many makeSpace / acquires
* between resets).
* @param bufferSize The minimum size of created buffers.
@ -109,11 +109,11 @@ protected:
* data is given to the caller. The buffer may or may not be locked. The
* returned ptr remains valid until any of the following:
* *makeSpace is called again.
* *unlock is called.
* *unmap is called.
* *reset is called.
* *this object is destroyed.
*
* Once unlock on the pool is called the data is guaranteed to be in the
* Once unmap on the pool is called the data is guaranteed to be in the
* buffer at the offset indicated by offset. Until that time it may be
* in temporary storage and/or the buffer may be locked.
*
@ -190,7 +190,7 @@ public:
*
* @param gpu The GrGpu used to create the vertex buffers.
* @param frequentResetHint A hint that indicates that the pool
* should expect frequent unlock() calls
* should expect frequent unmap() calls
* (as opposed to many makeSpace / acquires
* between resets).
* @param bufferSize The minimum size of created VBs. This value
@ -209,11 +209,11 @@ public:
* the vertices given to the caller. The buffer may or may not be locked.
* The returned ptr remains valid until any of the following:
* *makeSpace is called again.
* *unlock is called.
* *unmap is called.
* *reset is called.
* *this object is destroyed.
*
* Once unlock on the pool is called the vertices are guaranteed to be in
* Once unmap on the pool is called the vertices are guaranteed to be in
* the buffer at the offset indicated by startVertex. Until that time they
* may be in temporary storage and/or the buffer may be locked.
*
@ -278,7 +278,7 @@ public:
*
* @param gpu The GrGpu used to create the index buffers.
* @param frequentResetHint A hint that indicates that the pool
* should expect frequent unlock() calls
* should expect frequent unmap() calls
* (as opposed to many makeSpace / acquires
* between resets).
* @param bufferSize The minimum size of created IBs. This value
@ -297,11 +297,11 @@ public:
* the indices is given to the caller. The buffer may or may not be locked.
* The returned ptr remains valid until any of the following:
* *makeSpace is called again.
* *unlock is called.
* *unmap is called.
* *reset is called.
* *this object is destroyed.
*
* Once unlock on the pool is called the indices are guaranteed to be in the
* Once unmap on the pool is called the indices are guaranteed to be in the
* buffer at the offset indicated by startIndex. Until that time they may be
* in temporary storage and/or the buffer may be locked.
*
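
Putting the documented contract together, a client's use of the pool looks roughly like this (a sketch; pool, writeVertices(), and drawFrom() are hypothetical):

    const GrVertexBuffer* vb;
    int startVertex;
    void* verts = pool.makeSpace(vertexSize, vertexCount, &vb, &startVertex);
    writeVertices(verts, vertexCount);  // hypothetical: fill the reserved space
    pool.unmap();                       // data is now guaranteed to be in vb
    drawFrom(vb, startVertex);          // hypothetical draw using the buffer
    pool.reset();                       // recycle the space once drawing completes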

View File

@ -30,46 +30,46 @@ public:
/**
* Returns true if the buffer is a wrapper around a CPU array. If true it
* indicates that lock will always succeed and will be free.
* indicates that map will always succeed and will be free.
*/
bool isCPUBacked() const { return fCPUBacked; }
/**
* Locks the buffer to be written by the CPU.
* Maps the buffer to be written by the CPU.
*
* The previous content of the buffer is invalidated. It is an error
* to draw from the buffer while it is locked. It is an error to call lock
* on an already locked buffer. It may fail if the backend doesn't support
* locking the buffer. If the buffer is CPU backed then it will always
* succeed and is a free operation. Must be matched by an unlock() call.
* Currently only one lock at a time is supported (no nesting of
* lock/unlock).
* to draw from the buffer while it is mapped. It is an error to call map
* on an already mapped buffer. It may fail if the backend doesn't support
* mapping the buffer. If the buffer is CPU backed then it will always
* succeed and is a free operation. Must be matched by an unmap() call.
* Currently only one map at a time is supported (no nesting of
* map/unmap).
*
* @return a pointer to the data or NULL if the lock fails.
* @return a pointer to the data or NULL if the map fails.
*/
virtual void* lock() = 0;
virtual void* map() = 0;
/**
* Returns the same ptr that lock() returned at time of lock or NULL if the
* buffer is not locked.
* Returns the same ptr that map() returned at time of map or NULL if the
* buffer is not mapped.
*
* @return ptr to locked buffer data or undefined if buffer is not locked.
* @return ptr to mapped buffer data or undefined if buffer is not mapped.
*/
virtual void* lockPtr() const = 0;
virtual void* mapPtr() const = 0;
/**
* Unlocks the buffer.
* Unmaps the buffer.
*
* The pointer returned by the previous lock call will no longer be valid.
* The pointer returned by the previous map call will no longer be valid.
*/
virtual void unlock() = 0;
virtual void unmap() = 0;
/**
Queries whether the buffer has been locked.
Queries whether the buffer has been mapped.
@return true if the buffer is locked, false otherwise.
@return true if the buffer is mapped, false otherwise.
*/
virtual bool isLocked() const = 0;
virtual bool isMapped() const = 0;
/**
* Updates the buffer data.
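
The contract above implies the following usage pattern (a sketch for any GrGeometryBuffer; the asserts restate the documented rules):

    SkASSERT(!buffer->isMapped());  // calling map() on a mapped buffer is an error
    void* ptr = buffer->map();      // may fail unless the buffer is CPU-backed
    if (NULL != ptr) {
        SkASSERT(ptr == buffer->mapPtr());
        // Write-only access; drawing from the buffer here would be an error.
        buffer->unmap();            // every successful map() needs a matching unmap()
    }
    SkASSERT(!buffer->isMapped());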

View File

@ -303,10 +303,10 @@ const GrIndexBuffer* GrGpu::getQuadIndexBuffer() const {
GrGpu* me = const_cast<GrGpu*>(this);
fQuadIndexBuffer = me->createIndexBuffer(SIZE, false);
if (NULL != fQuadIndexBuffer) {
uint16_t* indices = (uint16_t*)fQuadIndexBuffer->lock();
uint16_t* indices = (uint16_t*)fQuadIndexBuffer->map();
if (NULL != indices) {
fill_indices(indices, MAX_QUADS);
fQuadIndexBuffer->unlock();
fQuadIndexBuffer->unmap();
} else {
indices = (uint16_t*)sk_malloc_throw(SIZE);
fill_indices(indices, MAX_QUADS);
@ -422,12 +422,12 @@ void GrGpu::onDrawPaths(int pathCount, const GrPath** paths,
void GrGpu::finalizeReservedVertices() {
SkASSERT(NULL != fVertexPool);
fVertexPool->unlock();
fVertexPool->unmap();
}
void GrGpu::finalizeReservedIndices() {
SkASSERT(NULL != fIndexPool);
fIndexPool->unlock();
fIndexPool->unmap();
}
void GrGpu::prepareVertexPool() {
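
fill_indices() itself is not shown by the hunks; a plausible sketch of the two-triangles-per-quad pattern it writes (hypothetical reconstruction):

    static void fill_indices(uint16_t* indices, int quadCount) {
        // Each quad: 4 vertices, 6 indices forming two triangles.
        for (int i = 0; i < quadCount; ++i) {
            indices[6 * i + 0] = 4 * i + 0;
            indices[6 * i + 1] = 4 * i + 1;
            indices[6 * i + 2] = 4 * i + 2;
            indices[6 * i + 3] = 4 * i + 0;
            indices[6 * i + 4] = 4 * i + 2;
            indices[6 * i + 5] = 4 * i + 3;
        }
    }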

View File

@ -101,8 +101,8 @@ public:
*
* @param size size in bytes of the vertex buffer
* @param dynamic hints whether the data will be frequently changed
* by either GrVertexBuffer::lock or
* GrVertexBuffer::updateData.
* by either GrVertexBuffer::map() or
* GrVertexBuffer::updateData().
*
* @return The vertex buffer if successful, otherwise NULL.
*/
@ -113,8 +113,8 @@ public:
*
* @param size size in bytes of the index buffer
* @param dynamic hints whether the data will be frequently changed
* by either GrIndexBuffer::lock or
* GrIndexBuffer::updateData.
* by either GrIndexBuffer::map() or
* GrIndexBuffer::updateData().
*
* @return The index buffer if successful, otherwise NULL.
*/

View File

@ -569,8 +569,8 @@ void GrInOrderDrawBuffer::flush() {
GrAutoTRestore<bool> flushRestore(&fFlushing);
fFlushing = true;
fVertexPool.unlock();
fIndexPool.unlock();
fVertexPool.unmap();
fIndexPool.unmap();
GrDrawTarget::AutoClipRestore acr(fDstGpu);
AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);

View File

@ -23,7 +23,7 @@
GrGLBufferImpl::GrGLBufferImpl(GrGpuGL* gpu, const Desc& desc, GrGLenum bufferType)
: fDesc(desc)
, fBufferType(bufferType)
, fLockPtr(NULL) {
, fMapPtr(NULL) {
if (0 == desc.fID) {
fCPUData = sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW);
fGLSizeInBytes = 0;
@ -52,14 +52,14 @@ void GrGLBufferImpl::release(GrGpuGL* gpu) {
fDesc.fID = 0;
fGLSizeInBytes = 0;
}
fLockPtr = NULL;
fMapPtr = NULL;
VALIDATE();
}
void GrGLBufferImpl::abandon() {
fDesc.fID = 0;
fGLSizeInBytes = 0;
fLockPtr = NULL;
fMapPtr = NULL;
sk_free(fCPUData);
fCPUData = NULL;
VALIDATE();
@ -76,11 +76,11 @@ void GrGLBufferImpl::bind(GrGpuGL* gpu) const {
VALIDATE();
}
void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
void* GrGLBufferImpl::map(GrGpuGL* gpu) {
VALIDATE();
SkASSERT(!this->isLocked());
SkASSERT(!this->isMapped());
if (0 == fDesc.fID) {
fLockPtr = fCPUData;
fMapPtr = fCPUData;
} else {
switch (gpu->glCaps().mapBufferType()) {
case GrGLCaps::kNone_MapBufferType:
@ -95,7 +95,7 @@ void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
BufferData(fBufferType, fGLSizeInBytes, NULL,
fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
GR_GL_CALL_RET(gpu->glInterface(), fLockPtr,
GR_GL_CALL_RET(gpu->glInterface(), fMapPtr,
MapBuffer(fBufferType, GR_GL_WRITE_ONLY));
break;
case GrGLCaps::kMapBufferRange_MapBufferType: {
@ -110,7 +110,7 @@ void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
static const GrGLbitfield kAccess = GR_GL_MAP_INVALIDATE_BUFFER_BIT |
GR_GL_MAP_WRITE_BIT;
GR_GL_CALL_RET(gpu->glInterface(),
fLockPtr,
fMapPtr,
MapBufferRange(fBufferType, 0, fGLSizeInBytes, kAccess));
break;
}
@ -124,18 +124,18 @@ void* GrGLBufferImpl::lock(GrGpuGL* gpu) {
fDesc.fDynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
}
GR_GL_CALL_RET(gpu->glInterface(),
fLockPtr,
fMapPtr,
MapBufferSubData(fBufferType, 0, fGLSizeInBytes, GR_GL_WRITE_ONLY));
break;
}
}
VALIDATE();
return fLockPtr;
return fMapPtr;
}
void GrGLBufferImpl::unlock(GrGpuGL* gpu) {
void GrGLBufferImpl::unmap(GrGpuGL* gpu) {
VALIDATE();
SkASSERT(this->isLocked());
SkASSERT(this->isMapped());
if (0 != fDesc.fID) {
switch (gpu->glCaps().mapBufferType()) {
case GrGLCaps::kNone_MapBufferType:
@ -148,20 +148,20 @@ void GrGLBufferImpl::unlock(GrGpuGL* gpu) {
break;
case GrGLCaps::kChromium_MapBufferType:
this->bind(gpu);
GR_GL_CALL(gpu->glInterface(), UnmapBufferSubData(fLockPtr));
GR_GL_CALL(gpu->glInterface(), UnmapBufferSubData(fMapPtr));
break;
}
}
fLockPtr = NULL;
fMapPtr = NULL;
}
bool GrGLBufferImpl::isLocked() const {
bool GrGLBufferImpl::isMapped() const {
VALIDATE();
return NULL != fLockPtr;
return NULL != fMapPtr;
}
bool GrGLBufferImpl::updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInBytes) {
SkASSERT(!this->isLocked());
SkASSERT(!this->isMapped());
VALIDATE();
if (srcSizeInBytes > fDesc.fSizeInBytes) {
return false;
@ -190,7 +190,7 @@ bool GrGLBufferImpl::updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInB
#else
// Note that we're cheating on the size here. Currently no methods
// allow a partial update that preserves contents of non-updated
// portions of the buffer (lock() does a glBufferData(..size, NULL..))
// portions of the buffer (map() does a glBufferData(..size, NULL..))
bool doSubData = false;
#if GR_GL_MAC_BUFFER_OBJECT_PERFOMANCE_WORKAROUND
static int N = 0;
@ -221,6 +221,6 @@ void GrGLBufferImpl::validate() const {
// SkASSERT((0 == fDesc.fID) == (NULL != fCPUData));
SkASSERT(0 != fDesc.fID || !fDesc.fIsWrapped);
SkASSERT(NULL == fCPUData || 0 == fGLSizeInBytes);
SkASSERT(NULL == fLockPtr || NULL != fCPUData || fGLSizeInBytes == fDesc.fSizeInBytes);
SkASSERT(NULL == fCPUData || NULL == fLockPtr || fCPUData == fLockPtr);
SkASSERT(NULL == fMapPtr || NULL != fCPUData || fGLSizeInBytes == fDesc.fSizeInBytes);
SkASSERT(NULL == fCPUData || NULL == fMapPtr || fCPUData == fMapPtr);
}
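
The switch above selects among the backend's mapping paths. In outline (the kMapBuffer_MapBufferType case name is assumed from context, since its hunk is elided):

    switch (gpu->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            break;  // no GL mapping support; fMapPtr stays NULL
        case GrGLCaps::kMapBuffer_MapBufferType:
            // Orphan via BufferData(..., NULL, ...), then MapBuffer(GR_GL_WRITE_ONLY).
            break;
        case GrGLCaps::kMapBufferRange_MapBufferType:
            // MapBufferRange with GR_GL_MAP_INVALIDATE_BUFFER_BIT | GR_GL_MAP_WRITE_BIT.
            break;
        case GrGLCaps::kChromium_MapBufferType:
            // CHROMIUM extension: MapBufferSubData now, UnmapBufferSubData in unmap().
            break;
    }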

View File

@ -40,10 +40,10 @@ public:
void bind(GrGpuGL* gpu) const;
void* lock(GrGpuGL* gpu);
void* lockPtr() const { return fLockPtr; }
void unlock(GrGpuGL* gpu);
bool isLocked() const;
void* map(GrGpuGL* gpu);
void* mapPtr() const { return fMapPtr; }
void unmap(GrGpuGL* gpu);
bool isMapped() const;
bool updateData(GrGpuGL* gpu, const void* src, size_t srcSizeInBytes);
private:
@ -52,7 +52,7 @@ private:
Desc fDesc;
GrGLenum fBufferType; // GL_ARRAY_BUFFER or GL_ELEMENT_ARRAY_BUFFER
void* fCPUData;
void* fLockPtr;
void* fMapPtr;
size_t fGLSizeInBytes; // In certain cases we make the size of the GL buffer object
// smaller or larger than the size in fDesc.

View File

@ -26,26 +26,26 @@ void GrGLIndexBuffer::onAbandon() {
INHERITED::onAbandon();
}
void* GrGLIndexBuffer::lock() {
void* GrGLIndexBuffer::map() {
if (!this->wasDestroyed()) {
return fImpl.lock(this->getGpuGL());
return fImpl.map(this->getGpuGL());
} else {
return NULL;
}
}
void* GrGLIndexBuffer::lockPtr() const {
return fImpl.lockPtr();
void* GrGLIndexBuffer::mapPtr() const {
return fImpl.mapPtr();
}
void GrGLIndexBuffer::unlock() {
void GrGLIndexBuffer::unmap() {
if (!this->wasDestroyed()) {
fImpl.unlock(this->getGpuGL());
fImpl.unmap(this->getGpuGL());
}
}
bool GrGLIndexBuffer::isLocked() const {
return fImpl.isLocked();
bool GrGLIndexBuffer::isMapped() const {
return fImpl.isMapped();
}
bool GrGLIndexBuffer::updateData(const void* src, size_t srcSizeInBytes) {

View File

@ -32,10 +32,10 @@ public:
}
// overrides of GrIndexBuffer
virtual void* lock();
virtual void* lockPtr() const;
virtual void unlock();
virtual bool isLocked() const;
virtual void* map() SK_OVERRIDE;
virtual void* mapPtr() const SK_OVERRIDE;
virtual void unmap() SK_OVERRIDE;
virtual bool isMapped() const SK_OVERRIDE;
virtual bool updateData(const void* src, size_t srcSizeInBytes);
protected:

View File

@ -27,26 +27,26 @@ void GrGLVertexBuffer::onAbandon() {
INHERITED::onAbandon();
}
void* GrGLVertexBuffer::lock() {
void* GrGLVertexBuffer::map() {
if (!this->wasDestroyed()) {
return fImpl.lock(this->getGpuGL());
return fImpl.map(this->getGpuGL());
} else {
return NULL;
}
}
void* GrGLVertexBuffer::lockPtr() const {
return fImpl.lockPtr();
void* GrGLVertexBuffer::mapPtr() const {
return fImpl.mapPtr();
}
void GrGLVertexBuffer::unlock() {
void GrGLVertexBuffer::unmap() {
if (!this->wasDestroyed()) {
fImpl.unlock(this->getGpuGL());
fImpl.unmap(this->getGpuGL());
}
}
bool GrGLVertexBuffer::isLocked() const {
return fImpl.isLocked();
bool GrGLVertexBuffer::isMapped() const {
return fImpl.isMapped();
}
bool GrGLVertexBuffer::updateData(const void* src, size_t srcSizeInBytes) {

View File

@ -32,10 +32,10 @@ public:
}
// overrides of GrVertexBuffer
virtual void* lock();
virtual void* lockPtr() const;
virtual void unlock();
virtual bool isLocked() const;
virtual void* map() SK_OVERRIDE;
virtual void* mapPtr() const SK_OVERRIDE;
virtual void unmap() SK_OVERRIDE;
virtual bool isMapped() const SK_OVERRIDE;
virtual bool updateData(const void* src, size_t srcSizeInBytes);
protected:

View File

@ -313,7 +313,7 @@ void GrGpuGL::setupGeometry(const DrawInfo& info, size_t* indexOffsetInBytes) {
}
SkASSERT(NULL != vbuf);
SkASSERT(!vbuf->isLocked());
SkASSERT(!vbuf->isMapped());
vertexOffsetInBytes += vbuf->baseOffset();
GrGLIndexBuffer* ibuf = NULL;
@ -337,7 +337,7 @@ void GrGpuGL::setupGeometry(const DrawInfo& info, size_t* indexOffsetInBytes) {
}
SkASSERT(NULL != ibuf);
SkASSERT(!ibuf->isLocked());
SkASSERT(!ibuf->isMapped());
*indexOffsetInBytes += ibuf->baseOffset();
}
GrGLAttribArrayState* attribState =