Consolidate GPU buffer implementations
Consolidates all the different buffer implementations into a single GrBuffer class. This will allow us to add new buffer types, use DSA in OpenGL, track buffer bindings by unique ID, cache buffers without respect to the type of data they have been used for previously, etc. This change is strictly a refactor; it introduces no change in functionality. BUG=skia: GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1825393002 Review URL: https://codereview.chromium.org/1825393002
This commit is contained in:
parent
6b3eacb0df
commit
8b1bff2967
15
gyp/gpu.gypi
15
gyp/gpu.gypi
@ -71,6 +71,7 @@
|
||||
'<(skia_src_path)/gpu/GrBlend.cpp',
|
||||
'<(skia_src_path)/gpu/GrBlurUtils.cpp',
|
||||
'<(skia_src_path)/gpu/GrBlurUtils.h',
|
||||
'<(skia_src_path)/gpu/GrBuffer.h',
|
||||
'<(skia_src_path)/gpu/GrBufferAllocPool.cpp',
|
||||
'<(skia_src_path)/gpu/GrBufferAllocPool.h',
|
||||
'<(skia_src_path)/gpu/GrCaps.cpp',
|
||||
@ -90,7 +91,6 @@
|
||||
'<(skia_src_path)/gpu/GrDrawTarget.cpp',
|
||||
'<(skia_src_path)/gpu/GrDrawTarget.h',
|
||||
'<(skia_src_path)/gpu/GrFragmentProcessor.cpp',
|
||||
'<(skia_src_path)/gpu/GrGeometryBuffer.h',
|
||||
'<(skia_src_path)/gpu/GrGeometryProcessor.h',
|
||||
'<(skia_src_path)/gpu/GrGlyph.h',
|
||||
'<(skia_src_path)/gpu/GrGpu.cpp',
|
||||
@ -102,7 +102,6 @@
|
||||
'<(skia_src_path)/gpu/GrGpuFactory.h',
|
||||
'<(skia_src_path)/gpu/GrImageIDTextureAdjuster.cpp',
|
||||
'<(skia_src_path)/gpu/GrImageIDTextureAdjuster.h',
|
||||
'<(skia_src_path)/gpu/GrIndexBuffer.h',
|
||||
'<(skia_src_path)/gpu/GrInvariantOutput.cpp',
|
||||
'<(skia_src_path)/gpu/GrLayerAtlas.cpp',
|
||||
'<(skia_src_path)/gpu/GrLayerAtlas.h',
|
||||
@ -189,9 +188,7 @@
|
||||
'<(skia_src_path)/gpu/GrTextureToYUVPlanes.cpp',
|
||||
'<(skia_src_path)/gpu/GrTextureToYUVPlanes.h',
|
||||
'<(skia_src_path)/gpu/GrTextureAccess.cpp',
|
||||
'<(skia_src_path)/gpu/GrTransferBuffer.h',
|
||||
'<(skia_src_path)/gpu/GrTRecorder.h',
|
||||
'<(skia_src_path)/gpu/GrVertexBuffer.h',
|
||||
'<(skia_src_path)/gpu/GrXferProcessor.cpp',
|
||||
'<(skia_src_path)/gpu/GrYUVProvider.cpp',
|
||||
'<(skia_src_path)/gpu/GrYUVProvider.h',
|
||||
@ -314,8 +311,8 @@
|
||||
|
||||
'<(skia_src_path)/gpu/gl/GrGLAssembleInterface.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLAssembleInterface.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLBufferImpl.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLBufferImpl.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLBuffer.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLBuffer.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLCaps.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLCaps.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLContext.cpp',
|
||||
@ -329,8 +326,6 @@
|
||||
'<(skia_src_path)/gpu/gl/GrGLGpu.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLGpuProgramCache.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLExtensions.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLIndexBuffer.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLIndexBuffer.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLInterface.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLIRect.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLPath.cpp',
|
||||
@ -355,8 +350,6 @@
|
||||
'<(skia_src_path)/gpu/gl/GrGLTexture.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLTextureRenderTarget.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLTextureRenderTarget.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLTransferBuffer.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLTransferBuffer.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLUtil.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLUtil.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLUniformHandler.cpp',
|
||||
@ -365,8 +358,6 @@
|
||||
'<(skia_src_path)/gpu/gl/GrGLVaryingHandler.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLVertexArray.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLVertexArray.h',
|
||||
'<(skia_src_path)/gpu/gl/GrGLVertexBuffer.cpp',
|
||||
'<(skia_src_path)/gpu/gl/GrGLVertexBuffer.h',
|
||||
|
||||
# Files for building GLSL shaders
|
||||
'<(skia_src_path)/gpu/gl/builders/GrGLProgramBuilder.cpp',
|
||||
|
@ -247,9 +247,9 @@ public:
|
||||
return fDrawPathMasksToCompressedTextureSupport;
|
||||
}
|
||||
|
||||
size_t geometryBufferMapThreshold() const {
|
||||
SkASSERT(fGeometryBufferMapThreshold >= 0);
|
||||
return fGeometryBufferMapThreshold;
|
||||
size_t bufferMapThreshold() const {
|
||||
SkASSERT(fBufferMapThreshold >= 0);
|
||||
return fBufferMapThreshold;
|
||||
}
|
||||
|
||||
bool supportsInstancedDraws() const {
|
||||
@ -301,7 +301,7 @@ protected:
|
||||
GR_STATIC_ASSERT(kLast_GrBlendEquation < 32);
|
||||
|
||||
uint32_t fMapBufferFlags;
|
||||
int fGeometryBufferMapThreshold;
|
||||
int fBufferMapThreshold;
|
||||
|
||||
int fMaxRenderTargetSize;
|
||||
int fMaxVertexAttributes;
|
||||
|
@ -17,7 +17,7 @@ struct GrContextOptions {
|
||||
, fMaxTextureSizeOverride(SK_MaxS32)
|
||||
, fMaxTileSizeOverride(0)
|
||||
, fSuppressDualSourceBlending(false)
|
||||
, fGeometryBufferMapThreshold(-1)
|
||||
, fBufferMapThreshold(-1)
|
||||
, fUseDrawInsteadOfPartialRenderTargetWrite(false)
|
||||
, fImmediateMode(false)
|
||||
, fClipBatchToBounds(false)
|
||||
@ -47,7 +47,7 @@ struct GrContextOptions {
|
||||
/** the threshold in bytes above which we will use a buffer mapping API to map vertex and index
|
||||
buffers to CPU memory in order to update them. A value of -1 means the GrContext should
|
||||
deduce the optimal value for this platform. */
|
||||
int fGeometryBufferMapThreshold;
|
||||
int fBufferMapThreshold;
|
||||
|
||||
/** some gpus have problems with partial writes of the rendertarget */
|
||||
bool fUseDrawInsteadOfPartialRenderTargetWrite;
|
||||
|
@ -401,13 +401,29 @@ private:
|
||||
};
|
||||
|
||||
/**
|
||||
* Indicates the transfer direction for a transfer buffer
|
||||
*/
|
||||
enum TransferType {
|
||||
/** Caller intends to use the buffer to transfer data to the GPU */
|
||||
kCpuToGpu_TransferType,
|
||||
/** Caller intends to use the buffer to transfer data from the GPU */
|
||||
kGpuToCpu_TransferType
|
||||
* Indicates the type of data that a GPU buffer will be used for.
|
||||
*/
|
||||
enum GrBufferType {
|
||||
kVertex_GrBufferType,
|
||||
kIndex_GrBufferType,
|
||||
kXferCpuToGpu_GrBufferType,
|
||||
kXferGpuToCpu_GrBufferType,
|
||||
|
||||
kLast_GrBufferType = kXferGpuToCpu_GrBufferType
|
||||
};
|
||||
|
||||
/**
|
||||
* Provides a performance hint regarding the frequency at which a data store will be accessed.
|
||||
*/
|
||||
enum GrAccessPattern {
|
||||
/** Data store will be respecified repeatedly and used many times. */
|
||||
kDynamic_GrAccessPattern,
|
||||
/** Data store will be specified once and used many times. (Thus disqualified from caching.) */
|
||||
kStatic_GrAccessPattern,
|
||||
/** Data store will be specified once and used at most a few times. (Also can't be cached.) */
|
||||
kStream_GrAccessPattern,
|
||||
|
||||
kLast_GrAccessPattern = kStream_GrAccessPattern
|
||||
};
|
||||
|
||||
|
||||
|
@ -9,7 +9,6 @@
|
||||
#include "GrBatchFlushState.h"
|
||||
#include "GrRectanizer.h"
|
||||
#include "GrTracing.h"
|
||||
#include "GrVertexBuffer.h"
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
|
@ -20,11 +20,11 @@ GrBatchFlushState::GrBatchFlushState(GrGpu* gpu, GrResourceProvider* resourcePro
|
||||
, fLastFlushedToken(0) {}
|
||||
|
||||
void* GrBatchFlushState::makeVertexSpace(size_t vertexSize, int vertexCount,
|
||||
const GrVertexBuffer** buffer, int* startVertex) {
|
||||
const GrBuffer** buffer, int* startVertex) {
|
||||
return fVertexPool.makeSpace(vertexSize, vertexCount, buffer, startVertex);
|
||||
}
|
||||
|
||||
uint16_t* GrBatchFlushState::makeIndexSpace(int indexCount,
|
||||
const GrIndexBuffer** buffer, int* startIndex) {
|
||||
const GrBuffer** buffer, int* startIndex) {
|
||||
return reinterpret_cast<uint16_t*>(fIndexPool.makeSpace(indexCount, buffer, startIndex));
|
||||
}
|
||||
|
@ -76,8 +76,8 @@ public:
|
||||
GrBatchToken asapToken() const { return fLastFlushedToken + 1; }
|
||||
|
||||
void* makeVertexSpace(size_t vertexSize, int vertexCount,
|
||||
const GrVertexBuffer** buffer, int* startVertex);
|
||||
uint16_t* makeIndexSpace(int indexCount, const GrIndexBuffer** buffer, int* startIndex);
|
||||
const GrBuffer** buffer, int* startVertex);
|
||||
uint16_t* makeIndexSpace(int indexCount, const GrBuffer** buffer, int* startIndex);
|
||||
|
||||
/** This is called after each batch has a chance to prepare its draws and before the draws
|
||||
are issued. */
|
||||
@ -172,11 +172,11 @@ public:
|
||||
}
|
||||
|
||||
void* makeVertexSpace(size_t vertexSize, int vertexCount,
|
||||
const GrVertexBuffer** buffer, int* startVertex) {
|
||||
const GrBuffer** buffer, int* startVertex) {
|
||||
return this->state()->makeVertexSpace(vertexSize, vertexCount, buffer, startVertex);
|
||||
}
|
||||
|
||||
uint16_t* makeIndexSpace(int indexCount, const GrIndexBuffer** buffer, int* startIndex) {
|
||||
uint16_t* makeIndexSpace(int indexCount, const GrBuffer** buffer, int* startIndex) {
|
||||
return this->state()->makeIndexSpace(indexCount, buffer, startIndex);
|
||||
}
|
||||
|
||||
|
145
src/gpu/GrBuffer.h
Normal file
145
src/gpu/GrBuffer.h
Normal file
@ -0,0 +1,145 @@
|
||||
/*
|
||||
* Copyright 2016 Google Inc.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file.
|
||||
*/
|
||||
|
||||
#ifndef GrBuffer_DEFINED
|
||||
#define GrBuffer_DEFINED
|
||||
|
||||
#include "GrGpuResource.h"
|
||||
|
||||
class GrGpu;
|
||||
|
||||
class GrBuffer : public GrGpuResource {
|
||||
public:
|
||||
/**
|
||||
* Computes a scratch key for a buffer with a "dynamic" access pattern. (Buffers with "static"
|
||||
* and "stream" access patterns are disqualified by nature from being cached and reused.)
|
||||
*/
|
||||
static void ComputeScratchKeyForDynamicBuffer(GrBufferType type, size_t size,
|
||||
GrScratchKey* key) {
|
||||
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
|
||||
GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
|
||||
// TODO: There's not always reason to cache a buffer by type. In some (all?) APIs it's just
|
||||
// a chunk of memory we can use/reuse for any type of data. We really only need to
|
||||
// differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types.
|
||||
builder[0] = type;
|
||||
builder[1] = (uint32_t)size;
|
||||
if (sizeof(size_t) > 4) {
|
||||
builder[2] = (uint32_t)((uint64_t)size >> 32);
|
||||
}
|
||||
}
|
||||
|
||||
GrBufferType type() const { return fType; }
|
||||
|
||||
GrAccessPattern accessPattern() const { return fAccessPattern; }
|
||||
|
||||
/**
|
||||
* Returns true if the buffer is a wrapper around a CPU array. If true it
|
||||
* indicates that map will always succeed and will be free.
|
||||
*/
|
||||
bool isCPUBacked() const { return fCPUBacked; }
|
||||
|
||||
/**
|
||||
* Maps the buffer to be written by the CPU.
|
||||
*
|
||||
* The previous content of the buffer is invalidated. It is an error
|
||||
* to draw from the buffer while it is mapped. It may fail if the backend
|
||||
* doesn't support mapping the buffer. If the buffer is CPU backed then
|
||||
* it will always succeed and is a free operation. Once a buffer is mapped,
|
||||
* subsequent calls to map() are ignored.
|
||||
*
|
||||
* Note that buffer mapping does not go through GrContext and therefore is
|
||||
* not serialized with other operations.
|
||||
*
|
||||
* @return a pointer to the data or nullptr if the map fails.
|
||||
*/
|
||||
void* map() {
|
||||
if (!fMapPtr) {
|
||||
this->onMap();
|
||||
}
|
||||
return fMapPtr;
|
||||
}
|
||||
|
||||
/**
|
||||
* Unmaps the buffer.
|
||||
*
|
||||
* The pointer returned by the previous map call will no longer be valid.
|
||||
*/
|
||||
void unmap() {
|
||||
SkASSERT(fMapPtr);
|
||||
this->onUnmap();
|
||||
fMapPtr = nullptr;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the same ptr that map() returned at time of map or nullptr if the
|
||||
* is not mapped.
|
||||
*
|
||||
* @return ptr to mapped buffer data or nullptr if buffer is not mapped.
|
||||
*/
|
||||
void* mapPtr() const { return fMapPtr; }
|
||||
|
||||
/**
|
||||
Queries whether the buffer has been mapped.
|
||||
|
||||
@return true if the buffer is mapped, false otherwise.
|
||||
*/
|
||||
bool isMapped() const { return SkToBool(fMapPtr); }
|
||||
|
||||
/**
|
||||
* Updates the buffer data.
|
||||
*
|
||||
* The size of the buffer will be preserved. The src data will be
|
||||
* placed at the beginning of the buffer and any remaining contents will
|
||||
* be undefined. srcSizeInBytes must be <= to the buffer size.
|
||||
*
|
||||
* The buffer must not be mapped.
|
||||
*
|
||||
* Note that buffer updates do not go through GrContext and therefore are
|
||||
* not serialized with other operations.
|
||||
*
|
||||
* @return returns true if the update succeeds, false otherwise.
|
||||
*/
|
||||
bool updateData(const void* src, size_t srcSizeInBytes) {
|
||||
SkASSERT(!this->isMapped());
|
||||
SkASSERT(srcSizeInBytes <= fGpuMemorySize);
|
||||
return this->onUpdateData(src, srcSizeInBytes);
|
||||
}
|
||||
|
||||
protected:
|
||||
GrBuffer(GrGpu* gpu, GrBufferType type, size_t gpuMemorySize, GrAccessPattern accessPattern,
|
||||
bool cpuBacked)
|
||||
: INHERITED(gpu, kCached_LifeCycle),
|
||||
fMapPtr(nullptr),
|
||||
fType(type),
|
||||
fGpuMemorySize(gpuMemorySize), // TODO: Zero for cpu backed buffers?
|
||||
fAccessPattern(accessPattern),
|
||||
fCPUBacked(cpuBacked) {
|
||||
if (!fCPUBacked && SkIsPow2(fGpuMemorySize) && kDynamic_GrAccessPattern == fAccessPattern) {
|
||||
GrScratchKey key;
|
||||
ComputeScratchKeyForDynamicBuffer(fType, fGpuMemorySize, &key);
|
||||
this->setScratchKey(key);
|
||||
}
|
||||
}
|
||||
|
||||
void* fMapPtr;
|
||||
|
||||
private:
|
||||
virtual size_t onGpuMemorySize() const { return fGpuMemorySize; }
|
||||
|
||||
virtual void onMap() = 0;
|
||||
virtual void onUnmap() = 0;
|
||||
virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0;
|
||||
|
||||
GrBufferType fType;
|
||||
size_t fGpuMemorySize;
|
||||
GrAccessPattern fAccessPattern;
|
||||
bool fCPUBacked;
|
||||
|
||||
typedef GrGpuResource INHERITED;
|
||||
};
|
||||
|
||||
#endif
|
@ -8,13 +8,12 @@
|
||||
|
||||
|
||||
#include "GrBufferAllocPool.h"
|
||||
#include "GrBuffer.h"
|
||||
#include "GrCaps.h"
|
||||
#include "GrContext.h"
|
||||
#include "GrGpu.h"
|
||||
#include "GrIndexBuffer.h"
|
||||
#include "GrResourceProvider.h"
|
||||
#include "GrTypes.h"
|
||||
#include "GrVertexBuffer.h"
|
||||
|
||||
#include "SkTraceEvent.h"
|
||||
|
||||
@ -41,7 +40,7 @@ do {
|
||||
} while (false)
|
||||
|
||||
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
|
||||
BufferType bufferType,
|
||||
GrBufferType bufferType,
|
||||
size_t blockSize)
|
||||
: fBlocks(8) {
|
||||
|
||||
@ -53,12 +52,12 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
|
||||
|
||||
fBytesInUse = 0;
|
||||
|
||||
fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
|
||||
fBufferMapThreshold = gpu->caps()->bufferMapThreshold();
|
||||
}
|
||||
|
||||
void GrBufferAllocPool::deleteBlocks() {
|
||||
if (fBlocks.count()) {
|
||||
GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
|
||||
GrBuffer* buffer = fBlocks.back().fBuffer;
|
||||
if (buffer->isMapped()) {
|
||||
UNMAP_BUFFER(fBlocks.back());
|
||||
}
|
||||
@ -109,7 +108,7 @@ void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
|
||||
if (fBufferPtr) {
|
||||
SkASSERT(!fBlocks.empty());
|
||||
if (fBlocks.back().fBuffer->isMapped()) {
|
||||
GrGeometryBuffer* buf = fBlocks.back().fBuffer;
|
||||
GrBuffer* buf = fBlocks.back().fBuffer;
|
||||
SkASSERT(buf->mapPtr() == fBufferPtr);
|
||||
} else {
|
||||
SkASSERT(fCpuData == fBufferPtr);
|
||||
@ -145,7 +144,7 @@ void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
|
||||
|
||||
void* GrBufferAllocPool::makeSpace(size_t size,
|
||||
size_t alignment,
|
||||
const GrGeometryBuffer** buffer,
|
||||
const GrBuffer** buffer,
|
||||
size_t* offset) {
|
||||
VALIDATE();
|
||||
|
||||
@ -252,7 +251,7 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
|
||||
// threshold.
|
||||
bool attemptMap = block.fBuffer->isCPUBacked();
|
||||
if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
|
||||
attemptMap = size > fGeometryBufferMapThreshold;
|
||||
attemptMap = size > fBufferMapThreshold;
|
||||
}
|
||||
|
||||
if (attemptMap) {
|
||||
@ -295,7 +294,7 @@ void* GrBufferAllocPool::resetCpuData(size_t newSize) {
|
||||
|
||||
|
||||
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
|
||||
GrGeometryBuffer* buffer = block.fBuffer;
|
||||
GrBuffer* buffer = block.fBuffer;
|
||||
SkASSERT(buffer);
|
||||
SkASSERT(!buffer->isMapped());
|
||||
SkASSERT(fCpuData == fBufferPtr);
|
||||
@ -303,7 +302,7 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
|
||||
VALIDATE(true);
|
||||
|
||||
if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
|
||||
flushSize > fGeometryBufferMapThreshold) {
|
||||
flushSize > fBufferMapThreshold) {
|
||||
void* data = buffer->map();
|
||||
if (data) {
|
||||
memcpy(data, fBufferPtr, flushSize);
|
||||
@ -315,30 +314,24 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
|
||||
VALIDATE(true);
|
||||
}
|
||||
|
||||
GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {
|
||||
GrBuffer* GrBufferAllocPool::getBuffer(size_t size) {
|
||||
|
||||
GrResourceProvider* rp = fGpu->getContext()->resourceProvider();
|
||||
|
||||
static const GrResourceProvider::BufferUsage kUsage = GrResourceProvider::kDynamic_BufferUsage;
|
||||
// Shouldn't have to use this flag (https://bug.skia.org/4156)
|
||||
static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
|
||||
if (kIndex_BufferType == fBufferType) {
|
||||
return rp->createIndexBuffer(size, kUsage, kFlags);
|
||||
} else {
|
||||
SkASSERT(kVertex_BufferType == fBufferType);
|
||||
return rp->createVertexBuffer(size, kUsage, kFlags);
|
||||
}
|
||||
return rp->createBuffer(fBufferType, size, kDynamic_GrAccessPattern, kFlags);
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
|
||||
: GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
|
||||
: GrBufferAllocPool(gpu, kVertex_GrBufferType, MIN_VERTEX_BUFFER_SIZE) {
|
||||
}
|
||||
|
||||
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
|
||||
int vertexCount,
|
||||
const GrVertexBuffer** buffer,
|
||||
const GrBuffer** buffer,
|
||||
int* startVertex) {
|
||||
|
||||
SkASSERT(vertexCount >= 0);
|
||||
@ -346,13 +339,11 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
|
||||
SkASSERT(startVertex);
|
||||
|
||||
size_t offset = 0; // assign to suppress warning
|
||||
const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
|
||||
void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
|
||||
vertexSize,
|
||||
&geomBuffer,
|
||||
buffer,
|
||||
&offset);
|
||||
|
||||
*buffer = (const GrVertexBuffer*) geomBuffer;
|
||||
SkASSERT(0 == offset % vertexSize);
|
||||
*startVertex = static_cast<int>(offset / vertexSize);
|
||||
return ptr;
|
||||
@ -361,11 +352,11 @@ void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
|
||||
: GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
|
||||
: GrBufferAllocPool(gpu, kIndex_GrBufferType, MIN_INDEX_BUFFER_SIZE) {
|
||||
}
|
||||
|
||||
void* GrIndexBufferAllocPool::makeSpace(int indexCount,
|
||||
const GrIndexBuffer** buffer,
|
||||
const GrBuffer** buffer,
|
||||
int* startIndex) {
|
||||
|
||||
SkASSERT(indexCount >= 0);
|
||||
@ -373,13 +364,11 @@ void* GrIndexBufferAllocPool::makeSpace(int indexCount,
|
||||
SkASSERT(startIndex);
|
||||
|
||||
size_t offset = 0; // assign to suppress warning
|
||||
const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
|
||||
void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
|
||||
sizeof(uint16_t),
|
||||
&geomBuffer,
|
||||
buffer,
|
||||
&offset);
|
||||
|
||||
*buffer = (const GrIndexBuffer*) geomBuffer;
|
||||
SkASSERT(0 == offset % sizeof(uint16_t));
|
||||
*startIndex = static_cast<int>(offset / sizeof(uint16_t));
|
||||
return ptr;
|
||||
|
@ -11,8 +11,9 @@
|
||||
#include "SkTArray.h"
|
||||
#include "SkTDArray.h"
|
||||
#include "SkTypes.h"
|
||||
#include "GrTypesPriv.h"
|
||||
|
||||
class GrGeometryBuffer;
|
||||
class GrBuffer;
|
||||
class GrGpu;
|
||||
|
||||
/**
|
||||
@ -46,16 +47,6 @@ public:
|
||||
void putBack(size_t bytes);
|
||||
|
||||
protected:
|
||||
/**
|
||||
* Used to determine what type of buffers to create. We could make the
|
||||
* createBuffer a virtual except that we want to use it in the cons for
|
||||
* pre-allocated buffers.
|
||||
*/
|
||||
enum BufferType {
|
||||
kVertex_BufferType,
|
||||
kIndex_BufferType,
|
||||
};
|
||||
|
||||
/**
|
||||
* Constructor
|
||||
*
|
||||
@ -66,7 +57,7 @@ protected:
|
||||
* reasonable minimum.
|
||||
*/
|
||||
GrBufferAllocPool(GrGpu* gpu,
|
||||
BufferType bufferType,
|
||||
GrBufferType bufferType,
|
||||
size_t bufferSize = 0);
|
||||
|
||||
virtual ~GrBufferAllocPool();
|
||||
@ -92,15 +83,15 @@ protected:
|
||||
*/
|
||||
void* makeSpace(size_t size,
|
||||
size_t alignment,
|
||||
const GrGeometryBuffer** buffer,
|
||||
const GrBuffer** buffer,
|
||||
size_t* offset);
|
||||
|
||||
GrGeometryBuffer* getBuffer(size_t size);
|
||||
GrBuffer* getBuffer(size_t size);
|
||||
|
||||
private:
|
||||
struct BufferBlock {
|
||||
size_t fBytesFree;
|
||||
GrGeometryBuffer* fBuffer;
|
||||
size_t fBytesFree;
|
||||
GrBuffer* fBuffer;
|
||||
};
|
||||
|
||||
bool createBlock(size_t requestSize);
|
||||
@ -115,16 +106,14 @@ private:
|
||||
|
||||
GrGpu* fGpu;
|
||||
size_t fMinBlockSize;
|
||||
BufferType fBufferType;
|
||||
GrBufferType fBufferType;
|
||||
|
||||
SkTArray<BufferBlock> fBlocks;
|
||||
void* fCpuData;
|
||||
void* fBufferPtr;
|
||||
size_t fGeometryBufferMapThreshold;
|
||||
size_t fBufferMapThreshold;
|
||||
};
|
||||
|
||||
class GrVertexBuffer;
|
||||
|
||||
/**
|
||||
* A GrBufferAllocPool of vertex buffers
|
||||
*/
|
||||
@ -160,15 +149,13 @@ public:
|
||||
*/
|
||||
void* makeSpace(size_t vertexSize,
|
||||
int vertexCount,
|
||||
const GrVertexBuffer** buffer,
|
||||
const GrBuffer** buffer,
|
||||
int* startVertex);
|
||||
|
||||
private:
|
||||
typedef GrBufferAllocPool INHERITED;
|
||||
};
|
||||
|
||||
class GrIndexBuffer;
|
||||
|
||||
/**
|
||||
* A GrBufferAllocPool of index buffers
|
||||
*/
|
||||
@ -200,7 +187,7 @@ public:
|
||||
* @return pointer to first index.
|
||||
*/
|
||||
void* makeSpace(int indexCount,
|
||||
const GrIndexBuffer** buffer,
|
||||
const GrBuffer** buffer,
|
||||
int* startIndex);
|
||||
|
||||
private:
|
||||
|
@ -116,7 +116,7 @@ GrCaps::GrCaps(const GrContextOptions& options) {
|
||||
fSuppressPrints = options.fSuppressPrints;
|
||||
fImmediateFlush = options.fImmediateMode;
|
||||
fDrawPathMasksToCompressedTextureSupport = options.fDrawPathToCompressedTexture;
|
||||
fGeometryBufferMapThreshold = options.fGeometryBufferMapThreshold;
|
||||
fBufferMapThreshold = options.fBufferMapThreshold;
|
||||
fUseDrawInsteadOfPartialRenderTargetWrite = options.fUseDrawInsteadOfPartialRenderTargetWrite;
|
||||
fUseDrawInsteadOfAllRenderTargetWrites = false;
|
||||
|
||||
|
@ -19,7 +19,6 @@
|
||||
#include "GrRenderTargetPriv.h"
|
||||
#include "GrSurfacePriv.h"
|
||||
#include "GrTexture.h"
|
||||
#include "GrVertexBuffer.h"
|
||||
#include "gl/GrGLRenderTarget.h"
|
||||
|
||||
#include "SkStrokeRec.h"
|
||||
|
@ -13,11 +13,8 @@
|
||||
#include "GrContext.h"
|
||||
#include "GrPathProcessor.h"
|
||||
#include "GrPrimitiveProcessor.h"
|
||||
#include "GrIndexBuffer.h"
|
||||
#include "GrPathRendering.h"
|
||||
#include "GrPipelineBuilder.h"
|
||||
#include "GrPipeline.h"
|
||||
#include "GrVertexBuffer.h"
|
||||
#include "GrXferProcessor.h"
|
||||
|
||||
#include "batches/GrDrawBatch.h"
|
||||
|
@ -1,124 +0,0 @@
|
||||
|
||||
/*
|
||||
* Copyright 2011 Google Inc.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file.
|
||||
*/
|
||||
|
||||
|
||||
#ifndef GrGeometryBuffer_DEFINED
|
||||
#define GrGeometryBuffer_DEFINED
|
||||
|
||||
#include "GrGpuResource.h"
|
||||
|
||||
class GrGpu;
|
||||
|
||||
/**
|
||||
* Parent class for vertex and index buffers
|
||||
*/
|
||||
class GrGeometryBuffer : public GrGpuResource {
|
||||
public:
|
||||
|
||||
|
||||
/**
|
||||
*Retrieves whether the buffer was created with the dynamic flag
|
||||
*
|
||||
* @return true if the buffer was created with the dynamic flag
|
||||
*/
|
||||
bool dynamic() const { return fDynamic; }
|
||||
|
||||
/**
|
||||
* Returns true if the buffer is a wrapper around a CPU array. If true it
|
||||
* indicates that map will always succeed and will be free.
|
||||
*/
|
||||
bool isCPUBacked() const { return fCPUBacked; }
|
||||
|
||||
/**
|
||||
* Maps the buffer to be written by the CPU.
|
||||
*
|
||||
* The previous content of the buffer is invalidated. It is an error
|
||||
* to draw from the buffer while it is mapped. It is an error to call map
|
||||
* on an already mapped buffer. It may fail if the backend doesn't support
|
||||
* mapping the buffer. If the buffer is CPU backed then it will always
|
||||
* succeed and is a free operation. Must be matched by an unmap() call.
|
||||
* Currently only one map at a time is supported (no nesting of
|
||||
* map/unmap).
|
||||
*
|
||||
* Note that buffer mapping does not go through GrContext and therefore is
|
||||
* not serialized with other operations.
|
||||
*
|
||||
* @return a pointer to the data or nullptr if the map fails.
|
||||
*/
|
||||
void* map() { return (fMapPtr = this->onMap()); }
|
||||
|
||||
/**
|
||||
* Unmaps the buffer.
|
||||
*
|
||||
* The pointer returned by the previous map call will no longer be valid.
|
||||
*/
|
||||
void unmap() {
|
||||
SkASSERT(fMapPtr);
|
||||
this->onUnmap();
|
||||
fMapPtr = nullptr;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the same ptr that map() returned at time of map or nullptr if the
|
||||
* is not mapped.
|
||||
*
|
||||
* @return ptr to mapped buffer data or nullptr if buffer is not mapped.
|
||||
*/
|
||||
void* mapPtr() const { return fMapPtr; }
|
||||
|
||||
/**
|
||||
Queries whether the buffer has been mapped.
|
||||
|
||||
@return true if the buffer is mapped, false otherwise.
|
||||
*/
|
||||
bool isMapped() const { return SkToBool(fMapPtr); }
|
||||
|
||||
/**
|
||||
* Updates the buffer data.
|
||||
*
|
||||
* The size of the buffer will be preserved. The src data will be
|
||||
* placed at the beginning of the buffer and any remaining contents will
|
||||
* be undefined. srcSizeInBytes must be <= to the buffer size.
|
||||
*
|
||||
* The buffer must not be mapped.
|
||||
*
|
||||
* Note that buffer updates do not go through GrContext and therefore are
|
||||
* not serialized with other operations.
|
||||
*
|
||||
* @return returns true if the update succeeds, false otherwise.
|
||||
*/
|
||||
bool updateData(const void* src, size_t srcSizeInBytes) {
|
||||
SkASSERT(!this->isMapped());
|
||||
SkASSERT(srcSizeInBytes <= fGpuMemorySize);
|
||||
return this->onUpdateData(src, srcSizeInBytes);
|
||||
}
|
||||
|
||||
protected:
|
||||
GrGeometryBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
|
||||
: INHERITED(gpu, kCached_LifeCycle)
|
||||
, fMapPtr(nullptr)
|
||||
, fGpuMemorySize(gpuMemorySize)
|
||||
, fDynamic(dynamic)
|
||||
, fCPUBacked(cpuBacked) {}
|
||||
|
||||
private:
|
||||
virtual size_t onGpuMemorySize() const { return fGpuMemorySize; }
|
||||
|
||||
virtual void* onMap() = 0;
|
||||
virtual void onUnmap() = 0;
|
||||
virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0;
|
||||
|
||||
void* fMapPtr;
|
||||
size_t fGpuMemorySize;
|
||||
bool fDynamic;
|
||||
bool fCPUBacked;
|
||||
|
||||
typedef GrGpuResource INHERITED;
|
||||
};
|
||||
|
||||
#endif
|
@ -8,10 +8,10 @@
|
||||
|
||||
#include "GrGpu.h"
|
||||
|
||||
#include "GrBuffer.h"
|
||||
#include "GrCaps.h"
|
||||
#include "GrContext.h"
|
||||
#include "GrGpuResourcePriv.h"
|
||||
#include "GrIndexBuffer.h"
|
||||
#include "GrMesh.h"
|
||||
#include "GrPathRendering.h"
|
||||
#include "GrPipeline.h"
|
||||
@ -20,8 +20,6 @@
|
||||
#include "GrRenderTargetPriv.h"
|
||||
#include "GrStencilAttachment.h"
|
||||
#include "GrSurfacePriv.h"
|
||||
#include "GrTransferBuffer.h"
|
||||
#include "GrVertexBuffer.h"
|
||||
#include "SkTypes.h"
|
||||
|
||||
GrMesh& GrMesh::operator =(const GrMesh& di) {
|
||||
@ -238,28 +236,13 @@ GrRenderTarget* GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTextureDe
|
||||
return this->onWrapBackendTextureAsRenderTarget(desc, ownership);
|
||||
}
|
||||
|
||||
GrVertexBuffer* GrGpu::createVertexBuffer(size_t size, bool dynamic) {
|
||||
GrBuffer* GrGpu::createBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) {
|
||||
this->handleDirtyContext();
|
||||
GrVertexBuffer* vb = this->onCreateVertexBuffer(size, dynamic);
|
||||
GrBuffer* buffer = this->onCreateBuffer(type, size, accessPattern);
|
||||
if (!this->caps()->reuseScratchBuffers()) {
|
||||
vb->resourcePriv().removeScratchKey();
|
||||
buffer->resourcePriv().removeScratchKey();
|
||||
}
|
||||
return vb;
|
||||
}
|
||||
|
||||
GrIndexBuffer* GrGpu::createIndexBuffer(size_t size, bool dynamic) {
|
||||
this->handleDirtyContext();
|
||||
GrIndexBuffer* ib = this->onCreateIndexBuffer(size, dynamic);
|
||||
if (!this->caps()->reuseScratchBuffers()) {
|
||||
ib->resourcePriv().removeScratchKey();
|
||||
}
|
||||
return ib;
|
||||
}
|
||||
|
||||
GrTransferBuffer* GrGpu::createTransferBuffer(size_t size, TransferType type) {
|
||||
this->handleDirtyContext();
|
||||
GrTransferBuffer* tb = this->onCreateTransferBuffer(size, type);
|
||||
return tb;
|
||||
return buffer;
|
||||
}
|
||||
|
||||
void GrGpu::clear(const SkIRect& rect,
|
||||
@ -416,13 +399,13 @@ bool GrGpu::writePixels(GrSurface* surface,
|
||||
|
||||
bool GrGpu::transferPixels(GrSurface* surface,
|
||||
int left, int top, int width, int height,
|
||||
GrPixelConfig config, GrTransferBuffer* buffer,
|
||||
GrPixelConfig config, GrBuffer* transferBuffer,
|
||||
size_t offset, size_t rowBytes) {
|
||||
SkASSERT(buffer);
|
||||
SkASSERT(transferBuffer);
|
||||
|
||||
this->handleDirtyContext();
|
||||
if (this->onTransferPixels(surface, left, top, width, height, config,
|
||||
buffer, offset, rowBytes)) {
|
||||
transferBuffer, offset, rowBytes)) {
|
||||
fStats.incTransfersToTexture();
|
||||
return true;
|
||||
}
|
||||
|
@ -20,9 +20,9 @@
|
||||
#include "SkTArray.h"
|
||||
|
||||
class GrBatchTracker;
|
||||
class GrBuffer;
|
||||
class GrContext;
|
||||
class GrGLContext;
|
||||
class GrIndexBuffer;
|
||||
class GrMesh;
|
||||
class GrNonInstancedVertices;
|
||||
class GrPath;
|
||||
@ -36,8 +36,6 @@ class GrRenderTarget;
|
||||
class GrStencilAttachment;
|
||||
class GrSurface;
|
||||
class GrTexture;
|
||||
class GrTransferBuffer;
|
||||
class GrVertexBuffer;
|
||||
|
||||
class GrGpu : public SkRefCnt {
|
||||
public:
|
||||
@ -129,39 +127,11 @@ public:
|
||||
GrRenderTarget* wrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&, GrWrapOwnership);
|
||||
|
||||
/**
|
||||
* Creates a vertex buffer.
|
||||
* Creates a buffer.
|
||||
*
|
||||
* @param size size in bytes of the vertex buffer
|
||||
* @param dynamic hints whether the data will be frequently changed
|
||||
* by either GrVertexBuffer::map() or
|
||||
* GrVertexBuffer::updateData().
|
||||
*
|
||||
* @return The vertex buffer if successful, otherwise nullptr.
|
||||
* @return the buffer if successful, otherwise nullptr.
|
||||
*/
|
||||
GrVertexBuffer* createVertexBuffer(size_t size, bool dynamic);
|
||||
|
||||
/**
|
||||
* Creates an index buffer.
|
||||
*
|
||||
* @param size size in bytes of the index buffer
|
||||
* @param dynamic hints whether the data will be frequently changed
|
||||
* by either GrIndexBuffer::map() or
|
||||
* GrIndexBuffer::updateData().
|
||||
*
|
||||
* @return The index buffer if successful, otherwise nullptr.
|
||||
*/
|
||||
GrIndexBuffer* createIndexBuffer(size_t size, bool dynamic);
|
||||
|
||||
/**
|
||||
* Creates a transfer buffer.
|
||||
*
|
||||
* @param size size in bytes of the index buffer
|
||||
* @param toGpu true if used to transfer from the cpu to the gpu
|
||||
* otherwise to be used to transfer from the gpu to the cpu
|
||||
*
|
||||
* @return The transfer buffer if successful, otherwise nullptr.
|
||||
*/
|
||||
GrTransferBuffer* createTransferBuffer(size_t size, TransferType type);
|
||||
GrBuffer* createBuffer(GrBufferType, size_t size, GrAccessPattern);
|
||||
|
||||
/**
|
||||
* Resolves MSAA.
|
||||
@ -298,22 +268,22 @@ public:
|
||||
size_t rowBytes);
|
||||
|
||||
/**
|
||||
* Updates the pixels in a rectangle of a surface using a GrTransferBuffer
|
||||
* Updates the pixels in a rectangle of a surface using a buffer
|
||||
*
|
||||
* @param surface The surface to write to.
|
||||
* @param left left edge of the rectangle to write (inclusive)
|
||||
* @param top top edge of the rectangle to write (inclusive)
|
||||
* @param width width of rectangle to write in pixels.
|
||||
* @param height height of rectangle to write in pixels.
|
||||
* @param config the pixel config of the source buffer
|
||||
* @param buffer GrTransferBuffer to read pixels from
|
||||
* @param offset offset from the start of the buffer
|
||||
* @param rowBytes number of bytes between consecutive rows. Zero
|
||||
* means rows are tightly packed.
|
||||
* @param surface The surface to write to.
|
||||
* @param left left edge of the rectangle to write (inclusive)
|
||||
* @param top top edge of the rectangle to write (inclusive)
|
||||
* @param width width of rectangle to write in pixels.
|
||||
* @param height height of rectangle to write in pixels.
|
||||
* @param config the pixel config of the source buffer
|
||||
* @param transferBuffer GrBuffer to read pixels from (type must be "kCpuToGpu")
|
||||
* @param offset offset from the start of the buffer
|
||||
* @param rowBytes number of bytes between consecutive rows. Zero
|
||||
* means rows are tightly packed.
|
||||
*/
|
||||
bool transferPixels(GrSurface* surface,
|
||||
int left, int top, int width, int height,
|
||||
GrPixelConfig config, GrTransferBuffer* buffer,
|
||||
GrPixelConfig config, GrBuffer* transferBuffer,
|
||||
size_t offset, size_t rowBytes);
|
||||
|
||||
/**
|
||||
@ -558,9 +528,7 @@ private:
|
||||
GrWrapOwnership) = 0;
|
||||
virtual GrRenderTarget* onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&,
|
||||
GrWrapOwnership) = 0;
|
||||
virtual GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) = 0;
|
||||
virtual GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) = 0;
|
||||
virtual GrTransferBuffer* onCreateTransferBuffer(size_t size, TransferType type) = 0;
|
||||
virtual GrBuffer* onCreateBuffer(GrBufferType, size_t size, GrAccessPattern) = 0;
|
||||
|
||||
// overridden by backend-specific derived class to perform the clear.
|
||||
virtual void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) = 0;
|
||||
@ -602,7 +570,7 @@ private:
|
||||
// overridden by backend-specific derived class to perform the surface write
|
||||
virtual bool onTransferPixels(GrSurface*,
|
||||
int left, int top, int width, int height,
|
||||
GrPixelConfig config, GrTransferBuffer* buffer,
|
||||
GrPixelConfig config, GrBuffer* transferBuffer,
|
||||
size_t offset, size_t rowBytes) = 0;
|
||||
|
||||
// overridden by backend-specific derived class to perform the resolve
|
||||
|
@ -1,51 +0,0 @@
|
||||
|
||||
/*
|
||||
* Copyright 2010 Google Inc.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#ifndef GrIndexBuffer_DEFINED
|
||||
#define GrIndexBuffer_DEFINED
|
||||
|
||||
#include "GrGeometryBuffer.h"
|
||||
|
||||
|
||||
class GrIndexBuffer : public GrGeometryBuffer {
|
||||
public:
|
||||
static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
|
||||
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
|
||||
|
||||
GrScratchKey::Builder builder(key, kType, 2);
|
||||
|
||||
builder[0] = SkToUInt(size);
|
||||
builder[1] = dynamic ? 1 : 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Retrieves the maximum number of quads that could be rendered
|
||||
* from the index buffer (using kTriangles_GrPrimitiveType).
|
||||
* @return the maximum number of quads using full size of index buffer.
|
||||
*/
|
||||
int maxQuads() const {
|
||||
return static_cast<int>(this->gpuMemorySize() / (sizeof(uint16_t) * 6));
|
||||
}
|
||||
protected:
|
||||
GrIndexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
|
||||
: INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
|
||||
// We currently only make buffers scratch if they're both pow2 sized and not cpuBacked.
|
||||
if (!cpuBacked && SkIsPow2(gpuMemorySize)) {
|
||||
GrScratchKey key;
|
||||
ComputeScratchKey(gpuMemorySize, dynamic, &key);
|
||||
this->setScratchKey(key);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
typedef GrGeometryBuffer INHERITED;
|
||||
};
|
||||
|
||||
#endif
|
@ -8,8 +8,8 @@
|
||||
#ifndef GrMesh_DEFINED
|
||||
#define GrMesh_DEFINED
|
||||
|
||||
#include "GrIndexBuffer.h"
|
||||
#include "GrVertexBuffer.h"
|
||||
#include "GrBuffer.h"
|
||||
#include "GrGpuResourceRef.h"
|
||||
|
||||
class GrNonInstancedMesh {
|
||||
public:
|
||||
@ -20,8 +20,8 @@ public:
|
||||
int indexCount() const { return fIndexCount; }
|
||||
bool isIndexed() const { return fIndexCount > 0; }
|
||||
|
||||
const GrVertexBuffer* vertexBuffer() const { return fVertexBuffer.get(); }
|
||||
const GrIndexBuffer* indexBuffer() const { return fIndexBuffer.get(); }
|
||||
const GrBuffer* vertexBuffer() const { return fVertexBuffer.get(); }
|
||||
const GrBuffer* indexBuffer() const { return fIndexBuffer.get(); }
|
||||
|
||||
protected:
|
||||
GrPrimitiveType fPrimitiveType;
|
||||
@ -29,8 +29,8 @@ protected:
|
||||
int fStartIndex;
|
||||
int fVertexCount;
|
||||
int fIndexCount;
|
||||
GrPendingIOResource<const GrVertexBuffer, kRead_GrIOType> fVertexBuffer;
|
||||
GrPendingIOResource<const GrIndexBuffer, kRead_GrIOType> fIndexBuffer;
|
||||
GrPendingIOResource<const GrBuffer, kRead_GrIOType> fVertexBuffer;
|
||||
GrPendingIOResource<const GrBuffer, kRead_GrIOType> fIndexBuffer;
|
||||
friend class GrMesh;
|
||||
};
|
||||
|
||||
@ -46,7 +46,7 @@ public:
|
||||
GrMesh(const GrMesh& di) { (*this) = di; }
|
||||
GrMesh& operator =(const GrMesh& di);
|
||||
|
||||
void init(GrPrimitiveType primType, const GrVertexBuffer* vertexBuffer, int startVertex,
|
||||
void init(GrPrimitiveType primType, const GrBuffer* vertexBuffer, int startVertex,
|
||||
int vertexCount) {
|
||||
SkASSERT(vertexBuffer);
|
||||
SkASSERT(vertexCount);
|
||||
@ -65,8 +65,8 @@ public:
|
||||
}
|
||||
|
||||
void initIndexed(GrPrimitiveType primType,
|
||||
const GrVertexBuffer* vertexBuffer,
|
||||
const GrIndexBuffer* indexBuffer,
|
||||
const GrBuffer* vertexBuffer,
|
||||
const GrBuffer* indexBuffer,
|
||||
int startVertex,
|
||||
int startIndex,
|
||||
int vertexCount,
|
||||
@ -95,8 +95,8 @@ public:
|
||||
the number of instances supported by the index buffer. To be used with
|
||||
nextInstances() to draw in max-sized batches.*/
|
||||
void initInstanced(GrPrimitiveType primType,
|
||||
const GrVertexBuffer* vertexBuffer,
|
||||
const GrIndexBuffer* indexBuffer,
|
||||
const GrBuffer* vertexBuffer,
|
||||
const GrBuffer* indexBuffer,
|
||||
int startVertex,
|
||||
int verticesPerInstance,
|
||||
int indicesPerInstance,
|
||||
|
@ -1211,8 +1211,8 @@ static const int kNumRRectsInIndexBuffer = 256;
|
||||
|
||||
GR_DECLARE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
|
||||
GR_DECLARE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
|
||||
static const GrIndexBuffer* ref_rrect_index_buffer(bool strokeOnly,
|
||||
GrResourceProvider* resourceProvider) {
|
||||
static const GrBuffer* ref_rrect_index_buffer(bool strokeOnly,
|
||||
GrResourceProvider* resourceProvider) {
|
||||
GR_DEFINE_STATIC_UNIQUE_KEY(gStrokeRRectOnlyIndexBufferKey);
|
||||
GR_DEFINE_STATIC_UNIQUE_KEY(gRRectOnlyIndexBufferKey);
|
||||
if (strokeOnly) {
|
||||
@ -1286,7 +1286,7 @@ private:
|
||||
|
||||
// drop out the middle quad if we're stroked
|
||||
int indicesPerInstance = fStroked ? kIndicesPerStrokeRRect : kIndicesPerRRect;
|
||||
SkAutoTUnref<const GrIndexBuffer> indexBuffer(
|
||||
SkAutoTUnref<const GrBuffer> indexBuffer(
|
||||
ref_rrect_index_buffer(fStroked, target->resourceProvider()));
|
||||
|
||||
InstancedHelper helper;
|
||||
@ -1434,7 +1434,7 @@ private:
|
||||
|
||||
// drop out the middle quad if we're stroked
|
||||
int indicesPerInstance = fStroked ? kIndicesPerStrokeRRect : kIndicesPerRRect;
|
||||
SkAutoTUnref<const GrIndexBuffer> indexBuffer(
|
||||
SkAutoTUnref<const GrBuffer> indexBuffer(
|
||||
ref_rrect_index_buffer(fStroked, target->resourceProvider()));
|
||||
|
||||
InstancedHelper helper;
|
||||
|
@ -7,15 +7,14 @@
|
||||
|
||||
#include "GrResourceProvider.h"
|
||||
|
||||
#include "GrBuffer.h"
|
||||
#include "GrGpu.h"
|
||||
#include "GrIndexBuffer.h"
|
||||
#include "GrPathRendering.h"
|
||||
#include "GrRenderTarget.h"
|
||||
#include "GrRenderTargetPriv.h"
|
||||
#include "GrResourceCache.h"
|
||||
#include "GrResourceKey.h"
|
||||
#include "GrStencilAttachment.h"
|
||||
#include "GrVertexBuffer.h"
|
||||
|
||||
GR_DECLARE_STATIC_UNIQUE_KEY(gQuadIndexBufferKey);
|
||||
|
||||
@ -25,16 +24,16 @@ GrResourceProvider::GrResourceProvider(GrGpu* gpu, GrResourceCache* cache, GrSin
|
||||
fQuadIndexBufferKey = gQuadIndexBufferKey;
|
||||
}
|
||||
|
||||
const GrIndexBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16_t* pattern,
|
||||
int patternSize,
|
||||
int reps,
|
||||
int vertCount,
|
||||
const GrUniqueKey& key) {
|
||||
const GrBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16_t* pattern,
|
||||
int patternSize,
|
||||
int reps,
|
||||
int vertCount,
|
||||
const GrUniqueKey& key) {
|
||||
size_t bufferSize = patternSize * reps * sizeof(uint16_t);
|
||||
|
||||
// This is typically used in GrBatchs, so we assume kNoPendingIO.
|
||||
GrIndexBuffer* buffer = this->createIndexBuffer(bufferSize, kStatic_BufferUsage,
|
||||
kNoPendingIO_Flag);
|
||||
GrBuffer* buffer = this->createBuffer(kIndex_GrBufferType, bufferSize, kStatic_GrAccessPattern,
|
||||
kNoPendingIO_Flag);
|
||||
if (!buffer) {
|
||||
return nullptr;
|
||||
}
|
||||
@ -63,7 +62,7 @@ const GrIndexBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16
|
||||
return buffer;
|
||||
}
|
||||
|
||||
const GrIndexBuffer* GrResourceProvider::createQuadIndexBuffer() {
|
||||
const GrBuffer* GrResourceProvider::createQuadIndexBuffer() {
|
||||
static const int kMaxQuads = 1 << 12; // max possible: (1 << 14) - 1;
|
||||
GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
|
||||
static const uint16_t kPattern[] = { 0, 1, 2, 0, 2, 3 };
|
||||
@ -89,72 +88,31 @@ GrPathRange* GrResourceProvider::createGlyphs(const SkTypeface* tf, const SkDesc
|
||||
return this->gpu()->pathRendering()->createGlyphs(tf, desc, stroke);
|
||||
}
|
||||
|
||||
GrIndexBuffer* GrResourceProvider::createIndexBuffer(size_t size, BufferUsage usage,
|
||||
uint32_t flags) {
|
||||
GrBuffer* GrResourceProvider::createBuffer(GrBufferType type, size_t size,
|
||||
GrAccessPattern accessPattern, uint32_t flags) {
|
||||
if (this->isAbandoned()) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool noPendingIO = SkToBool(flags & kNoPendingIO_Flag);
|
||||
bool dynamic = kDynamic_BufferUsage == usage;
|
||||
if (dynamic) {
|
||||
if (kDynamic_GrAccessPattern == accessPattern) {
|
||||
// bin by pow2 with a reasonable min
|
||||
static const uint32_t MIN_SIZE = 1 << 12;
|
||||
size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
|
||||
|
||||
GrScratchKey key;
|
||||
GrIndexBuffer::ComputeScratchKey(size, true, &key);
|
||||
GrBuffer::ComputeScratchKeyForDynamicBuffer(type, size, &key);
|
||||
uint32_t scratchFlags = 0;
|
||||
if (noPendingIO) {
|
||||
if (flags & kNoPendingIO_Flag) {
|
||||
scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
|
||||
} else {
|
||||
scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
|
||||
}
|
||||
GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, size, scratchFlags);
|
||||
if (resource) {
|
||||
return static_cast<GrIndexBuffer*>(resource);
|
||||
return static_cast<GrBuffer*>(resource);
|
||||
}
|
||||
}
|
||||
return this->gpu()->createIndexBuffer(size, dynamic);
|
||||
}
|
||||
|
||||
GrVertexBuffer* GrResourceProvider::createVertexBuffer(size_t size, BufferUsage usage,
|
||||
uint32_t flags) {
|
||||
if (this->isAbandoned()) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool noPendingIO = SkToBool(flags & kNoPendingIO_Flag);
|
||||
bool dynamic = kDynamic_BufferUsage == usage;
|
||||
if (dynamic) {
|
||||
// bin by pow2 with a reasonable min
|
||||
static const uint32_t MIN_SIZE = 1 << 12;
|
||||
size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
|
||||
|
||||
GrScratchKey key;
|
||||
GrVertexBuffer::ComputeScratchKey(size, true, &key);
|
||||
uint32_t scratchFlags = 0;
|
||||
if (noPendingIO) {
|
||||
scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
|
||||
} else {
|
||||
scratchFlags = GrResourceCache::kPreferNoPendingIO_ScratchFlag;
|
||||
}
|
||||
GrGpuResource* resource = this->cache()->findAndRefScratchResource(key, size, scratchFlags);
|
||||
if (resource) {
|
||||
return static_cast<GrVertexBuffer*>(resource);
|
||||
}
|
||||
}
|
||||
return this->gpu()->createVertexBuffer(size, dynamic);
|
||||
}
|
||||
|
||||
GrTransferBuffer* GrResourceProvider::createTransferBuffer(size_t size, TransferType type,
|
||||
uint32_t flags) {
|
||||
if (this->isAbandoned()) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
//bool noPendingIO = SkToBool(flags & kNoPendingIO_Flag);
|
||||
return this->gpu()->createTransferBuffer(size, type);
|
||||
return this->gpu()->createBuffer(type, size, accessPattern);
|
||||
}
|
||||
|
||||
GrBatchAtlas* GrResourceProvider::createAtlas(GrPixelConfig config,
|
||||
|
@ -9,18 +9,16 @@
|
||||
#define GrResourceProvider_DEFINED
|
||||
|
||||
#include "GrBatchAtlas.h"
|
||||
#include "GrIndexBuffer.h"
|
||||
#include "GrBuffer.h"
|
||||
#include "GrTextureProvider.h"
|
||||
#include "GrPathRange.h"
|
||||
|
||||
class GrBatchAtlas;
|
||||
class GrIndexBuffer;
|
||||
class GrPath;
|
||||
class GrRenderTarget;
|
||||
class GrSingleOwner;
|
||||
class GrStencilAttachment;
|
||||
class GrStrokeInfo;
|
||||
class GrVertexBuffer;
|
||||
class SkDescriptor;
|
||||
class SkPath;
|
||||
class SkTypeface;
|
||||
@ -45,7 +43,7 @@ public:
|
||||
/**
|
||||
* Either finds and refs, or creates an index buffer for instanced drawing with a specific
|
||||
* pattern if the index buffer is not found. If the return is non-null, the caller owns
|
||||
* a ref on the returned GrIndexBuffer.
|
||||
* a ref on the returned GrBuffer.
|
||||
*
|
||||
* @param pattern the pattern of indices to repeat
|
||||
* @param patternSize size in bytes of the pattern
|
||||
@ -55,12 +53,12 @@ public:
|
||||
*
|
||||
* @return The index buffer if successful, otherwise nullptr.
|
||||
*/
|
||||
const GrIndexBuffer* findOrCreateInstancedIndexBuffer(const uint16_t* pattern,
|
||||
int patternSize,
|
||||
int reps,
|
||||
int vertCount,
|
||||
const GrUniqueKey& key) {
|
||||
if (GrIndexBuffer* buffer = this->findAndRefTByUniqueKey<GrIndexBuffer>(key)) {
|
||||
const GrBuffer* findOrCreateInstancedIndexBuffer(const uint16_t* pattern,
|
||||
int patternSize,
|
||||
int reps,
|
||||
int vertCount,
|
||||
const GrUniqueKey& key) {
|
||||
if (GrBuffer* buffer = this->findAndRefTByUniqueKey<GrBuffer>(key)) {
|
||||
return buffer;
|
||||
}
|
||||
return this->createInstancedIndexBuffer(pattern, patternSize, reps, vertCount, key);
|
||||
@ -69,13 +67,13 @@ public:
|
||||
/**
|
||||
* Returns an index buffer that can be used to render quads.
|
||||
* Six indices per quad: 0, 1, 2, 0, 2, 3, etc.
|
||||
* The max number of quads can be queried using GrIndexBuffer::maxQuads().
|
||||
* The max number of quads is the buffer's index capacity divided by 6.
|
||||
* Draw with kTriangles_GrPrimitiveType
|
||||
* @ return the quad index buffer
|
||||
*/
|
||||
const GrIndexBuffer* refQuadIndexBuffer() {
|
||||
if (GrIndexBuffer* buffer =
|
||||
this->findAndRefTByUniqueKey<GrIndexBuffer>(fQuadIndexBufferKey)) {
|
||||
const GrBuffer* refQuadIndexBuffer() {
|
||||
if (GrBuffer* buffer =
|
||||
this->findAndRefTByUniqueKey<GrBuffer>(fQuadIndexBufferKey)) {
|
||||
return buffer;
|
||||
}
|
||||
return this->createQuadIndexBuffer();
|
||||
@ -104,16 +102,7 @@ public:
|
||||
kNoPendingIO_Flag = kNoPendingIO_ScratchTextureFlag,
|
||||
};
|
||||
|
||||
enum BufferUsage {
|
||||
/** Caller intends to specify the buffer data rarely with respect to the number of draws
|
||||
that read the data. */
|
||||
kStatic_BufferUsage,
|
||||
/** Caller intends to respecify the buffer data frequently between draws. */
|
||||
kDynamic_BufferUsage,
|
||||
};
|
||||
GrIndexBuffer* createIndexBuffer(size_t size, BufferUsage, uint32_t flags);
|
||||
GrVertexBuffer* createVertexBuffer(size_t size, BufferUsage, uint32_t flags);
|
||||
GrTransferBuffer* createTransferBuffer(size_t size, TransferType, uint32_t flags);
|
||||
GrBuffer* createBuffer(GrBufferType, size_t size, GrAccessPattern, uint32_t flags);
|
||||
|
||||
GrTexture* createApproxTexture(const GrSurfaceDesc& desc, uint32_t flags) {
|
||||
SkASSERT(0 == flags || kNoPendingIO_Flag == flags);
|
||||
@ -157,13 +146,13 @@ public:
|
||||
GrWrapOwnership = kBorrow_GrWrapOwnership);
|
||||
|
||||
private:
|
||||
const GrIndexBuffer* createInstancedIndexBuffer(const uint16_t* pattern,
|
||||
int patternSize,
|
||||
int reps,
|
||||
int vertCount,
|
||||
const GrUniqueKey& key);
|
||||
const GrBuffer* createInstancedIndexBuffer(const uint16_t* pattern,
|
||||
int patternSize,
|
||||
int reps,
|
||||
int vertCount,
|
||||
const GrUniqueKey& key);
|
||||
|
||||
const GrIndexBuffer* createQuadIndexBuffer();
|
||||
const GrBuffer* createQuadIndexBuffer();
|
||||
|
||||
GrUniqueKey fQuadIndexBufferKey;
|
||||
|
||||
|
@ -9,7 +9,6 @@
|
||||
#include "GrSoftwarePathRenderer.h"
|
||||
#include "GrContext.h"
|
||||
#include "GrSWMaskHelper.h"
|
||||
#include "GrVertexBuffer.h"
|
||||
#include "batches/GrRectBatchFactory.h"
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -345,11 +345,7 @@ private:
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) override { return nullptr; }
|
||||
|
||||
GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) override { return nullptr; }
|
||||
|
||||
GrTransferBuffer* onCreateTransferBuffer(size_t, TransferType) override { return nullptr; }
|
||||
GrBuffer* onCreateBuffer(GrBufferType, size_t, GrAccessPattern) override { return nullptr; }
|
||||
|
||||
void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) override {}
|
||||
|
||||
@ -376,7 +372,7 @@ private:
|
||||
|
||||
bool onTransferPixels(GrSurface* surface,
|
||||
int left, int top, int width, int height,
|
||||
GrPixelConfig config, GrTransferBuffer* buffer,
|
||||
GrPixelConfig config, GrBuffer* transferBuffer,
|
||||
size_t offset, size_t rowBytes) override {
|
||||
return false;
|
||||
}
|
||||
@ -410,7 +406,7 @@ GrContext* GrContext::CreateMockContext() {
|
||||
|
||||
void GrContext::initMockContext() {
|
||||
GrContextOptions options;
|
||||
options.fGeometryBufferMapThreshold = 0;
|
||||
options.fBufferMapThreshold = 0;
|
||||
SkASSERT(nullptr == fGpu);
|
||||
fGpu = new MockGpu(this, options);
|
||||
SkASSERT(fGpu);
|
||||
|
@ -1,76 +0,0 @@
|
||||
|
||||
/*
|
||||
* Copyright 2015 Google Inc.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file.
|
||||
*/
|
||||
|
||||
|
||||
#ifndef GrTransferBuffer_DEFINED
|
||||
#define GrTransferBuffer_DEFINED
|
||||
|
||||
#include "GrGpuResource.h"
|
||||
|
||||
class GrTransferBuffer : public GrGpuResource {
|
||||
public:
|
||||
/**
|
||||
* Maps the buffer to be written by the CPU.
|
||||
*
|
||||
* The previous content of the buffer is invalidated. It is an error
|
||||
* to transfer to or from the buffer while it is mapped. It is an error to
|
||||
* call map on an already mapped buffer. Must be matched by an unmap() call.
|
||||
* Currently only one map at a time is supported (no nesting of map/unmap).
|
||||
*
|
||||
* Note that buffer mapping does not go through GrContext and therefore is
|
||||
* not serialized with other operations.
|
||||
*
|
||||
* @return a pointer to the data or nullptr if the map fails.
|
||||
*/
|
||||
void* map() { return (fMapPtr = this->onMap()); }
|
||||
|
||||
/**
|
||||
* Unmaps the buffer.
|
||||
*
|
||||
* The pointer returned by the previous map call will no longer be valid.
|
||||
*/
|
||||
void unmap() {
|
||||
SkASSERT(fMapPtr);
|
||||
this->onUnmap();
|
||||
fMapPtr = nullptr;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the same ptr that map() returned at time of map or nullptr if the
|
||||
* is not mapped.
|
||||
*
|
||||
* @return ptr to mapped buffer data or nullptr if buffer is not mapped.
|
||||
*/
|
||||
void* mapPtr() const { return fMapPtr; }
|
||||
|
||||
/**
|
||||
Queries whether the buffer has been mapped.
|
||||
|
||||
@return true if the buffer is mapped, false otherwise.
|
||||
*/
|
||||
bool isMapped() const { return SkToBool(fMapPtr); }
|
||||
|
||||
protected:
|
||||
GrTransferBuffer(GrGpu* gpu, size_t gpuMemorySize)
|
||||
: INHERITED(gpu, kUncached_LifeCycle)
|
||||
, fGpuMemorySize(gpuMemorySize) {
|
||||
}
|
||||
|
||||
private:
|
||||
virtual size_t onGpuMemorySize() const { return fGpuMemorySize; }
|
||||
|
||||
virtual void* onMap() = 0;
|
||||
virtual void onUnmap() = 0;
|
||||
|
||||
void* fMapPtr;
|
||||
size_t fGpuMemorySize;
|
||||
|
||||
typedef GrGpuResource INHERITED;
|
||||
};
|
||||
|
||||
#endif
|
@ -1,42 +0,0 @@
|
||||
|
||||
/*
|
||||
* Copyright 2010 Google Inc.
|
||||
*
|
||||
* Use of this source code is governed by a BSD-style license that can be
|
||||
* found in the LICENSE file.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#ifndef GrVertexBuffer_DEFINED
|
||||
#define GrVertexBuffer_DEFINED
|
||||
|
||||
#include "GrGeometryBuffer.h"
|
||||
|
||||
class GrVertexBuffer : public GrGeometryBuffer {
|
||||
public:
|
||||
static void ComputeScratchKey(size_t size, bool dynamic, GrScratchKey* key) {
|
||||
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
|
||||
|
||||
GrScratchKey::Builder builder(key, kType, 2);
|
||||
|
||||
builder[0] = SkToUInt(size);
|
||||
builder[1] = dynamic ? 1 : 0;
|
||||
}
|
||||
|
||||
protected:
|
||||
GrVertexBuffer(GrGpu* gpu, size_t gpuMemorySize, bool dynamic, bool cpuBacked)
|
||||
: INHERITED(gpu, gpuMemorySize, dynamic, cpuBacked) {
|
||||
// We currently only make buffers scratch if they're both pow2 sized and not cpuBacked.
|
||||
if (!cpuBacked && SkIsPow2(gpuMemorySize)) {
|
||||
GrScratchKey key;
|
||||
ComputeScratchKey(gpuMemorySize, dynamic, &key);
|
||||
this->setScratchKey(key);
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
typedef GrGeometryBuffer INHERITED;
|
||||
};
|
||||
|
||||
#endif
|
@ -814,7 +814,7 @@ private:
            continue;
        }

        const GrVertexBuffer* vertexBuffer;
        const GrBuffer* vertexBuffer;
        int firstVertex;

        void* verts = target->makeVertexSpace(vertexStride, tess.numPts(), &vertexBuffer,
@ -824,7 +824,7 @@ private:
            return;
        }

        const GrIndexBuffer* indexBuffer;
        const GrBuffer* indexBuffer;
        int firstIndex;

        uint16_t* idxs = target->makeIndexSpace(tess.numIndices(), &indexBuffer, &firstIndex);
@ -900,7 +900,7 @@ private:
            continue;
        }

        const GrVertexBuffer* vertexBuffer;
        const GrBuffer* vertexBuffer;
        int firstVertex;

        size_t vertexStride = quadProcessor->getVertexStride();
@ -912,7 +912,7 @@ private:
            return;
        }

        const GrIndexBuffer* indexBuffer;
        const GrBuffer* indexBuffer;
        int firstIndex;

        uint16_t *idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);

@ -10,13 +10,13 @@

#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrBuffer.h"
#include "GrContext.h"
#include "GrPipelineBuilder.h"
#include "GrResourceProvider.h"
#include "GrSurfacePriv.h"
#include "GrSWMaskHelper.h"
#include "GrTexturePriv.h"
#include "GrVertexBuffer.h"
#include "batches/GrVertexBatch.h"
#include "effects/GrDistanceFieldGeoProc.h"

@ -177,8 +177,8 @@ private:
    }

    struct FlushInfo {
        SkAutoTUnref<const GrVertexBuffer> fVertexBuffer;
        SkAutoTUnref<const GrIndexBuffer> fIndexBuffer;
        SkAutoTUnref<const GrBuffer> fVertexBuffer;
        SkAutoTUnref<const GrBuffer> fIndexBuffer;
        int fVertexOffset;
        int fInstancesToFlush;
    };
@ -217,7 +217,7 @@ private:
        size_t vertexStride = dfProcessor->getVertexStride();
        SkASSERT(vertexStride == 2 * sizeof(SkPoint) + sizeof(GrColor));

        const GrVertexBuffer* vertexBuffer;
        const GrBuffer* vertexBuffer;
        void* vertices = target->makeVertexSpace(vertexStride,
                                                 kVerticesPerQuad * instanceCount,
                                                 &vertexBuffer,
@ -492,7 +492,8 @@ private:

    void flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) const {
        GrMesh mesh;
        int maxInstancesPerDraw = flushInfo->fIndexBuffer->maxQuads();
        int maxInstancesPerDraw =
            static_cast<int>(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6);
        mesh.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
                           flushInfo->fIndexBuffer, flushInfo->fVertexOffset, kVerticesPerQuad,
                           kIndicesPerQuad, flushInfo->fInstancesToFlush, maxInstancesPerDraw);
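The replacement expression recovers what the old maxQuads() accessor reported: an instanced quad is drawn as two triangles, i.e. six uint16_t indices (12 bytes), so an index buffer of N bytes can address N / sizeof(uint16_t) / 6 quads. For example, a 3072-byte quad index buffer allows 3072 / 2 / 6 = 256 instances per draw. As a hypothetical helper:

    static int maxQuadsInIndexBuffer(const GrBuffer* indexBuffer) {
        // 6 indices per quad (two triangles), 2 bytes per uint16_t index.
        return static_cast<int>(indexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6);
    }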
@ -28,7 +28,7 @@ static const int kNumAAFillRectsInIndexBuffer = 256;
static const int kVertsPerAAFillRect = 8;
static const int kIndicesPerAAFillRect = 30;

const GrIndexBuffer* get_index_buffer(GrResourceProvider* resourceProvider) {
const GrBuffer* get_index_buffer(GrResourceProvider* resourceProvider) {
    GR_DEFINE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey);

    static const uint16_t gFillAARectIdx[] = {
@ -191,7 +191,7 @@ public:
        out->setUnknownSingleComponent();
    }

    static const GrIndexBuffer* GetIndexBuffer(GrResourceProvider* rp) {
    static const GrBuffer* GetIndexBuffer(GrResourceProvider* rp) {
        return get_index_buffer(rp);
    }

@ -9,15 +9,14 @@

#include "GrBatchFlushState.h"
#include "GrBatchTest.h"
#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrIndexBuffer.h"
#include "GrPathUtils.h"
#include "GrPipelineBuilder.h"
#include "GrProcessor.h"
#include "GrResourceProvider.h"
#include "GrVertexBuffer.h"
#include "SkGeometry.h"
#include "SkStroke.h"
#include "SkTemplates.h"
@ -65,7 +64,7 @@ static const int kQuadNumVertices = 5;
static const int kQuadsNumInIdxBuffer = 256;
GR_DECLARE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);

static const GrIndexBuffer* ref_quads_index_buffer(GrResourceProvider* resourceProvider) {
static const GrBuffer* ref_quads_index_buffer(GrResourceProvider* resourceProvider) {
    GR_DEFINE_STATIC_UNIQUE_KEY(gQuadsIndexBufferKey);
    return resourceProvider->findOrCreateInstancedIndexBuffer(
        kQuadIdxBufPattern, kIdxsPerQuad, kQuadsNumInIdxBuffer, kQuadNumVertices,
@ -99,7 +98,7 @@ static const int kLineSegsNumInIdxBuffer = 256;

GR_DECLARE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);

static const GrIndexBuffer* ref_lines_index_buffer(GrResourceProvider* resourceProvider) {
static const GrBuffer* ref_lines_index_buffer(GrResourceProvider* resourceProvider) {
    GR_DEFINE_STATIC_UNIQUE_KEY(gLinesIndexBufferKey);
    return resourceProvider->findOrCreateInstancedIndexBuffer(
        kLineSegIdxBufPattern, kIdxsPerLineSeg, kLineSegsNumInIdxBuffer, kLineSegNumVertices,
@ -858,11 +857,11 @@ void AAHairlineBatch::onPrepareDraws(Target* target) const {

    // do lines first
    if (lineCount) {
        SkAutoTUnref<const GrIndexBuffer> linesIndexBuffer(
        SkAutoTUnref<const GrBuffer> linesIndexBuffer(
            ref_lines_index_buffer(target->resourceProvider()));
        target->initDraw(lineGP);

        const GrVertexBuffer* vertexBuffer;
        const GrBuffer* vertexBuffer;
        int firstVertex;

        size_t vertexStride = lineGP->getVertexStride();
@ -891,10 +890,10 @@ void AAHairlineBatch::onPrepareDraws(Target* target) const {
    }

    if (quadCount || conicCount) {
        const GrVertexBuffer* vertexBuffer;
        const GrBuffer* vertexBuffer;
        int firstVertex;

        SkAutoTUnref<const GrIndexBuffer> quadsIndexBuffer(
        SkAutoTUnref<const GrBuffer> quadsIndexBuffer(
            ref_quads_index_buffer(target->resourceProvider()));

        size_t vertexStride = sizeof(BezierVertex);
@ -164,7 +164,7 @@ private:
        if (vertexCount == 0 || indexCount == 0) {
            return;
        }
        const GrVertexBuffer* vertexBuffer;
        const GrBuffer* vertexBuffer;
        GrMesh mesh;
        int firstVertex;
        void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer,
@ -175,7 +175,7 @@ private:
        }
        memcpy(verts, vertices, vertexCount * vertexStride);

        const GrIndexBuffer* indexBuffer;
        const GrBuffer* indexBuffer;
        int firstIndex;
        uint16_t* idxs = target->makeIndexSpace(indexCount, &indexBuffer, &firstIndex);
        if (!idxs) {
@ -123,8 +123,7 @@ private:
    static const int kBevelVertexCnt = 24;
    static const int kNumBevelRectsInIndexBuffer = 256;

    static const GrIndexBuffer* GetIndexBuffer(GrResourceProvider* resourceProvider,
                                               bool miterStroke);
    static const GrBuffer* GetIndexBuffer(GrResourceProvider* resourceProvider, bool miterStroke);

    GrColor color() const { return fBatch.fColor; }
    bool usesLocalCoords() const { return fBatch.fUsesLocalCoords; }
@ -206,7 +205,7 @@ void AAStrokeRectBatch::onPrepareDraws(Target* target) const {
    int indicesPerInstance = this->miterStroke() ? kMiterIndexCnt : kBevelIndexCnt;
    int instanceCount = fGeoData.count();

    const SkAutoTUnref<const GrIndexBuffer> indexBuffer(
    const SkAutoTUnref<const GrBuffer> indexBuffer(
        GetIndexBuffer(target->resourceProvider(), this->miterStroke()));
    InstancedHelper helper;
    void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,
@ -235,8 +234,8 @@ void AAStrokeRectBatch::onPrepareDraws(Target* target) const {
    helper.recordDraw(target);
}

const GrIndexBuffer* AAStrokeRectBatch::GetIndexBuffer(GrResourceProvider* resourceProvider,
                                                       bool miterStroke) {
const GrBuffer* AAStrokeRectBatch::GetIndexBuffer(GrResourceProvider* resourceProvider,
                                                  bool miterStroke) {

    if (miterStroke) {
        static const uint16_t gMiterIndices[] = {

@ -119,7 +119,7 @@ void GrAtlasTextBatch::onPrepareDraws(Target* target) const {
    target->initDraw(gp);

    int glyphCount = this->numGlyphs();
    const GrVertexBuffer* vertexBuffer;
    const GrBuffer* vertexBuffer;

    void* vertices = target->makeVertexSpace(vertexStride,
                                             glyphCount * kVerticesPerGlyph,
@ -181,7 +181,8 @@ void GrAtlasTextBatch::onPrepareDraws(Target* target) const {

void GrAtlasTextBatch::flush(GrVertexBatch::Target* target, FlushInfo* flushInfo) const {
    GrMesh mesh;
    int maxGlyphsPerDraw = flushInfo->fIndexBuffer->maxQuads();
    int maxGlyphsPerDraw =
        static_cast<int>(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6);
    mesh.initInstanced(kTriangles_GrPrimitiveType, flushInfo->fVertexBuffer,
                       flushInfo->fIndexBuffer, flushInfo->fVertexOffset,
                       kVerticesPerGlyph, kIndicesPerGlyph, flushInfo->fGlyphsToFlush,

@ -99,8 +99,8 @@ private:
    void initBatchTracker(const GrXPOverridesForBatch& overrides) override;

    struct FlushInfo {
        SkAutoTUnref<const GrVertexBuffer> fVertexBuffer;
        SkAutoTUnref<const GrIndexBuffer> fIndexBuffer;
        SkAutoTUnref<const GrBuffer> fVertexBuffer;
        SkAutoTUnref<const GrBuffer> fIndexBuffer;
        int fGlyphsToFlush;
        int fVertexOffset;
    };

@ -313,7 +313,7 @@ private:
        }

        // allocate vertex / index buffers
        const GrVertexBuffer* vertexBuffer;
        const GrBuffer* vertexBuffer;
        int firstVertex;

        void* verts = target->makeVertexSpace(vertexStride, maxVertices,
@ -324,7 +324,7 @@ private:
            return;
        }

        const GrIndexBuffer* indexBuffer = nullptr;
        const GrBuffer* indexBuffer = nullptr;
        int firstIndex = 0;

        void* indices = nullptr;

@ -106,7 +106,7 @@ void GrDrawVerticesBatch::onPrepareDraws(Target* target) const {

    int instanceCount = fGeoData.count();

    const GrVertexBuffer* vertexBuffer;
    const GrBuffer* vertexBuffer;
    int firstVertex;

    void* verts = target->makeVertexSpace(vertexStride, fVertexCount, &vertexBuffer, &firstVertex);
@ -116,7 +116,7 @@ void GrDrawVerticesBatch::onPrepareDraws(Target* target) const {
        return;
    }

    const GrIndexBuffer* indexBuffer = nullptr;
    const GrBuffer* indexBuffer = nullptr;
    int firstIndex = 0;

    uint16_t* indices = nullptr;

@ -96,7 +96,7 @@ private:
        size_t vertexStride = gp->getVertexStride();
        int instanceCount = fGeoData.count();

        SkAutoTUnref<const GrIndexBuffer> indexBuffer(
        SkAutoTUnref<const GrBuffer> indexBuffer(
            target->resourceProvider()->refQuadIndexBuffer());
        InstancedHelper helper;
        void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,

@ -26,7 +26,7 @@ public:
        out->setKnownSingleComponent(0xff);
    }

    static const GrIndexBuffer* GetIndexBuffer(GrResourceProvider* rp) {
    static const GrBuffer* GetIndexBuffer(GrResourceProvider* rp) {
        return rp->refQuadIndexBuffer();
    }

@ -130,7 +130,7 @@ private:
        vertexCount = kVertsPerStrokeRect;
    }

    const GrVertexBuffer* vertexBuffer;
    const GrBuffer* vertexBuffer;
    int firstVertex;

    void* verts = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer,
@ -873,7 +873,7 @@ public:
        }

        if (triVertices.count()) {
            const GrVertexBuffer* triVertexBuffer;
            const GrBuffer* triVertexBuffer;
            int firstTriVertex;
            size_t triStride = triangleProcessor->getVertexStride();
            PLSVertex* triVerts = reinterpret_cast<PLSVertex*>(target->makeVertexSpace(
@ -892,7 +892,7 @@ public:
        }

        if (quadVertices.count()) {
            const GrVertexBuffer* quadVertexBuffer;
            const GrBuffer* quadVertexBuffer;
            int firstQuadVertex;
            size_t quadStride = quadProcessor->getVertexStride();
            PLSVertex* quadVerts = reinterpret_cast<PLSVertex*>(target->makeVertexSpace(
@ -916,7 +916,7 @@ public:
                                                 SkPath::FillType::kEvenOdd_FillType,
                                                 invert,
                                                 this->usesLocalCoords()));
        const GrVertexBuffer* rectVertexBuffer;
        const GrBuffer* rectVertexBuffer;
        size_t finishStride = finishProcessor->getVertexStride();
        int firstRectVertex;
        static const int kRectVertexCount = 6;

@ -34,7 +34,7 @@
 * const GrGeometryProcessor* CreateGP(const Geometry& seedGeometry,
 *                                     const GrXPOverridesForBatch& overrides)
 *
 * const GrIndexBuffer* GetIndexBuffer(GrResourceProvider*)
 * const GrBuffer* GetIndexBuffer(GrResourceProvider*)
 *
 * Tesselate(intptr_t vertices, size_t vertexStride, const Geometry& geo,
 *           const GrXPOverridesForBatch& overrides)
@ -101,7 +101,7 @@ private:
        size_t vertexStride = gp->getVertexStride();
        int instanceCount = fGeoData.count();

        SkAutoTUnref<const GrIndexBuffer> indexBuffer(
        SkAutoTUnref<const GrBuffer> indexBuffer(
            Impl::GetIndexBuffer(target->resourceProvider()));
        InstancedHelper helper;
        void* vertices = helper.init(target, kTriangles_GrPrimitiveType, vertexStride,

@ -45,7 +45,7 @@ private:
    }
};

bool cache_match(GrVertexBuffer* vertexBuffer, SkScalar tol, int* actualCount) {
bool cache_match(GrBuffer* vertexBuffer, SkScalar tol, int* actualCount) {
    if (!vertexBuffer) {
        return false;
    }
@ -68,8 +68,8 @@ public:
    }
    SkPoint* lock(int vertexCount) override {
        size_t size = vertexCount * sizeof(SkPoint);
        fVertexBuffer.reset(fResourceProvider->createVertexBuffer(
            size, GrResourceProvider::kStatic_BufferUsage, 0));
        fVertexBuffer.reset(fResourceProvider->createBuffer(
            kVertex_GrBufferType, size, kStatic_GrAccessPattern, 0));
        if (!fVertexBuffer.get()) {
            return nullptr;
        }
@ -89,9 +89,9 @@ public:
        }
        fVertices = nullptr;
    }
    GrVertexBuffer* vertexBuffer() { return fVertexBuffer.get(); }
    GrBuffer* vertexBuffer() { return fVertexBuffer.get(); }
private:
    SkAutoTUnref<GrVertexBuffer> fVertexBuffer;
    SkAutoTUnref<GrBuffer> fVertexBuffer;
    GrResourceProvider* fResourceProvider;
    bool fCanMapVB;
    SkPoint* fVertices;
@ -158,8 +158,7 @@ private:
        fStroke.asUniqueKeyFragment(&builder[2 + clipBoundsSize32]);
        builder.finish();
        GrResourceProvider* rp = target->resourceProvider();
        SkAutoTUnref<GrVertexBuffer> cachedVertexBuffer(
            rp->findAndRefTByUniqueKey<GrVertexBuffer>(key));
        SkAutoTUnref<GrBuffer> cachedVertexBuffer(rp->findAndRefTByUniqueKey<GrBuffer>(key));
        int actualCount;
        SkScalar screenSpaceTol = GrPathUtils::kDefaultTolerance;
        SkScalar tol = GrPathUtils::scaleToleranceToSrc(
@ -226,7 +225,7 @@ private:
        this->draw(target, gp.get());
    }

    void drawVertices(Target* target, const GrGeometryProcessor* gp, const GrVertexBuffer* vb,
    void drawVertices(Target* target, const GrGeometryProcessor* gp, const GrBuffer* vb,
                      int firstVertex, int count) const {
        SkASSERT(gp->getVertexStride() == sizeof(SkPoint));

@ -10,7 +10,6 @@

#include "GrBatchFlushState.h"
#include "GrGeometryProcessor.h"
#include "GrVertexBuffer.h"

#include "batches/GrVertexBatch.h"

@ -17,14 +17,14 @@ void GrVertexBatch::onPrepare(GrBatchFlushState* state) {
}

void* GrVertexBatch::InstancedHelper::init(Target* target, GrPrimitiveType primType,
                                           size_t vertexStride, const GrIndexBuffer* indexBuffer,
                                           size_t vertexStride, const GrBuffer* indexBuffer,
                                           int verticesPerInstance, int indicesPerInstance,
                                           int instancesToDraw) {
    SkASSERT(target);
    if (!indexBuffer) {
        return nullptr;
    }
    const GrVertexBuffer* vertexBuffer;
    const GrBuffer* vertexBuffer;
    int firstVertex;
    int vertexCount = verticesPerInstance * instancesToDraw;
    void* vertices = target->makeVertexSpace(vertexStride, vertexCount, &vertexBuffer, &firstVertex);
@ -49,7 +49,7 @@ void GrVertexBatch::InstancedHelper::recordDraw(Target* target) {

void* GrVertexBatch::QuadHelper::init(Target* target, size_t vertexStride,
                                      int quadsToDraw) {
    SkAutoTUnref<const GrIndexBuffer> quadIndexBuffer(
    SkAutoTUnref<const GrBuffer> quadIndexBuffer(
        target->resourceProvider()->refQuadIndexBuffer());
    if (!quadIndexBuffer) {
        SkDebugf("Could not get quad index buffer.");
@ -35,7 +35,7 @@ protected:
    /** Returns the allocated storage for the vertices. The caller should populate the
        vertices before calling issueDraws(). */
    void* init(Target*, GrPrimitiveType, size_t vertexStride,
               const GrIndexBuffer*, int verticesPerInstance, int indicesPerInstance,
               const GrBuffer*, int verticesPerInstance, int indicesPerInstance,
               int instancesToDraw);

    /** Call after init() to issue draws to the batch target.*/
@ -17,7 +17,6 @@
#include "GrInvariantOutput.h"
#include "GrProcessor.h"
#include "GrStrokeInfo.h"
#include "GrVertexBuffer.h"
#include "SkGr.h"
#include "batches/GrVertexBatch.h"
#include "glsl/GrGLSLFragmentShaderBuilder.h"

336
src/gpu/gl/GrGLBuffer.cpp
Normal file
@ -0,0 +1,336 @@
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrGLBuffer.h"
#include "GrGLGpu.h"
#include "SkTraceMemoryDump.h"

#define GL_CALL(X) GR_GL_CALL(this->glGpu()->glInterface(), X)
#define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glGpu()->glInterface(), RET, X)

#if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
    #define CLEAR_ERROR_BEFORE_ALLOC(iface)   GrGLClearErr(iface)
    #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL_NOERRCHECK(iface, call)
    #define CHECK_ALLOC_ERROR(iface)          GR_GL_GET_ERROR(iface)
#else
    #define CLEAR_ERROR_BEFORE_ALLOC(iface)
    #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL(iface, call)
    #define CHECK_ALLOC_ERROR(iface)          GR_GL_NO_ERROR
#endif

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, GrBufferType type, size_t size,
                               GrAccessPattern accessPattern) {
    static const int kIsVertexOrIndex = (1 << kVertex_GrBufferType) | (1 << kIndex_GrBufferType);
    bool cpuBacked = gpu->glCaps().useNonVBOVertexAndIndexDynamicData() &&
                     kDynamic_GrAccessPattern == accessPattern &&
                     ((kIsVertexOrIndex >> type) & 1);
    SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, type, size, accessPattern, cpuBacked));
    if (!cpuBacked && 0 == buffer->fBufferID) {
        return nullptr;
    }
    return buffer.release();
}
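// The kIsVertexOrIndex expression above is a branch-free membership test: given
// kVertex_GrBufferType == 0 and kIndex_GrBufferType == 1 (static-asserted below),
// the mask is 0b11, so ((kIsVertexOrIndex >> type) & 1) is equivalent to
// (kVertex_GrBufferType == type || kIndex_GrBufferType == type). Only dynamic
// vertex/index buffers are eligible for the CPU-backed (non-VBO) fallback.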
// GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW

inline static void get_target_and_usage(GrBufferType type, GrAccessPattern accessPattern,
                                        const GrGLCaps& caps, GrGLenum* target, GrGLenum* usage) {
    static const GrGLenum nonXferTargets[] = {
        GR_GL_ARRAY_BUFFER,
        GR_GL_ELEMENT_ARRAY_BUFFER
    };
    GR_STATIC_ASSERT(0 == kVertex_GrBufferType);
    GR_STATIC_ASSERT(1 == kIndex_GrBufferType);

    static const GrGLenum drawUsages[] = {
        DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
        GR_GL_STATIC_DRAW,
        GR_GL_STREAM_DRAW
    };
    static const GrGLenum readUsages[] = {
        GR_GL_DYNAMIC_READ,
        GR_GL_STATIC_READ,
        GR_GL_STREAM_READ
    };
    GR_STATIC_ASSERT(0 == kDynamic_GrAccessPattern);
    GR_STATIC_ASSERT(1 == kStatic_GrAccessPattern);
    GR_STATIC_ASSERT(2 == kStream_GrAccessPattern);
    GR_STATIC_ASSERT(SK_ARRAY_COUNT(drawUsages) == 1 + kLast_GrAccessPattern);
    GR_STATIC_ASSERT(SK_ARRAY_COUNT(readUsages) == 1 + kLast_GrAccessPattern);

    SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern);

    switch (type) {
        case kVertex_GrBufferType:
        case kIndex_GrBufferType:
            *target = nonXferTargets[type];
            *usage = drawUsages[accessPattern];
            break;
        case kXferCpuToGpu_GrBufferType:
            if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) {
                *target = GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
            } else {
                SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType());
                *target = GR_GL_PIXEL_UNPACK_BUFFER;
            }
            *usage = drawUsages[accessPattern];
            break;
        case kXferGpuToCpu_GrBufferType:
            if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) {
                *target = GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
            } else {
                SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType());
                *target = GR_GL_PIXEL_PACK_BUFFER;
            }
            *usage = readUsages[accessPattern];
            break;
        default:
            SkFAIL("Unexpected buffer type.");
            break;
    }
}
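// For concreteness, along the kPBO_TransferBufferType path the mapping above resolves to,
// for example:
//   (kVertex_GrBufferType,       kDynamic_GrAccessPattern) -> GR_GL_ARRAY_BUFFER,         GR_GL_STREAM_DRAW
//   (kIndex_GrBufferType,        kStatic_GrAccessPattern)  -> GR_GL_ELEMENT_ARRAY_BUFFER, GR_GL_STATIC_DRAW
//   (kXferGpuToCpu_GrBufferType, kStream_GrAccessPattern)  -> GR_GL_PIXEL_PACK_BUFFER,    GR_GL_STREAM_READ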
GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, GrBufferType type, size_t size, GrAccessPattern accessPattern,
                       bool cpuBacked)
    : INHERITED(gpu, type, size, accessPattern, cpuBacked),
      fCPUData(nullptr),
      fTarget(0),
      fBufferID(0),
      fSizeInBytes(size),
      fUsage(0),
      fGLSizeInBytes(0) {
    if (cpuBacked) {
        if (gpu->caps()->mustClearUploadedBufferData()) {
            fCPUData = sk_calloc_throw(fSizeInBytes);
        } else {
            fCPUData = sk_malloc_flags(fSizeInBytes, SK_MALLOC_THROW);
        }
    } else {
        GL_CALL(GenBuffers(1, &fBufferID));
        fSizeInBytes = size;
        get_target_and_usage(type, accessPattern, gpu->glCaps(), &fTarget, &fUsage);
        if (fBufferID) {
            gpu->bindBuffer(fBufferID, fTarget);
            CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(gpu->glInterface(), BufferData(fTarget,
                                                         (GrGLsizeiptr) fSizeInBytes,
                                                         nullptr,  // data ptr
                                                         fUsage));
            if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) {
                gpu->releaseBuffer(fBufferID, fTarget);
                fBufferID = 0;
            } else {
                fGLSizeInBytes = fSizeInBytes;
            }
        }
    }
    VALIDATE();
    this->registerWithCache();
}

inline GrGLGpu* GrGLBuffer::glGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrGLGpu*>(this->getGpu());
}

inline const GrGLCaps& GrGLBuffer::glCaps() const {
    return this->glGpu()->glCaps();
}

void GrGLBuffer::onRelease() {
    if (!this->wasDestroyed()) {
        VALIDATE();
        // make sure we've not been abandoned or already released
        if (fCPUData) {
            SkASSERT(!fBufferID);
            sk_free(fCPUData);
            fCPUData = nullptr;
        } else if (fBufferID) {
            this->glGpu()->releaseBuffer(fBufferID, fTarget);
            fBufferID = 0;
            fGLSizeInBytes = 0;
        }
        fMapPtr = nullptr;
        VALIDATE();
    }

    INHERITED::onRelease();
}

void GrGLBuffer::onAbandon() {
    fBufferID = 0;
    fGLSizeInBytes = 0;
    fMapPtr = nullptr;
    sk_free(fCPUData);
    fCPUData = nullptr;
    VALIDATE();
    INHERITED::onAbandon();
}

void GrGLBuffer::onMap() {
    if (this->wasDestroyed()) {
        return;
    }

    VALIDATE();
    SkASSERT(!this->isMapped());

    if (0 == fBufferID) {
        fMapPtr = fCPUData;
        VALIDATE();
        return;
    }

    bool readOnly = (kXferGpuToCpu_GrBufferType == this->type());

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            break;
        case GrGLCaps::kMapBuffer_MapBufferType:
            this->glGpu()->bindBuffer(fBufferID, fTarget);
            // Let driver know it can discard the old data
            if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != fSizeInBytes) {
                GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
            }
            GL_CALL_RET(fMapPtr, MapBuffer(fTarget, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            this->glGpu()->bindBuffer(fBufferID, fTarget);
            // Make sure the GL buffer size agrees with fSizeInBytes before mapping.
            if (fGLSizeInBytes != fSizeInBytes) {
                GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
            }
            GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
            // TODO: allow the client to specify invalidation in the transfer buffer case.
            if (kXferCpuToGpu_GrBufferType != this->type()) {
                writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
            }
            GL_CALL_RET(fMapPtr, MapBufferRange(fTarget, 0, fSizeInBytes,
                                                readOnly ? GR_GL_MAP_READ_BIT : writeAccess));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType:
            this->glGpu()->bindBuffer(fBufferID, fTarget);
            // Make sure the GL buffer size agrees with fSizeInBytes before mapping.
            if (fGLSizeInBytes != fSizeInBytes) {
                GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
            }
            GL_CALL_RET(fMapPtr, MapBufferSubData(fTarget, 0, fSizeInBytes,
                                                  readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
    }
    fGLSizeInBytes = fSizeInBytes;
    VALIDATE();
}

void GrGLBuffer::onUnmap() {
    if (this->wasDestroyed()) {
        return;
    }

    VALIDATE();
    SkASSERT(this->isMapped());
    if (0 == fBufferID) {
        fMapPtr = nullptr;
        return;
    }
    // bind buffer handles the dirty context
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            SkDEBUGFAIL("Shouldn't get here.");
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: // fall through
        case GrGLCaps::kMapBufferRange_MapBufferType:
            this->glGpu()->bindBuffer(fBufferID, fTarget);
            GL_CALL(UnmapBuffer(fTarget));
            break;
        case GrGLCaps::kChromium_MapBufferType:
            this->glGpu()->bindBuffer(fBufferID, fTarget);
            GL_CALL(UnmapBufferSubData(fMapPtr));
            break;
    }
    fMapPtr = nullptr;
}

bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
    if (this->wasDestroyed()) {
        return false;
    }

    SkASSERT(!this->isMapped());
    SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget);
    VALIDATE();
    if (srcSizeInBytes > fSizeInBytes) {
        return false;
    }
    if (0 == fBufferID) {
        memcpy(fCPUData, src, srcSizeInBytes);
        return true;
    }
    SkASSERT(srcSizeInBytes <= fSizeInBytes);
    // bindbuffer handles dirty context
    this->glGpu()->bindBuffer(fBufferID, fTarget);

#if GR_GL_USE_BUFFER_DATA_NULL_HINT
    if (fSizeInBytes == srcSizeInBytes) {
        GL_CALL(BufferData(fTarget, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
    } else {
        // Before we call glBufferSubData we give the driver a hint using
        // glBufferData with nullptr. This makes the old buffer contents
        // inaccessible to future draws. The GPU may still be processing
        // draws that reference the old contents. With this hint it can
        // assign a different allocation for the new contents to avoid
        // flushing the gpu past draws consuming the old contents.
        // TODO I think we actually want to try calling bufferData here
        GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
        GL_CALL(BufferSubData(fTarget, 0, (GrGLsizeiptr) srcSizeInBytes, src));
    }
    fGLSizeInBytes = fSizeInBytes;
#else
    // Note that we're cheating on the size here. Currently no methods
    // allow a partial update that preserves contents of non-updated
    // portions of the buffer (map() does a glBufferData(..size, nullptr..))
    GL_CALL(BufferData(fTarget, srcSizeInBytes, src, fUsage));
    fGLSizeInBytes = srcSizeInBytes;
#endif
    VALIDATE();
    return true;
}

void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
                                  const SkString& dumpName) const {
    SkString buffer_id;
    buffer_id.appendU32(this->bufferID());
    traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
                                      buffer_id.c_str());
}

#ifdef SK_DEBUG

void GrGLBuffer::validate() const {
    SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget ||
             GR_GL_PIXEL_PACK_BUFFER == fTarget || GR_GL_PIXEL_UNPACK_BUFFER == fTarget ||
             GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM == fTarget ||
             GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == fTarget);
    // The following assert isn't valid when the buffer has been abandoned:
    // SkASSERT((0 == fBufferID) == (fCPUData));
    SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
    SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes <= fSizeInBytes);
    SkASSERT(nullptr == fCPUData || nullptr == fMapPtr || fCPUData == fMapPtr);
}

#endif
61
src/gpu/gl/GrGLBuffer.h
Normal file
@ -0,0 +1,61 @@
/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGLBuffer_DEFINED
#define GrGLBuffer_DEFINED

#include "GrBuffer.h"
#include "gl/GrGLTypes.h"

class GrGLGpu;
class GrGLCaps;

class GrGLBuffer : public GrBuffer {
public:
    static GrGLBuffer* Create(GrGLGpu*, GrBufferType, size_t size, GrAccessPattern);

    ~GrGLBuffer() {
        // either release or abandon should have been called by the owner of this object.
        SkASSERT(0 == fBufferID);
    }

    GrGLenum target() const { return fTarget; }
    GrGLuint bufferID() const { return fBufferID; }
    size_t baseOffset() const { return reinterpret_cast<size_t>(fCPUData); }

protected:
    GrGLBuffer(GrGLGpu*, GrBufferType, size_t size, GrAccessPattern, bool cpuBacked);

    void onAbandon() override;
    void onRelease() override;
    void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
                          const SkString& dumpName) const override;

private:
    GrGLGpu* glGpu() const;
    const GrGLCaps& glCaps() const;

    void onMap() override;
    void onUnmap() override;
    bool onUpdateData(const void* src, size_t srcSizeInBytes) override;

#ifdef SK_DEBUG
    void validate() const;
#endif

    void* fCPUData;
    GrGLenum fTarget;       // GL_ARRAY_BUFFER or GL_ELEMENT_ARRAY_BUFFER, e.g.
    GrGLuint fBufferID;
    size_t fSizeInBytes;
    GrGLenum fUsage;
    size_t fGLSizeInBytes;  // In certain cases we make the size of the GL buffer object
                            // smaller or larger than fSizeInBytes.

    typedef GrBuffer INHERITED;
};

#endif
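A minimal lifecycle sketch for the consolidated GL buffer (illustrative only; assumes a valid GrGLGpu* `gpu`, that the caller owns the returned ref, and that GrBuffer keeps the public updateData() entry point that GrGeometryBuffer had):

    GrGLBuffer* buffer = GrGLBuffer::Create(gpu, kVertex_GrBufferType, 4096,
                                            kDynamic_GrAccessPattern);
    if (buffer) {
        buffer->updateData(vertexData, vertexBytes);  // orphan-and-upload path on GL
        // ... issue draws referencing the buffer ...
        buffer->unref();  // onRelease() frees the VBO or the CPU backing
    }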
@ -1,122 +0,0 @@
/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrGLBufferImpl.h"
#include "GrGLGpu.h"

#define GL_CALL(GPU, X) GR_GL_CALL(GPU->glInterface(), X)

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

GrGLBufferImpl::GrGLBufferImpl(GrGLGpu* gpu, const Desc& desc, GrGLenum bufferType)
    : fDesc(desc)
    , fBufferType(bufferType)
    , fMapPtr(nullptr) {
    if (0 == desc.fID) {
        if (gpu->caps()->mustClearUploadedBufferData()) {
            fCPUData = sk_calloc_throw(desc.fSizeInBytes);
        } else {
            fCPUData = sk_malloc_flags(desc.fSizeInBytes, SK_MALLOC_THROW);
        }
        fGLSizeInBytes = 0;
    } else {
        fCPUData = nullptr;
        // We assume that the GL buffer was created at the desc's size initially.
        fGLSizeInBytes = fDesc.fSizeInBytes;
    }
    VALIDATE();
}

void GrGLBufferImpl::release(GrGLGpu* gpu) {
    VALIDATE();
    // make sure we've not been abandoned or already released
    if (fCPUData) {
        sk_free(fCPUData);
        fCPUData = nullptr;
    } else if (fDesc.fID) {
        gpu->releaseBuffer(fDesc.fID, fBufferType);
        fDesc.fID = 0;
        fGLSizeInBytes = 0;
    }
    fMapPtr = nullptr;
    VALIDATE();
}

void GrGLBufferImpl::abandon() {
    fDesc.fID = 0;
    fGLSizeInBytes = 0;
    fMapPtr = nullptr;
    sk_free(fCPUData);
    fCPUData = nullptr;
    VALIDATE();
}

void* GrGLBufferImpl::map(GrGLGpu* gpu) {
    VALIDATE();
    SkASSERT(!this->isMapped());
    if (0 == fDesc.fID) {
        fMapPtr = fCPUData;
    } else {
        fMapPtr = gpu->mapBuffer(fDesc.fID, fBufferType, fDesc.fUsage, fGLSizeInBytes,
                                 fDesc.fSizeInBytes);
        fGLSizeInBytes = fDesc.fSizeInBytes;
    }
    VALIDATE();
    return fMapPtr;
}

void GrGLBufferImpl::unmap(GrGLGpu* gpu) {
    VALIDATE();
    SkASSERT(this->isMapped());
    if (0 != fDesc.fID) {
        gpu->unmapBuffer(fDesc.fID, fBufferType, fMapPtr);
    }
    fMapPtr = nullptr;
}

bool GrGLBufferImpl::isMapped() const {
    VALIDATE();
    return SkToBool(fMapPtr);
}

bool GrGLBufferImpl::updateData(GrGLGpu* gpu, const void* src, size_t srcSizeInBytes) {
    SkASSERT(!this->isMapped());
    SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType);
    VALIDATE();
    if (srcSizeInBytes > fDesc.fSizeInBytes) {
        return false;
    }
    if (0 == fDesc.fID) {
        memcpy(fCPUData, src, srcSizeInBytes);
        return true;
    }
    gpu->bufferData(fDesc.fID, fBufferType, fDesc.fUsage, fDesc.fSizeInBytes, src,
                    srcSizeInBytes);
#if GR_GL_USE_BUFFER_DATA_NULL_HINT
    fGLSizeInBytes = fDesc.fSizeInBytes;
#else
    fGLSizeInBytes = srcSizeInBytes;
#endif
    VALIDATE();
    return true;
}

void GrGLBufferImpl::validate() const {
    SkASSERT(GR_GL_ARRAY_BUFFER == fBufferType || GR_GL_ELEMENT_ARRAY_BUFFER == fBufferType ||
             GR_GL_PIXEL_PACK_BUFFER == fBufferType || GR_GL_PIXEL_UNPACK_BUFFER == fBufferType ||
             GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM == fBufferType ||
             GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == fBufferType);
    // The following assert isn't valid when the buffer has been abandoned:
    // SkASSERT((0 == fDesc.fID) == (fCPUData));
    SkASSERT(nullptr == fCPUData || 0 == fGLSizeInBytes);
    SkASSERT(nullptr == fMapPtr || fCPUData || fGLSizeInBytes <= fDesc.fSizeInBytes);
    SkASSERT(nullptr == fCPUData || nullptr == fMapPtr || fCPUData == fMapPtr);
}
@ -1,69 +0,0 @@
/*
 * Copyright 2013 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGLBufferImpl_DEFINED
#define GrGLBufferImpl_DEFINED

#include "SkTypes.h"
#include "gl/GrGLTypes.h"

class GrGLGpu;

/**
 * This class serves as the implementation of GrGL*Buffer classes. It was written to avoid code
 * duplication in those classes.
 */
class GrGLBufferImpl : SkNoncopyable {
public:
    enum Usage {
        kStaticDraw_Usage = 0,
        kDynamicDraw_Usage,
        kStreamDraw_Usage,
        kStreamRead_Usage,

        kLast_Usage = kStreamRead_Usage
    };
    static const int kUsageCount = kLast_Usage + 1;

    struct Desc {
        GrGLuint fID;          // set to 0 to indicate buffer is CPU-backed and not a VBO.
        size_t   fSizeInBytes;
        Usage    fUsage;
    };

    GrGLBufferImpl(GrGLGpu*, const Desc&, GrGLenum bufferType);
    ~GrGLBufferImpl() {
        // either release or abandon should have been called by the owner of this object.
        SkASSERT(0 == fDesc.fID);
    }

    void abandon();
    void release(GrGLGpu* gpu);

    GrGLuint bufferID() const { return fDesc.fID; }
    size_t baseOffset() const { return reinterpret_cast<size_t>(fCPUData); }
    GrGLenum bufferType() const { return fBufferType; }

    void* map(GrGLGpu* gpu);
    void unmap(GrGLGpu* gpu);
    bool isMapped() const;
    bool updateData(GrGLGpu* gpu, const void* src, size_t srcSizeInBytes);

private:
    void validate() const;

    Desc fDesc;
    GrGLenum fBufferType;  // GL_ARRAY_BUFFER or GL_ELEMENT_ARRAY_BUFFER, e.g.
    void* fCPUData;
    void* fMapPtr;
    size_t fGLSizeInBytes; // In certain cases we make the size of the GL buffer object
                           // smaller or larger than the size in fDesc.

    typedef SkNoncopyable INHERITED;
};

#endif
@ -400,14 +400,14 @@ void GrGLCaps::init(const GrContextOptions& contextOptions,

    // On many GPUs, map memory is very expensive, so we effectively disable it here by setting the
    // threshold to the maximum unless the client gives us a hint that map memory is cheap.
    if (fGeometryBufferMapThreshold < 0) {
    if (fBufferMapThreshold < 0) {
        // We think mapping on Chromium will be cheaper once we know ahead of time how much space
        // we will use for all GrBatchs. Right now we might wind up mapping a large buffer and using
        // a small subset.
#if 0
        fGeometryBufferMapThreshold = kChromium_GrGLDriver == ctxInfo.driver() ? 0 : SK_MaxS32;
        fBufferMapThreshold = kChromium_GrGLDriver == ctxInfo.driver() ? 0 : SK_MaxS32;
#else
        fGeometryBufferMapThreshold = SK_MaxS32;
        fBufferMapThreshold = SK_MaxS32;
#endif
    }
@ -122,7 +122,9 @@
#define GR_GL_STREAM_DRAW                    0x88E0
#define GR_GL_STREAM_READ                    0x88E1
#define GR_GL_STATIC_DRAW                    0x88E4
#define GR_GL_STATIC_READ                    0x88E5
#define GR_GL_DYNAMIC_DRAW                   0x88E8
#define GR_GL_DYNAMIC_READ                   0x88E9

#define GR_GL_BUFFER_SIZE                    0x8764
#define GR_GL_BUFFER_USAGE                   0x8765
@ -6,6 +6,7 @@
 */

#include "GrGLGpu.h"
#include "GrGLBuffer.h"
#include "GrGLGLSL.h"
#include "GrGLStencilAttachment.h"
#include "GrGLTextureRenderTarget.h"
@ -843,7 +844,7 @@ bool GrGLGpu::onWritePixels(GrSurface* surface,

bool GrGLGpu::onTransferPixels(GrSurface* surface,
                               int left, int top, int width, int height,
                               GrPixelConfig config, GrTransferBuffer* buffer,
                               GrPixelConfig config, GrBuffer* transferBuffer,
                               size_t offset, size_t rowBytes) {
    GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());

@ -859,16 +860,14 @@ bool GrGLGpu::onTransferPixels(GrSurface* surface,
    this->setScratchTextureUnit();
    GL_CALL(BindTexture(glTex->target(), glTex->textureID()));

    SkASSERT(!buffer->isMapped());
    GrGLTransferBuffer* glBuffer = reinterpret_cast<GrGLTransferBuffer*>(buffer);
    // bind the transfer buffer
    SkASSERT(GR_GL_PIXEL_UNPACK_BUFFER == glBuffer->bufferType() ||
             GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == glBuffer->bufferType());
    GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID()));
    SkASSERT(kXferCpuToGpu_GrBufferType == transferBuffer->type());
    SkASSERT(!transferBuffer->isMapped());
    const GrGLBuffer* glBuffer = reinterpret_cast<const GrGLBuffer*>(transferBuffer);
    this->bindBuffer(glBuffer->bufferID(), glBuffer->target());

    bool success = false;
    GrMipLevel mipLevel;
    mipLevel.fPixels = buffer;
    mipLevel.fPixels = transferBuffer;
    mipLevel.fRowBytes = rowBytes;
    SkSTArray<1, GrMipLevel> texels;
    texels.push_back(mipLevel);
@ -1933,111 +1932,8 @@ GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRen
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW

GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
    GrGLVertexBuffer::Desc desc;
    desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
    desc.fSizeInBytes = size;

    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
        desc.fID = 0;
        GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc);
        return vertexBuffer;
    } else {
        desc.fID = 0;
        GL_CALL(GenBuffers(1, &desc.fID));
        if (desc.fID) {
            fHWGeometryState.setVertexBufferID(this, desc.fID);
            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(this->glInterface(),
                          BufferData(GR_GL_ARRAY_BUFFER,
                                     (GrGLsizeiptr) desc.fSizeInBytes,
                                     nullptr,   // data ptr
                                     dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
                GL_CALL(DeleteBuffers(1, &desc.fID));
                this->notifyVertexBufferDelete(desc.fID);
                return nullptr;
            }
            GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc);
            return vertexBuffer;
        }
        return nullptr;
    }
}

GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
    GrGLIndexBuffer::Desc desc;
    desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
    desc.fSizeInBytes = size;

    if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
        desc.fID = 0;
        GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc);
        return indexBuffer;
    } else {
        desc.fID = 0;
        GL_CALL(GenBuffers(1, &desc.fID));
        if (desc.fID) {
            fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
            CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
            // make sure driver can allocate memory for this buffer
            GL_ALLOC_CALL(this->glInterface(),
                          BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
                                     (GrGLsizeiptr) desc.fSizeInBytes,
                                     nullptr,   // data ptr
                                     dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
            if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
                GL_CALL(DeleteBuffers(1, &desc.fID));
                this->notifyIndexBufferDelete(desc.fID);
                return nullptr;
            }
            GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc);
            return indexBuffer;
        }
        return nullptr;
    }
}

GrTransferBuffer* GrGLGpu::onCreateTransferBuffer(size_t size, TransferType xferType) {
    GrGLCaps::TransferBufferType xferBufferType = this->ctxInfo().caps()->transferBufferType();
    if (GrGLCaps::kNone_TransferBufferType == xferBufferType) {
        return nullptr;
    }

    GrGLTransferBuffer::Desc desc;
    bool toGpu = (kCpuToGpu_TransferType == xferType);
    desc.fUsage = toGpu ? GrGLBufferImpl::kStreamDraw_Usage : GrGLBufferImpl::kStreamRead_Usage;

    desc.fSizeInBytes = size;
    desc.fID = 0;
    GL_CALL(GenBuffers(1, &desc.fID));
    if (desc.fID) {
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        // make sure driver can allocate memory for this buffer
        GrGLenum target;
        if (GrGLCaps::kChromium_TransferBufferType == xferBufferType) {
            target = toGpu ? GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM
                           : GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
        } else {
            SkASSERT(GrGLCaps::kPBO_TransferBufferType == xferBufferType);
            target = toGpu ? GR_GL_PIXEL_UNPACK_BUFFER : GR_GL_PIXEL_PACK_BUFFER;
        }
        GL_CALL(BindBuffer(target, desc.fID));
        GL_ALLOC_CALL(this->glInterface(),
                      BufferData(target,
                                 (GrGLsizeiptr) desc.fSizeInBytes,
                                 nullptr,   // data ptr
                                 (toGpu ? GR_GL_STREAM_DRAW : GR_GL_STREAM_READ)));
        if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
            GL_CALL(DeleteBuffers(1, &desc.fID));
            return nullptr;
        }
        GrTransferBuffer* transferBuffer = new GrGLTransferBuffer(this, desc, target);
        return transferBuffer;
    }

    return nullptr;
GrBuffer* GrGLGpu::onCreateBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) {
    return GrGLBuffer::Create(this, type, size, accessPattern);
}
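The three buffer factories above collapse into this single hook. Hypothetical call sites, assuming GrGpu exposes a matching public createBuffer() wrapper over onCreateBuffer():

    GrBuffer* vbo  = gpu->createBuffer(kVertex_GrBufferType,       size, kDynamic_GrAccessPattern);
    GrBuffer* ibo  = gpu->createBuffer(kIndex_GrBufferType,        size, kStatic_GrAccessPattern);
    GrBuffer* xfer = gpu->createBuffer(kXferCpuToGpu_GrBufferType, size, kStream_GrAccessPattern);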
void GrGLGpu::flushScissor(const GrScissorState& scissorState,
@ -2122,18 +2018,18 @@ bool GrGLGpu::flushGLState(const GrPipeline& pipeline, const GrPrimitiveProcesso
void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc,
                            const GrNonInstancedMesh& mesh,
                            size_t* indexOffsetInBytes) {
    GrGLVertexBuffer* vbuf;
    vbuf = (GrGLVertexBuffer*) mesh.vertexBuffer();
    const GrGLBuffer* vbuf;
    vbuf = static_cast<const GrGLBuffer*>(mesh.vertexBuffer());

    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    GrGLIndexBuffer* ibuf = nullptr;
    const GrGLBuffer* ibuf = nullptr;
    if (mesh.isIndexed()) {
        SkASSERT(indexOffsetInBytes);

        *indexOffsetInBytes = 0;
        ibuf = (GrGLIndexBuffer*)mesh.indexBuffer();
        ibuf = static_cast<const GrGLBuffer*>(mesh.indexBuffer());

        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());
@ -2223,113 +2119,6 @@ void GrGLGpu::releaseBuffer(GrGLuint id, GrGLenum type) {
    }
}

static GrGLenum get_gl_usage(GrGLBufferImpl::Usage usage) {
    static const GrGLenum grToGL[] = {
        GR_GL_STATIC_DRAW,   // GrGLBufferImpl::kStaticDraw_Usage
        DYNAMIC_USAGE_PARAM, // GrGLBufferImpl::kDynamicDraw_Usage
        GR_GL_STREAM_DRAW,   // GrGLBufferImpl::kStreamDraw_Usage
        GR_GL_STREAM_READ,   // GrGLBufferImpl::kStreamRead_Usage
    };
    static_assert(SK_ARRAY_COUNT(grToGL) == GrGLBufferImpl::kUsageCount, "array_size_mismatch");

    return grToGL[usage];
}

void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
                         size_t currentSize, size_t requestedSize) {
    void* mapPtr = nullptr;
    GrGLenum glUsage = get_gl_usage(usage);
    bool readOnly = (GrGLBufferImpl::kStreamRead_Usage == usage);

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            break;
        case GrGLCaps::kMapBuffer_MapBufferType:
            this->bindBuffer(id, type);
            // Let driver know it can discard the old data
            if (GR_GL_USE_BUFFER_DATA_NULL_HINT || currentSize != requestedSize) {
                GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
            }
            GL_CALL_RET(mapPtr, MapBuffer(type, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            this->bindBuffer(id, type);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (currentSize != requestedSize) {
                GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
            }
            GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
            // TODO: allow the client to specify invalidation in the stream draw case
            if (GrGLBufferImpl::kStreamDraw_Usage != usage) {
                writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
            }
            GL_CALL_RET(mapPtr, MapBufferRange(type, 0, requestedSize, readOnly ?
                                                                       GR_GL_MAP_READ_BIT :
                                                                       writeAccess));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType:
            this->bindBuffer(id, type);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (currentSize != requestedSize) {
                GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
            }
            GL_CALL_RET(mapPtr, MapBufferSubData(type, 0, requestedSize, readOnly ?
                                                                         GR_GL_READ_ONLY :
                                                                         GR_GL_WRITE_ONLY));
            break;
    }
    return mapPtr;
}

void GrGLGpu::bufferData(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
                         size_t currentSize, const void* src, size_t srcSizeInBytes) {
    SkASSERT(srcSizeInBytes <= currentSize);
    // bindbuffer handles dirty context
    this->bindBuffer(id, type);
    GrGLenum glUsage = get_gl_usage(usage);

#if GR_GL_USE_BUFFER_DATA_NULL_HINT
    if (currentSize == srcSizeInBytes) {
        GL_CALL(BufferData(type, (GrGLsizeiptr) srcSizeInBytes, src, glUsage));
    } else {
        // Before we call glBufferSubData we give the driver a hint using
        // glBufferData with nullptr. This makes the old buffer contents
        // inaccessible to future draws. The GPU may still be processing
        // draws that reference the old contents. With this hint it can
        // assign a different allocation for the new contents to avoid
        // flushing the gpu past draws consuming the old contents.
        // TODO I think we actually want to try calling bufferData here
        GL_CALL(BufferData(type, currentSize, nullptr, glUsage));
        GL_CALL(BufferSubData(type, 0, (GrGLsizeiptr) srcSizeInBytes, src));
    }
#else
    // Note that we're cheating on the size here. Currently no methods
    // allow a partial update that preserves contents of non-updated
    // portions of the buffer (map() does a glBufferData(..size, nullptr..))
    GL_CALL(BufferData(type, srcSizeInBytes, src, glUsage));
#endif
}

void GrGLGpu::unmapBuffer(GrGLuint id, GrGLenum type, void* mapPtr) {
    // bind buffer handles the dirty context
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            SkDEBUGFAIL("Shouldn't get here.");
            return;
        case GrGLCaps::kMapBuffer_MapBufferType: // fall through
        case GrGLCaps::kMapBufferRange_MapBufferType:
            this->bindBuffer(id, type);
            GL_CALL(UnmapBuffer(type));
            break;
        case GrGLCaps::kChromium_MapBufferType:
            this->bindBuffer(id, type);
            GL_CALL(UnmapBufferSubData(mapPtr));
            break;
    }
}

void GrGLGpu::disableScissor() {
    if (kNo_TriState != fHWScissorSettings.fEnabled) {
        GL_CALL(Disable(GR_GL_SCISSOR_TEST));
@ -4351,8 +4140,8 @@ void GrGLGpu::resetShaderCacheForTesting() const {
///////////////////////////////////////////////////////////////////////////////
GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw(
                                                GrGLGpu* gpu,
                                                const GrGLVertexBuffer* vbuffer,
                                                const GrGLIndexBuffer* ibuffer) {
                                                const GrGLBuffer* vbuffer,
                                                const GrGLBuffer* ibuffer) {
    SkASSERT(vbuffer);
    GrGLuint vbufferID = vbuffer->bufferID();
    GrGLuint* ibufferIDPtr = nullptr;

@ -10,15 +10,12 @@

#include "GrGLContext.h"
#include "GrGLIRect.h"
#include "GrGLIndexBuffer.h"
#include "GrGLPathRendering.h"
#include "GrGLProgram.h"
#include "GrGLRenderTarget.h"
#include "GrGLStencilAttachment.h"
#include "GrGLTexture.h"
#include "GrGLTransferBuffer.h"
#include "GrGLVertexArray.h"
#include "GrGLVertexBuffer.h"
#include "GrGpu.h"
#include "GrPipelineBuilder.h"
#include "GrTypes.h"
@ -26,6 +23,7 @@
#include "SkTArray.h"
#include "SkTypes.h"

class GrGLBuffer;
class GrPipeline;
class GrNonInstancedMesh;
class GrSwizzle;
@ -101,15 +99,6 @@ public:

    void releaseBuffer(GrGLuint id, GrGLenum type);

    // sizes are in bytes
    void* mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage, size_t currentSize,
                    size_t requestedSize);

    void unmapBuffer(GrGLuint id, GrGLenum type, void* mapPtr);

    void bufferData(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage, size_t currentSize,
                    const void* src, size_t srcSizeInBytes);

    const GrGLContext* glContextForTesting() const override {
        return &this->glContext();
    }
@ -149,9 +138,7 @@ private:
                             GrGpuResource::LifeCycle lifeCycle,
                             const SkTArray<GrMipLevel>& texels) override;

    GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) override;
    GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) override;
    GrTransferBuffer* onCreateTransferBuffer(size_t size, TransferType type) override;
    GrBuffer* onCreateBuffer(GrBufferType, size_t size, GrAccessPattern) override;
    GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) override;
    GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
                                              GrWrapOwnership) override;
@ -208,7 +195,7 @@ private:

    bool onTransferPixels(GrSurface*,
                          int left, int top, int width, int height,
                          GrPixelConfig config, GrTransferBuffer* buffer,
                          GrPixelConfig config, GrBuffer* transferBuffer,
                          size_t offset, size_t rowBytes) override;

    void onResolveRenderTarget(GrRenderTarget* target) override;
@ -511,8 +498,8 @@ private:
     * returned GrGLAttribArrayState should be used to set vertex attribute arrays.
     */
    GrGLAttribArrayState* bindArrayAndBuffersToDraw(GrGLGpu* gpu,
                                                    const GrGLVertexBuffer* vbuffer,
                                                    const GrGLIndexBuffer* ibuffer);
                                                    const GrGLBuffer* vbuffer,
                                                    const GrGLBuffer* ibuffer);

    /** Variants of the above that takes GL buffer IDs. Note that 0 does not imply that a
        buffer won't be bound. The "default buffer" will be bound, which is used for client-side
src/gpu/gl/GrGLIndexBuffer.cpp (deleted)
@@ -1,60 +0,0 @@
-/*
- * Copyright 2011 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "GrGLIndexBuffer.h"
-#include "GrGLGpu.h"
-#include "SkTraceMemoryDump.h"
-
-GrGLIndexBuffer::GrGLIndexBuffer(GrGLGpu* gpu, const Desc& desc)
-    : INHERITED(gpu, desc.fSizeInBytes, GrGLBufferImpl::kDynamicDraw_Usage == desc.fUsage,
-                0 == desc.fID)
-    , fImpl(gpu, desc, GR_GL_ELEMENT_ARRAY_BUFFER) {
-    this->registerWithCache();
-}
-
-void GrGLIndexBuffer::onRelease() {
-    if (!this->wasDestroyed()) {
-        fImpl.release(this->getGpuGL());
-    }
-
-    INHERITED::onRelease();
-}
-
-void GrGLIndexBuffer::onAbandon() {
-    fImpl.abandon();
-    INHERITED::onAbandon();
-}
-
-void* GrGLIndexBuffer::onMap() {
-    if (!this->wasDestroyed()) {
-        return fImpl.map(this->getGpuGL());
-    } else {
-        return nullptr;
-    }
-}
-
-void GrGLIndexBuffer::onUnmap() {
-    if (!this->wasDestroyed()) {
-        fImpl.unmap(this->getGpuGL());
-    }
-}
-
-bool GrGLIndexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
-    if (!this->wasDestroyed()) {
-        return fImpl.updateData(this->getGpuGL(), src, srcSizeInBytes);
-    } else {
-        return false;
-    }
-}
-
-void GrGLIndexBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
-                                       const SkString& dumpName) const {
-    SkString buffer_id;
-    buffer_id.appendU32(this->bufferID());
-    traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
-                                      buffer_id.c_str());
-}
src/gpu/gl/GrGLIndexBuffer.h (deleted)
@@ -1,48 +0,0 @@
-/*
- * Copyright 2011 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrGLIndexBuffer_DEFINED
-#define GrGLIndexBuffer_DEFINED
-
-#include "GrIndexBuffer.h"
-#include "GrGLBufferImpl.h"
-#include "gl/GrGLInterface.h"
-
-class GrGLGpu;
-
-class GrGLIndexBuffer : public GrIndexBuffer {
-
-public:
-    typedef GrGLBufferImpl::Desc Desc;
-
-    GrGLIndexBuffer(GrGLGpu* gpu, const Desc& desc);
-
-    GrGLuint bufferID() const { return fImpl.bufferID(); }
-    size_t baseOffset() const { return fImpl.baseOffset(); }
-
-protected:
-    void onAbandon() override;
-    void onRelease() override;
-    void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
-                          const SkString& dumpName) const override;
-
-private:
-    void* onMap() override;
-    void onUnmap() override;
-    bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
-
-    GrGLGpu* getGpuGL() const {
-        SkASSERT(!this->wasDestroyed());
-        return (GrGLGpu*)(this->getGpu());
-    }
-
-    GrGLBufferImpl fImpl;
-
-    typedef GrIndexBuffer INHERITED;
-};
-
-#endif
src/gpu/gl/GrGLTransferBuffer.cpp (deleted)
@@ -1,51 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "GrGLTransferBuffer.h"
-#include "GrGLGpu.h"
-#include "SkTraceMemoryDump.h"
-
-GrGLTransferBuffer::GrGLTransferBuffer(GrGLGpu* gpu, const Desc& desc, GrGLenum type)
-    : INHERITED(gpu, desc.fSizeInBytes)
-    , fImpl(gpu, desc, type) {
-    this->registerWithCache();
-}
-
-void GrGLTransferBuffer::onRelease() {
-    if (!this->wasDestroyed()) {
-        fImpl.release(this->getGpuGL());
-    }
-
-    INHERITED::onRelease();
-}
-
-void GrGLTransferBuffer::onAbandon() {
-    fImpl.abandon();
-    INHERITED::onAbandon();
-}
-
-void* GrGLTransferBuffer::onMap() {
-    if (!this->wasDestroyed()) {
-        return fImpl.map(this->getGpuGL());
-    } else {
-        return nullptr;
-    }
-}
-
-void GrGLTransferBuffer::onUnmap() {
-    if (!this->wasDestroyed()) {
-        fImpl.unmap(this->getGpuGL());
-    }
-}
-
-void GrGLTransferBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
-                                          const SkString& dumpName) const {
-    SkString buffer_id;
-    buffer_id.appendU32(this->bufferID());
-    traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
-                                      buffer_id.c_str());
-}
src/gpu/gl/GrGLTransferBuffer.h (deleted)
@@ -1,48 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrGLTransferBuffer_DEFINED
-#define GrGLTransferBuffer_DEFINED
-
-#include "GrTransferBuffer.h"
-#include "GrGLBufferImpl.h"
-#include "gl/GrGLInterface.h"
-
-class GrGLGpu;
-
-class GrGLTransferBuffer : public GrTransferBuffer {
-
-public:
-    typedef GrGLBufferImpl::Desc Desc;
-
-    GrGLTransferBuffer(GrGLGpu* gpu, const Desc& desc, GrGLenum type);
-
-    GrGLuint bufferID() const { return fImpl.bufferID(); }
-    size_t baseOffset() const { return fImpl.baseOffset(); }
-    GrGLenum bufferType() const { return fImpl.bufferType(); }
-
-protected:
-    void onAbandon() override;
-    void onRelease() override;
-    void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
-                          const SkString& dumpName) const override;
-
-private:
-    void* onMap() override;
-    void onUnmap() override;
-
-    GrGLGpu* getGpuGL() const {
-        SkASSERT(!this->wasDestroyed());
-        return (GrGLGpu*)(this->getGpu());
-    }
-
-    GrGLBufferImpl fImpl;
-
-    typedef GrTransferBuffer INHERITED;
-};
-
-#endif
src/gpu/gl/GrGLVertexArray.h
@@ -13,8 +13,6 @@
 #include "gl/GrGLTypes.h"
 #include "SkTArray.h"
 
-class GrGLVertexBuffer;
-class GrGLIndexBuffer;
 class GrGLGpu;
 
 /**
src/gpu/gl/GrGLVertexBuffer.cpp (deleted)
@@ -1,60 +0,0 @@
-/*
- * Copyright 2011 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "GrGLVertexBuffer.h"
-#include "GrGLGpu.h"
-#include "SkTraceMemoryDump.h"
-
-GrGLVertexBuffer::GrGLVertexBuffer(GrGLGpu* gpu, const Desc& desc)
-    : INHERITED(gpu, desc.fSizeInBytes, GrGLBufferImpl::kDynamicDraw_Usage == desc.fUsage,
-                0 == desc.fID)
-    , fImpl(gpu, desc, GR_GL_ARRAY_BUFFER) {
-    this->registerWithCache();
-}
-
-void GrGLVertexBuffer::onRelease() {
-    if (!this->wasDestroyed()) {
-        fImpl.release(this->getGpuGL());
-    }
-
-    INHERITED::onRelease();
-}
-
-void GrGLVertexBuffer::onAbandon() {
-    fImpl.abandon();
-    INHERITED::onAbandon();
-}
-
-void* GrGLVertexBuffer::onMap() {
-    if (!this->wasDestroyed()) {
-        return fImpl.map(this->getGpuGL());
-    } else {
-        return nullptr;
-    }
-}
-
-void GrGLVertexBuffer::onUnmap() {
-    if (!this->wasDestroyed()) {
-        fImpl.unmap(this->getGpuGL());
-    }
-}
-
-bool GrGLVertexBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
-    if (!this->wasDestroyed()) {
-        return fImpl.updateData(this->getGpuGL(), src, srcSizeInBytes);
-    } else {
-        return false;
-    }
-}
-
-void GrGLVertexBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
-                                        const SkString& dumpName) const {
-    SkString buffer_id;
-    buffer_id.appendU32(this->bufferID());
-    traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer",
-                                      buffer_id.c_str());
-}
src/gpu/gl/GrGLVertexBuffer.h (deleted)
@@ -1,48 +0,0 @@
-/*
- * Copyright 2011 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrGLVertexBuffer_DEFINED
-#define GrGLVertexBuffer_DEFINED
-
-#include "GrVertexBuffer.h"
-#include "GrGLBufferImpl.h"
-#include "gl/GrGLInterface.h"
-
-class GrGLGpu;
-
-class GrGLVertexBuffer : public GrVertexBuffer {
-
-public:
-    typedef GrGLBufferImpl::Desc Desc;
-
-    GrGLVertexBuffer(GrGLGpu* gpu, const Desc& desc);
-
-    GrGLuint bufferID() const { return fImpl.bufferID(); }
-    size_t baseOffset() const { return fImpl.baseOffset(); }
-
-protected:
-    void onAbandon() override;
-    void onRelease() override;
-    void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
-                          const SkString& dumpName) const override;
-
-private:
-    void* onMap() override;
-    void onUnmap() override;
-    bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
-
-    GrGLGpu* getGpuGL() const {
-        SkASSERT(!this->wasDestroyed());
-        return (GrGLGpu*)(this->getGpu());
-    }
-
-    GrGLBufferImpl fImpl;
-
-    typedef GrVertexBuffer INHERITED;
-};
-
-#endif
src/gpu/vk/GrVkCaps.cpp
@@ -29,7 +29,7 @@ GrVkCaps::GrVkCaps(const GrContextOptions& contextOptions, const GrVkInterface*
     fUseDrawInsteadOfClear = false; //TODO: figure this out
 
     fMapBufferFlags = kNone_MapFlags; //TODO: figure this out
-    fGeometryBufferMapThreshold = SK_MaxS32; //TODO: figure this out
+    fBufferMapThreshold = SK_MaxS32; //TODO: figure this out
 
     fMaxRenderTargetSize = 4096; // minimum required by spec
     fMaxTextureSize = 4096; // minimum required by spec
@@ -112,7 +112,7 @@ void GrVkCaps::initGrCaps(const VkPhysicalDeviceProperties& properties,
 
     // Assuming since we will always map in the end to upload the data we might as well just map
     // from the get go. There is no hard data to suggest this is faster or slower.
-    fGeometryBufferMapThreshold = 0;
+    fBufferMapThreshold = 0;
 
     fMapBufferFlags = kCanMap_MapFlag | kSubset_MapFlag;
 
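Note: fGeometryBufferMapThreshold becomes fBufferMapThreshold because the cap now governs all GrBuffers rather than just geometry buffers. A hypothetical sketch of the decision such a threshold typically drives (writeToBuffer and the cap accessor are assumptions, not code from this patch):

    // Choose between mapping and a direct update based on the caps threshold.
    void writeToBuffer(GrBuffer* buffer, const GrCaps& caps,
                       const void* src, size_t size) {
        if (size > static_cast<size_t>(caps.bufferMapThreshold())) {
            if (void* dst = buffer->map()) {  // large write: map, copy, unmap
                memcpy(dst, src, size);
                buffer->unmap();
                return;
            }
        }
        buffer->updateData(src, size);        // small write, or map unavailable
    }

Under this reading, SK_MaxS32 in the constructor effectively disables mapping-by-default until initGrCaps() lowers the threshold to 0, making the Vulkan backend always map.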
src/gpu/vk/GrVkGpu.cpp
@@ -171,18 +171,26 @@ void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
 }
 
 ///////////////////////////////////////////////////////////////////////////////
-GrVertexBuffer* GrVkGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
-    return GrVkVertexBuffer::Create(this, size, dynamic);
-}
-
-GrIndexBuffer* GrVkGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
-    return GrVkIndexBuffer::Create(this, size, dynamic);
-}
-
-GrTransferBuffer* GrVkGpu::onCreateTransferBuffer(size_t size, TransferType type) {
-    GrVkBuffer::Type bufferType = kCpuToGpu_TransferType ? GrVkBuffer::kCopyRead_Type
-                                                         : GrVkBuffer::kCopyWrite_Type;
-    return GrVkTransferBuffer::Create(this, size, bufferType);
+GrBuffer* GrVkGpu::onCreateBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) {
+    switch (type) {
+        case kVertex_GrBufferType:
+            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+                     kStatic_GrAccessPattern == accessPattern);
+            return GrVkVertexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
+        case kIndex_GrBufferType:
+            SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
+                     kStatic_GrAccessPattern == accessPattern);
+            return GrVkIndexBuffer::Create(this, size, kDynamic_GrAccessPattern == accessPattern);
+        case kXferCpuToGpu_GrBufferType:
+            SkASSERT(kStream_GrAccessPattern == accessPattern);
+            return GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyRead_Type);
+        case kXferGpuToCpu_GrBufferType:
+            SkASSERT(kStream_GrAccessPattern == accessPattern);
+            return GrVkTransferBuffer::Create(this, size, GrVkBuffer::kCopyWrite_Type);
+        default:
+            SkFAIL("Unknown buffer type.");
+            return nullptr;
+    }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -1217,8 +1225,9 @@ bool GrVkGpu::onReadPixels(GrSurface* surface,
                                false);
 
     GrVkTransferBuffer* transferBuffer =
-        reinterpret_cast<GrVkTransferBuffer*>(this->createTransferBuffer(rowBytes * height,
-                                                                         kGpuToCpu_TransferType));
+        static_cast<GrVkTransferBuffer*>(this->createBuffer(kXferGpuToCpu_GrBufferType,
+                                                            rowBytes * height,
+                                                            kStream_GrAccessPattern));
 
     bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
     VkOffset3D offset = {
src/gpu/vk/GrVkGpu.h
@@ -136,9 +136,7 @@ private:
     GrRenderTarget* onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&,
                                                        GrWrapOwnership) override { return NULL; }
 
-    GrVertexBuffer* onCreateVertexBuffer(size_t size, bool dynamic) override;
-    GrIndexBuffer* onCreateIndexBuffer(size_t size, bool dynamic) override;
-    GrTransferBuffer* onCreateTransferBuffer(size_t size, TransferType type) override;
+    GrBuffer* onCreateBuffer(GrBufferType, size_t size, GrAccessPattern) override;
 
     void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) override;
@@ -161,7 +159,7 @@ private:
 
     bool onTransferPixels(GrSurface*,
                           int left, int top, int width, int height,
-                          GrPixelConfig config, GrTransferBuffer* buffer,
+                          GrPixelConfig config, GrBuffer* transferBuffer,
                           size_t offset, size_t rowBytes) override { return false; }
 
     void onResolveRenderTarget(GrRenderTarget* target) override {}
src/gpu/vk/GrVkIndexBuffer.cpp
@@ -10,7 +10,8 @@
 
 GrVkIndexBuffer::GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
                                  const GrVkBuffer::Resource* bufferResource)
-    : INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, false)
+    : INHERITED(gpu, kIndex_GrBufferType, desc.fSizeInBytes,
+                desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern, false)
     , GrVkBuffer(desc, bufferResource) {
     this->registerWithCache();
 }
@@ -47,11 +48,9 @@ void GrVkIndexBuffer::onAbandon() {
     INHERITED::onAbandon();
 }
 
-void* GrVkIndexBuffer::onMap() {
+void GrVkIndexBuffer::onMap() {
     if (!this->wasDestroyed()) {
-        return this->vkMap(this->getVkGpu());
-    } else {
-        return NULL;
+        this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
     }
 }
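Note: onMap() changes from returning the mapped pointer to depositing it in the base class's fMapPtr. A sketch of the base-class contract this implies, consistent with the subclass code above but assumed in shape, since GrBuffer.h itself is not shown in this excerpt:

    // Assumed GrBuffer wrappers: the virtual fills in fMapPtr on success and
    // the non-virtual wrapper hands it back (nullptr if mapping failed).
    void* GrBuffer::map() {
        if (!fMapPtr) {
            this->onMap();
        }
        return fMapPtr;
    }

    void GrBuffer::unmap() {
        SkASSERT(fMapPtr);
        this->onUnmap();
        fMapPtr = nullptr;
    }

This is what lets the subclasses drop their else/return NULL branches: a destroyed buffer simply never sets fMapPtr.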
src/gpu/vk/GrVkIndexBuffer.h
@@ -8,13 +8,13 @@
 #ifndef GrVkIndexBuffer_DEFINED
 #define GrVkIndexBuffer_DEFINED
 
-#include "GrIndexBuffer.h"
+#include "GrBuffer.h"
 #include "GrVkBuffer.h"
 #include "vk/GrVkInterface.h"
 
 class GrVkGpu;
 
-class GrVkIndexBuffer : public GrIndexBuffer, public GrVkBuffer {
+class GrVkIndexBuffer : public GrBuffer, public GrVkBuffer {
 
 public:
     static GrVkIndexBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
@@ -27,13 +27,13 @@ private:
     GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
                     const GrVkBuffer::Resource* resource);
 
-    void* onMap() override;
+    void onMap() override;
     void onUnmap() override;
     bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
 
     GrVkGpu* getVkGpu() const;
 
-    typedef GrIndexBuffer INHERITED;
+    typedef GrBuffer INHERITED;
 };
 
 #endif
src/gpu/vk/GrVkTransferBuffer.cpp
@@ -31,7 +31,9 @@ GrVkTransferBuffer* GrVkTransferBuffer::Create(GrVkGpu* gpu, size_t size, GrVkBu
 
 GrVkTransferBuffer::GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
                                        const GrVkBuffer::Resource* bufferResource)
-    : INHERITED(gpu, desc.fSizeInBytes)
+    : INHERITED(gpu, kCopyRead_Type == desc.fType ?
+                     kXferCpuToGpu_GrBufferType : kXferGpuToCpu_GrBufferType,
+                desc.fSizeInBytes, kStream_GrAccessPattern, false)
     , GrVkBuffer(desc, bufferResource) {
     this->registerWithCache();
 }
src/gpu/vk/GrVkTransferBuffer.h
@@ -8,13 +8,13 @@
 #ifndef GrVkTransferBuffer_DEFINED
 #define GrVkTransferBuffer_DEFINED
 
-#include "GrTransferBuffer.h"
+#include "GrBuffer.h"
 #include "GrVkBuffer.h"
 #include "vk/GrVkInterface.h"
 
 class GrVkGpu;
 
-class GrVkTransferBuffer : public GrTransferBuffer, public GrVkBuffer {
+class GrVkTransferBuffer : public GrBuffer, public GrVkBuffer {
 
 public:
     static GrVkTransferBuffer* Create(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type);
|
||||
void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
|
||||
const SkString& dumpName) const override;
|
||||
|
||||
void* onMap() override {
|
||||
void onMap() override {
|
||||
if (!this->wasDestroyed()) {
|
||||
return this->vkMap(this->getVkGpu());
|
||||
} else {
|
||||
return nullptr;
|
||||
this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -43,12 +41,17 @@ private:
         }
     }
 
+    bool onUpdateData(const void* src, size_t srcSizeInBytes) override {
+        SkFAIL("Not implemented for transfer buffers.");
+        return false;
+    }
+
     GrVkGpu* getVkGpu() const {
         SkASSERT(!this->wasDestroyed());
        return reinterpret_cast<GrVkGpu*>(this->getGpu());
     }
 
-    typedef GrTransferBuffer INHERITED;
+    typedef GrBuffer INHERITED;
 };
 
 #endif
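Note: the new onUpdateData() override fails loudly because transfer buffers are written by mapping, never by updateData(). A short sketch of the supported path (variable names assumed; compare the onReadPixels hunk above, which sizes the buffer as rowBytes * height):

    // Stage pixel data through a transfer buffer by mapping it.
    if (void* dst = transferBuffer->map()) {
        memcpy(dst, pixels, rowBytes * height);
        transferBuffer->unmap();
    }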
src/gpu/vk/GrVkVertexBuffer.cpp
@@ -10,7 +10,8 @@
 
 GrVkVertexBuffer::GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
                                    const GrVkBuffer::Resource* bufferResource)
-    : INHERITED(gpu, desc.fSizeInBytes, desc.fDynamic, false)
+    : INHERITED(gpu, kVertex_GrBufferType, desc.fSizeInBytes,
+                desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern, false)
     , GrVkBuffer(desc, bufferResource) {
     this->registerWithCache();
 }
@@ -46,11 +47,9 @@ void GrVkVertexBuffer::onAbandon() {
     INHERITED::onAbandon();
 }
 
-void* GrVkVertexBuffer::onMap() {
+void GrVkVertexBuffer::onMap() {
     if (!this->wasDestroyed()) {
-        return this->vkMap(this->getVkGpu());
-    } else {
-        return NULL;
+        this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
     }
 }
src/gpu/vk/GrVkVertexBuffer.h
@@ -8,13 +8,13 @@
 #ifndef GrVkVertexBuffer_DEFINED
 #define GrVkVertexBuffer_DEFINED
 
-#include "GrVertexBuffer.h"
+#include "GrBuffer.h"
 #include "GrVkBuffer.h"
 #include "vk/GrVkInterface.h"
 
 class GrVkGpu;
 
-class GrVkVertexBuffer : public GrVertexBuffer, public GrVkBuffer {
+class GrVkVertexBuffer : public GrBuffer, public GrVkBuffer {
 public:
     static GrVkVertexBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
 
@@ -26,13 +26,13 @@ private:
     GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
                      const GrVkBuffer::Resource* resource);
 
-    void* onMap() override;
+    void onMap() override;
     void onUnmap() override;
     bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
 
    GrVkGpu* getVkGpu() const;
 
-    typedef GrVertexBuffer INHERITED;
+    typedef GrBuffer INHERITED;
 };
 
 #endif