Track GL buffer state based on unique resource ID

Reworks GrGLGpu to track GL buffer state based on the unique
GrGpuResource ID. This eliminates the need to notify the GrGLGpu
object whenever a buffer is deleted.
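
For reference, the heart of the new scheme can be sketched as follows. This is condensed from the GrGLGpu changes further down in this diff; dirty-context handling, the assert, and the implicit VAO binding for index buffers are omitted, and only the names that appear in the patch are real:

    // One entry per GrBufferType; each entry remembers the GL target it maps to
    // and the unique GrGpuResource ID of the buffer last bound to that target.
    struct HWBufferState {
        GrGLenum fGLTarget;              // e.g. GR_GL_ARRAY_BUFFER for kVertex_GrBufferType
        uint32_t fBoundBufferUniqueID;   // unique ID of the last buffer bound to this target
        bool     fBufferZeroKnownBound;  // true when GL buffer 0 is known bound (CPU-backed case)
        void invalidate() {
            fBoundBufferUniqueID = SK_InvalidUniqueID;
            fBufferZeroKnownBound = false;
        }
    } fHWBufferState[kGrBufferTypeCount];

    GrGLenum GrGLGpu::bindBuffer(GrBufferType type, const GrGLBuffer* buffer) {
        auto& state = fHWBufferState[type];
        // Unique resource IDs are never reused, so a stale entry can never match a
        // newly created buffer; deletion notifications become unnecessary.
        if (buffer->getUniqueID() != state.fBoundBufferUniqueID) {
            if (!buffer->isCPUBacked() || !state.fBufferZeroKnownBound) {
                GL_CALL(BindBuffer(state.fGLTarget, buffer->bufferID()));
                state.fBufferZeroKnownBound = buffer->isCPUBacked();
            }
            state.fBoundBufferUniqueID = buffer->getUniqueID();
        }
        return state.fGLTarget;
    }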

This change also lets us remove the type specifier from GrBuffer.
A buffer is now just a chunk of memory; the type given at creation
time is only a hint to the GL backend about which target to bind to
for updates.
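
For illustration, a call site under the new signature might look like the following sketch. The parameter order and the flag come from the patch; resourceProvider and vertexDataSize are placeholders, not names from this change:

    // The buffer type is now only a creation-time hint; the buffer object itself
    // no longer stores it, and GrGLGpu::bindBuffer() picks the GL target per bind.
    GrBuffer* vbuf = resourceProvider->createBuffer(vertexDataSize,
                                                    kVertex_GrBufferType,    // hint: likely GR_GL_ARRAY_BUFFER
                                                    kStatic_GrAccessPattern,
                                                    GrResourceProvider::kNoPendingIO_Flag);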

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1854283004

Committed: https://skia.googlesource.com/skia/+/deacc97bc63513b5eacaf21f858727f6e8b98ce5

Review URL: https://codereview.chromium.org/1854283004
Author: cdalton (committed by Commit bot)
Date: 2016-04-07 18:13:29 -07:00
Parent: 8dea4e41a1
Commit: e2e71c2df4
21 changed files with 518 additions and 629 deletions

View File

@ -406,11 +406,22 @@ private:
enum GrBufferType {
kVertex_GrBufferType,
kIndex_GrBufferType,
kTexel_GrBufferType,
kDrawIndirect_GrBufferType,
kXferCpuToGpu_GrBufferType,
kXferGpuToCpu_GrBufferType,
kLast_GrBufferType = kXferGpuToCpu_GrBufferType
};
static const int kGrBufferTypeCount = kLast_GrBufferType + 1;
static inline bool GrBufferTypeIsVertexOrIndex(GrBufferType type) {
SkASSERT(type >= 0 && type < kGrBufferTypeCount);
return type <= kIndex_GrBufferType;
GR_STATIC_ASSERT(0 == kVertex_GrBufferType);
GR_STATIC_ASSERT(1 == kIndex_GrBufferType);
}
/**
* Provides a performance hint regarding the frequency at which a data store will be accessed.

View File

@ -18,22 +18,20 @@ public:
* Computes a scratch key for a buffer with a "dynamic" access pattern. (Buffers with "static"
* and "stream" access patterns are disqualified by nature from being cached and reused.)
*/
static void ComputeScratchKeyForDynamicBuffer(GrBufferType type, size_t size,
static void ComputeScratchKeyForDynamicBuffer(size_t size, GrBufferType intendedType,
GrScratchKey* key) {
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
// TODO: There's not always reason to cache a buffer by type. In some (all?) APIs it's just
// a chunk of memory we can use/reuse for any type of data. We really only need to
// differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types.
builder[0] = type;
builder[0] = intendedType;
builder[1] = (uint32_t)size;
if (sizeof(size_t) > 4) {
builder[2] = (uint32_t)((uint64_t)size >> 32);
}
}
GrBufferType type() const { return fType; }
GrAccessPattern accessPattern() const { return fAccessPattern; }
/**
@ -110,17 +108,16 @@ public:
}
protected:
GrBuffer(GrGpu* gpu, GrBufferType type, size_t gpuMemorySize, GrAccessPattern accessPattern,
bool cpuBacked)
GrBuffer(GrGpu* gpu, size_t gpuMemorySize, GrBufferType intendedType,
GrAccessPattern accessPattern, bool cpuBacked)
: INHERITED(gpu, kCached_LifeCycle),
fMapPtr(nullptr),
fType(type),
fGpuMemorySize(gpuMemorySize), // TODO: Zero for cpu backed buffers?
fAccessPattern(accessPattern),
fCPUBacked(cpuBacked) {
if (!fCPUBacked && SkIsPow2(fGpuMemorySize) && kDynamic_GrAccessPattern == fAccessPattern) {
GrScratchKey key;
ComputeScratchKeyForDynamicBuffer(fType, fGpuMemorySize, &key);
ComputeScratchKeyForDynamicBuffer(fGpuMemorySize, intendedType, &key);
this->setScratchKey(key);
}
}
@ -134,7 +131,6 @@ private:
virtual void onUnmap() = 0;
virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0;
GrBufferType fType;
size_t fGpuMemorySize;
GrAccessPattern fAccessPattern;
bool fCPUBacked;

View File

@ -319,7 +319,7 @@ GrBuffer* GrBufferAllocPool::getBuffer(size_t size) {
// Shouldn't have to use this flag (https://bug.skia.org/4156)
static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
return rp->createBuffer(fBufferType, size, kDynamic_GrAccessPattern, kFlags);
return rp->createBuffer(size, fBufferType, kDynamic_GrAccessPattern, kFlags);
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -235,9 +235,10 @@ GrRenderTarget* GrGpu::wrapBackendTextureAsRenderTarget(const GrBackendTextureDe
return this->onWrapBackendTextureAsRenderTarget(desc);
}
GrBuffer* GrGpu::createBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) {
GrBuffer* GrGpu::createBuffer(size_t size, GrBufferType intendedType,
GrAccessPattern accessPattern) {
this->handleDirtyContext();
GrBuffer* buffer = this->onCreateBuffer(type, size, accessPattern);
GrBuffer* buffer = this->onCreateBuffer(size, intendedType, accessPattern);
if (!this->caps()->reuseScratchBuffers()) {
buffer->resourcePriv().removeScratchKey();
}

View File

@ -135,9 +135,13 @@ public:
/**
* Creates a buffer.
*
* @param size size of buffer to create.
* @param intendedType hint to the graphics subsystem about what the buffer will be used for.
* @param accessPattern hint to the graphics subsystem about how the data will be accessed.
*
* @return the buffer if successful, otherwise nullptr.
*/
GrBuffer* createBuffer(GrBufferType, size_t size, GrAccessPattern);
GrBuffer* createBuffer(size_t size, GrBufferType intendedType, GrAccessPattern accessPattern);
/**
* Resolves MSAA.
@ -533,7 +537,7 @@ private:
virtual GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
GrWrapOwnership) = 0;
virtual GrRenderTarget* onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&) = 0;
virtual GrBuffer* onCreateBuffer(GrBufferType, size_t size, GrAccessPattern) = 0;
virtual GrBuffer* onCreateBuffer(size_t size, GrBufferType intendedType, GrAccessPattern) = 0;
// overridden by backend-specific derived class to perform the clear.
virtual void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) = 0;

View File

@ -32,7 +32,7 @@ const GrBuffer* GrResourceProvider::createInstancedIndexBuffer(const uint16_t* p
size_t bufferSize = patternSize * reps * sizeof(uint16_t);
// This is typically used in GrBatchs, so we assume kNoPendingIO.
GrBuffer* buffer = this->createBuffer(kIndex_GrBufferType, bufferSize, kStatic_GrAccessPattern,
GrBuffer* buffer = this->createBuffer(bufferSize, kIndex_GrBufferType, kStatic_GrAccessPattern,
kNoPendingIO_Flag);
if (!buffer) {
return nullptr;
@ -88,7 +88,7 @@ GrPathRange* GrResourceProvider::createGlyphs(const SkTypeface* tf, const SkDesc
return this->gpu()->pathRendering()->createGlyphs(tf, desc, stroke);
}
GrBuffer* GrResourceProvider::createBuffer(GrBufferType type, size_t size,
GrBuffer* GrResourceProvider::createBuffer(size_t size, GrBufferType intendedType,
GrAccessPattern accessPattern, uint32_t flags) {
if (this->isAbandoned()) {
return nullptr;
@ -100,7 +100,7 @@ GrBuffer* GrResourceProvider::createBuffer(GrBufferType type, size_t size,
size = SkTMax(MIN_SIZE, GrNextPow2(SkToUInt(size)));
GrScratchKey key;
GrBuffer::ComputeScratchKeyForDynamicBuffer(type, size, &key);
GrBuffer::ComputeScratchKeyForDynamicBuffer(size, intendedType, &key);
uint32_t scratchFlags = 0;
if (flags & kNoPendingIO_Flag) {
scratchFlags = GrResourceCache::kRequireNoPendingIO_ScratchFlag;
@ -112,7 +112,7 @@ GrBuffer* GrResourceProvider::createBuffer(GrBufferType type, size_t size,
return static_cast<GrBuffer*>(resource);
}
}
return this->gpu()->createBuffer(type, size, accessPattern);
return this->gpu()->createBuffer(size, intendedType, accessPattern);
}
GrBatchAtlas* GrResourceProvider::createAtlas(GrPixelConfig config,

View File

@ -102,7 +102,17 @@ public:
kNoPendingIO_Flag = kNoPendingIO_ScratchTextureFlag,
};
GrBuffer* createBuffer(GrBufferType, size_t size, GrAccessPattern, uint32_t flags);
/**
* Returns a buffer.
*
* @param size minimum size of buffer to return.
* @param intendedType hint to the graphics subsystem about what the buffer will be used for.
* @param GrAccessPattern hint to the graphics subsystem about how the data will be accessed.
* @param flags see Flags enum.
*
* @return the buffer if successful, otherwise nullptr.
*/
GrBuffer* createBuffer(size_t size, GrBufferType intendedType, GrAccessPattern, uint32_t flags);
GrTexture* createApproxTexture(const GrSurfaceDesc& desc, uint32_t flags) {
SkASSERT(0 == flags || kNoPendingIO_Flag == flags);

View File

@ -69,7 +69,7 @@ public:
SkPoint* lock(int vertexCount) override {
size_t size = vertexCount * sizeof(SkPoint);
fVertexBuffer.reset(fResourceProvider->createBuffer(
kVertex_GrBufferType, size, kStatic_GrAccessPattern, 0));
size, kVertex_GrBufferType, kStatic_GrAccessPattern, 0));
if (!fVertexBuffer.get()) {
return nullptr;
}

View File

@ -28,14 +28,14 @@
#define VALIDATE() do {} while(false)
#endif
GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, GrBufferType type, size_t size,
GrAccessPattern accessPattern) {
static const int kIsVertexOrIndex = (1 << kVertex_GrBufferType) | (1 << kIndex_GrBufferType);
GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, size_t size, GrBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
bool cpuBacked = gpu->glCaps().useNonVBOVertexAndIndexDynamicData() &&
kDynamic_GrAccessPattern == accessPattern &&
((kIsVertexOrIndex >> type) & 1);
SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, type, size, accessPattern, cpuBacked));
if (!cpuBacked && 0 == buffer->fBufferID) {
GrBufferTypeIsVertexOrIndex(intendedType) &&
kDynamic_GrAccessPattern == accessPattern;
SkAutoTUnref<GrGLBuffer> buffer(new GrGLBuffer(gpu, size, intendedType, accessPattern,
cpuBacked, data));
if (!cpuBacked && 0 == buffer->bufferID()) {
return nullptr;
}
return buffer.release();
@ -45,94 +45,81 @@ GrGLBuffer* GrGLBuffer::Create(GrGLGpu* gpu, GrBufferType type, size_t size,
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_DRAW_PARAM GR_GL_STREAM_DRAW
inline static void get_target_and_usage(GrBufferType type, GrAccessPattern accessPattern,
const GrGLCaps& caps, GrGLenum* target, GrGLenum* usage) {
static const GrGLenum nonXferTargets[] = {
GR_GL_ARRAY_BUFFER,
GR_GL_ELEMENT_ARRAY_BUFFER
};
GR_STATIC_ASSERT(0 == kVertex_GrBufferType);
GR_STATIC_ASSERT(1 == kIndex_GrBufferType);
inline static GrGLenum gr_to_gl_access_pattern(GrBufferType bufferType,
GrAccessPattern accessPattern) {
static const GrGLenum drawUsages[] = {
DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
GR_GL_STATIC_DRAW,
GR_GL_STREAM_DRAW
DYNAMIC_DRAW_PARAM, // TODO: Do we really want to use STREAM_DRAW here on non-Chromium?
GR_GL_STATIC_DRAW, // kStatic_GrAccessPattern
GR_GL_STREAM_DRAW // kStream_GrAccessPattern
};
static const GrGLenum readUsages[] = {
GR_GL_DYNAMIC_READ,
GR_GL_STATIC_READ,
GR_GL_STREAM_READ
GR_GL_DYNAMIC_READ, // kDynamic_GrAccessPattern
GR_GL_STATIC_READ, // kStatic_GrAccessPattern
GR_GL_STREAM_READ // kStream_GrAccessPattern
};
GR_STATIC_ASSERT(0 == kDynamic_GrAccessPattern);
GR_STATIC_ASSERT(1 == kStatic_GrAccessPattern);
GR_STATIC_ASSERT(2 == kStream_GrAccessPattern);
GR_STATIC_ASSERT(SK_ARRAY_COUNT(drawUsages) == 1 + kLast_GrAccessPattern);
GR_STATIC_ASSERT(SK_ARRAY_COUNT(readUsages) == 1 + kLast_GrAccessPattern);
static GrGLenum const* const usageTypes[] = {
drawUsages, // kVertex_GrBufferType,
drawUsages, // kIndex_GrBufferType,
drawUsages, // kTexel_GrBufferType,
drawUsages, // kDrawIndirect_GrBufferType,
drawUsages, // kXferCpuToGpu_GrBufferType,
readUsages // kXferGpuToCpu_GrBufferType,
};
GR_STATIC_ASSERT(0 == kVertex_GrBufferType);
GR_STATIC_ASSERT(1 == kIndex_GrBufferType);
GR_STATIC_ASSERT(2 == kTexel_GrBufferType);
GR_STATIC_ASSERT(3 == kDrawIndirect_GrBufferType);
GR_STATIC_ASSERT(4 == kXferCpuToGpu_GrBufferType);
GR_STATIC_ASSERT(5 == kXferGpuToCpu_GrBufferType);
GR_STATIC_ASSERT(SK_ARRAY_COUNT(usageTypes) == kGrBufferTypeCount);
SkASSERT(bufferType >= 0 && bufferType <= kLast_GrBufferType);
SkASSERT(accessPattern >= 0 && accessPattern <= kLast_GrAccessPattern);
switch (type) {
case kVertex_GrBufferType:
case kIndex_GrBufferType:
*target = nonXferTargets[type];
*usage = drawUsages[accessPattern];
break;
case kXferCpuToGpu_GrBufferType:
if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) {
*target = GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
} else {
SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType());
*target = GR_GL_PIXEL_UNPACK_BUFFER;
}
*usage = drawUsages[accessPattern];
break;
case kXferGpuToCpu_GrBufferType:
if (GrGLCaps::kChromium_TransferBufferType == caps.transferBufferType()) {
*target = GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
} else {
SkASSERT(GrGLCaps::kPBO_TransferBufferType == caps.transferBufferType());
*target = GR_GL_PIXEL_PACK_BUFFER;
}
*usage = readUsages[accessPattern];
break;
default:
SkFAIL("Unexpected buffer type.");
break;
}
return usageTypes[bufferType][accessPattern];
}
GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, GrBufferType type, size_t size, GrAccessPattern accessPattern,
bool cpuBacked)
: INHERITED(gpu, type, size, accessPattern, cpuBacked),
GrGLBuffer::GrGLBuffer(GrGLGpu* gpu, size_t size, GrBufferType intendedType,
GrAccessPattern accessPattern, bool cpuBacked, const void* data)
: INHERITED(gpu, size, intendedType, accessPattern, cpuBacked),
fCPUData(nullptr),
fTarget(0),
fIntendedType(intendedType),
fBufferID(0),
fSizeInBytes(size),
fUsage(0),
fUsage(gr_to_gl_access_pattern(intendedType, accessPattern)),
fGLSizeInBytes(0) {
if (cpuBacked) {
if (this->isCPUBacked()) {
// Core profile uses vertex array objects, which disallow client side arrays.
SkASSERT(!gpu->glCaps().isCoreProfile());
if (gpu->caps()->mustClearUploadedBufferData()) {
fCPUData = sk_calloc_throw(fSizeInBytes);
} else {
fCPUData = sk_malloc_flags(fSizeInBytes, SK_MALLOC_THROW);
}
SkASSERT(kVertex_GrBufferType == type || kIndex_GrBufferType == type);
fTarget = kVertex_GrBufferType == type ? GR_GL_ARRAY_BUFFER : GR_GL_ELEMENT_ARRAY_BUFFER;
if (data) {
memcpy(fCPUData, data, fSizeInBytes);
}
} else {
GL_CALL(GenBuffers(1, &fBufferID));
fSizeInBytes = size;
get_target_and_usage(type, accessPattern, gpu->glCaps(), &fTarget, &fUsage);
if (fBufferID) {
gpu->bindBuffer(fBufferID, fTarget);
GrGLenum target = gpu->bindBuffer(fIntendedType, this);
CLEAR_ERROR_BEFORE_ALLOC(gpu->glInterface());
// make sure driver can allocate memory for this buffer
GL_ALLOC_CALL(gpu->glInterface(), BufferData(fTarget,
GL_ALLOC_CALL(gpu->glInterface(), BufferData(target,
(GrGLsizeiptr) fSizeInBytes,
nullptr, // data ptr
data,
fUsage));
if (CHECK_ALLOC_ERROR(gpu->glInterface()) != GR_GL_NO_ERROR) {
gpu->releaseBuffer(fBufferID, fTarget);
GL_CALL(DeleteBuffers(1, &fBufferID));
fBufferID = 0;
} else {
fGLSizeInBytes = fSizeInBytes;
@ -161,7 +148,7 @@ void GrGLBuffer::onRelease() {
sk_free(fCPUData);
fCPUData = nullptr;
} else if (fBufferID) {
this->glGpu()->releaseBuffer(fBufferID, fTarget);
GL_CALL(DeleteBuffers(1, &fBufferID));
fBufferID = 0;
fGLSizeInBytes = 0;
}
@ -196,44 +183,47 @@ void GrGLBuffer::onMap() {
return;
}
bool readOnly = (kXferGpuToCpu_GrBufferType == this->type());
// TODO: Make this a function parameter.
bool readOnly = (kXferGpuToCpu_GrBufferType == fIntendedType);
// Handling dirty context is done in the bindBuffer call
switch (this->glCaps().mapBufferType()) {
case GrGLCaps::kNone_MapBufferType:
break;
case GrGLCaps::kMapBuffer_MapBufferType:
this->glGpu()->bindBuffer(fBufferID, fTarget);
case GrGLCaps::kMapBuffer_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Let driver know it can discard the old data
if (GR_GL_USE_BUFFER_DATA_NULL_HINT || fGLSizeInBytes != fSizeInBytes) {
GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage));
}
GL_CALL_RET(fMapPtr, MapBuffer(fTarget, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
break;
}
case GrGLCaps::kMapBufferRange_MapBufferType: {
this->glGpu()->bindBuffer(fBufferID, fTarget);
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Make sure the GL buffer size agrees with fDesc before mapping.
if (fGLSizeInBytes != fSizeInBytes) {
GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage));
}
GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
// TODO: allow the client to specify invalidation in the transfer buffer case.
if (kXferCpuToGpu_GrBufferType != this->type()) {
if (kXferCpuToGpu_GrBufferType != fIntendedType) {
// TODO: Make this a function parameter.
writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
}
GL_CALL_RET(fMapPtr, MapBufferRange(fTarget, 0, fSizeInBytes,
GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, fSizeInBytes,
readOnly ? GR_GL_MAP_READ_BIT : writeAccess));
break;
}
case GrGLCaps::kChromium_MapBufferType:
this->glGpu()->bindBuffer(fBufferID, fTarget);
case GrGLCaps::kChromium_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Make sure the GL buffer size agrees with fDesc before mapping.
if (fGLSizeInBytes != fSizeInBytes) {
GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage));
}
GL_CALL_RET(fMapPtr, MapBufferSubData(fTarget, 0, fSizeInBytes,
GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, fSizeInBytes,
readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
break;
}
}
fGLSizeInBytes = fSizeInBytes;
VALIDATE();
@ -256,12 +246,13 @@ void GrGLBuffer::onUnmap() {
SkDEBUGFAIL("Shouldn't get here.");
return;
case GrGLCaps::kMapBuffer_MapBufferType: // fall through
case GrGLCaps::kMapBufferRange_MapBufferType:
this->glGpu()->bindBuffer(fBufferID, fTarget);
GL_CALL(UnmapBuffer(fTarget));
case GrGLCaps::kMapBufferRange_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
GL_CALL(UnmapBuffer(target));
break;
}
case GrGLCaps::kChromium_MapBufferType:
this->glGpu()->bindBuffer(fBufferID, fTarget);
this->glGpu()->bindBuffer(fIntendedType, this); // TODO: Is this needed?
GL_CALL(UnmapBufferSubData(fMapPtr));
break;
}
@ -274,7 +265,6 @@ bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
}
SkASSERT(!this->isMapped());
SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget);
VALIDATE();
if (srcSizeInBytes > fSizeInBytes) {
return false;
@ -285,11 +275,11 @@ bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
}
SkASSERT(srcSizeInBytes <= fSizeInBytes);
// bindbuffer handles dirty context
this->glGpu()->bindBuffer(fBufferID, fTarget);
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
#if GR_GL_USE_BUFFER_DATA_NULL_HINT
if (fSizeInBytes == srcSizeInBytes) {
GL_CALL(BufferData(fTarget, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
} else {
// Before we call glBufferSubData we give the driver a hint using
// glBufferData with nullptr. This makes the old buffer contents
@ -298,15 +288,15 @@ bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
// assign a different allocation for the new contents to avoid
// flushing the gpu past draws consuming the old contents.
// TODO I think we actually want to try calling bufferData here
GL_CALL(BufferData(fTarget, fSizeInBytes, nullptr, fUsage));
GL_CALL(BufferSubData(fTarget, 0, (GrGLsizeiptr) srcSizeInBytes, src));
GL_CALL(BufferData(target, fSizeInBytes, nullptr, fUsage));
GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
}
fGLSizeInBytes = fSizeInBytes;
#else
// Note that we're cheating on the size here. Currently no methods
// allow a partial update that preserves contents of non-updated
// portions of the buffer (map() does a glBufferData(..size, nullptr..))
GL_CALL(BufferData(fTarget, srcSizeInBytes, src, fUsage));
GL_CALL(BufferData(target, srcSizeInBytes, src, fUsage));
fGLSizeInBytes = srcSizeInBytes;
#endif
VALIDATE();
@ -324,10 +314,6 @@ void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
#ifdef SK_DEBUG
void GrGLBuffer::validate() const {
SkASSERT(GR_GL_ARRAY_BUFFER == fTarget || GR_GL_ELEMENT_ARRAY_BUFFER == fTarget ||
GR_GL_PIXEL_PACK_BUFFER == fTarget || GR_GL_PIXEL_UNPACK_BUFFER == fTarget ||
GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM == fTarget ||
GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == fTarget);
// The following assert isn't valid when the buffer has been abandoned:
// SkASSERT((0 == fDesc.fID) == (fCPUData));
SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);

View File

@ -16,19 +16,20 @@ class GrGLCaps;
class GrGLBuffer : public GrBuffer {
public:
static GrGLBuffer* Create(GrGLGpu*, GrBufferType, size_t size, GrAccessPattern);
static GrGLBuffer* Create(GrGLGpu*, size_t size, GrBufferType intendedType, GrAccessPattern,
const void* data = nullptr);
~GrGLBuffer() {
// either release or abandon should have been called by the owner of this object.
SkASSERT(0 == fBufferID);
}
GrGLenum target() const { return fTarget; }
GrGLuint bufferID() const { return fBufferID; }
size_t baseOffset() const { return reinterpret_cast<size_t>(fCPUData); }
protected:
GrGLBuffer(GrGLGpu*, GrBufferType, size_t size, GrAccessPattern, bool cpuBacked);
GrGLBuffer(GrGLGpu*, size_t size, GrBufferType intendedType, GrAccessPattern, bool cpuBacked,
const void* data);
void onAbandon() override;
void onRelease() override;
@ -47,13 +48,13 @@ private:
void validate() const;
#endif
void* fCPUData;
GrGLenum fTarget; // GL_ARRAY_BUFFER or GL_ELEMENT_ARRAY_BUFFER, e.g.
GrGLuint fBufferID;
size_t fSizeInBytes;
GrGLenum fUsage;
size_t fGLSizeInBytes; // In certain cases we make the size of the GL buffer object
// smaller or larger than the size in fDesc.
void* fCPUData;
GrBufferType fIntendedType;
GrGLuint fBufferID;
size_t fSizeInBytes;
GrGLenum fUsage;
size_t fGLSizeInBytes; // In certain cases we make the size of the GL buffer object
// smaller or larger than the size in fDesc.
typedef GrBuffer INHERITED;
};

View File

@ -136,6 +136,7 @@ void GrGLCaps::init(const GrContextOptions& contextOptions,
// data for dynamic content on these GPUs. Perhaps we should read the renderer string and
// limit this decision to specific GPU families rather than basing it on the vendor alone.
if (!GR_GL_MUST_USE_VBO &&
!fIsCoreProfile &&
(kARM_GrGLVendor == ctxInfo.vendor() ||
kImagination_GrGLVendor == ctxInfo.vendor() ||
kQualcomm_GrGLVendor == ctxInfo.vendor())) {

View File

@ -188,12 +188,45 @@ static bool gPrintStartupSpew;
GrGLGpu::GrGLGpu(GrGLContext* ctx, GrContext* context)
: GrGpu(context)
, fGLContext(ctx) {
, fGLContext(ctx)
, fProgramCache(new ProgramCache(this))
, fHWProgramID(0)
, fTempSrcFBOID(0)
, fTempDstFBOID(0)
, fStencilClearFBOID(0)
, fHWPLSEnabled(false)
, fPLSHasBeenUsed(false)
, fHWMinSampleShading(0.0) {
for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
fCopyPrograms[i].fProgram = 0;
}
fWireRectProgram.fProgram = 0;
fPLSSetupProgram.fProgram = 0;
SkASSERT(ctx);
fCaps.reset(SkRef(ctx->caps()));
fHWBoundTextureUniqueIDs.reset(this->glCaps().glslCaps()->maxCombinedSamplers());
fHWBufferState[kVertex_GrBufferType].fGLTarget = GR_GL_ARRAY_BUFFER;
fHWBufferState[kIndex_GrBufferType].fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
fHWBufferState[kTexel_GrBufferType].fGLTarget = GR_GL_TEXTURE_BUFFER;
fHWBufferState[kDrawIndirect_GrBufferType].fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
if (GrGLCaps::kChromium_TransferBufferType == this->glCaps().transferBufferType()) {
fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget =
GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget =
GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
} else {
fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
}
GR_STATIC_ASSERT(6 == SK_ARRAY_COUNT(fHWBufferState));
if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
fPathRendering.reset(new GrGLPathRendering(this));
}
GrGLClearErr(this->glInterface());
if (gPrintStartupSpew) {
const GrGLubyte* vendor;
@ -212,35 +245,15 @@ GrGLGpu::GrGLGpu(GrGLContext* ctx, GrContext* context)
SkDebugf("\n");
SkDebugf("%s", this->glCaps().dump().c_str());
}
fProgramCache = new ProgramCache(this);
fHWProgramID = 0;
fTempSrcFBOID = 0;
fTempDstFBOID = 0;
fStencilClearFBOID = 0;
if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
fPathRendering.reset(new GrGLPathRendering(this));
}
this->createCopyPrograms();
fWireRectProgram.fProgram = 0;
fWireRectArrayBuffer = 0;
if (this->glCaps().shaderCaps()->plsPathRenderingSupport()) {
this->createPLSSetupProgram();
}
else {
memset(&fPLSSetupProgram, 0, sizeof(fPLSSetupProgram));
}
fHWPLSEnabled = false;
fPLSHasBeenUsed = false;
fHWMinSampleShading = 0.0;
}
GrGLGpu::~GrGLGpu() {
// Delete the path rendering explicitly, since it will need working gpu object to release the
// resources the object itself holds.
// Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
// to release the resources held by the objects themselves.
fPathRendering.reset();
fCopyProgramArrayBuffer.reset();
fWireRectArrayBuffer.reset();
fPLSSetupProgram.fArrayBuffer.reset();
if (0 != fHWProgramID) {
// detach the current program so there is no confusion on OpenGL's part
@ -264,22 +277,10 @@ GrGLGpu::~GrGLGpu() {
}
}
if (0 != fCopyProgramArrayBuffer) {
GL_CALL(DeleteBuffers(1, &fCopyProgramArrayBuffer));
}
if (0 != fWireRectProgram.fProgram) {
GL_CALL(DeleteProgram(fWireRectProgram.fProgram));
}
if (0 != fWireRectArrayBuffer) {
GL_CALL(DeleteBuffers(1, &fWireRectArrayBuffer));
}
if (0 != fPLSSetupProgram.fArrayBuffer) {
GL_CALL(DeleteBuffers(1, &fPLSSetupProgram.fArrayBuffer));
}
if (0 != fPLSSetupProgram.fProgram) {
GL_CALL(DeleteProgram(fPLSSetupProgram.fProgram));
}
@ -287,7 +288,28 @@ GrGLGpu::~GrGLGpu() {
delete fProgramCache;
}
void GrGLGpu::createPLSSetupProgram() {
bool GrGLGpu::createPLSSetupProgram() {
if (!fPLSSetupProgram.fArrayBuffer) {
static const GrGLfloat vdata[] = {
0, 0,
0, 1,
1, 0,
1, 1
};
fPLSSetupProgram.fArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata),
kVertex_GrBufferType,
kStatic_GrAccessPattern, vdata));
if (!fPLSSetupProgram.fArrayBuffer) {
return false;
}
}
SkASSERT(!fPLSSetupProgram.fProgram);
GL_CALL_RET(fPLSSetupProgram.fProgram, CreateProgram());
if (!fPLSSetupProgram.fProgram) {
return false;
}
const GrGLSLCaps* glslCaps = this->glCaps().glslCaps();
const char* version = glslCaps->versionDeclString();
@ -347,7 +369,7 @@ void GrGLGpu::createPLSSetupProgram() {
" pls.windings = ivec4(0, 0, 0, 0);\n"
"}"
);
GL_CALL_RET(fPLSSetupProgram.fProgram, CreateProgram());
const char* str;
GrGLint length;
@ -371,19 +393,7 @@ void GrGLGpu::createPLSSetupProgram() {
GL_CALL(DeleteShader(vshader));
GL_CALL(DeleteShader(fshader));
GL_CALL(GenBuffers(1, &fPLSSetupProgram.fArrayBuffer));
fHWGeometryState.setVertexBufferID(this, fPLSSetupProgram.fArrayBuffer);
static const GrGLfloat vdata[] = {
0, 0,
0, 1,
1, 0,
1, 1
};
GL_ALLOC_CALL(this->glInterface(),
BufferData(GR_GL_ARRAY_BUFFER,
(GrGLsizeiptr) sizeof(vdata),
vdata, // data ptr
GR_GL_STATIC_DRAW));
return true;
}
void GrGLGpu::disconnect(DisconnectType type) {
@ -401,9 +411,6 @@ void GrGLGpu::disconnect(DisconnectType type) {
if (fStencilClearFBOID) {
GL_CALL(DeleteFramebuffers(1, &fStencilClearFBOID));
}
if (fCopyProgramArrayBuffer) {
GL_CALL(DeleteBuffers(1, &fCopyProgramArrayBuffer));
}
for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
if (fCopyPrograms[i].fProgram) {
GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
@ -412,16 +419,9 @@ void GrGLGpu::disconnect(DisconnectType type) {
if (fWireRectProgram.fProgram) {
GL_CALL(DeleteProgram(fWireRectProgram.fProgram));
}
if (fWireRectArrayBuffer) {
GL_CALL(DeleteBuffers(1, &fWireRectArrayBuffer));
}
if (fPLSSetupProgram.fProgram) {
GL_CALL(DeleteProgram(fPLSSetupProgram.fProgram));
}
if (fPLSSetupProgram.fArrayBuffer) {
GL_CALL(DeleteBuffers(1, &fPLSSetupProgram.fArrayBuffer));
}
} else {
if (fProgramCache) {
fProgramCache->abandon();
@ -435,14 +435,14 @@ void GrGLGpu::disconnect(DisconnectType type) {
fTempSrcFBOID = 0;
fTempDstFBOID = 0;
fStencilClearFBOID = 0;
fCopyProgramArrayBuffer = 0;
fCopyProgramArrayBuffer.reset();
for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
fCopyPrograms[i].fProgram = 0;
}
fWireRectProgram.fProgram = 0;
fWireRectArrayBuffer = 0;
fWireRectArrayBuffer.reset();
fPLSSetupProgram.fProgram = 0;
fPLSSetupProgram.fArrayBuffer = 0;
fPLSSetupProgram.fArrayBuffer.reset();
if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
this->glPathRendering()->disconnect(type);
}
@ -456,8 +456,10 @@ void GrGLGpu::onResetContext(uint32_t resetBits) {
GL_CALL(Disable(GR_GL_DEPTH_TEST));
GL_CALL(DepthMask(GR_GL_FALSE));
fHWBoundTextureBufferIDIsValid = false;
fHWBoundDrawIndirectBufferIDIsValid = false;
fHWBufferState[kTexel_GrBufferType].invalidate();
fHWBufferState[kDrawIndirect_GrBufferType].invalidate();
fHWBufferState[kXferCpuToGpu_GrBufferType].invalidate();
fHWBufferState[kXferGpuToCpu_GrBufferType].invalidate();
fHWDrawFace = GrPipelineBuilder::kInvalid_DrawFace;
@ -538,7 +540,9 @@ void GrGLGpu::onResetContext(uint32_t resetBits) {
// Vertex
if (resetBits & kVertex_GrGLBackendState) {
fHWGeometryState.invalidate();
fHWVertexArrayState.invalidate();
fHWBufferState[kVertex_GrBufferType].invalidate();
fHWBufferState[kIndex_GrBufferType].invalidate();
}
if (resetBits & kRenderTarget_GrGLBackendState) {
@ -900,11 +904,10 @@ bool GrGLGpu::onTransferPixels(GrSurface* surface,
this->setScratchTextureUnit();
GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
SkASSERT(kXferCpuToGpu_GrBufferType == transferBuffer->type());
SkASSERT(!transferBuffer->isMapped());
SkASSERT(!transferBuffer->isCPUBacked());
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
this->bindBuffer(glBuffer->bufferID(), glBuffer->target());
this->bindBuffer(kXferCpuToGpu_GrBufferType, glBuffer);
bool success = false;
GrMipLevel mipLevel;
@ -1974,8 +1977,9 @@ GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRen
// objects are implemented as client-side-arrays on tile-deferred architectures.
#define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
GrBuffer* GrGLGpu::onCreateBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) {
return GrGLBuffer::Create(this, type, size, accessPattern);
GrBuffer* GrGLGpu::onCreateBuffer(size_t size, GrBufferType intendedType,
GrAccessPattern accessPattern) {
return GrGLBuffer::Create(this, size, intendedType, accessPattern);
}
void GrGLGpu::flushScissor(const GrScissorState& scissorState,
@ -2077,22 +2081,21 @@ void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc,
SkASSERT(vbuf);
SkASSERT(!vbuf->isMapped());
SkASSERT(kVertex_GrBufferType == vbuf->type());
const GrGLBuffer* ibuf = nullptr;
GrGLAttribArrayState* attribState;
if (mesh.isIndexed()) {
SkASSERT(indexOffsetInBytes);
*indexOffsetInBytes = 0;
ibuf = static_cast<const GrGLBuffer*>(mesh.indexBuffer());
const GrGLBuffer* ibuf = static_cast<const GrGLBuffer*>(mesh.indexBuffer());
SkASSERT(ibuf);
SkASSERT(!ibuf->isMapped());
SkASSERT(kIndex_GrBufferType == ibuf->type());
*indexOffsetInBytes += ibuf->baseOffset();
attribState = fHWVertexArrayState.bindInternalVertexArray(this, ibuf);
} else {
attribState = fHWVertexArrayState.bindInternalVertexArray(this);
}
GrGLAttribArrayState* attribState =
fHWGeometryState.bindArrayAndBuffersToDraw(this, vbuf, ibuf);
int vaCount = primProc.numAttribs();
if (vaCount > 0) {
@ -2112,7 +2115,7 @@ void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc,
GrVertexAttribType attribType = attrib.fType;
attribState->set(this,
attribIndex,
vbuf->bufferID(),
vbuf,
attribType,
stride,
reinterpret_cast<GrGLvoid*>(vertexOffsetInBytes + offset));
@ -2122,57 +2125,26 @@ void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc,
}
}
void GrGLGpu::bindBuffer(GrGLuint id, GrGLenum type) {
GrGLenum GrGLGpu::bindBuffer(GrBufferType type, const GrGLBuffer* buffer) {
this->handleDirtyContext();
switch (type) {
case GR_GL_ARRAY_BUFFER:
this->bindVertexBuffer(id);
break;
case GR_GL_ELEMENT_ARRAY_BUFFER:
this->bindIndexBufferAndDefaultVertexArray(id);
break;
case GR_GL_TEXTURE_BUFFER:
if (!fHWBoundTextureBufferIDIsValid || id != fHWBoundTextureBufferID) {
GR_GL_CALL(this->glInterface(), BindBuffer(type, id));
fHWBoundTextureBufferID = id;
fHWBoundTextureBufferIDIsValid = true;
}
break;
case GR_GL_DRAW_INDIRECT_BUFFER:
if (!fHWBoundDrawIndirectBufferIDIsValid || id != fHWBoundDrawIndirectBufferID) {
GR_GL_CALL(this->glInterface(), BindBuffer(type, id));
fHWBoundDrawIndirectBufferID = id;
fHWBoundDrawIndirectBufferIDIsValid = true;
}
break;
default:
SkDebugf("WARNING: buffer target 0x%x is not tracked by GrGLGpu.\n", type);
GR_GL_CALL(this->glInterface(), BindBuffer(type, id));
break;
}
}
void GrGLGpu::releaseBuffer(GrGLuint id, GrGLenum type) {
this->handleDirtyContext();
GL_CALL(DeleteBuffers(1, &id));
switch (type) {
case GR_GL_ARRAY_BUFFER:
this->notifyVertexBufferDelete(id);
break;
case GR_GL_ELEMENT_ARRAY_BUFFER:
this->notifyIndexBufferDelete(id);
break;
case GR_GL_TEXTURE_BUFFER:
if (fHWBoundTextureBufferIDIsValid && id == fHWBoundTextureBufferID) {
fHWBoundTextureBufferID = 0;
}
break;
case GR_GL_DRAW_INDIRECT_BUFFER:
if (fHWBoundDrawIndirectBufferIDIsValid && id == fHWBoundDrawIndirectBufferID) {
fHWBoundDrawIndirectBufferID = 0;
}
break;
// Index buffer state is tied to the vertex array.
if (kIndex_GrBufferType == type) {
this->bindVertexArray(0);
}
SkASSERT(type >= 0 && type <= kLast_GrBufferType);
auto& bufferState = fHWBufferState[type];
if (buffer->getUniqueID() != bufferState.fBoundBufferUniqueID) {
if (!buffer->isCPUBacked() || !bufferState.fBufferZeroKnownBound) {
GL_CALL(BindBuffer(bufferState.fGLTarget, buffer->bufferID()));
bufferState.fBufferZeroKnownBound = buffer->isCPUBacked();
}
bufferState.fBoundBufferUniqueID = buffer->getUniqueID();
}
return bufferState.fGLTarget;
}
void GrGLGpu::disableScissor() {
@ -2670,10 +2642,7 @@ void GrGLGpu::finishDrawTarget() {
SkASSERT(!fHWPLSEnabled);
SkASSERT(fMSAAEnabled != kYes_TriState);
GL_CALL(Enable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE));
this->stampRectUsingProgram(fPLSSetupProgram.fProgram,
SkRect::MakeXYWH(-100.0f, -100.0f, 0.01f, 0.01f),
fPLSSetupProgram.fPosXformUniform,
fPLSSetupProgram.fArrayBuffer);
this->stampPLSSetupRect(SkRect::MakeXYWH(-100.0f, -100.0f, 0.01f, 0.01f));
GL_CALL(Disable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE));
}
}
@ -2841,18 +2810,26 @@ void GrGLGpu::onDraw(const GrPipeline& pipeline,
#endif
}
void GrGLGpu::stampRectUsingProgram(GrGLuint program, const SkRect& bounds, GrGLint posXformUniform,
GrGLuint arrayBuffer) {
GL_CALL(UseProgram(program));
this->fHWGeometryState.setVertexArrayID(this, 0);
void GrGLGpu::stampPLSSetupRect(const SkRect& bounds) {
SkASSERT(this->glCaps().glslCaps()->plsPathRenderingSupport())
GrGLAttribArrayState* attribs =
this->fHWGeometryState.bindArrayAndBufferToDraw(this, arrayBuffer);
attribs->set(this, 0, arrayBuffer, kVec2f_GrVertexAttribType, 2 * sizeof(GrGLfloat), 0);
if (!fPLSSetupProgram.fProgram) {
if (!this->createPLSSetupProgram()) {
SkDebugf("Failed to create PLS setup program.\n");
return;
}
}
GL_CALL(UseProgram(fPLSSetupProgram.fProgram));
this->fHWVertexArrayState.setVertexArrayID(this, 0);
GrGLAttribArrayState* attribs = this->fHWVertexArrayState.bindInternalVertexArray(this);
attribs->set(this, 0, fPLSSetupProgram.fArrayBuffer, kVec2f_GrVertexAttribType,
2 * sizeof(GrGLfloat), 0);
attribs->disableUnusedArrays(this, 0x1);
GL_CALL(Uniform4f(posXformUniform, bounds.width(), bounds.height(), bounds.left(),
bounds.top()));
GL_CALL(Uniform4f(fPLSSetupProgram.fPosXformUniform, bounds.width(), bounds.height(),
bounds.left(), bounds.top()));
GrXferProcessor::BlendInfo blendInfo;
blendInfo.reset();
@ -2889,8 +2866,7 @@ void GrGLGpu::setupPixelLocalStorage(const GrPipeline& pipeline,
SkRect deviceBounds = SkRect::MakeXYWH(dx0, dy0, dx1 - dx0, dy1 - dy0);
GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM));
this->stampRectUsingProgram(fPLSSetupProgram.fProgram, deviceBounds,
fPLSSetupProgram.fPosXformUniform, fPLSSetupProgram.fArrayBuffer);
this->stampPLSSetupRect(deviceBounds);
}
void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target) {
@ -3562,8 +3538,9 @@ bool GrGLGpu::onCopySurface(GrSurface* dst,
// Don't prefer copying as a draw if the dst doesn't already have a FBO object.
bool preferCopy = SkToBool(dst->asRenderTarget());
if (preferCopy && src->asTexture()) {
this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
return true;
if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) {
return true;
}
}
if (can_copy_texsubimage(dst, src, this)) {
@ -3576,151 +3553,173 @@ bool GrGLGpu::onCopySurface(GrSurface* dst,
}
if (!preferCopy && src->asTexture()) {
this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
return true;
if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) {
return true;
}
}
return false;
}
void GrGLGpu::createCopyPrograms() {
for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
fCopyPrograms[i].fProgram = 0;
}
bool GrGLGpu::createCopyProgram(int progIdx) {
const GrGLSLCaps* glslCaps = this->glCaps().glslCaps();
const char* version = glslCaps->versionDeclString();
static const GrSLType kSamplerTypes[3] = { kSampler2D_GrSLType, kSamplerExternal_GrSLType,
kSampler2DRect_GrSLType };
SkASSERT(3 == SK_ARRAY_COUNT(fCopyPrograms));
for (int i = 0; i < 3; ++i) {
if (kSamplerExternal_GrSLType == kSamplerTypes[i] &&
!this->glCaps().glslCaps()->externalTextureSupport()) {
continue;
}
if (kSampler2DRect_GrSLType == kSamplerTypes[i] &&
!this->glCaps().rectangleTextureSupport()) {
continue;
}
GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
GrGLSLShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType,
GrShaderVar::kUniform_TypeModifier);
GrGLSLShaderVar uPosXform("u_posXform", kVec4f_GrSLType,
GrShaderVar::kUniform_TypeModifier);
GrGLSLShaderVar uTexture("u_texture", kSamplerTypes[i],
GrShaderVar::kUniform_TypeModifier);
GrGLSLShaderVar vTexCoord("v_texCoord", kVec2f_GrSLType,
GrShaderVar::kVaryingOut_TypeModifier);
GrGLSLShaderVar oFragColor("o_FragColor", kVec4f_GrSLType,
GrShaderVar::kOut_TypeModifier);
SkString vshaderTxt(version);
if (glslCaps->noperspectiveInterpolationSupport()) {
if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
vshaderTxt.appendf("#extension %s : require\n", extension);
}
vTexCoord.addModifier("noperspective");
}
aVertex.appendDecl(glslCaps, &vshaderTxt);
vshaderTxt.append(";");
uTexCoordXform.appendDecl(glslCaps, &vshaderTxt);
vshaderTxt.append(";");
uPosXform.appendDecl(glslCaps, &vshaderTxt);
vshaderTxt.append(";");
vTexCoord.appendDecl(glslCaps, &vshaderTxt);
vshaderTxt.append(";");
vshaderTxt.append(
"// Copy Program VS\n"
"void main() {"
" v_texCoord = a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw;"
" gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
" gl_Position.zw = vec2(0, 1);"
"}"
);
SkString fshaderTxt(version);
if (glslCaps->noperspectiveInterpolationSupport()) {
if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
fshaderTxt.appendf("#extension %s : require\n", extension);
}
}
if (kSamplerTypes[i] == kSamplerExternal_GrSLType) {
fshaderTxt.appendf("#extension %s : require\n",
glslCaps->externalTextureExtensionString());
}
GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *glslCaps,
&fshaderTxt);
vTexCoord.setTypeModifier(GrShaderVar::kVaryingIn_TypeModifier);
vTexCoord.appendDecl(glslCaps, &fshaderTxt);
fshaderTxt.append(";");
uTexture.appendDecl(glslCaps, &fshaderTxt);
fshaderTxt.append(";");
const char* fsOutName;
if (glslCaps->mustDeclareFragmentShaderOutput()) {
oFragColor.appendDecl(glslCaps, &fshaderTxt);
fshaderTxt.append(";");
fsOutName = oFragColor.c_str();
} else {
fsOutName = "gl_FragColor";
}
fshaderTxt.appendf(
"// Copy Program FS\n"
"void main() {"
" %s = %s(u_texture, v_texCoord);"
"}",
fsOutName,
GrGLSLTexture2DFunctionName(kVec2f_GrSLType, kSamplerTypes[i], this->glslGeneration())
);
GL_CALL_RET(fCopyPrograms[i].fProgram, CreateProgram());
const char* str;
GrGLint length;
str = vshaderTxt.c_str();
length = SkToInt(vshaderTxt.size());
GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[i].fProgram,
GR_GL_VERTEX_SHADER, &str, &length, 1,
&fStats);
str = fshaderTxt.c_str();
length = SkToInt(fshaderTxt.size());
GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[i].fProgram,
GR_GL_FRAGMENT_SHADER, &str, &length, 1,
&fStats);
GL_CALL(LinkProgram(fCopyPrograms[i].fProgram));
GL_CALL_RET(fCopyPrograms[i].fTextureUniform,
GetUniformLocation(fCopyPrograms[i].fProgram, "u_texture"));
GL_CALL_RET(fCopyPrograms[i].fPosXformUniform,
GetUniformLocation(fCopyPrograms[i].fProgram, "u_posXform"));
GL_CALL_RET(fCopyPrograms[i].fTexCoordXformUniform,
GetUniformLocation(fCopyPrograms[i].fProgram, "u_texCoordXform"));
GL_CALL(BindAttribLocation(fCopyPrograms[i].fProgram, 0, "a_vertex"));
GL_CALL(DeleteShader(vshader));
GL_CALL(DeleteShader(fshader));
if (kSamplerExternal_GrSLType == kSamplerTypes[progIdx] &&
!this->glCaps().glslCaps()->externalTextureSupport()) {
return false;
}
fCopyProgramArrayBuffer = 0;
GL_CALL(GenBuffers(1, &fCopyProgramArrayBuffer));
fHWGeometryState.setVertexBufferID(this, fCopyProgramArrayBuffer);
static const GrGLfloat vdata[] = {
0, 0,
0, 1,
1, 0,
1, 1
};
GL_ALLOC_CALL(this->glInterface(),
BufferData(GR_GL_ARRAY_BUFFER,
(GrGLsizeiptr) sizeof(vdata),
vdata, // data ptr
GR_GL_STATIC_DRAW));
if (kSampler2DRect_GrSLType == kSamplerTypes[progIdx] &&
!this->glCaps().rectangleTextureSupport()) {
return false;
}
if (!fCopyProgramArrayBuffer) {
static const GrGLfloat vdata[] = {
0, 0,
0, 1,
1, 0,
1, 1
};
fCopyProgramArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata), kVertex_GrBufferType,
kStatic_GrAccessPattern, vdata));
}
if (!fCopyProgramArrayBuffer) {
return false;
}
SkASSERT(!fCopyPrograms[progIdx].fProgram);
GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram());
if (!fCopyPrograms[progIdx].fProgram) {
return false;
}
const char* version = glslCaps->versionDeclString();
GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
GrGLSLShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType,
GrShaderVar::kUniform_TypeModifier);
GrGLSLShaderVar uPosXform("u_posXform", kVec4f_GrSLType,
GrShaderVar::kUniform_TypeModifier);
GrGLSLShaderVar uTexture("u_texture", kSamplerTypes[progIdx],
GrShaderVar::kUniform_TypeModifier);
GrGLSLShaderVar vTexCoord("v_texCoord", kVec2f_GrSLType,
GrShaderVar::kVaryingOut_TypeModifier);
GrGLSLShaderVar oFragColor("o_FragColor", kVec4f_GrSLType,
GrShaderVar::kOut_TypeModifier);
SkString vshaderTxt(version);
if (glslCaps->noperspectiveInterpolationSupport()) {
if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
vshaderTxt.appendf("#extension %s : require\n", extension);
}
vTexCoord.addModifier("noperspective");
}
aVertex.appendDecl(glslCaps, &vshaderTxt);
vshaderTxt.append(";");
uTexCoordXform.appendDecl(glslCaps, &vshaderTxt);
vshaderTxt.append(";");
uPosXform.appendDecl(glslCaps, &vshaderTxt);
vshaderTxt.append(";");
vTexCoord.appendDecl(glslCaps, &vshaderTxt);
vshaderTxt.append(";");
vshaderTxt.append(
"// Copy Program VS\n"
"void main() {"
" v_texCoord = a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw;"
" gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
" gl_Position.zw = vec2(0, 1);"
"}"
);
SkString fshaderTxt(version);
if (glslCaps->noperspectiveInterpolationSupport()) {
if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
fshaderTxt.appendf("#extension %s : require\n", extension);
}
}
if (kSamplerTypes[progIdx] == kSamplerExternal_GrSLType) {
fshaderTxt.appendf("#extension %s : require\n",
glslCaps->externalTextureExtensionString());
}
GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *glslCaps,
&fshaderTxt);
vTexCoord.setTypeModifier(GrShaderVar::kVaryingIn_TypeModifier);
vTexCoord.appendDecl(glslCaps, &fshaderTxt);
fshaderTxt.append(";");
uTexture.appendDecl(glslCaps, &fshaderTxt);
fshaderTxt.append(";");
const char* fsOutName;
if (glslCaps->mustDeclareFragmentShaderOutput()) {
oFragColor.appendDecl(glslCaps, &fshaderTxt);
fshaderTxt.append(";");
fsOutName = oFragColor.c_str();
} else {
fsOutName = "gl_FragColor";
}
fshaderTxt.appendf(
"// Copy Program FS\n"
"void main() {"
" %s = %s(u_texture, v_texCoord);"
"}",
fsOutName,
GrGLSLTexture2DFunctionName(kVec2f_GrSLType, kSamplerTypes[progIdx], this->glslGeneration())
);
const char* str;
GrGLint length;
str = vshaderTxt.c_str();
length = SkToInt(vshaderTxt.size());
GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
GR_GL_VERTEX_SHADER, &str, &length, 1,
&fStats);
str = fshaderTxt.c_str();
length = SkToInt(fshaderTxt.size());
GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
GR_GL_FRAGMENT_SHADER, &str, &length, 1,
&fStats);
GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram));
GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform,
GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));
GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));
GL_CALL(DeleteShader(vshader));
GL_CALL(DeleteShader(fshader));
return true;
}
void GrGLGpu::createWireRectProgram() {
bool GrGLGpu::createWireRectProgram() {
if (!fWireRectArrayBuffer) {
static const GrGLfloat vdata[] = {
0, 0,
0, 1,
1, 1,
1, 0
};
fWireRectArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata), kVertex_GrBufferType,
kStatic_GrAccessPattern, vdata));
if (!fWireRectArrayBuffer) {
return false;
}
}
SkASSERT(!fWireRectProgram.fProgram);
GL_CALL_RET(fWireRectProgram.fProgram, CreateProgram());
if (!fWireRectProgram.fProgram) {
return false;
}
GrGLSLShaderVar uColor("u_color", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
GrGLSLShaderVar uRect("u_rect", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
@ -3768,7 +3767,6 @@ void GrGLGpu::createWireRectProgram() {
uColor.c_str()
);
GL_CALL_RET(fWireRectProgram.fProgram, CreateProgram());
const char* str;
GrGLint length;
@ -3794,19 +3792,8 @@ void GrGLGpu::createWireRectProgram() {
GL_CALL(DeleteShader(vshader));
GL_CALL(DeleteShader(fshader));
GL_CALL(GenBuffers(1, &fWireRectArrayBuffer));
fHWGeometryState.setVertexBufferID(this, fWireRectArrayBuffer);
static const GrGLfloat vdata[] = {
0, 0,
0, 1,
1, 1,
1, 0,
};
GL_ALLOC_CALL(this->glInterface(),
BufferData(GR_GL_ARRAY_BUFFER,
(GrGLsizeiptr) sizeof(vdata),
vdata, // data ptr
GR_GL_STATIC_DRAW));
return true;
}
void GrGLGpu::drawDebugWireRect(GrRenderTarget* rt, const SkIRect& rect, GrColor color) {
@ -3815,7 +3802,10 @@ void GrGLGpu::drawDebugWireRect(GrRenderTarget* rt, const SkIRect& rect, GrColor
this->handleDirtyContext();
if (!fWireRectProgram.fProgram) {
this->createWireRectProgram();
if (!this->createWireRectProgram()) {
SkDebugf("Failed to create wire rect program.\n");
return;
}
}
int w = rt->width();
@ -3851,10 +3841,9 @@ void GrGLGpu::drawDebugWireRect(GrRenderTarget* rt, const SkIRect& rect, GrColor
GL_CALL(UseProgram(fWireRectProgram.fProgram));
fHWProgramID = fWireRectProgram.fProgram;
fHWGeometryState.setVertexArrayID(this, 0);
fHWVertexArrayState.setVertexArrayID(this, 0);
GrGLAttribArrayState* attribs =
fHWGeometryState.bindArrayAndBufferToDraw(this, fWireRectArrayBuffer);
GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
attribs->set(this, 0, fWireRectArrayBuffer, kVec2f_GrVertexAttribType, 2 * sizeof(GrGLfloat),
0);
attribs->disableUnusedArrays(this, 0x1);
@ -3877,14 +3866,23 @@ void GrGLGpu::drawDebugWireRect(GrRenderTarget* rt, const SkIRect& rect, GrColor
}
void GrGLGpu::copySurfaceAsDraw(GrSurface* dst,
bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst,
GrSurface* src,
const SkIRect& srcRect,
const SkIPoint& dstPoint) {
GrGLTexture* srcTex = static_cast<GrGLTexture*>(src->asTexture());
int progIdx = TextureTargetToCopyProgramIdx(srcTex->target());
if (!fCopyPrograms[progIdx].fProgram) {
if (!this->createCopyProgram(progIdx)) {
SkDebugf("Failed to create copy program.\n");
return false;
}
}
int w = srcRect.width();
int h = srcRect.height();
GrGLTexture* srcTex = static_cast<GrGLTexture*>(src->asTexture());
GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kNone_FilterMode);
this->bindTexture(0, params, true, srcTex);
@ -3895,15 +3893,12 @@ void GrGLGpu::copySurfaceAsDraw(GrSurface* dst,
SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, w, h);
int progIdx = TextureTargetToCopyProgramIdx(srcTex->target());
GL_CALL(UseProgram(fCopyPrograms[progIdx].fProgram));
fHWProgramID = fCopyPrograms[progIdx].fProgram;
fHWGeometryState.setVertexArrayID(this, 0);
fHWVertexArrayState.setVertexArrayID(this, 0);
GrGLAttribArrayState* attribs =
fHWGeometryState.bindArrayAndBufferToDraw(this, fCopyProgramArrayBuffer);
GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
attribs->set(this, 0, fCopyProgramArrayBuffer, kVec2f_GrVertexAttribType, 2 * sizeof(GrGLfloat),
0);
attribs->disableUnusedArrays(this, 0x1);
@ -3959,6 +3954,7 @@ void GrGLGpu::copySurfaceAsDraw(GrSurface* dst,
this->unbindTextureFBOForCopy(GR_GL_FRAMEBUFFER, dst);
this->didWriteToSurface(dst, &dstRect);
return true;
}
void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst,
@ -4196,52 +4192,27 @@ void GrGLGpu::resetShaderCacheForTesting() const {
}
///////////////////////////////////////////////////////////////////////////////
GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw(
GrGLGpu* gpu,
const GrGLBuffer* vbuffer,
const GrGLBuffer* ibuffer) {
SkASSERT(vbuffer);
GrGLuint vbufferID = vbuffer->bufferID();
GrGLuint* ibufferIDPtr = nullptr;
GrGLuint ibufferID;
if (ibuffer) {
ibufferID = ibuffer->bufferID();
ibufferIDPtr = &ibufferID;
}
return this->internalBind(gpu, vbufferID, ibufferIDPtr);
}
GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBufferToDraw(GrGLGpu* gpu,
GrGLuint vbufferID) {
return this->internalBind(gpu, vbufferID, nullptr);
}
GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw(GrGLGpu* gpu,
GrGLuint vbufferID,
GrGLuint ibufferID) {
return this->internalBind(gpu, vbufferID, &ibufferID);
}
GrGLAttribArrayState* GrGLGpu::HWGeometryState::internalBind(GrGLGpu* gpu,
GrGLuint vbufferID,
GrGLuint* ibufferID) {
GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu,
const GrGLBuffer* ibuf) {
GrGLAttribArrayState* attribState;
if (gpu->glCaps().isCoreProfile() && 0 != vbufferID) {
if (!fVBOVertexArray) {
if (gpu->glCaps().isCoreProfile()) {
if (!fCoreProfileVertexArray) {
GrGLuint arrayID;
GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
int attrCount = gpu->glCaps().maxVertexAttributes();
fVBOVertexArray = new GrGLVertexArray(arrayID, attrCount);
fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount);
}
if (ibufferID) {
attribState = fVBOVertexArray->bindWithIndexBuffer(gpu, *ibufferID);
if (ibuf) {
attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf);
} else {
attribState = fVBOVertexArray->bind(gpu);
attribState = fCoreProfileVertexArray->bind(gpu);
}
} else {
if (ibufferID) {
this->setIndexBufferIDOnDefaultVertexArray(gpu, *ibufferID);
if (ibuf) {
// bindBuffer implicitly binds VAO 0 when binding an index buffer.
gpu->bindBuffer(kIndex_GrBufferType, ibuf);
} else {
this->setVertexArrayID(gpu, 0);
}

View File

@ -32,7 +32,7 @@ class GrSwizzle;
#define PROGRAM_CACHE_STATS
#endif
class GrGLGpu : public GrGpu {
class GrGLGpu final : public GrGpu {
public:
static GrGpu* Create(GrBackendContext backendContext, const GrContextOptions& options,
GrContext* context);
@ -73,31 +73,20 @@ public:
// These functions should be used to bind GL objects. They track the GL state and skip redundant
// bindings. Making the equivalent glBind calls directly will confuse the state tracking.
void bindVertexArray(GrGLuint id) {
fHWGeometryState.setVertexArrayID(this, id);
}
void bindIndexBufferAndDefaultVertexArray(GrGLuint id) {
fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, id);
}
void bindVertexBuffer(GrGLuint id) {
fHWGeometryState.setVertexBufferID(this, id);
fHWVertexArrayState.setVertexArrayID(this, id);
}
// These callbacks update state tracking when GL objects are deleted. They are called from
// GrGLResource onRelease functions.
void notifyVertexArrayDelete(GrGLuint id) {
fHWGeometryState.notifyVertexArrayDelete(id);
}
void notifyVertexBufferDelete(GrGLuint id) {
fHWGeometryState.notifyVertexBufferDelete(id);
}
void notifyIndexBufferDelete(GrGLuint id) {
fHWGeometryState.notifyIndexBufferDelete(id);
fHWVertexArrayState.notifyVertexArrayDelete(id);
}
// id and type (GL_ARRAY_BUFFER, GL_ELEMENT_ARRAY_BUFFER, etc.) of buffer to bind
void bindBuffer(GrGLuint id, GrGLenum type);
void releaseBuffer(GrGLuint id, GrGLenum type);
// Binds a buffer to the GL target corresponding to 'type', updates internal state tracking, and
// returns the GL target the buffer was bound to.
// When 'type' is kIndex_GrBufferType, this function will also implicitly bind the default VAO.
// If the caller wishes to bind an index buffer to a specific VAO, it can call glBind directly.
GrGLenum bindBuffer(GrBufferType type, const GrGLBuffer*);
const GrGLContext* glContextForTesting() const override {
return &this->glContext();
@ -138,7 +127,7 @@ private:
GrGpuResource::LifeCycle lifeCycle,
const SkTArray<GrMipLevel>& texels) override;
GrBuffer* onCreateBuffer(GrBufferType, size_t size, GrAccessPattern) override;
GrBuffer* onCreateBuffer(size_t size, GrBufferType intendedType, GrAccessPattern) override;
GrTexture* onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) override;
GrRenderTarget* onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
GrWrapOwnership) override;
@ -231,7 +220,7 @@ private:
bool hasExtension(const char* ext) const { return fGLContext->hasExtension(ext); }
void copySurfaceAsDraw(GrSurface* dst,
bool copySurfaceAsDraw(GrSurface* dst,
GrSurface* src,
const SkIRect& srcRect,
const SkIPoint& dstPoint);
@ -244,8 +233,7 @@ private:
const SkIRect& srcRect,
const SkIPoint& dstPoint);
void stampRectUsingProgram(GrGLuint program, const SkRect& bounds, GrGLint posXformUniform,
GrGLuint arrayBuffer);
void stampPLSSetupRect(const SkRect& bounds);
void setupPixelLocalStorage(const GrPipeline&, const GrPrimitiveProcessor&);
@ -373,11 +361,9 @@ private:
SkAutoTUnref<GrGLContext> fGLContext;
void createCopyPrograms();
void createWireRectProgram();
void createUnitRectBuffer();
void createPLSSetupProgram();
bool createCopyProgram(int progIdx);
bool createWireRectProgram();
bool createPLSSetupProgram();
// GL program-related state
ProgramCache* fProgramCache;
@ -412,22 +398,19 @@ private:
GrGLIRect fHWViewport;
/**
* Tracks bound vertex and index buffers and vertex attrib array state.
* Tracks vertex attrib array state.
*/
class HWGeometryState {
class HWVertexArrayState {
public:
HWGeometryState() { fVBOVertexArray = nullptr; this->invalidate(); }
HWVertexArrayState() : fCoreProfileVertexArray(nullptr) { this->invalidate(); }
~HWGeometryState() { delete fVBOVertexArray; }
~HWVertexArrayState() { delete fCoreProfileVertexArray; }
void invalidate() {
fBoundVertexArrayIDIsValid = false;
fBoundVertexBufferIDIsValid = false;
fDefaultVertexArrayBoundIndexBufferID = false;
fDefaultVertexArrayBoundIndexBufferIDIsValid = false;
fDefaultVertexArrayAttribState.invalidate();
if (fVBOVertexArray) {
fVBOVertexArray->invalidateCachedState();
if (fCoreProfileVertexArray) {
fCoreProfileVertexArray->invalidateCachedState();
}
}
@ -450,89 +433,41 @@ private:
}
}
void notifyVertexBufferDelete(GrGLuint id) {
if (fBoundVertexBufferIDIsValid && id == fBoundVertexBufferID) {
fBoundVertexBufferID = 0;
}
if (fVBOVertexArray) {
fVBOVertexArray->notifyVertexBufferDelete(id);
}
fDefaultVertexArrayAttribState.notifyVertexBufferDelete(id);
}
void notifyIndexBufferDelete(GrGLuint id) {
if (fDefaultVertexArrayBoundIndexBufferIDIsValid &&
id == fDefaultVertexArrayBoundIndexBufferID) {
fDefaultVertexArrayBoundIndexBufferID = 0;
}
if (fVBOVertexArray) {
fVBOVertexArray->notifyIndexBufferDelete(id);
}
}
void setVertexBufferID(GrGLGpu* gpu, GrGLuint id) {
if (!fBoundVertexBufferIDIsValid || id != fBoundVertexBufferID) {
GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ARRAY_BUFFER, id));
fBoundVertexBufferIDIsValid = true;
fBoundVertexBufferID = id;
}
}
/**
* Binds the default vertex array and binds the index buffer. This is used when binding
* an index buffer in order to update it.
* Binds the vertex array that should be used for internal draws, and returns its attrib
* state. This binds the default VAO (ID=zero) unless we are on a core profile, in which
* case we use a dummy array instead.
*
* If an index buffer is provided, it will be bound to the vertex array. Otherwise the
* index buffer binding will be left unchanged.
*
* The returned GrGLAttribArrayState should be used to set vertex attribute arrays.
*/
void setIndexBufferIDOnDefaultVertexArray(GrGLGpu* gpu, GrGLuint id) {
this->setVertexArrayID(gpu, 0);
if (!fDefaultVertexArrayBoundIndexBufferIDIsValid ||
id != fDefaultVertexArrayBoundIndexBufferID) {
GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, id));
fDefaultVertexArrayBoundIndexBufferIDIsValid = true;
fDefaultVertexArrayBoundIndexBufferID = id;
}
}
/**
* Binds the vertex array object that should be used to render from the vertex buffer.
* The vertex array is bound and its attrib array state object is returned. The vertex
* buffer is bound. The index buffer (if non-nullptr) is bound to the vertex array. The
* returned GrGLAttribArrayState should be used to set vertex attribute arrays.
*/
GrGLAttribArrayState* bindArrayAndBuffersToDraw(GrGLGpu* gpu,
const GrGLBuffer* vbuffer,
const GrGLBuffer* ibuffer);
/** Variants of the above that take GL buffer IDs. Note that 0 does not imply that a
buffer won't be bound. The "default buffer" will be bound, which is used for client-side
array rendering. */
GrGLAttribArrayState* bindArrayAndBufferToDraw(GrGLGpu* gpu, GrGLuint vbufferID);
GrGLAttribArrayState* bindArrayAndBuffersToDraw(GrGLGpu* gpu,
GrGLuint vbufferID,
GrGLuint ibufferID);
GrGLAttribArrayState* bindInternalVertexArray(GrGLGpu*, const GrGLBuffer* ibuff = nullptr);
private:
GrGLAttribArrayState* internalBind(GrGLGpu* gpu, GrGLuint vbufferID, GrGLuint* ibufferID);
GrGLuint fBoundVertexArrayID;
GrGLuint fBoundVertexBufferID;
bool fBoundVertexArrayIDIsValid;
bool fBoundVertexBufferIDIsValid;
GrGLuint fDefaultVertexArrayBoundIndexBufferID;
bool fDefaultVertexArrayBoundIndexBufferIDIsValid;
// We return a non-const pointer to this from bindArrayAndBuffersToDraw when vertex array 0
// is bound. However, this class is internal to GrGLGpu and this object never leaks out of
// GrGLGpu.
GrGLAttribArrayState fDefaultVertexArrayAttribState;
// This is used when we're using a core profile and the vertices are in a VBO.
GrGLVertexArray* fVBOVertexArray;
} fHWGeometryState;
// This is used when we're using a core profile.
GrGLVertexArray* fCoreProfileVertexArray;
} fHWVertexArrayState;
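As a hedged usage sketch of the new HWVertexArrayState (the helper name and the single vec2 attribute are placeholders, not part of this change), an internal draw path can bind the managed VAO and then describe its attributes through the returned state object:

    // Hypothetical helper on GrGLGpu; vertexBuffer/indexBuffer are assumed inputs.
    void GrGLGpu::setupAttribArraysForDraw(const GrGLBuffer* vertexBuffer,
                                           const GrGLBuffer* indexBuffer /* may be null */) {
        GrGLAttribArrayState* attribState =
                fHWVertexArrayState.bindInternalVertexArray(this, indexBuffer);
        // One vec2 position attribute at offset 0; real callers walk the primitive
        // processor's attribute list instead.
        GrGLsizei stride = 2 * sizeof(float);
        attribState->set(this, 0 /*attribIndex*/, vertexBuffer, kVec2f_GrVertexAttribType,
                         stride, nullptr /*offset*/);
        attribState->disableUnusedArrays(this, 0x1 /*usedMask: only attrib 0*/);
    }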
GrGLuint fHWBoundTextureBufferID;
GrGLuint fHWBoundDrawIndirectBufferID;
bool fHWBoundTextureBufferIDIsValid;
bool fHWBoundDrawIndirectBufferIDIsValid;
struct {
GrGLenum fGLTarget;
uint32_t fBoundBufferUniqueID;
bool fBufferZeroKnownBound;
void invalidate() {
fBoundBufferUniqueID = SK_InvalidUniqueID;
fBufferZeroKnownBound = false;
}
} fHWBufferState[kGrBufferTypeCount];
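With the cache above keyed on a unique ID per intended buffer type, dropping all knowledge of GL's buffer bindings reduces to invalidating the table. A hedged sketch, assuming the usual onResetContext hook (the body shown is illustrative, not this change's actual implementation):

    void GrGLGpu::onResetContext(uint32_t resetBits) {
        for (int i = 0; i < kGrBufferTypeCount; ++i) {
            fHWBufferState[i].invalidate();   // forget cached buffer bindings
        }
        fHWVertexArrayState.invalidate();     // forget cached VAO/attrib state
        // ... the rest of the tracked GL state is reset elsewhere.
    }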
struct {
GrBlendEquation fEquation;
@ -575,14 +510,14 @@ private:
GrGLint fTexCoordXformUniform;
GrGLint fPosXformUniform;
} fCopyPrograms[3];
GrGLuint fCopyProgramArrayBuffer;
SkAutoTUnref<GrGLBuffer> fCopyProgramArrayBuffer;
struct {
GrGLuint fProgram;
GrGLint fColorUniform;
GrGLint fRectUniform;
} fWireRectProgram;
GrGLuint fWireRectArrayBuffer;
SkAutoTUnref<GrGLBuffer> fWireRectArrayBuffer;
static int TextureTargetToCopyProgramIdx(GrGLenum target) {
switch (target) {
@ -599,9 +534,9 @@ private:
}
struct {
GrGLuint fProgram;
GrGLint fPosXformUniform;
GrGLuint fArrayBuffer;
GrGLuint fProgram;
GrGLint fPosXformUniform;
SkAutoTUnref<GrGLBuffer> fArrayBuffer;
} fPLSSetupProgram;
bool fHWPLSEnabled;


@ -6,6 +6,7 @@
*/
#include "GrGLVertexArray.h"
#include "GrGLBuffer.h"
#include "GrGLGpu.h"
struct AttribLayout {
@ -38,7 +39,7 @@ GR_STATIC_ASSERT(8 == kUint_GrVertexAttribType);
void GrGLAttribArrayState::set(GrGLGpu* gpu,
int index,
GrGLuint vertexBufferID,
const GrGLBuffer* vertexBuffer,
GrVertexAttribType type,
GrGLsizei stride,
GrGLvoid* offset) {
@ -49,13 +50,11 @@ void GrGLAttribArrayState::set(GrGLGpu* gpu,
array->fEnableIsValid = true;
array->fEnabled = true;
}
if (!array->fAttribPointerIsValid ||
array->fVertexBufferID != vertexBufferID ||
if (array->fVertexBufferUniqueID != vertexBuffer->getUniqueID() ||
array->fType != type ||
array->fStride != stride ||
array->fOffset != offset) {
gpu->bindVertexBuffer(vertexBufferID);
gpu->bindBuffer(kVertex_GrBufferType, vertexBuffer);
const AttribLayout& layout = gLayouts[type];
if (!GrVertexAttribTypeIsIntType(type)) {
GR_GL_CALL(gpu->glInterface(), VertexAttribPointer(index,
@ -73,8 +72,7 @@ void GrGLAttribArrayState::set(GrGLGpu* gpu,
stride,
offset));
}
array->fAttribPointerIsValid = true;
array->fVertexBufferID = vertexBufferID;
array->fVertexBufferUniqueID = vertexBuffer->getUniqueID();
array->fType = type;
array->fStride = stride;
array->fOffset = offset;
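A hedged illustration of what the unique-ID check above buys in practice ('gpu', 'attribState' and 'vbuf' are assumed to exist): repeating a call with identical arguments finds the cached fVertexBufferUniqueID, type, stride and offset unchanged and issues no GL commands.

    attribState->set(gpu, 0, vbuf, kVec2f_GrVertexAttribType, 2 * sizeof(float), nullptr);
    attribState->set(gpu, 0, vbuf, kVec2f_GrVertexAttribType, 2 * sizeof(float), nullptr); // no-op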
@ -103,7 +101,7 @@ void GrGLAttribArrayState::disableUnusedArrays(const GrGLGpu* gpu, uint64_t used
GrGLVertexArray::GrGLVertexArray(GrGLint id, int attribCount)
: fID(id)
, fAttribArrays(attribCount)
, fIndexBufferIDIsValid(false) {
, fIndexBufferUniqueID(SK_InvalidUniqueID) {
}
GrGLAttribArrayState* GrGLVertexArray::bind(GrGLGpu* gpu) {
@ -114,25 +112,16 @@ GrGLAttribArrayState* GrGLVertexArray::bind(GrGLGpu* gpu) {
return &fAttribArrays;
}
GrGLAttribArrayState* GrGLVertexArray::bindWithIndexBuffer(GrGLGpu* gpu, GrGLuint ibufferID) {
GrGLAttribArrayState* GrGLVertexArray::bindWithIndexBuffer(GrGLGpu* gpu, const GrGLBuffer* ibuff) {
GrGLAttribArrayState* state = this->bind(gpu);
if (state) {
if (!fIndexBufferIDIsValid || ibufferID != fIndexBufferID) {
GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, ibufferID));
fIndexBufferIDIsValid = true;
fIndexBufferID = ibufferID;
}
if (state && fIndexBufferUniqueID != ibuff->getUniqueID()) {
GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, ibuff->bufferID()));
fIndexBufferUniqueID = ibuff->getUniqueID();
}
return state;
}
void GrGLVertexArray::notifyIndexBufferDelete(GrGLuint bufferID) {
if (fIndexBufferIDIsValid && bufferID == fIndexBufferID) {
fIndexBufferID = 0;
}
}
void GrGLVertexArray::invalidateCachedState() {
fAttribArrays.invalidate();
fIndexBufferIDIsValid = false;
fIndexBufferUniqueID = SK_InvalidUniqueID;
}
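A hedged illustration of why the notifyIndexBufferDelete() hook above can be deleted outright ('oldIbuff', 'newIbuff', 'vertexArray' and 'gpu' are assumed names): unique resource IDs are never recycled, so a stale cache entry cannot be mistaken for a newly created buffer that happens to reuse the same GL buffer name.

    fIndexBufferUniqueID = oldIbuff->getUniqueID();        // cached when 'oldIbuff' was bound
    // ... 'oldIbuff' is destroyed; no callback into the vertex array is made ...
    attribState = vertexArray->bindWithIndexBuffer(gpu, newIbuff);
    // newIbuff->getUniqueID() can never equal the stale cached value, even if GL reused
    // the buffer name, so the element array buffer is re-bound and the cache refreshed.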


@ -13,6 +13,7 @@
#include "gl/GrGLTypes.h"
#include "SkTArray.h"
class GrGLBuffer;
class GrGLGpu;
/**
@ -39,7 +40,7 @@ public:
*/
void set(GrGLGpu*,
int attribIndex,
GrGLuint vertexBufferID,
const GrGLBuffer* vertexBuffer,
GrVertexAttribType type,
GrGLsizei stride,
GrGLvoid* offset);
@ -57,16 +58,6 @@ public:
}
}
void notifyVertexBufferDelete(GrGLuint id) {
int count = fAttribArrayStates.count();
for (int i = 0; i < count; ++i) {
if (fAttribArrayStates[i].fAttribPointerIsValid &&
id == fAttribArrayStates[i].fVertexBufferID) {
fAttribArrayStates[i].invalidate();
}
}
}
/**
* The number of attrib arrays that this object is configured to track.
*/
@ -79,13 +70,12 @@ private:
struct AttribArrayState {
void invalidate() {
fEnableIsValid = false;
fAttribPointerIsValid = false;
fVertexBufferUniqueID = SK_InvalidUniqueID;
}
bool fEnableIsValid;
bool fAttribPointerIsValid;
bool fEnabled;
GrGLuint fVertexBufferID;
uint32_t fVertexBufferUniqueID;
GrVertexAttribType fType;
GrGLsizei fStride;
GrGLvoid* fOffset;
@ -113,13 +103,7 @@ public:
* This is a version of the above function that also binds an index buffer to the vertex
* array object.
*/
GrGLAttribArrayState* bindWithIndexBuffer(GrGLGpu* gpu, GrGLuint indexBufferID);
void notifyIndexBufferDelete(GrGLuint bufferID);
void notifyVertexBufferDelete(GrGLuint id) {
fAttribArrays.notifyVertexBufferDelete(id);
}
GrGLAttribArrayState* bindWithIndexBuffer(GrGLGpu* gpu, const GrGLBuffer* indexBuffer);
GrGLuint arrayID() const { return fID; }
@ -128,8 +112,7 @@ public:
private:
GrGLuint fID;
GrGLAttribArrayState fAttribArrays;
GrGLuint fIndexBufferID;
bool fIndexBufferIDIsValid;
uint32_t fIndexBufferUniqueID;
};
#endif


@ -172,7 +172,7 @@ void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
}
///////////////////////////////////////////////////////////////////////////////
GrBuffer* GrVkGpu::onCreateBuffer(GrBufferType type, size_t size, GrAccessPattern accessPattern) {
GrBuffer* GrVkGpu::onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern accessPattern) {
switch (type) {
case kVertex_GrBufferType:
SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
@ -1359,8 +1359,8 @@ bool GrVkGpu::onReadPixels(GrSurface* surface,
false);
GrVkTransferBuffer* transferBuffer =
static_cast<GrVkTransferBuffer*>(this->createBuffer(kXferGpuToCpu_GrBufferType,
rowBytes * height,
static_cast<GrVkTransferBuffer*>(this->createBuffer(rowBytes * height,
kXferGpuToCpu_GrBufferType,
kStream_GrAccessPattern));
bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
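The two hunks above show the reordered creation signature: size first, then the intended type, then the access pattern. As a hedged, generic sketch of how a backend's override might dispatch on the intended type (MyGpu and its helper functions are hypothetical, not Skia classes):

    GrBuffer* MyGpu::onCreateBuffer(size_t size, GrBufferType intendedType,
                                    GrAccessPattern accessPattern) {
        switch (intendedType) {
            case kVertex_GrBufferType:
            case kIndex_GrBufferType:
                // Draw data: prefer device-local memory.
                return this->newDeviceLocalBuffer(size, intendedType, accessPattern);
            case kXferCpuToGpu_GrBufferType:
            case kXferGpuToCpu_GrBufferType:
                // Transfer buffers: host-visible staging memory.
                return this->newStagingBuffer(size, intendedType);
            default:
                // Other intended types are omitted from this sketch.
                return nullptr;
        }
    }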


@ -131,7 +131,7 @@ private:
GrWrapOwnership) override;
GrRenderTarget* onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&) override { return NULL; }
GrBuffer* onCreateBuffer(GrBufferType, size_t size, GrAccessPattern) override;
GrBuffer* onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern) override;
void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) override;


@ -10,7 +10,7 @@
GrVkIndexBuffer::GrVkIndexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
const GrVkBuffer::Resource* bufferResource)
: INHERITED(gpu, kIndex_GrBufferType, desc.fSizeInBytes,
: INHERITED(gpu, desc.fSizeInBytes, kIndex_GrBufferType,
desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern, false)
, GrVkBuffer(desc, bufferResource) {
this->registerWithCache();


@ -31,9 +31,10 @@ GrVkTransferBuffer* GrVkTransferBuffer::Create(GrVkGpu* gpu, size_t size, GrVkBu
GrVkTransferBuffer::GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
const GrVkBuffer::Resource* bufferResource)
: INHERITED(gpu, kCopyRead_Type == desc.fType ?
kXferCpuToGpu_GrBufferType : kXferGpuToCpu_GrBufferType,
desc.fSizeInBytes, kStream_GrAccessPattern, false)
: INHERITED(gpu, desc.fSizeInBytes,
kCopyRead_Type == desc.fType ?
kXferCpuToGpu_GrBufferType : kXferGpuToCpu_GrBufferType,
kStream_GrAccessPattern, false)
, GrVkBuffer(desc, bufferResource) {
this->registerWithCache();
}


@ -10,7 +10,7 @@
GrVkVertexBuffer::GrVkVertexBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc,
const GrVkBuffer::Resource* bufferResource)
: INHERITED(gpu, kVertex_GrBufferType, desc.fSizeInBytes,
: INHERITED(gpu, desc.fSizeInBytes, kVertex_GrBufferType,
desc.fDynamic ? kDynamic_GrAccessPattern : kStatic_GrAccessPattern, false)
, GrVkBuffer(desc, bufferResource) {
this->registerWithCache();


@ -345,7 +345,7 @@ private:
return nullptr;
}
GrBuffer* onCreateBuffer(GrBufferType, size_t, GrAccessPattern) override { return nullptr; }
GrBuffer* onCreateBuffer(size_t, GrBufferType, GrAccessPattern) override { return nullptr; }
void onClear(GrRenderTarget*, const SkIRect& rect, GrColor color) override {}