From dbf7072a5901bfb8d93ac71cf6c3f143de15b9ae Mon Sep 17 00:00:00 2001 From: Brian Salomon Date: Thu, 7 Feb 2019 11:31:24 -0500 Subject: [PATCH] Use different classes for client side arrays and GPU buffer objects. GrBuffer is a base class for GrGpuBuffer and GrCpuBuffer. GrGpuBuffer is a GrGpuResource and the others are not. This allows GrCpuBuffers to exist outside of the GrGpuResourceCache. Also removes flags from GrResourceProvider buffer factory function. The only flag still in use was kRequireGpuMemory. Now CPU buffers are made without using GrResourceProvider. Change-Id: I82670d1316e28fd6331ca36b26c8c4ead33846f9 Reviewed-on: https://skia-review.googlesource.com/c/188823 Commit-Queue: Brian Salomon Reviewed-by: Robert Phillips --- gm/clockwise.cpp | 3 +- gm/fwidth_squircle.cpp | 3 +- gn/gpu.gni | 4 +- include/gpu/GrGpuResource.h | 3 +- include/private/GrTypesPriv.h | 13 -- samplecode/SampleCCPRGeometry.cpp | 6 +- src/gpu/GrBuffer.cpp | 72 ---------- src/gpu/GrBuffer.h | 127 +++--------------- src/gpu/GrBufferAllocPool.cpp | 108 ++++++++------- src/gpu/GrBufferAllocPool.h | 3 +- src/gpu/GrCpuBuffer.h | 36 +++++ src/gpu/GrGpu.cpp | 9 +- src/gpu/GrGpu.h | 14 +- src/gpu/GrGpuBuffer.cpp | 38 ++++++ src/gpu/GrGpuBuffer.h | 113 ++++++++++++++++ src/gpu/GrMesh.h | 10 +- src/gpu/GrOnFlushResourceProvider.cpp | 16 +-- src/gpu/GrOnFlushResourceProvider.h | 6 +- src/gpu/GrProcessor.h | 2 +- src/gpu/GrResourceProvider.cpp | 65 ++++----- src/gpu/GrResourceProvider.h | 47 +++---- src/gpu/ccpr/GrCCCoverageProcessor.h | 10 +- src/gpu/ccpr/GrCCCoverageProcessor_GSImpl.cpp | 2 +- src/gpu/ccpr/GrCCCoverageProcessor_VSImpl.cpp | 2 +- src/gpu/ccpr/GrCCFiller.h | 2 +- src/gpu/ccpr/GrCCPathProcessor.cpp | 4 +- src/gpu/ccpr/GrCCPathProcessor.h | 4 +- src/gpu/ccpr/GrCCPerFlushResources.h | 12 +- src/gpu/ccpr/GrCCStroker.cpp | 4 +- src/gpu/ccpr/GrCCStroker.h | 4 +- src/gpu/gl/GrGLBuffer.cpp | 34 ++--- src/gpu/gl/GrGLBuffer.h | 8 +- src/gpu/gl/GrGLGpu.cpp | 54 +++++--- 
src/gpu/gl/GrGLGpu.h | 6 +- src/gpu/gl/GrGLVertexArray.cpp | 46 +++++-- src/gpu/gl/GrGLVertexArray.h | 4 +- src/gpu/mock/GrMockBuffer.h | 8 +- src/gpu/mock/GrMockGpu.cpp | 6 +- src/gpu/mock/GrMockGpu.h | 6 +- src/gpu/mtl/GrMtlBuffer.h | 6 +- src/gpu/mtl/GrMtlGpu.h | 5 +- src/gpu/mtl/GrMtlGpu.mm | 4 +- src/gpu/mtl/GrMtlGpuCommandBuffer.mm | 12 +- src/gpu/mtl/GrMtlPipelineState.mm | 4 +- src/gpu/ops/GrAtlasTextOp.cpp | 3 +- src/gpu/ops/GrDrawVerticesOp.cpp | 23 ++-- src/gpu/ops/GrMeshDrawOp.cpp | 6 +- src/gpu/ops/GrMeshDrawOp.h | 7 +- src/gpu/ops/GrQuadPerEdgeAA.cpp | 6 +- src/gpu/ops/GrRegionOp.cpp | 2 +- src/gpu/ops/GrSmallPathRenderer.cpp | 2 +- src/gpu/ops/GrStrokeRectOp.cpp | 8 +- src/gpu/ops/GrTessellatingPathRenderer.cpp | 13 +- src/gpu/vk/GrVkGpu.cpp | 8 +- src/gpu/vk/GrVkGpu.h | 6 +- src/gpu/vk/GrVkGpuCommandBuffer.cpp | 23 ++-- src/gpu/vk/GrVkGpuCommandBuffer.h | 6 +- src/gpu/vk/GrVkIndexBuffer.cpp | 2 +- src/gpu/vk/GrVkIndexBuffer.h | 7 +- src/gpu/vk/GrVkTransferBuffer.h | 9 +- src/gpu/vk/GrVkVertexBuffer.cpp | 2 +- src/gpu/vk/GrVkVertexBuffer.h | 6 +- tests/GrMeshTest.cpp | 3 +- tests/GrPipelineDynamicStateTest.cpp | 5 +- tests/ProcessorTest.cpp | 7 +- tests/TransferPixelsTest.cpp | 5 +- 66 files changed, 569 insertions(+), 535 deletions(-) delete mode 100644 src/gpu/GrBuffer.cpp create mode 100644 src/gpu/GrCpuBuffer.h create mode 100644 src/gpu/GrGpuBuffer.cpp create mode 100644 src/gpu/GrGpuBuffer.h diff --git a/gm/clockwise.cpp b/gm/clockwise.cpp index 9b1ef3fa99..aaeaaaa692 100644 --- a/gm/clockwise.cpp +++ b/gm/clockwise.cpp @@ -116,8 +116,7 @@ private: {100, fY+100}, }; sk_sp vertexBuffer(flushState->resourceProvider()->createBuffer( - sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern, - GrResourceProvider::Flags::kNone, vertices)); + sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern, vertices)); if (!vertexBuffer) { return; } diff --git a/gm/fwidth_squircle.cpp b/gm/fwidth_squircle.cpp index 
76ad2d4ccc..21a2be4f3d 100644 --- a/gm/fwidth_squircle.cpp +++ b/gm/fwidth_squircle.cpp @@ -141,8 +141,7 @@ private: {+1, +1}, }; sk_sp vertexBuffer(flushState->resourceProvider()->createBuffer( - sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern, - GrResourceProvider::Flags::kNone, vertices)); + sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern, vertices)); if (!vertexBuffer) { return; } diff --git a/gn/gpu.gni b/gn/gpu.gni index f5e691c322..d9fc96d2ea 100644 --- a/gn/gpu.gni +++ b/gn/gpu.gni @@ -64,7 +64,6 @@ skia_gpu_sources = [ "$_src/gpu/GrBitmapTextureMaker.h", "$_src/gpu/GrBlurUtils.cpp", "$_src/gpu/GrBlurUtils.h", - "$_src/gpu/GrBuffer.cpp", "$_src/gpu/GrBuffer.h", "$_src/gpu/GrBufferAllocPool.cpp", "$_src/gpu/GrBufferAllocPool.h", @@ -83,6 +82,7 @@ skia_gpu_sources = [ "$_src/gpu/GrContextThreadSafeProxy.cpp", "$_src/gpu/GrContextThreadSafeProxyPriv.h", "$_src/gpu/GrCoordTransform.h", + "$_src/gpu/GrCpuBuffer.h", "$_src/gpu/GrDDLContext.cpp", "$_src/gpu/GrDefaultGeoProcFactory.cpp", "$_src/gpu/GrDefaultGeoProcFactory.h", @@ -108,6 +108,8 @@ skia_gpu_sources = [ "$_src/gpu/GrGlyph.h", "$_src/gpu/GrGpu.cpp", "$_src/gpu/GrGpu.h", + "$_src/gpu/GrGpuBuffer.cpp", + "$_src/gpu/GrGpuBuffer.h", "$_src/gpu/GrGpuResourceCacheAccess.h", "$_src/gpu/GrGpuCommandBuffer.cpp", "$_src/gpu/GrGpuCommandBuffer.h", diff --git a/include/gpu/GrGpuResource.h b/include/gpu/GrGpuResource.h index 0215e2ba91..af4d3579ce 100644 --- a/include/gpu/GrGpuResource.h +++ b/include/gpu/GrGpuResource.h @@ -93,7 +93,6 @@ protected: bool internalHasUniqueRef() const { return fRefCnt == 1; } private: - friend class GrIORefProxy; // needs to forward on wrapped IO calls // This is for a unit test. 
template friend void testingOnly_getIORefCnts(const T*, int* refCnt, int* readCnt, int* writeCnt); @@ -120,7 +119,6 @@ private: this->didRemoveRefOrPendingIO(kPendingWrite_CntType); } -private: void didRemoveRefOrPendingIO(CntType cntTypeRemoved) const { if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) { static_cast(this)->notifyAllCntsAreZero(cntTypeRemoved); @@ -131,6 +129,7 @@ private: mutable int32_t fPendingReads; mutable int32_t fPendingWrites; + friend class GrIORefProxy; // needs to forward on wrapped IO calls friend class GrResourceCache; // to check IO ref counts. template friend class GrPendingIOResource; diff --git a/include/private/GrTypesPriv.h b/include/private/GrTypesPriv.h index 7900336d8c..f0f85c746e 100644 --- a/include/private/GrTypesPriv.h +++ b/include/private/GrTypesPriv.h @@ -829,19 +829,6 @@ enum class GrGpuBufferType { }; static const int kGrGpuBufferTypeCount = static_cast(GrGpuBufferType::kXferGpuToCpu) + 1; -static inline bool GrBufferTypeIsVertexOrIndex(GrGpuBufferType type) { - switch (type) { - case GrGpuBufferType::kVertex: - case GrGpuBufferType::kIndex: - return true; - case GrGpuBufferType::kXferCpuToGpu: - case GrGpuBufferType::kXferGpuToCpu: - return false; - } - SK_ABORT("Unexpected GrGpuBufferType."); - return false; -} - /** * Provides a performance hint regarding the frequency at which a data store will be accessed. 
*/ diff --git a/samplecode/SampleCCPRGeometry.cpp b/samplecode/SampleCCPRGeometry.cpp index 994fd05621..095027a662 100644 --- a/samplecode/SampleCCPRGeometry.cpp +++ b/samplecode/SampleCCPRGeometry.cpp @@ -342,19 +342,17 @@ void CCPRGeometryView::DrawCoverageCountOp::onExecute(GrOpFlushState* state, SkSTArray<1, GrMesh> mesh; if (PrimitiveType::kCubics == fView->fPrimitiveType || PrimitiveType::kConics == fView->fPrimitiveType) { - sk_sp instBuff( + sk_sp instBuff( rp->createBuffer(fView->fQuadPointInstances.count() * sizeof(QuadPointInstance), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern, - GrResourceProvider::Flags::kRequireGpuMemory, fView->fQuadPointInstances.begin())); if (!fView->fQuadPointInstances.empty() && instBuff) { proc.appendMesh(std::move(instBuff), fView->fQuadPointInstances.count(), 0, &mesh); } } else { - sk_sp instBuff( + sk_sp instBuff( rp->createBuffer(fView->fTriPointInstances.count() * sizeof(TriPointInstance), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern, - GrResourceProvider::Flags::kRequireGpuMemory, fView->fTriPointInstances.begin())); if (!fView->fTriPointInstances.empty() && instBuff) { proc.appendMesh(std::move(instBuff), fView->fTriPointInstances.count(), 0, &mesh); diff --git a/src/gpu/GrBuffer.cpp b/src/gpu/GrBuffer.cpp deleted file mode 100644 index cec2556c25..0000000000 --- a/src/gpu/GrBuffer.cpp +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright 2016 Google Inc. - * - * Use of this source code is governed by a BSD-style license that can be - * found in the LICENSE file. 
- */ - -#include "GrBuffer.h" -#include "GrGpu.h" -#include "GrCaps.h" - -sk_sp GrBuffer::MakeCPUBacked(GrGpu* gpu, size_t sizeInBytes, - GrGpuBufferType intendedType, const void* data) { - SkASSERT(GrBufferTypeIsVertexOrIndex(intendedType)); - void* cpuData; - if (gpu->caps()->mustClearUploadedBufferData()) { - cpuData = sk_calloc_throw(sizeInBytes); - } else { - cpuData = sk_malloc_throw(sizeInBytes); - } - if (data) { - memcpy(cpuData, data, sizeInBytes); - } - return sk_sp(new GrBuffer(gpu, sizeInBytes, intendedType, cpuData)); -} - -GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, void* cpuData) - : INHERITED(gpu) - , fMapPtr(nullptr) - , fSizeInBytes(sizeInBytes) - , fAccessPattern(kDynamic_GrAccessPattern) - , fCPUData(cpuData) - , fIntendedType(type) { - this->registerWithCache(SkBudgeted::kNo); -} - -GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, GrAccessPattern pattern) - : INHERITED(gpu) - , fMapPtr(nullptr) - , fSizeInBytes(sizeInBytes) - , fAccessPattern(pattern) - , fCPUData(nullptr) - , fIntendedType(type) { - // Subclass registers with cache. -} - -void GrBuffer::ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType intendedType, - GrScratchKey* key) { - static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType(); - GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4); - // TODO: There's not always reason to cache a buffer by type. In some (all?) APIs it's just - // a chunk of memory we can use/reuse for any type of data. We really only need to - // differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types. 
- builder[0] = SkToU32(intendedType); - builder[1] = (uint32_t)size; - if (sizeof(size_t) > 4) { - builder[2] = (uint32_t)((uint64_t)size >> 32); - } -} - -bool GrBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) { - SkASSERT(this->isCPUBacked()); - memcpy(fCPUData, src, srcSizeInBytes); - return true; -} - -void GrBuffer::computeScratchKey(GrScratchKey* key) const { - if (!this->isCPUBacked() && SkIsPow2(fSizeInBytes) && - kDynamic_GrAccessPattern == fAccessPattern) { - ComputeScratchKeyForDynamicVBO(fSizeInBytes, fIntendedType, key); - } -} diff --git a/src/gpu/GrBuffer.h b/src/gpu/GrBuffer.h index d90b587bfc..cc710c73bd 100644 --- a/src/gpu/GrBuffer.h +++ b/src/gpu/GrBuffer.h @@ -1,5 +1,5 @@ /* - * Copyright 2016 Google Inc. + * Copyright 2019 Google Inc. * * Use of this source code is governed by a BSD-style license that can be * found in the LICENSE file. @@ -8,124 +8,29 @@ #ifndef GrBuffer_DEFINED #define GrBuffer_DEFINED -#include "GrGpuResource.h" +#include "GrTypes.h" -class GrGpu; - -class GrBuffer : public GrGpuResource { +/** Base class for a GPU buffer object or a client side arrays. */ +class GrBuffer { public: - /** - * Creates a client-side buffer. - */ - static SK_WARN_UNUSED_RESULT sk_sp MakeCPUBacked(GrGpu*, size_t sizeInBytes, - GrGpuBufferType, - const void* data = nullptr); + GrBuffer(const GrBuffer&) = delete; + GrBuffer& operator=(const GrBuffer&) = delete; - /** - * Computes a scratch key for a GPU-side buffer with a "dynamic" access pattern. (Buffers with - * "static" and "stream" patterns are disqualified by nature from being cached and reused.) - */ - static void ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType, GrScratchKey*); + virtual ~GrBuffer() = default; - GrAccessPattern accessPattern() const { return fAccessPattern; } - size_t sizeInBytes() const { return fSizeInBytes; } + // Our subclasses derive from different ref counting base classes. 
In order to use base + // class pointers with sk_sp we virtualize ref() and unref(). + virtual void ref() const = 0; + virtual void unref() const = 0; - /** - * Returns true if the buffer is a wrapper around a CPU array. If true it - * indicates that map will always succeed and will be free. - */ - bool isCPUBacked() const { return SkToBool(fCPUData); } - size_t baseOffset() const { return reinterpret_cast(fCPUData); } + /** Size of the buffer in bytes. */ + virtual size_t size() const = 0; - /** - * Maps the buffer to be written by the CPU. - * - * The previous content of the buffer is invalidated. It is an error - * to draw from the buffer while it is mapped. It may fail if the backend - * doesn't support mapping the buffer. If the buffer is CPU backed then - * it will always succeed and is a free operation. Once a buffer is mapped, - * subsequent calls to map() are ignored. - * - * Note that buffer mapping does not go through GrContext and therefore is - * not serialized with other operations. - * - * @return a pointer to the data or nullptr if the map fails. - */ - void* map() { - if (!fMapPtr) { - this->onMap(); - } - return fMapPtr; - } - - /** - * Unmaps the buffer. - * - * The pointer returned by the previous map call will no longer be valid. - */ - void unmap() { - SkASSERT(fMapPtr); - this->onUnmap(); - fMapPtr = nullptr; - } - - /** - Queries whether the buffer has been mapped. - - @return true if the buffer is mapped, false otherwise. - */ - bool isMapped() const { return SkToBool(fMapPtr); } - - /** - * Updates the buffer data. - * - * The size of the buffer will be preserved. The src data will be - * placed at the beginning of the buffer and any remaining contents will - * be undefined. srcSizeInBytes must be <= to the buffer size. - * - * The buffer must not be mapped. - * - * Note that buffer updates do not go through GrContext and therefore are - * not serialized with other operations. 
- * - * @return returns true if the update succeeds, false otherwise. - */ - bool updateData(const void* src, size_t srcSizeInBytes) { - SkASSERT(!this->isMapped()); - SkASSERT(srcSizeInBytes <= fSizeInBytes); - return this->onUpdateData(src, srcSizeInBytes); - } - - ~GrBuffer() override { - sk_free(fCPUData); - } + /** Is this an instance of GrCpuBuffer? Otherwise, an instance of GrGpuBuffer. */ + virtual bool isCpuBuffer() const = 0; protected: - GrBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, GrAccessPattern); - GrGpuBufferType intendedType() const { return fIntendedType; } - - void* fMapPtr; - -private: - /** - * Internal constructor to make a CPU-backed buffer. - */ - GrBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, void* cpuData); - - virtual void onMap() { SkASSERT(this->isCPUBacked()); fMapPtr = fCPUData; } - virtual void onUnmap() { SkASSERT(this->isCPUBacked()); } - virtual bool onUpdateData(const void* src, size_t srcSizeInBytes); - - size_t onGpuMemorySize() const override { return fSizeInBytes; } // TODO: zero for cpu backed? 
- const char* getResourceType() const override { return "Buffer Object"; } - void computeScratchKey(GrScratchKey* key) const override; - - size_t fSizeInBytes; - GrAccessPattern fAccessPattern; - void* fCPUData; - GrGpuBufferType fIntendedType; - - typedef GrGpuResource INHERITED; + GrBuffer() = default; }; #endif diff --git a/src/gpu/GrBufferAllocPool.cpp b/src/gpu/GrBufferAllocPool.cpp index 0d0e151e45..cf986bceb8 100644 --- a/src/gpu/GrBufferAllocPool.cpp +++ b/src/gpu/GrBufferAllocPool.cpp @@ -6,12 +6,12 @@ */ #include "GrBufferAllocPool.h" - -#include "GrBuffer.h" #include "GrCaps.h" #include "GrContext.h" #include "GrContextPriv.h" +#include "GrCpuBuffer.h" #include "GrGpu.h" +#include "GrGpuBuffer.h" #include "GrResourceProvider.h" #include "GrTypes.h" #include "SkMacros.h" @@ -24,15 +24,14 @@ static void VALIDATE(bool = false) {} #endif -#define UNMAP_BUFFER(block) \ -do { \ - TRACE_EVENT_INSTANT1("skia.gpu", \ - "GrBufferAllocPool Unmapping Buffer", \ - TRACE_EVENT_SCOPE_THREAD, \ - "percent_unwritten", \ - (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \ - (block).fBuffer->unmap(); \ -} while (false) +#define UNMAP_BUFFER(block) \ + do { \ + TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer", \ + TRACE_EVENT_SCOPE_THREAD, "percent_unwritten", \ + (float)((block).fBytesFree) / (block).fBuffer->size()); \ + SkASSERT(!block.fBuffer->isCpuBuffer()); \ + static_cast(block.fBuffer.get())->unmap(); \ + } while (false) constexpr size_t GrBufferAllocPool::kDefaultBufferSize; @@ -47,7 +46,7 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType, voi void GrBufferAllocPool::deleteBlocks() { if (fBlocks.count()) { GrBuffer* buffer = fBlocks.back().fBuffer.get(); - if (buffer->isMapped()) { + if (!buffer->isCpuBuffer() && static_cast(buffer)->isMapped()) { UNMAP_BUFFER(fBlocks.back()); } } @@ -78,11 +77,14 @@ void GrBufferAllocPool::unmap() { if (fBufferPtr) { BufferBlock& block = fBlocks.back(); - if 
(block.fBuffer->isMapped()) { - UNMAP_BUFFER(block); - } else { - size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree; - this->flushCpuData(fBlocks.back(), flushSize); + GrBuffer* buffer = block.fBuffer.get(); + if (!buffer->isCpuBuffer()) { + if (static_cast(buffer)->isMapped()) { + UNMAP_BUFFER(block); + } else { + size_t flushSize = block.fBuffer->size() - block.fBytesFree; + this->flushCpuData(fBlocks.back(), flushSize); + } } fBufferPtr = nullptr; } @@ -94,21 +96,25 @@ void GrBufferAllocPool::validate(bool unusedBlockAllowed) const { bool wasDestroyed = false; if (fBufferPtr) { SkASSERT(!fBlocks.empty()); - if (!fBlocks.back().fBuffer->isMapped()) { + const GrBuffer* buffer = fBlocks.back().fBuffer.get(); + if (!buffer->isCpuBuffer() && !static_cast(buffer)->isMapped()) { SkASSERT(fCpuData == fBufferPtr); } - } else { - SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped()); + } else if (!fBlocks.empty()) { + const GrBuffer* buffer = fBlocks.back().fBuffer.get(); + SkASSERT(buffer->isCpuBuffer() || !static_cast(buffer)->isMapped()); } size_t bytesInUse = 0; for (int i = 0; i < fBlocks.count() - 1; ++i) { - SkASSERT(!fBlocks[i].fBuffer->isMapped()); + const GrBuffer* buffer = fBlocks[i].fBuffer.get(); + SkASSERT(buffer->isCpuBuffer() || !static_cast(buffer)->isMapped()); } for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) { - if (fBlocks[i].fBuffer->wasDestroyed()) { + GrBuffer* buffer = fBlocks[i].fBuffer.get(); + if (!buffer->isCpuBuffer() && static_cast(buffer)->wasDestroyed()) { wasDestroyed = true; } else { - size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree; + size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree; bytesInUse += bytes; SkASSERT(bytes || unusedBlockAllowed); } @@ -137,7 +143,7 @@ void* GrBufferAllocPool::makeSpace(size_t size, if (fBufferPtr) { BufferBlock& back = fBlocks.back(); - size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree; + size_t usedBytes = 
back.fBuffer->size() - back.fBytesFree; size_t pad = GrSizeAlignUpPad(usedBytes, alignment); SkSafeMath safeMath; size_t alignedSize = safeMath.add(pad, size); @@ -192,7 +198,7 @@ void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize, if (fBufferPtr) { BufferBlock& back = fBlocks.back(); - size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree; + size_t usedBytes = back.fBuffer->size() - back.fBytesFree; size_t pad = GrSizeAlignUpPad(usedBytes, alignment); if ((minSize + pad) <= back.fBytesFree) { // Consume padding first, to make subsequent alignment math easier @@ -250,13 +256,14 @@ void GrBufferAllocPool::putBack(size_t bytes) { // caller shouldn't try to put back more than they've taken SkASSERT(!fBlocks.empty()); BufferBlock& block = fBlocks.back(); - size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree; + size_t bytesUsed = block.fBuffer->size() - block.fBytesFree; if (bytes >= bytesUsed) { bytes -= bytesUsed; fBytesInUse -= bytesUsed; // if we locked a vb to satisfy the make space and we're releasing // beyond it, then unmap it. 
- if (block.fBuffer->isMapped()) { + GrBuffer* buffer = block.fBuffer.get(); + if (!buffer->isCpuBuffer() && static_cast(buffer)->isMapped()) { UNMAP_BUFFER(block); } this->destroyBlock(); @@ -284,32 +291,35 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) { return false; } - block.fBytesFree = block.fBuffer->gpuMemorySize(); + block.fBytesFree = block.fBuffer->size(); if (fBufferPtr) { SkASSERT(fBlocks.count() > 1); BufferBlock& prev = fBlocks.fromBack(1); - if (prev.fBuffer->isMapped()) { - UNMAP_BUFFER(prev); - } else { - this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree); + GrBuffer* buffer = prev.fBuffer.get(); + if (!buffer->isCpuBuffer()) { + if (static_cast(buffer)->isMapped()) { + UNMAP_BUFFER(prev); + } else { + this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree); + } } fBufferPtr = nullptr; } SkASSERT(!fBufferPtr); - // If the buffer is CPU-backed we map it because it is free to do so and saves a copy. + // If the buffer is CPU-backed we "map" it because it is free to do so and saves a copy. // Otherwise when buffer mapping is supported we map if the buffer size is greater than the // threshold. 
- bool attemptMap = block.fBuffer->isCPUBacked(); - if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) { - attemptMap = size > fGpu->caps()->bufferMapThreshold(); + if (block.fBuffer->isCpuBuffer()) { + fBufferPtr = static_cast(block.fBuffer.get())->data(); + SkASSERT(fBufferPtr); + } else { + if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() && + size > fGpu->caps()->bufferMapThreshold()) { + fBufferPtr = static_cast(block.fBuffer.get())->map(); + } } - - if (attemptMap) { - fBufferPtr = block.fBuffer->map(); - } - if (!fBufferPtr) { fBufferPtr = this->resetCpuData(block.fBytesFree); } @@ -321,7 +331,8 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) { void GrBufferAllocPool::destroyBlock() { SkASSERT(!fBlocks.empty()); - SkASSERT(!fBlocks.back().fBuffer->isMapped()); + SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() || + !static_cast(fBlocks.back().fBuffer.get())->isMapped()); fBlocks.pop_back(); fBufferPtr = nullptr; } @@ -345,11 +356,12 @@ void* GrBufferAllocPool::resetCpuData(size_t newSize) { void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) { - GrBuffer* buffer = block.fBuffer.get(); - SkASSERT(buffer); + SkASSERT(block.fBuffer.get()); + SkASSERT(!block.fBuffer.get()->isCpuBuffer()); + GrGpuBuffer* buffer = static_cast(block.fBuffer.get()); SkASSERT(!buffer->isMapped()); SkASSERT(fCpuData == fBufferPtr); - SkASSERT(flushSize <= buffer->gpuMemorySize()); + SkASSERT(flushSize <= buffer->size()); VALIDATE(true); if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() && @@ -368,8 +380,10 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) sk_sp GrBufferAllocPool::getBuffer(size_t size) { auto resourceProvider = fGpu->getContext()->priv().resourceProvider(); - return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern, - GrResourceProvider::Flags::kNone); + if (fGpu->caps()->preferClientSideDynamicBuffers()) { + return 
GrCpuBuffer::Make(size); + } + return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern); } //////////////////////////////////////////////////////////////////////////////// diff --git a/src/gpu/GrBufferAllocPool.h b/src/gpu/GrBufferAllocPool.h index 9453660787..d0fda195a4 100644 --- a/src/gpu/GrBufferAllocPool.h +++ b/src/gpu/GrBufferAllocPool.h @@ -8,13 +8,14 @@ #ifndef GrBufferAllocPool_DEFINED #define GrBufferAllocPool_DEFINED +#include "GrGpuBuffer.h" #include "GrTypesPriv.h" #include "SkNoncopyable.h" #include "SkTArray.h" #include "SkTDArray.h" #include "SkTypes.h" -class GrBuffer; + class GrGpu; /** diff --git a/src/gpu/GrCpuBuffer.h b/src/gpu/GrCpuBuffer.h new file mode 100644 index 0000000000..3fab08bd15 --- /dev/null +++ b/src/gpu/GrCpuBuffer.h @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrCpuBuffer_DEFINED +#define GrCpuBuffer_DEFINED + +#include "GrBuffer.h" +#include "GrNonAtomicRef.h" + +class GrCpuBuffer final : public GrNonAtomicRef, public GrBuffer { +public: + static sk_sp Make(size_t size) { + SkASSERT(size > 0); + auto mem = ::operator new(sizeof(GrCpuBuffer) + size); + return sk_sp(new (mem) GrCpuBuffer((char*)mem + sizeof(GrCpuBuffer), size)); + } + + void ref() const override { GrNonAtomicRef::ref(); } + void unref() const override { GrNonAtomicRef::unref(); } + size_t size() const override { return fSize; } + bool isCpuBuffer() const override { return true; } + + char* data() { return reinterpret_cast(fData); } + const char* data() const { return reinterpret_cast(fData); } + +private: + GrCpuBuffer(void* data, size_t size) : fData(data), fSize(size) {} + void* fData; + size_t fSize; +}; + +#endif diff --git a/src/gpu/GrGpu.cpp b/src/gpu/GrGpu.cpp index cfb22e8d17..2600076c4a 100644 --- a/src/gpu/GrGpu.cpp +++ b/src/gpu/GrGpu.cpp @@ -10,7 +10,6 @@ #include "GrBackendSemaphore.h" 
#include "GrBackendSurface.h" -#include "GrBuffer.h" #include "GrCaps.h" #include "GrContext.h" #include "GrContextPriv.h" @@ -217,10 +216,10 @@ sk_sp GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImage return nullptr; } -sk_sp GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType, - GrAccessPattern accessPattern, const void* data) { +sk_sp GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType, + GrAccessPattern accessPattern, const void* data) { this->handleDirtyContext(); - sk_sp buffer = this->onCreateBuffer(size, intendedType, accessPattern, data); + sk_sp buffer = this->onCreateBuffer(size, intendedType, accessPattern, data); if (!this->caps()->reuseScratchBuffers()) { buffer->resourcePriv().removeScratchKey(); } @@ -303,7 +302,7 @@ bool GrGpu::writePixels(GrSurface* surface, int left, int top, int width, int he } bool GrGpu::transferPixels(GrTexture* texture, int left, int top, int width, int height, - GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset, + GrColorType bufferColorType, GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) { SkASSERT(texture); SkASSERT(transferBuffer); diff --git a/src/gpu/GrGpu.h b/src/gpu/GrGpu.h index 917e88701f..867af549e9 100644 --- a/src/gpu/GrGpu.h +++ b/src/gpu/GrGpu.h @@ -22,7 +22,7 @@ class GrBackendRenderTarget; class GrBackendSemaphore; -class GrBuffer; +class GrGpuBuffer; class GrContext; struct GrContextOptions; class GrGLContext; @@ -140,8 +140,8 @@ public: * * @return the buffer if successful, otherwise nullptr. */ - sk_sp createBuffer(size_t size, GrGpuBufferType intendedType, - GrAccessPattern accessPattern, const void* data = nullptr); + sk_sp createBuffer(size_t size, GrGpuBufferType intendedType, + GrAccessPattern accessPattern, const void* data = nullptr); /** * Resolves MSAA. @@ -217,7 +217,7 @@ public: * means rows are tightly packed. 
*/ bool transferPixels(GrTexture* texture, int left, int top, int width, int height, - GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset, + GrColorType bufferColorType, GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes); // After the client interacts directly with the 3D context state the GrGpu @@ -472,8 +472,8 @@ private: virtual sk_sp onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&, const GrVkDrawableInfo&); - virtual sk_sp onCreateBuffer(size_t size, GrGpuBufferType intendedType, - GrAccessPattern, const void* data) = 0; + virtual sk_sp onCreateBuffer(size_t size, GrGpuBufferType intendedType, + GrAccessPattern, const void* data) = 0; // overridden by backend-specific derived class to perform the surface read virtual bool onReadPixels(GrSurface*, int left, int top, int width, int height, GrColorType, @@ -485,7 +485,7 @@ private: // overridden by backend-specific derived class to perform the texture transfer virtual bool onTransferPixels(GrTexture*, int left, int top, int width, int height, - GrColorType colorType, GrBuffer* transferBuffer, size_t offset, + GrColorType colorType, GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) = 0; // overridden by backend-specific derived class to perform the resolve diff --git a/src/gpu/GrGpuBuffer.cpp b/src/gpu/GrGpuBuffer.cpp new file mode 100644 index 0000000000..b679b47f45 --- /dev/null +++ b/src/gpu/GrGpuBuffer.cpp @@ -0,0 +1,38 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. 
+ */ + +#include "GrGpuBuffer.h" +#include "GrCaps.h" +#include "GrGpu.h" + +GrGpuBuffer::GrGpuBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, + GrAccessPattern pattern) + : GrGpuResource(gpu) + , fMapPtr(nullptr) + , fSizeInBytes(sizeInBytes) + , fAccessPattern(pattern) + , fIntendedType(type) {} + +void GrGpuBuffer::ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType intendedType, + GrScratchKey* key) { + static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType(); + GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4); + // TODO: There's not always reason to cache a buffer by type. In some (all?) APIs it's just + // a chunk of memory we can use/reuse for any type of data. We really only need to + // differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types. + builder[0] = SkToU32(intendedType); + builder[1] = (uint32_t)size; + if (sizeof(size_t) > 4) { + builder[2] = (uint32_t)((uint64_t)size >> 32); + } +} + +void GrGpuBuffer::computeScratchKey(GrScratchKey* key) const { + if (SkIsPow2(fSizeInBytes) && kDynamic_GrAccessPattern == fAccessPattern) { + ComputeScratchKeyForDynamicVBO(fSizeInBytes, fIntendedType, key); + } +} diff --git a/src/gpu/GrGpuBuffer.h b/src/gpu/GrGpuBuffer.h new file mode 100644 index 0000000000..5bc6ed177a --- /dev/null +++ b/src/gpu/GrGpuBuffer.h @@ -0,0 +1,113 @@ +/* + * Copyright 2019 Google Inc. + * + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. + */ + +#ifndef GrGpuBuffer_DEFINED +#define GrGpuBuffer_DEFINED + +#include "GrBuffer.h" +#include "GrGpuResource.h" + +class GrGpu; + +class GrGpuBuffer : public GrGpuResource, public GrBuffer { +public: + /** + * Computes a scratch key for a GPU-side buffer with a "dynamic" access pattern. (Buffers with + * "static" and "stream" patterns are disqualified by nature from being cached and reused.) 
+ */ + static void ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType, GrScratchKey*); + + GrAccessPattern accessPattern() const { return fAccessPattern; } + + size_t size() const final { return fSizeInBytes; } + + void ref() const final { GrGpuResource::ref(); } + + void unref() const final { GrGpuResource::unref(); } + + /** + * Maps the buffer to be written by the CPU. + * + * The previous content of the buffer is invalidated. It is an error + * to draw from the buffer while it is mapped. It may fail if the backend + * doesn't support mapping the buffer. If the buffer is CPU backed then + * it will always succeed and is a free operation. Once a buffer is mapped, + * subsequent calls to map() are ignored. + * + * Note that buffer mapping does not go through GrContext and therefore is + * not serialized with other operations. + * + * @return a pointer to the data or nullptr if the map fails. + */ + void* map() { + if (!fMapPtr) { + this->onMap(); + } + return fMapPtr; + } + + /** + * Unmaps the buffer. + * + * The pointer returned by the previous map call will no longer be valid. + */ + void unmap() { + SkASSERT(fMapPtr); + this->onUnmap(); + fMapPtr = nullptr; + } + + /** + Queries whether the buffer has been mapped. + + @return true if the buffer is mapped, false otherwise. + */ + bool isMapped() const { return SkToBool(fMapPtr); } + + bool isCpuBuffer() const final { return false; } + + /** + * Updates the buffer data. + * + * The size of the buffer will be preserved. The src data will be + * placed at the beginning of the buffer and any remaining contents will + * be undefined. srcSizeInBytes must be <= to the buffer size. + * + * The buffer must not be mapped. + * + * Note that buffer updates do not go through GrContext and therefore are + * not serialized with other operations. + * + * @return returns true if the update succeeds, false otherwise. 
+ */ + bool updateData(const void* src, size_t srcSizeInBytes) { + SkASSERT(!this->isMapped()); + SkASSERT(srcSizeInBytes <= fSizeInBytes); + return this->onUpdateData(src, srcSizeInBytes); + } + +protected: + GrGpuBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, GrAccessPattern); + GrGpuBufferType intendedType() const { return fIntendedType; } + + void* fMapPtr; + +private: + virtual void onMap() = 0; + virtual void onUnmap() = 0; + virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0; + + size_t onGpuMemorySize() const override { return fSizeInBytes; } + const char* getResourceType() const override { return "Buffer Object"; } + void computeScratchKey(GrScratchKey* key) const override; + + size_t fSizeInBytes; + GrAccessPattern fAccessPattern; + GrGpuBufferType fIntendedType; +}; + +#endif diff --git a/src/gpu/GrMesh.h b/src/gpu/GrMesh.h index a76ed87c3a..bacffd4375 100644 --- a/src/gpu/GrMesh.h +++ b/src/gpu/GrMesh.h @@ -10,6 +10,7 @@ #include "GrBuffer.h" #include "GrPendingIOResource.h" +#include "GrGpuBuffer.h" class GrPrimitiveProcessor; @@ -42,8 +43,9 @@ public: void setInstanced(sk_sp instanceBuffer, int instanceCount, int baseInstance, int vertexCount); - void setIndexedInstanced(sk_sp, int indexCount, sk_sp, - int instanceCount, int baseInstance, GrPrimitiveRestart); + void setIndexedInstanced(sk_sp indexBuffer, int indexCount, + sk_sp instanceBuffer, int instanceCount, + int baseInstance, GrPrimitiveRestart); void setVertexData(sk_sp vertexBuffer, int baseVertex = 0); @@ -127,8 +129,8 @@ private: }; inline void GrMesh::setNonIndexedNonInstanced(int vertexCount) { - fIndexBuffer.reset(nullptr); - fInstanceBuffer.reset(nullptr); + fIndexBuffer.reset(); + fInstanceBuffer.reset(); fNonIndexNonInstanceData.fVertexCount = vertexCount; fPrimitiveRestart = GrPrimitiveRestart::kNo; } diff --git a/src/gpu/GrOnFlushResourceProvider.cpp b/src/gpu/GrOnFlushResourceProvider.cpp index a2c56e3320..285e8e25f8 100644 --- 
a/src/gpu/GrOnFlushResourceProvider.cpp +++ b/src/gpu/GrOnFlushResourceProvider.cpp @@ -73,20 +73,18 @@ bool GrOnFlushResourceProvider::instatiateProxy(GrSurfaceProxy* proxy) { return proxy->instantiate(resourceProvider); } -sk_sp GrOnFlushResourceProvider::makeBuffer(GrGpuBufferType intendedType, size_t size, - const void* data) { +sk_sp GrOnFlushResourceProvider::makeBuffer(GrGpuBufferType intendedType, size_t size, + const void* data) { auto resourceProvider = fDrawingMgr->getContext()->priv().resourceProvider(); - return sk_sp(resourceProvider->createBuffer(size, intendedType, - kDynamic_GrAccessPattern, - GrResourceProvider::Flags::kNone, - data)); + return sk_sp( + resourceProvider->createBuffer(size, intendedType, kDynamic_GrAccessPattern, data)); } -sk_sp GrOnFlushResourceProvider::findOrMakeStaticBuffer( +sk_sp GrOnFlushResourceProvider::findOrMakeStaticBuffer( GrGpuBufferType intendedType, size_t size, const void* data, const GrUniqueKey& key) { auto resourceProvider = fDrawingMgr->getContext()->priv().resourceProvider(); - sk_sp buffer = resourceProvider->findOrMakeStaticBuffer(intendedType, size, - data, key); + sk_sp buffer = + resourceProvider->findOrMakeStaticBuffer(intendedType, size, data, key); // Static buffers should never have pending IO. SkASSERT(!buffer || !buffer->resourcePriv().hasPendingIO_debugOnly()); return buffer; diff --git a/src/gpu/GrOnFlushResourceProvider.h b/src/gpu/GrOnFlushResourceProvider.h index 11caf39482..e2332a9ce1 100644 --- a/src/gpu/GrOnFlushResourceProvider.h +++ b/src/gpu/GrOnFlushResourceProvider.h @@ -86,11 +86,11 @@ public: bool instatiateProxy(GrSurfaceProxy*); // Creates a GPU buffer with a "dynamic" access pattern. - sk_sp makeBuffer(GrGpuBufferType, size_t, const void* data = nullptr); + sk_sp makeBuffer(GrGpuBufferType, size_t, const void* data = nullptr); // Either finds and refs, or creates a static GPU buffer with the given data. 
- sk_sp findOrMakeStaticBuffer(GrGpuBufferType, size_t, const void* data, - const GrUniqueKey&); + sk_sp findOrMakeStaticBuffer(GrGpuBufferType, size_t, const void* data, + const GrUniqueKey&); uint32_t contextID() const; const GrCaps* caps() const; diff --git a/src/gpu/GrProcessor.h b/src/gpu/GrProcessor.h index 8ae9493573..19ab4abf96 100644 --- a/src/gpu/GrProcessor.h +++ b/src/gpu/GrProcessor.h @@ -8,8 +8,8 @@ #ifndef GrProcessor_DEFINED #define GrProcessor_DEFINED -#include "GrBuffer.h" #include "GrColor.h" +#include "GrGpuBuffer.h" #include "GrProcessorUnitTest.h" #include "GrSamplerState.h" #include "GrShaderVar.h" diff --git a/src/gpu/GrResourceProvider.cpp b/src/gpu/GrResourceProvider.cpp index 4ba7de4394..10ceefb4cb 100644 --- a/src/gpu/GrResourceProvider.cpp +++ b/src/gpu/GrResourceProvider.cpp @@ -6,13 +6,13 @@ */ #include "GrResourceProvider.h" - +#include "../private/GrSingleOwner.h" #include "GrBackendSemaphore.h" -#include "GrBuffer.h" #include "GrCaps.h" #include "GrContext.h" #include "GrContextPriv.h" #include "GrGpu.h" +#include "GrGpuBuffer.h" #include "GrPath.h" #include "GrPathRendering.h" #include "GrProxyProvider.h" @@ -22,7 +22,6 @@ #include "GrSemaphore.h" #include "GrStencilAttachment.h" #include "GrTexturePriv.h" -#include "../private/GrSingleOwner.h" #include "SkGr.h" #include "SkMathPriv.h" @@ -285,35 +284,34 @@ sk_sp GrResourceProvider::findResourceByUniqueKey(const GrUniqueK : sk_sp(fCache->findAndRefUniqueResource(key)); } -sk_sp GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferType intendedType, - size_t size, - const void* data, - const GrUniqueKey& key) { - if (auto buffer = this->findByUniqueKey(key)) { +sk_sp GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferType intendedType, + size_t size, + const void* data, + const GrUniqueKey& key) { + if (auto buffer = this->findByUniqueKey(key)) { return std::move(buffer); } - if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, Flags::kNone, - 
data)) { + if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, data)) { // We shouldn't bin and/or cache static buffers. - SkASSERT(buffer->sizeInBytes() == size); + SkASSERT(buffer->size() == size); SkASSERT(!buffer->resourcePriv().getScratchKey().isValid()); SkASSERT(!buffer->resourcePriv().hasPendingIO_debugOnly()); buffer->resourcePriv().setUniqueKey(key); - return sk_sp(buffer); + return sk_sp(buffer); } return nullptr; } -sk_sp GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern, - int patternSize, - int reps, - int vertCount, - const GrUniqueKey& key) { +sk_sp GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern, + int patternSize, + int reps, + int vertCount, + const GrUniqueKey& key) { size_t bufferSize = patternSize * reps * sizeof(uint16_t); // This is typically used in GrMeshDrawOps, so we assume kNoPendingIO. - sk_sp buffer(this->createBuffer(bufferSize, GrGpuBufferType::kIndex, - kStatic_GrAccessPattern, Flags::kNone)); + sk_sp buffer( + this->createBuffer(bufferSize, GrGpuBufferType::kIndex, kStatic_GrAccessPattern)); if (!buffer) { return nullptr; } @@ -343,7 +341,7 @@ sk_sp GrResourceProvider::createPatternedIndexBuffer(const uint1 static constexpr int kMaxQuads = 1 << 12; // max possible: (1 << 14) - 1; -sk_sp GrResourceProvider::createQuadIndexBuffer() { +sk_sp GrResourceProvider::createQuadIndexBuffer() { GR_STATIC_ASSERT(4 * kMaxQuads <= 65535); static const uint16_t kPattern[] = { 0, 1, 2, 2, 1, 3 }; return this->createPatternedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey); @@ -360,36 +358,24 @@ sk_sp GrResourceProvider::createPath(const SkPath& path, const GrStyle& return this->gpu()->pathRendering()->createPath(path, style); } -sk_sp GrResourceProvider::createBuffer(size_t size, GrGpuBufferType intendedType, - GrAccessPattern accessPattern, Flags flags, - const void* data) { +sk_sp GrResourceProvider::createBuffer(size_t size, GrGpuBufferType intendedType, + 
GrAccessPattern accessPattern, + const void* data) { if (this->isAbandoned()) { return nullptr; } if (kDynamic_GrAccessPattern != accessPattern) { return this->gpu()->createBuffer(size, intendedType, accessPattern, data); } - if (!(flags & Flags::kRequireGpuMemory) && - this->gpu()->caps()->preferClientSideDynamicBuffers() && - GrBufferTypeIsVertexOrIndex(intendedType) && - kDynamic_GrAccessPattern == accessPattern) { - return GrBuffer::MakeCPUBacked(this->gpu(), size, intendedType, data); - } - // bin by pow2 with a reasonable min static const size_t MIN_SIZE = 1 << 12; size_t allocSize = SkTMax(MIN_SIZE, GrNextSizePow2(size)); GrScratchKey key; - GrBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key); - auto scratchFlags = GrResourceCache::ScratchFlags::kNone; - if (flags & Flags::kNoPendingIO) { - scratchFlags = GrResourceCache::ScratchFlags::kRequireNoPendingIO; - } else { - scratchFlags = GrResourceCache::ScratchFlags::kPreferNoPendingIO; - } - auto buffer = sk_sp(static_cast( - this->cache()->findAndRefScratchResource(key, allocSize, scratchFlags))); + GrGpuBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key); + auto buffer = + sk_sp(static_cast(this->cache()->findAndRefScratchResource( + key, allocSize, GrResourceCache::ScratchFlags::kNone))); if (!buffer) { buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern); if (!buffer) { @@ -399,7 +385,6 @@ sk_sp GrResourceProvider::createBuffer(size_t size, GrGpuBufferType in if (data) { buffer->updateData(data, size); } - SkASSERT(!buffer->isCPUBacked()); // We should only cache real VBOs. 
return buffer; } diff --git a/src/gpu/GrResourceProvider.h b/src/gpu/GrResourceProvider.h index 9005a966d8..7fb249cccc 100644 --- a/src/gpu/GrResourceProvider.h +++ b/src/gpu/GrResourceProvider.h @@ -8,8 +8,8 @@ #ifndef GrResourceProvider_DEFINED #define GrResourceProvider_DEFINED -#include "GrBuffer.h" #include "GrContextOptions.h" +#include "GrGpuBuffer.h" #include "GrResourceCache.h" #include "SkImageInfoPriv.h" #include "SkScalerContext.h" @@ -51,11 +51,6 @@ public: * Make this automatic: https://bug.skia.org/4156 */ kNoPendingIO = 0x1, - - /** Normally the caps may indicate a preference for client-side buffers. Set this flag when - * creating a buffer to guarantee it resides in GPU memory. - */ - kRequireGpuMemory = 0x2, }; GrResourceProvider(GrGpu*, GrResourceCache*, GrSingleOwner*, @@ -66,7 +61,9 @@ public: * must be sure that if a resource of exists in the cache with the given unique key then it is * of type T. */ - template sk_sp findByUniqueKey(const GrUniqueKey& key) { + template + typename std::enable_if::value, sk_sp>::type + findByUniqueKey(const GrUniqueKey& key) { return sk_sp(static_cast(this->findResourceByUniqueKey(key).release())); } @@ -145,8 +142,8 @@ public: * * @return The buffer if successful, otherwise nullptr. */ - sk_sp findOrMakeStaticBuffer(GrGpuBufferType intendedType, size_t size, - const void* data, const GrUniqueKey& key); + sk_sp findOrMakeStaticBuffer(GrGpuBufferType intendedType, size_t size, + const void* data, const GrUniqueKey& key); /** * Either finds and refs, or creates an index buffer with a repeating pattern for drawing @@ -161,12 +158,12 @@ public: * * @return The index buffer if successful, otherwise nullptr. 
*/ - sk_sp findOrCreatePatternedIndexBuffer(const uint16_t* pattern, - int patternSize, - int reps, - int vertCount, - const GrUniqueKey& key) { - if (auto buffer = this->findByUniqueKey(key)) { + sk_sp findOrCreatePatternedIndexBuffer(const uint16_t* pattern, + int patternSize, + int reps, + int vertCount, + const GrUniqueKey& key) { + if (auto buffer = this->findByUniqueKey(key)) { return std::move(buffer); } return this->createPatternedIndexBuffer(pattern, patternSize, reps, vertCount, key); @@ -179,8 +176,8 @@ public: * Draw with GrPrimitiveType::kTriangles * @ return the quad index buffer */ - sk_sp refQuadIndexBuffer() { - if (auto buffer = this->findByUniqueKey(fQuadIndexBufferKey)) { + sk_sp refQuadIndexBuffer() { + if (auto buffer = this->findByUniqueKey(fQuadIndexBufferKey)) { return buffer; } return this->createQuadIndexBuffer(); @@ -205,8 +202,8 @@ public: * * @return the buffer if successful, otherwise nullptr. */ - sk_sp createBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern, Flags, - const void* data = nullptr); + sk_sp createBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern, + const void* data = nullptr); /** * If passed in render target already has a stencil buffer, return true. Otherwise attempt to @@ -286,13 +283,13 @@ private: return !SkToBool(fCache); } - sk_sp createPatternedIndexBuffer(const uint16_t* pattern, - int patternSize, - int reps, - int vertCount, - const GrUniqueKey& key); + sk_sp createPatternedIndexBuffer(const uint16_t* pattern, + int patternSize, + int reps, + int vertCount, + const GrUniqueKey& key); - sk_sp createQuadIndexBuffer(); + sk_sp createQuadIndexBuffer(); GrResourceCache* fCache; GrGpu* fGpu; diff --git a/src/gpu/ccpr/GrCCCoverageProcessor.h b/src/gpu/ccpr/GrCCCoverageProcessor.h index 5871fa15a0..bbf898ee26 100644 --- a/src/gpu/ccpr/GrCCCoverageProcessor.h +++ b/src/gpu/ccpr/GrCCCoverageProcessor.h @@ -100,7 +100,7 @@ public: // Appends a GrMesh that will draw the provided instances. 
The instanceBuffer must be an array // of either TriPointInstance or QuadPointInstance, depending on this processor's RendererPass, // with coordinates in the desired shape's final atlas-space position. - void appendMesh(sk_sp instanceBuffer, int instanceCount, int baseInstance, + void appendMesh(sk_sp instanceBuffer, int instanceCount, int baseInstance, SkTArray* out) const { if (Impl::kGeometryShader == fImpl) { this->appendGSMesh(std::move(instanceBuffer), instanceCount, baseInstance, out); @@ -250,9 +250,9 @@ private: void initGS(); void initVS(GrResourceProvider*); - void appendGSMesh(sk_sp instanceBuffer, int instanceCount, int baseInstance, + void appendGSMesh(sk_sp instanceBuffer, int instanceCount, int baseInstance, SkTArray* out) const; - void appendVSMesh(sk_sp instanceBuffer, int instanceCount, int baseInstance, + void appendVSMesh(sk_sp instanceBuffer, int instanceCount, int baseInstance, SkTArray* out) const; GrGLSLPrimitiveProcessor* createGSImpl(std::unique_ptr) const; @@ -269,8 +269,8 @@ private: // Used by VSImpl. Attribute fInstanceAttributes[2]; - sk_sp fVSVertexBuffer; - sk_sp fVSIndexBuffer; + sk_sp fVSVertexBuffer; + sk_sp fVSIndexBuffer; int fVSNumIndicesPerInstance; GrPrimitiveType fVSTriangleType; diff --git a/src/gpu/ccpr/GrCCCoverageProcessor_GSImpl.cpp b/src/gpu/ccpr/GrCCCoverageProcessor_GSImpl.cpp index 79ee1afab4..2c1d40aafa 100644 --- a/src/gpu/ccpr/GrCCCoverageProcessor_GSImpl.cpp +++ b/src/gpu/ccpr/GrCCCoverageProcessor_GSImpl.cpp @@ -396,7 +396,7 @@ void GrCCCoverageProcessor::initGS() { this->setWillUseGeoShader(); } -void GrCCCoverageProcessor::appendGSMesh(sk_sp instanceBuffer, int instanceCount, +void GrCCCoverageProcessor::appendGSMesh(sk_sp instanceBuffer, int instanceCount, int baseInstance, SkTArray* out) const { // GSImpl doesn't actually make instanced draw calls. Instead, we feed transposed x,y point // values to the GPU in a regular vertex array and draw kLines (see initGS). 
Then, each vertex diff --git a/src/gpu/ccpr/GrCCCoverageProcessor_VSImpl.cpp b/src/gpu/ccpr/GrCCCoverageProcessor_VSImpl.cpp index 4162ee0b2d..10300f3527 100644 --- a/src/gpu/ccpr/GrCCCoverageProcessor_VSImpl.cpp +++ b/src/gpu/ccpr/GrCCCoverageProcessor_VSImpl.cpp @@ -528,7 +528,7 @@ void GrCCCoverageProcessor::initVS(GrResourceProvider* rp) { } } -void GrCCCoverageProcessor::appendVSMesh(sk_sp instanceBuffer, int instanceCount, +void GrCCCoverageProcessor::appendVSMesh(sk_sp instanceBuffer, int instanceCount, int baseInstance, SkTArray* out) const { SkASSERT(Impl::kVertexShader == fImpl); GrMesh& mesh = out->emplace_back(fVSTriangleType); diff --git a/src/gpu/ccpr/GrCCFiller.h b/src/gpu/ccpr/GrCCFiller.h index 45a03a433f..4eec19054a 100644 --- a/src/gpu/ccpr/GrCCFiller.h +++ b/src/gpu/ccpr/GrCCFiller.h @@ -106,7 +106,7 @@ private: PrimitiveTallies fTotalPrimitiveCounts[kNumScissorModes]; int fMaxMeshesPerDraw = 0; - sk_sp fInstanceBuffer; + sk_sp fInstanceBuffer; PrimitiveTallies fBaseInstances[kNumScissorModes]; mutable SkSTArray<32, GrMesh> fMeshesScratchBuffer; mutable SkSTArray<32, SkIRect> fScissorRectScratchBuffer; diff --git a/src/gpu/ccpr/GrCCPathProcessor.cpp b/src/gpu/ccpr/GrCCPathProcessor.cpp index 06e6435eab..feefd4fc8c 100644 --- a/src/gpu/ccpr/GrCCPathProcessor.cpp +++ b/src/gpu/ccpr/GrCCPathProcessor.cpp @@ -34,7 +34,7 @@ static constexpr float kOctoEdgeNorms[8 * 4] = { GR_DECLARE_STATIC_UNIQUE_KEY(gVertexBufferKey); -sk_sp GrCCPathProcessor::FindVertexBuffer(GrOnFlushResourceProvider* onFlushRP) { +sk_sp GrCCPathProcessor::FindVertexBuffer(GrOnFlushResourceProvider* onFlushRP) { GR_DEFINE_STATIC_UNIQUE_KEY(gVertexBufferKey); return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kVertex, sizeof(kOctoEdgeNorms), kOctoEdgeNorms, gVertexBufferKey); @@ -64,7 +64,7 @@ GR_DECLARE_STATIC_UNIQUE_KEY(gIndexBufferKey); constexpr GrPrimitiveProcessor::Attribute GrCCPathProcessor::kInstanceAttribs[]; constexpr GrPrimitiveProcessor::Attribute 
GrCCPathProcessor::kEdgeNormsAttrib; -sk_sp GrCCPathProcessor::FindIndexBuffer(GrOnFlushResourceProvider* onFlushRP) { +sk_sp GrCCPathProcessor::FindIndexBuffer(GrOnFlushResourceProvider* onFlushRP) { GR_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey); if (onFlushRP->caps()->usePrimitiveRestart()) { return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kIndex, diff --git a/src/gpu/ccpr/GrCCPathProcessor.h b/src/gpu/ccpr/GrCCPathProcessor.h index 2f54d6799c..534f08d237 100644 --- a/src/gpu/ccpr/GrCCPathProcessor.h +++ b/src/gpu/ccpr/GrCCPathProcessor.h @@ -66,8 +66,8 @@ public: GR_STATIC_ASSERT(4 * 12 == sizeof(Instance)); - static sk_sp FindVertexBuffer(GrOnFlushResourceProvider*); - static sk_sp FindIndexBuffer(GrOnFlushResourceProvider*); + static sk_sp FindVertexBuffer(GrOnFlushResourceProvider*); + static sk_sp FindIndexBuffer(GrOnFlushResourceProvider*); GrCCPathProcessor(const GrTextureProxy* atlas, const SkMatrix& viewMatrixIfUsingLocalCoords = SkMatrix::I()); diff --git a/src/gpu/ccpr/GrCCPerFlushResources.h b/src/gpu/ccpr/GrCCPerFlushResources.h index fc69cee0ec..d50778726b 100644 --- a/src/gpu/ccpr/GrCCPerFlushResources.h +++ b/src/gpu/ccpr/GrCCPerFlushResources.h @@ -105,15 +105,15 @@ public: // Accessors used by draw calls, once the resources have been finalized. 
const GrCCFiller& filler() const { SkASSERT(!this->isMapped()); return fFiller; } const GrCCStroker& stroker() const { SkASSERT(!this->isMapped()); return fStroker; } - sk_sp refIndexBuffer() const { + sk_sp refIndexBuffer() const { SkASSERT(!this->isMapped()); return fIndexBuffer; } - sk_sp refVertexBuffer() const { + sk_sp refVertexBuffer() const { SkASSERT(!this->isMapped()); return fVertexBuffer; } - sk_sp refInstanceBuffer() const { + sk_sp refInstanceBuffer() const { SkASSERT(!this->isMapped()); return fInstanceBuffer; } @@ -131,9 +131,9 @@ private: GrCCAtlasStack fCopyAtlasStack; GrCCAtlasStack fRenderedAtlasStack; - const sk_sp fIndexBuffer; - const sk_sp fVertexBuffer; - const sk_sp fInstanceBuffer; + const sk_sp fIndexBuffer; + const sk_sp fVertexBuffer; + const sk_sp fInstanceBuffer; GrCCPathProcessor::Instance* fPathInstanceData = nullptr; int fNextCopyInstanceIdx; diff --git a/src/gpu/ccpr/GrCCStroker.cpp b/src/gpu/ccpr/GrCCStroker.cpp index e0e1bc3886..c097c0acda 100644 --- a/src/gpu/ccpr/GrCCStroker.cpp +++ b/src/gpu/ccpr/GrCCStroker.cpp @@ -497,7 +497,7 @@ public: } } - sk_sp finish() { + sk_sp finish() { SkASSERT(this->isMapped()); SkASSERT(!memcmp(fNextInstances, fEndInstances, sizeof(fNextInstances))); fInstanceBuffer->unmap(); @@ -543,7 +543,7 @@ private: InstanceTallies* fCurrNextInstances; SkDEBUGCODE(const InstanceTallies* fCurrEndInstances); - sk_sp fInstanceBuffer; + sk_sp fInstanceBuffer; void* fInstanceBufferData = nullptr; InstanceTallies fNextInstances[2]; SkDEBUGCODE(InstanceTallies fEndInstances[2]); diff --git a/src/gpu/ccpr/GrCCStroker.h b/src/gpu/ccpr/GrCCStroker.h index ac710117cf..6ddbd156b7 100644 --- a/src/gpu/ccpr/GrCCStroker.h +++ b/src/gpu/ccpr/GrCCStroker.h @@ -13,7 +13,7 @@ #include "SkNx.h" #include "ccpr/GrCCStrokeGeometry.h" -class GrBuffer; +class GrGpuBuffer; class GrCCCoverageProcessor; class GrOnFlushResourceProvider; class GrOpFlushState; @@ -116,7 +116,7 @@ private: GrSTAllocator<128, InstanceTallies> 
fTalliesAllocator; const InstanceTallies* fInstanceCounts[kNumScissorModes] = {&fZeroTallies, &fZeroTallies}; - sk_sp fInstanceBuffer; + sk_sp fInstanceBuffer; // The indices stored in batches are relative to these base instances. InstanceTallies fBaseInstances[kNumScissorModes]; diff --git a/src/gpu/gl/GrGLBuffer.cpp b/src/gpu/gl/GrGLBuffer.cpp index dd3e1c5c02..433540900b 100644 --- a/src/gpu/gl/GrGLBuffer.cpp +++ b/src/gpu/gl/GrGLBuffer.cpp @@ -176,8 +176,8 @@ void GrGLBuffer::onMap() { case GrGLCaps::kMapBuffer_MapBufferType: { GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); // Let driver know it can discard the old data - if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->sizeInBytes()) { - GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); + if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) { + GL_CALL(BufferData(target, this->size(), nullptr, fUsage)); } GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); break; @@ -185,30 +185,30 @@ void GrGLBuffer::onMap() { case GrGLCaps::kMapBufferRange_MapBufferType: { GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); // Make sure the GL buffer size agrees with fDesc before mapping. - if (fGLSizeInBytes != this->sizeInBytes()) { - GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); + if (fGLSizeInBytes != this->size()) { + GL_CALL(BufferData(target, this->size(), nullptr, fUsage)); } GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT; if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) { // TODO: Make this a function parameter. writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT; } - GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->sizeInBytes(), - readOnly ? GR_GL_MAP_READ_BIT : writeAccess)); + GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(), + readOnly ? 
GR_GL_MAP_READ_BIT : writeAccess)); break; } case GrGLCaps::kChromium_MapBufferType: { GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); // Make sure the GL buffer size agrees with fDesc before mapping. - if (fGLSizeInBytes != this->sizeInBytes()) { - GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); + if (fGLSizeInBytes != this->size()) { + GL_CALL(BufferData(target, this->size(), nullptr, fUsage)); } - GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->sizeInBytes(), - readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); + GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(), + readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); break; } } - fGLSizeInBytes = this->sizeInBytes(); + fGLSizeInBytes = this->size(); VALIDATE(); } @@ -251,15 +251,15 @@ bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) { SkASSERT(!this->isMapped()); VALIDATE(); - if (srcSizeInBytes > this->sizeInBytes()) { + if (srcSizeInBytes > this->size()) { return false; } - SkASSERT(srcSizeInBytes <= this->sizeInBytes()); + SkASSERT(srcSizeInBytes <= this->size()); // bindbuffer handles dirty context GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); if (this->glCaps().useBufferDataNullHint()) { - if (this->sizeInBytes() == srcSizeInBytes) { + if (this->size() == srcSizeInBytes) { GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage)); } else { // Before we call glBufferSubData we give the driver a hint using @@ -269,10 +269,10 @@ bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) { // assign a different allocation for the new contents to avoid // flushing the gpu past draws consuming the old contents. 
// TODO I think we actually want to try calling bufferData here - GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); + GL_CALL(BufferData(target, this->size(), nullptr, fUsage)); GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src)); } - fGLSizeInBytes = this->sizeInBytes(); + fGLSizeInBytes = this->size(); } else { // Note that we're cheating on the size here. Currently no methods // allow a partial update that preserves contents of non-updated @@ -296,7 +296,7 @@ void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump, void GrGLBuffer::validate() const { SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes); - SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->sizeInBytes()); + SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->size()); } #endif diff --git a/src/gpu/gl/GrGLBuffer.h b/src/gpu/gl/GrGLBuffer.h index 18480ffb90..76d902c38e 100644 --- a/src/gpu/gl/GrGLBuffer.h +++ b/src/gpu/gl/GrGLBuffer.h @@ -8,13 +8,13 @@ #ifndef GrGLBuffer_DEFINED #define GrGLBuffer_DEFINED -#include "GrBuffer.h" +#include "GrGpuBuffer.h" #include "gl/GrGLTypes.h" class GrGLGpu; class GrGLCaps; -class GrGLBuffer : public GrBuffer { +class GrGLBuffer : public GrGpuBuffer { public: static sk_sp Make(GrGLGpu*, size_t size, GrGpuBufferType intendedType, GrAccessPattern, const void* data = nullptr); @@ -28,7 +28,7 @@ public: /** * Returns the actual size of the underlying GL buffer object. In certain cases we may make this - * smaller than the size reported by GrBuffer. + * smaller than the size reported by GrGpuBuffer. 
*/ size_t glSizeInBytes() const { return fGLSizeInBytes; } @@ -62,7 +62,7 @@ private: size_t fGLSizeInBytes; bool fHasAttachedToTexture; - typedef GrBuffer INHERITED; + typedef GrGpuBuffer INHERITED; }; #endif diff --git a/src/gpu/gl/GrGLGpu.cpp b/src/gpu/gl/GrGLGpu.cpp index a22df4fc66..ee743d952e 100644 --- a/src/gpu/gl/GrGLGpu.cpp +++ b/src/gpu/gl/GrGLGpu.cpp @@ -8,6 +8,7 @@ #include "GrGLGpu.h" #include "GrBackendSemaphore.h" #include "GrBackendSurface.h" +#include "GrCpuBuffer.h" #include "GrFixedClip.h" #include "GrGLBuffer.h" #include "GrGLGpuCommandBuffer.h" @@ -842,8 +843,8 @@ static inline GrGLint config_alignment(GrPixelConfig config) { } bool GrGLGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height, - GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset, - size_t rowBytes) { + GrColorType bufferColorType, GrGpuBuffer* transferBuffer, + size_t offset, size_t rowBytes) { GrGLTexture* glTex = static_cast(texture); GrPixelConfig texConfig = glTex->config(); SkASSERT(this->caps()->isConfigTexturable(texConfig)); @@ -864,7 +865,7 @@ bool GrGLGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, GL_CALL(BindTexture(glTex->target(), glTex->textureID())); SkASSERT(!transferBuffer->isMapped()); - SkASSERT(!transferBuffer->isCPUBacked()); + SkASSERT(!transferBuffer->isCpuBuffer()); const GrGLBuffer* glBuffer = static_cast(transferBuffer); this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer); @@ -1847,8 +1848,8 @@ GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRen //////////////////////////////////////////////////////////////////////////////// -sk_sp GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType, - GrAccessPattern accessPattern, const void* data) { +sk_sp GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType, + GrAccessPattern accessPattern, const void* data) { return GrGLBuffer::Make(this, size, intendedType, accessPattern, data); } 
@@ -2063,7 +2064,8 @@ void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer, GrGLAttribArrayState* attribState; if (indexBuffer) { - SkASSERT(indexBuffer && !indexBuffer->isMapped()); + SkASSERT(indexBuffer->isCpuBuffer() || + !static_cast(indexBuffer)->isMapped()); attribState = fHWVertexArrayState.bindInternalVertexArray(this, indexBuffer); } else { attribState = fHWVertexArrayState.bindInternalVertexArray(this); @@ -2073,9 +2075,10 @@ void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer, attribState->enableVertexArrays(this, numAttribs, enablePrimitiveRestart); if (int vertexStride = fHWProgram->vertexStride()) { - SkASSERT(vertexBuffer && !vertexBuffer->isMapped()); - size_t bufferOffset = vertexBuffer->baseOffset(); - bufferOffset += baseVertex * static_cast(vertexStride); + SkASSERT(vertexBuffer); + SkASSERT(vertexBuffer->isCpuBuffer() || + !static_cast(vertexBuffer)->isMapped()); + size_t bufferOffset = baseVertex * static_cast(vertexStride); for (int i = 0; i < fHWProgram->numVertexAttributes(); ++i) { const auto& attrib = fHWProgram->vertexAttribute(i); static constexpr int kDivisor = 0; @@ -2084,9 +2087,10 @@ void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer, } } if (int instanceStride = fHWProgram->instanceStride()) { - SkASSERT(instanceBuffer && !instanceBuffer->isMapped()); - size_t bufferOffset = instanceBuffer->baseOffset(); - bufferOffset += baseInstance * static_cast(instanceStride); + SkASSERT(instanceBuffer); + SkASSERT(instanceBuffer->isCpuBuffer() || + !static_cast(instanceBuffer)->isMapped()); + size_t bufferOffset = baseInstance * static_cast(instanceStride); int attribIdx = fHWProgram->numVertexAttributes(); for (int i = 0; i < fHWProgram->numInstanceAttributes(); ++i, ++attribIdx) { const auto& attrib = fHWProgram->instanceAttribute(i); @@ -2107,13 +2111,14 @@ GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) { } auto* bufferState = this->hwBufferState(type); - if (buffer->isCPUBacked()) { + if 
(buffer->isCpuBuffer()) { if (!bufferState->fBufferZeroKnownBound) { GL_CALL(BindBuffer(bufferState->fGLTarget, 0)); bufferState->fBufferZeroKnownBound = true; bufferState->fBoundBufferUniqueID.makeInvalid(); } - } else if (buffer->uniqueID() != bufferState->fBoundBufferUniqueID) { + } else if (static_cast(buffer)->uniqueID() != + bufferState->fBoundBufferUniqueID) { const GrGLBuffer* glBuffer = static_cast(buffer); GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID())); bufferState->fBufferZeroKnownBound = false; @@ -2608,21 +2613,29 @@ void GrGLGpu::sendMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* verte fStats.incNumDraws(); } +static const GrGLvoid* element_ptr(const GrBuffer* indexBuffer, int baseIndex) { + size_t baseOffset = baseIndex * sizeof(uint16_t); + if (indexBuffer->isCpuBuffer()) { + return static_cast(indexBuffer)->data() + baseOffset; + } else { + return reinterpret_cast(baseOffset); + } +} + void GrGLGpu::sendIndexedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* indexBuffer, int indexCount, int baseIndex, uint16_t minIndexValue, uint16_t maxIndexValue, const GrBuffer* vertexBuffer, int baseVertex, GrPrimitiveRestart enablePrimitiveRestart) { const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType); - GrGLvoid* const indices = reinterpret_cast(indexBuffer->baseOffset() + - sizeof(uint16_t) * baseIndex); + const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex); this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, nullptr, 0, enablePrimitiveRestart); if (this->glCaps().drawRangeElementsSupport()) { GL_CALL(DrawRangeElements(glPrimType, minIndexValue, maxIndexValue, indexCount, - GR_GL_UNSIGNED_SHORT, indices)); + GR_GL_UNSIGNED_SHORT, elementPtr)); } else { - GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices)); + GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr)); } fStats.incNumDraws(); } @@ -2649,13 +2662,12 @@ void 
GrGLGpu::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType, int instanceCount, int baseInstance, GrPrimitiveRestart enablePrimitiveRestart) { const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType); - GrGLvoid* indices = reinterpret_cast(indexBuffer->baseOffset() + - sizeof(uint16_t) * baseIndex); + const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex); int maxInstances = this->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount); for (int i = 0; i < instanceCount; i += maxInstances) { this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, instanceBuffer, baseInstance + i, enablePrimitiveRestart); - GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices, + GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr, SkTMin(instanceCount - i, maxInstances))); fStats.incNumDraws(); } diff --git a/src/gpu/gl/GrGLGpu.h b/src/gpu/gl/GrGLGpu.h index 5167b4a65e..83d8bf23c7 100644 --- a/src/gpu/gl/GrGLGpu.h +++ b/src/gpu/gl/GrGLGpu.h @@ -187,8 +187,8 @@ private: sk_sp onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted, const GrMipLevel texels[], int mipLevelCount) override; - sk_sp onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern, - const void* data) override; + sk_sp onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern, + const void* data) override; sk_sp onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership, GrWrapCacheable, GrIOType) override; @@ -233,7 +233,7 @@ private: const GrMipLevel texels[], int mipLevelCount) override; bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType, - GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override; + GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override; // Before calling any variation of TexImage, TexSubImage, etc..., call this to ensure that the // PIXEL_UNPACK_BUFFER is unbound. 
diff --git a/src/gpu/gl/GrGLVertexArray.cpp b/src/gpu/gl/GrGLVertexArray.cpp index 6b12a3fa21..ab52b256b9 100644 --- a/src/gpu/gl/GrGLVertexArray.cpp +++ b/src/gpu/gl/GrGLVertexArray.cpp @@ -6,6 +6,7 @@ */ #include "GrGLVertexArray.h" +#include "GrCpuBuffer.h" #include "GrGLBuffer.h" #include "GrGLGpu.h" @@ -89,14 +90,32 @@ void GrGLAttribArrayState::set(GrGLGpu* gpu, SkASSERT(index >= 0 && index < fAttribArrayStates.count()); SkASSERT(0 == divisor || gpu->caps()->instanceAttribSupport()); AttribArrayState* array = &fAttribArrayStates[index]; - if (array->fVertexBufferUniqueID != vertexBuffer->uniqueID() || + const char* offsetAsPtr; + bool bufferChanged = false; + if (vertexBuffer->isCpuBuffer()) { + if (!array->fUsingCpuBuffer) { + bufferChanged = true; + array->fUsingCpuBuffer = true; + } + offsetAsPtr = static_cast(vertexBuffer)->data() + offsetInBytes; + } else { + auto gpuBuffer = static_cast(vertexBuffer); + if (array->fUsingCpuBuffer || array->fVertexBufferUniqueID != gpuBuffer->uniqueID()) { + bufferChanged = true; + array->fVertexBufferUniqueID = gpuBuffer->uniqueID(); + } + offsetAsPtr = reinterpret_cast(offsetInBytes); + } + if (bufferChanged || array->fCPUType != cpuType || array->fGPUType != gpuType || array->fStride != stride || - array->fOffset != offsetInBytes) { + array->fOffset != offsetAsPtr) { + // We always have to call this if we're going to change the array pointer. 'array' is + // tracking the last buffer used to setup attrib pointers, not the last buffer bound. + // GrGLGpu will avoid redundant binds. 
gpu->bindBuffer(GrGpuBufferType::kVertex, vertexBuffer); const AttribLayout& layout = attrib_layout(cpuType); - const GrGLvoid* offsetAsPtr = reinterpret_cast(offsetInBytes); if (GrSLTypeIsFloatType(gpuType)) { GR_GL_CALL(gpu->glInterface(), VertexAttribPointer(index, layout.fCount, @@ -113,11 +132,10 @@ void GrGLAttribArrayState::set(GrGLGpu* gpu, stride, offsetAsPtr)); } - array->fVertexBufferUniqueID = vertexBuffer->uniqueID(); array->fCPUType = cpuType; array->fGPUType = gpuType; array->fStride = stride; - array->fOffset = offsetInBytes; + array->fOffset = offsetAsPtr; } if (gpu->caps()->instanceAttribSupport() && array->fDivisor != divisor) { SkASSERT(0 == divisor || 1 == divisor); // not necessarily a requirement but what we expect. @@ -179,15 +197,19 @@ GrGLAttribArrayState* GrGLVertexArray::bind(GrGLGpu* gpu) { GrGLAttribArrayState* GrGLVertexArray::bindWithIndexBuffer(GrGLGpu* gpu, const GrBuffer* ibuff) { GrGLAttribArrayState* state = this->bind(gpu); - if (state && fIndexBufferUniqueID != ibuff->uniqueID()) { - if (ibuff->isCPUBacked()) { - GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0)); - } else { + if (!state) { + return nullptr; + } + if (ibuff->isCpuBuffer()) { + GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0)); + } else { + const GrGLBuffer* glBuffer = static_cast(ibuff); + if (fIndexBufferUniqueID != glBuffer->uniqueID()) { const GrGLBuffer* glBuffer = static_cast(ibuff); - GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, - glBuffer->bufferID())); + GR_GL_CALL(gpu->glInterface(), + BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, glBuffer->bufferID())); + fIndexBufferUniqueID = glBuffer->uniqueID(); } - fIndexBufferUniqueID = ibuff->uniqueID(); } return state; } diff --git a/src/gpu/gl/GrGLVertexArray.h b/src/gpu/gl/GrGLVertexArray.h index 93bd526055..4e28e627a9 100644 --- a/src/gpu/gl/GrGLVertexArray.h +++ b/src/gpu/gl/GrGLVertexArray.h @@ -75,13 +75,15 @@ private: void invalidate() { 
fVertexBufferUniqueID.makeInvalid(); fDivisor = kInvalidDivisor; + fUsingCpuBuffer = false; } GrGpuResource::UniqueID fVertexBufferUniqueID; + bool fUsingCpuBuffer; GrVertexAttribType fCPUType; GrSLType fGPUType; GrGLsizei fStride; - size_t fOffset; + const GrGLvoid* fOffset; int fDivisor; }; diff --git a/src/gpu/mock/GrMockBuffer.h b/src/gpu/mock/GrMockBuffer.h index cf915ee7a5..efb959b82e 100644 --- a/src/gpu/mock/GrMockBuffer.h +++ b/src/gpu/mock/GrMockBuffer.h @@ -8,11 +8,11 @@ #ifndef GrMockBuffer_DEFINED #define GrMockBuffer_DEFINED -#include "GrBuffer.h" #include "GrCaps.h" +#include "GrGpuBuffer.h" #include "GrMockGpu.h" -class GrMockBuffer : public GrBuffer { +class GrMockBuffer : public GrGpuBuffer { public: GrMockBuffer(GrMockGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, GrAccessPattern accessPattern) @@ -23,13 +23,13 @@ public: private: void onMap() override { if (GrCaps::kNone_MapFlags != this->getGpu()->caps()->mapBufferFlags()) { - fMapPtr = sk_malloc_throw(this->sizeInBytes()); + fMapPtr = sk_malloc_throw(this->size()); } } void onUnmap() override { sk_free(fMapPtr); } bool onUpdateData(const void* src, size_t srcSizeInBytes) override { return true; } - typedef GrBuffer INHERITED; + typedef GrGpuBuffer INHERITED; }; #endif diff --git a/src/gpu/mock/GrMockGpu.cpp b/src/gpu/mock/GrMockGpu.cpp index 4960fd0557..1fdae0a360 100644 --- a/src/gpu/mock/GrMockGpu.cpp +++ b/src/gpu/mock/GrMockGpu.cpp @@ -183,9 +183,9 @@ sk_sp GrMockGpu::onWrapBackendTextureAsRenderTarget(const GrBack new GrMockRenderTarget(this, GrMockRenderTarget::kWrapped, desc, rtInfo)); } -sk_sp GrMockGpu::onCreateBuffer(size_t sizeInBytes, GrGpuBufferType type, - GrAccessPattern accessPattern, const void*) { - return sk_sp(new GrMockBuffer(this, sizeInBytes, type, accessPattern)); +sk_sp GrMockGpu::onCreateBuffer(size_t sizeInBytes, GrGpuBufferType type, + GrAccessPattern accessPattern, const void*) { + return sk_sp(new GrMockBuffer(this, sizeInBytes, type, accessPattern)); } 
GrStencilAttachment* GrMockGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt, diff --git a/src/gpu/mock/GrMockGpu.h b/src/gpu/mock/GrMockGpu.h index d574a5d33b..0131a7b6f2 100644 --- a/src/gpu/mock/GrMockGpu.h +++ b/src/gpu/mock/GrMockGpu.h @@ -72,8 +72,8 @@ private: sk_sp onWrapBackendTextureAsRenderTarget(const GrBackendTexture&, int sampleCnt) override; - sk_sp onCreateBuffer(size_t sizeInBytes, GrGpuBufferType, GrAccessPattern, - const void*) override; + sk_sp onCreateBuffer(size_t sizeInBytes, GrGpuBufferType, GrAccessPattern, + const void*) override; bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType, void* buffer, size_t rowBytes) override { @@ -86,7 +86,7 @@ private: } bool onTransferPixels(GrTexture* texture, int left, int top, int width, int height, GrColorType, - GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override { + GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override { return true; } diff --git a/src/gpu/mtl/GrMtlBuffer.h b/src/gpu/mtl/GrMtlBuffer.h index 63a924bc28..90e67fd432 100644 --- a/src/gpu/mtl/GrMtlBuffer.h +++ b/src/gpu/mtl/GrMtlBuffer.h @@ -8,14 +8,14 @@ #ifndef GrMtlBuffer_DEFINED #define GrMtlBuffer_DEFINED -#include "GrBuffer.h" +#include "GrGpuBuffer.h" #import class GrMtlCaps; class GrMtlGpu; -class GrMtlBuffer: public GrBuffer { +class GrMtlBuffer: public GrGpuBuffer { public: static sk_sp Make(GrMtlGpu*, size_t size, GrGpuBufferType intendedType, GrAccessPattern, const void* data = nullptr); @@ -48,7 +48,7 @@ private: id fMtlBuffer; id fMappedBuffer; - typedef GrBuffer INHERITED; + typedef GrGpuBuffer INHERITED; }; #endif diff --git a/src/gpu/mtl/GrMtlGpu.h b/src/gpu/mtl/GrMtlGpu.h index d82a30af90..f36be1afbe 100644 --- a/src/gpu/mtl/GrMtlGpu.h +++ b/src/gpu/mtl/GrMtlGpu.h @@ -143,7 +143,8 @@ private: sk_sp onWrapBackendTextureAsRenderTarget(const GrBackendTexture&, int sampleCnt) override; - sk_sp onCreateBuffer(size_t, GrGpuBufferType, 
GrAccessPattern, const void*) override; + sk_sp onCreateBuffer(size_t, GrGpuBufferType, GrAccessPattern, + const void*) override; bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType, void* buffer, size_t rowBytes) override; @@ -153,7 +154,7 @@ private: bool onTransferPixels(GrTexture*, int left, int top, int width, int height, - GrColorType, GrBuffer*, + GrColorType, GrGpuBuffer*, size_t offset, size_t rowBytes) override { return false; } diff --git a/src/gpu/mtl/GrMtlGpu.mm b/src/gpu/mtl/GrMtlGpu.mm index 833dfdb0f3..e11a04caa9 100644 --- a/src/gpu/mtl/GrMtlGpu.mm +++ b/src/gpu/mtl/GrMtlGpu.mm @@ -131,8 +131,8 @@ void GrMtlGpu::submitCommandBuffer(SyncQueue sync) { fCmdBuffer = [fQueue commandBuffer]; } -sk_sp GrMtlGpu::onCreateBuffer(size_t size, GrGpuBufferType type, - GrAccessPattern accessPattern, const void* data) { +sk_sp GrMtlGpu::onCreateBuffer(size_t size, GrGpuBufferType type, + GrAccessPattern accessPattern, const void* data) { return GrMtlBuffer::Make(this, size, type, accessPattern, data); } diff --git a/src/gpu/mtl/GrMtlGpuCommandBuffer.mm b/src/gpu/mtl/GrMtlGpuCommandBuffer.mm index 7df017c946..914e9e3437 100644 --- a/src/gpu/mtl/GrMtlGpuCommandBuffer.mm +++ b/src/gpu/mtl/GrMtlGpuCommandBuffer.mm @@ -274,8 +274,8 @@ void GrMtlGpuRTCommandBuffer::bindGeometry(const GrBuffer* vertexBuffer, const GrBuffer* instanceBuffer) { size_t bufferIndex = GrMtlUniformHandler::kLastUniformBinding + 1; if (vertexBuffer) { - SkASSERT(!vertexBuffer->isCPUBacked()); - SkASSERT(!vertexBuffer->isMapped()); + SkASSERT(!vertexBuffer->isCpuBuffer()); + SkASSERT(!static_cast(vertexBuffer)->isMapped()); auto mtlVertexBuffer = static_cast(vertexBuffer)->mtlBuffer(); SkASSERT(mtlVertexBuffer); @@ -284,8 +284,8 @@ void GrMtlGpuRTCommandBuffer::bindGeometry(const GrBuffer* vertexBuffer, atIndex: bufferIndex++]; } if (instanceBuffer) { - SkASSERT(!instanceBuffer->isCPUBacked()); - SkASSERT(!instanceBuffer->isMapped()); + 
SkASSERT(!instanceBuffer->isCpuBuffer()); + SkASSERT(!static_cast(instanceBuffer)->isMapped()); auto mtlInstanceBuffer = static_cast(instanceBuffer)->mtlBuffer(); SkASSERT(mtlInstanceBuffer); @@ -327,8 +327,8 @@ void GrMtlGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType prim SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported. id mtlIndexBuffer; if (indexBuffer) { - SkASSERT(!indexBuffer->isCPUBacked()); - SkASSERT(!indexBuffer->isMapped()); + SkASSERT(!indexBuffer->isCpuBuffer()); + SkASSERT(!static_cast(indexBuffer)->isMapped()); mtlIndexBuffer = static_cast(indexBuffer)->mtlBuffer(); SkASSERT(mtlIndexBuffer); diff --git a/src/gpu/mtl/GrMtlPipelineState.mm b/src/gpu/mtl/GrMtlPipelineState.mm index e1e4977f54..62b847faa9 100644 --- a/src/gpu/mtl/GrMtlPipelineState.mm +++ b/src/gpu/mtl/GrMtlPipelineState.mm @@ -55,8 +55,8 @@ GrMtlPipelineState::GrMtlPipelineState( , fXferProcessor(std::move(xferProcessor)) , fFragmentProcessors(std::move(fragmentProcessors)) , fFragmentProcessorCnt(fragmentProcessorCnt) - , fDataManager(uniforms, fGeometryUniformBuffer->sizeInBytes(), - fFragmentUniformBuffer->sizeInBytes()) { + , fDataManager(uniforms, fGeometryUniformBuffer->size(), + fFragmentUniformBuffer->size()) { (void) fPixelFormat; // Suppress unused-var warning. 
} diff --git a/src/gpu/ops/GrAtlasTextOp.cpp b/src/gpu/ops/GrAtlasTextOp.cpp index d4ffc7c4cf..b10c637db5 100644 --- a/src/gpu/ops/GrAtlasTextOp.cpp +++ b/src/gpu/ops/GrAtlasTextOp.cpp @@ -423,8 +423,7 @@ void GrAtlasTextOp::flush(GrMeshDrawOp::Target* target, FlushInfo* flushInfo) co samplerState); } } - int maxGlyphsPerDraw = - static_cast(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6); + int maxGlyphsPerDraw = static_cast(flushInfo->fIndexBuffer->size() / sizeof(uint16_t) / 6); GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles); mesh->setIndexedPatterned(flushInfo->fIndexBuffer, kIndicesPerGlyph, kVerticesPerGlyph, flushInfo->fGlyphsToFlush, maxGlyphsPerDraw); diff --git a/src/gpu/ops/GrDrawVerticesOp.cpp b/src/gpu/ops/GrDrawVerticesOp.cpp index 2b368fee4e..660d640cc8 100644 --- a/src/gpu/ops/GrDrawVerticesOp.cpp +++ b/src/gpu/ops/GrDrawVerticesOp.cpp @@ -227,7 +227,7 @@ void GrDrawVerticesOp::drawVolatile(Target* target) { // Allocate buffers. size_t vertexStride = gp->vertexStride(); - sk_sp vertexBuffer = nullptr; + sk_sp vertexBuffer; int firstVertex = 0; void* verts = target->makeVertexSpace(vertexStride, fVertexCount, &vertexBuffer, &firstVertex); if (!verts) { @@ -235,7 +235,7 @@ void GrDrawVerticesOp::drawVolatile(Target* target) { return; } - sk_sp indexBuffer = nullptr; + sk_sp indexBuffer; int firstIndex = 0; uint16_t* indices = nullptr; if (this->isIndexed()) { @@ -286,10 +286,9 @@ void GrDrawVerticesOp::drawNonVolatile(Target* target) { indexKeyBuilder.finish(); // Try to grab data from the cache. - sk_sp vertexBuffer = rp->findByUniqueKey(vertexKey); - sk_sp indexBuffer = this->isIndexed() ? - rp->findByUniqueKey(indexKey) : - nullptr; + sk_sp vertexBuffer = rp->findByUniqueKey(vertexKey); + sk_sp indexBuffer = + this->isIndexed() ? rp->findByUniqueKey(indexKey) : nullptr; // Draw using the cached buffers if possible. 
if (vertexBuffer && (!this->isIndexed() || indexBuffer)) { @@ -300,10 +299,8 @@ void GrDrawVerticesOp::drawNonVolatile(Target* target) { // Allocate vertex buffer. size_t vertexStride = gp->vertexStride(); - vertexBuffer = rp->createBuffer(fVertexCount * vertexStride, - GrGpuBufferType::kVertex, - kStatic_GrAccessPattern, - GrResourceProvider::Flags::kNone); + vertexBuffer = rp->createBuffer( + fVertexCount * vertexStride, GrGpuBufferType::kVertex, kStatic_GrAccessPattern); void* verts = vertexBuffer ? vertexBuffer->map() : nullptr; if (!verts) { SkDebugf("Could not allocate vertices\n"); @@ -313,10 +310,8 @@ void GrDrawVerticesOp::drawNonVolatile(Target* target) { // Allocate index buffer. uint16_t* indices = nullptr; if (this->isIndexed()) { - indexBuffer = rp->createBuffer(fIndexCount * sizeof(uint16_t), - GrGpuBufferType::kIndex, - kStatic_GrAccessPattern, - GrResourceProvider::Flags::kNone); + indexBuffer = rp->createBuffer( + fIndexCount * sizeof(uint16_t), GrGpuBufferType::kIndex, kStatic_GrAccessPattern); indices = indexBuffer ? 
static_cast(indexBuffer->map()) : nullptr; if (!indices) { SkDebugf("Could not allocate indices\n"); diff --git a/src/gpu/ops/GrMeshDrawOp.cpp b/src/gpu/ops/GrMeshDrawOp.cpp index d467a6b27c..ac019f3409 100644 --- a/src/gpu/ops/GrMeshDrawOp.cpp +++ b/src/gpu/ops/GrMeshDrawOp.cpp @@ -45,10 +45,10 @@ void GrMeshDrawOp::PatternHelper::init(Target* target, GrPrimitiveType primitive return; } SkASSERT(vertexBuffer); - size_t ibSize = indexBuffer->gpuMemorySize(); + size_t ibSize = indexBuffer->size(); int maxRepetitions = static_cast(ibSize / (sizeof(uint16_t) * indicesPerRepetition)); fMesh = target->allocMesh(primitiveType); - fMesh->setIndexedPatterned(indexBuffer, indicesPerRepetition, verticesPerRepetition, + fMesh->setIndexedPatterned(std::move(indexBuffer), indicesPerRepetition, verticesPerRepetition, repeatCount, maxRepetitions); fMesh->setVertexData(std::move(vertexBuffer), firstVertex); } @@ -62,7 +62,7 @@ void GrMeshDrawOp::PatternHelper::recordDraw( ////////////////////////////////////////////////////////////////////////////// GrMeshDrawOp::QuadHelper::QuadHelper(Target* target, size_t vertexStride, int quadsToDraw) { - sk_sp quadIndexBuffer = target->resourceProvider()->refQuadIndexBuffer(); + sk_sp quadIndexBuffer = target->resourceProvider()->refQuadIndexBuffer(); if (!quadIndexBuffer) { SkDebugf("Could not get quad index buffer."); return; diff --git a/src/gpu/ops/GrMeshDrawOp.h b/src/gpu/ops/GrMeshDrawOp.h index bba173d601..170fee0e85 100644 --- a/src/gpu/ops/GrMeshDrawOp.h +++ b/src/gpu/ops/GrMeshDrawOp.h @@ -34,8 +34,9 @@ protected: space for the vertices and flushes the draws to the GrMeshDrawOp::Target. 
*/ class PatternHelper { public: - PatternHelper(Target*, GrPrimitiveType, size_t vertexStride, sk_sp, - int verticesPerRepetition, int indicesPerRepetition, int repeatCount); + PatternHelper(Target*, GrPrimitiveType, size_t vertexStride, + sk_sp indexBuffer, int verticesPerRepetition, + int indicesPerRepetition, int repeatCount); /** Called to issue draws to the GrMeshDrawOp::Target.*/ void recordDraw(Target*, sk_sp, const GrPipeline*, @@ -45,7 +46,7 @@ protected: protected: PatternHelper() = default; - void init(Target*, GrPrimitiveType, size_t vertexStride, sk_sp, + void init(Target*, GrPrimitiveType, size_t vertexStride, sk_sp indexBuffer, int verticesPerRepetition, int indicesPerRepetition, int repeatCount); private: diff --git a/src/gpu/ops/GrQuadPerEdgeAA.cpp b/src/gpu/ops/GrQuadPerEdgeAA.cpp index 70caf55961..de2641f2fd 100644 --- a/src/gpu/ops/GrQuadPerEdgeAA.cpp +++ b/src/gpu/ops/GrQuadPerEdgeAA.cpp @@ -374,7 +374,7 @@ GR_DECLARE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey); static const int kVertsPerAAFillRect = 8; static const int kIndicesPerAAFillRect = 30; -static sk_sp get_index_buffer(GrResourceProvider* resourceProvider) { +static sk_sp get_index_buffer(GrResourceProvider* resourceProvider) { GR_DEFINE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey); // clang-format off @@ -467,7 +467,7 @@ bool ConfigureMeshIndices(GrMeshDrawOp::Target* target, GrMesh* mesh, const Vert int quadCount) { if (spec.usesCoverageAA()) { // AA quads use 8 vertices, basically nested rectangles - sk_sp ibuffer = get_index_buffer(target->resourceProvider()); + sk_sp ibuffer = get_index_buffer(target->resourceProvider()); if (!ibuffer) { return false; } @@ -478,7 +478,7 @@ bool ConfigureMeshIndices(GrMeshDrawOp::Target* target, GrMesh* mesh, const Vert } else { // Non-AA quads use 4 vertices, and regular triangle strip layout if (quadCount > 1) { - sk_sp ibuffer = target->resourceProvider()->refQuadIndexBuffer(); + sk_sp ibuffer = 
target->resourceProvider()->refQuadIndexBuffer(); if (!ibuffer) { return false; } diff --git a/src/gpu/ops/GrRegionOp.cpp b/src/gpu/ops/GrRegionOp.cpp index 17254355c3..9db1feca66 100644 --- a/src/gpu/ops/GrRegionOp.cpp +++ b/src/gpu/ops/GrRegionOp.cpp @@ -109,7 +109,7 @@ private: if (!numRects) { return; } - sk_sp indexBuffer = target->resourceProvider()->refQuadIndexBuffer(); + sk_sp indexBuffer = target->resourceProvider()->refQuadIndexBuffer(); if (!indexBuffer) { SkDebugf("Could not allocate indices\n"); return; diff --git a/src/gpu/ops/GrSmallPathRenderer.cpp b/src/gpu/ops/GrSmallPathRenderer.cpp index 5132319305..13bdec15a2 100644 --- a/src/gpu/ops/GrSmallPathRenderer.cpp +++ b/src/gpu/ops/GrSmallPathRenderer.cpp @@ -794,7 +794,7 @@ private: if (flushInfo->fInstancesToFlush) { GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles); int maxInstancesPerDraw = - static_cast(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6); + static_cast(flushInfo->fIndexBuffer->size() / sizeof(uint16_t) / 6); mesh->setIndexedPatterned(flushInfo->fIndexBuffer, kIndicesPerQuad, kVerticesPerQuad, flushInfo->fInstancesToFlush, maxInstancesPerDraw); mesh->setVertexData(flushInfo->fVertexBuffer, flushInfo->fVertexOffset); diff --git a/src/gpu/ops/GrStrokeRectOp.cpp b/src/gpu/ops/GrStrokeRectOp.cpp index 949533312a..30242cc0a2 100644 --- a/src/gpu/ops/GrStrokeRectOp.cpp +++ b/src/gpu/ops/GrStrokeRectOp.cpp @@ -420,7 +420,7 @@ private: static const int kBevelVertexCnt = 24; static const int kNumBevelRectsInIndexBuffer = 256; - static sk_sp GetIndexBuffer(GrResourceProvider*, bool miterStroke); + static sk_sp GetIndexBuffer(GrResourceProvider*, bool miterStroke); const SkMatrix& viewMatrix() const { return fViewMatrix; } bool miterStroke() const { return fMiterStroke; } @@ -472,7 +472,7 @@ void AAStrokeRectOp::onPrepareDraws(Target* target) { int indicesPerInstance = this->miterStroke() ? 
kMiterIndexCnt : kBevelIndexCnt; int instanceCount = fRects.count(); - sk_sp indexBuffer = + sk_sp indexBuffer = GetIndexBuffer(target->resourceProvider(), this->miterStroke()); if (!indexBuffer) { SkDebugf("Could not allocate indices\n"); @@ -503,8 +503,8 @@ void AAStrokeRectOp::onPrepareDraws(Target* target) { helper.recordDraw(target, std::move(gp), pipe.fPipeline, pipe.fFixedDynamicState); } -sk_sp AAStrokeRectOp::GetIndexBuffer(GrResourceProvider* resourceProvider, - bool miterStroke) { +sk_sp AAStrokeRectOp::GetIndexBuffer(GrResourceProvider* resourceProvider, + bool miterStroke) { if (miterStroke) { // clang-format off static const uint16_t gMiterIndices[] = { diff --git a/src/gpu/ops/GrTessellatingPathRenderer.cpp b/src/gpu/ops/GrTessellatingPathRenderer.cpp index 0314e4ab0c..c29b7bf7f9 100644 --- a/src/gpu/ops/GrTessellatingPathRenderer.cpp +++ b/src/gpu/ops/GrTessellatingPathRenderer.cpp @@ -53,7 +53,7 @@ private: } }; -bool cache_match(GrBuffer* vertexBuffer, SkScalar tol, int* actualCount) { +bool cache_match(GrGpuBuffer* vertexBuffer, SkScalar tol, int* actualCount) { if (!vertexBuffer) { return false; } @@ -78,8 +78,7 @@ public: void* lock(int vertexCount) override { size_t size = vertexCount * stride(); fVertexBuffer = fResourceProvider->createBuffer(size, GrGpuBufferType::kVertex, - kStatic_GrAccessPattern, - GrResourceProvider::Flags::kNone); + kStatic_GrAccessPattern); if (!fVertexBuffer.get()) { return nullptr; } @@ -99,10 +98,10 @@ public: } fVertices = nullptr; } - sk_sp detachVertexBuffer() { return std::move(fVertexBuffer); } + sk_sp detachVertexBuffer() { return std::move(fVertexBuffer); } private: - sk_sp fVertexBuffer; + sk_sp fVertexBuffer; GrResourceProvider* fResourceProvider; bool fCanMapVB; void* fVertices; @@ -261,7 +260,7 @@ private: memset(&builder[shapeKeyDataCnt], 0, sizeof(fDevClipBounds)); } builder.finish(); - sk_sp cachedVertexBuffer(rp->findByUniqueKey(key)); + sk_sp cachedVertexBuffer(rp->findByUniqueKey(key)); int 
actualCount; SkScalar tol = GrPathUtils::kDefaultTolerance; tol = GrPathUtils::scaleToleranceToSrc(tol, fViewMatrix, fShape.bounds()); @@ -286,7 +285,7 @@ private: if (count == 0) { return; } - sk_sp vb = allocator.detachVertexBuffer(); + sk_sp vb = allocator.detachVertexBuffer(); TessInfo info; info.fTolerance = isLinear ? 0 : tol; info.fCount = count; diff --git a/src/gpu/vk/GrVkGpu.cpp b/src/gpu/vk/GrVkGpu.cpp index ad143ee224..3a820d9782 100644 --- a/src/gpu/vk/GrVkGpu.cpp +++ b/src/gpu/vk/GrVkGpu.cpp @@ -343,9 +343,9 @@ void GrVkGpu::submitCommandBuffer(SyncQueue sync) { } /////////////////////////////////////////////////////////////////////////////// -sk_sp GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type, - GrAccessPattern accessPattern, const void* data) { - sk_sp buff; +sk_sp GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type, + GrAccessPattern accessPattern, const void* data) { + sk_sp buff; switch (type) { case GrGpuBufferType::kVertex: SkASSERT(kDynamic_GrAccessPattern == accessPattern || @@ -419,7 +419,7 @@ bool GrVkGpu::onWritePixels(GrSurface* surface, int left, int top, int width, in } bool GrVkGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height, - GrColorType bufferColorType, GrBuffer* transferBuffer, + GrColorType bufferColorType, GrGpuBuffer* transferBuffer, size_t bufferOffset, size_t rowBytes) { // Can't transfer compressed data SkASSERT(!GrPixelConfigIsCompressed(texture->config())); diff --git a/src/gpu/vk/GrVkGpu.h b/src/gpu/vk/GrVkGpu.h index a52a982e9f..b956afccd4 100644 --- a/src/gpu/vk/GrVkGpu.h +++ b/src/gpu/vk/GrVkGpu.h @@ -198,8 +198,8 @@ private: sk_sp onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&, const GrVkDrawableInfo&) override; - sk_sp onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern, - const void* data) override; + sk_sp onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern, + const void* data) override; bool onReadPixels(GrSurface* surface, 
int left, int top, int width, int height, GrColorType, void* buffer, size_t rowBytes) override; @@ -208,7 +208,7 @@ private: const GrMipLevel texels[], int mipLevelCount) override; bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType, - GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override; + GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override; bool onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin, GrSurface* src, GrSurfaceOrigin srcOrigin, const SkIRect& srcRect, diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.cpp b/src/gpu/vk/GrVkGpuCommandBuffer.cpp index 33c4cd9c31..44405d7e8c 100644 --- a/src/gpu/vk/GrVkGpuCommandBuffer.cpp +++ b/src/gpu/vk/GrVkGpuCommandBuffer.cpp @@ -593,9 +593,9 @@ void GrVkGpuRTCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin, con //////////////////////////////////////////////////////////////////////////////// -void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer, - const GrBuffer* vertexBuffer, - const GrBuffer* instanceBuffer) { +void GrVkGpuRTCommandBuffer::bindGeometry(const GrGpuBuffer* indexBuffer, + const GrGpuBuffer* vertexBuffer, + const GrGpuBuffer* instanceBuffer) { GrVkSecondaryCommandBuffer* currCmdBuf = fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf(); // There is no need to put any memory barriers to make sure host writes have finished here. 
// When a command buffer is submitted to a queue, there is an implicit memory barrier that @@ -608,7 +608,6 @@ void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer, if (vertexBuffer) { SkASSERT(vertexBuffer); - SkASSERT(!vertexBuffer->isCPUBacked()); SkASSERT(!vertexBuffer->isMapped()); currCmdBuf->bindInputBuffer(fGpu, binding++, @@ -617,7 +616,6 @@ void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer, if (instanceBuffer) { SkASSERT(instanceBuffer); - SkASSERT(!instanceBuffer->isCPUBacked()); SkASSERT(!instanceBuffer->isMapped()); currCmdBuf->bindInputBuffer(fGpu, binding++, @@ -626,7 +624,6 @@ void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer, if (indexBuffer) { SkASSERT(indexBuffer); SkASSERT(!indexBuffer->isMapped()); - SkASSERT(!indexBuffer->isCPUBacked()); currCmdBuf->bindIndexBuffer(fGpu, static_cast(indexBuffer)); } @@ -807,7 +804,11 @@ void GrVkGpuRTCommandBuffer::sendInstancedMeshToGpu(GrPrimitiveType, int instanceCount, int baseInstance) { CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo]; - this->bindGeometry(nullptr, vertexBuffer, instanceBuffer); + SkASSERT(!vertexBuffer || !vertexBuffer->isCpuBuffer()); + SkASSERT(!instanceBuffer || !instanceBuffer->isCpuBuffer()); + auto gpuVertexBuffer = static_cast(vertexBuffer); + auto gpuInstanceBuffer = static_cast(instanceBuffer); + this->bindGeometry(nullptr, gpuVertexBuffer, gpuInstanceBuffer); cbInfo.currentCmdBuf()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance); fGpu->stats()->incNumDraws(); } @@ -824,7 +825,13 @@ void GrVkGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType, GrPrimitiveRestart restart) { SkASSERT(restart == GrPrimitiveRestart::kNo); CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo]; - this->bindGeometry(indexBuffer, vertexBuffer, instanceBuffer); + SkASSERT(!vertexBuffer || !vertexBuffer->isCpuBuffer()); + SkASSERT(!instanceBuffer || !instanceBuffer->isCpuBuffer()); + 
SkASSERT(!indexBuffer->isCpuBuffer()); + auto gpuIndexBuffer = static_cast(indexBuffer); + auto gpuVertexBuffer = static_cast(vertexBuffer); + auto gpuInstanceBuffer = static_cast(instanceBuffer); + this->bindGeometry(gpuIndexBuffer, gpuVertexBuffer, gpuInstanceBuffer); cbInfo.currentCmdBuf()->drawIndexed(fGpu, indexCount, instanceCount, baseIndex, baseVertex, baseInstance); fGpu->stats()->incNumDraws(); diff --git a/src/gpu/vk/GrVkGpuCommandBuffer.h b/src/gpu/vk/GrVkGpuCommandBuffer.h index 9dac586e10..2616708d86 100644 --- a/src/gpu/vk/GrVkGpuCommandBuffer.h +++ b/src/gpu/vk/GrVkGpuCommandBuffer.h @@ -96,9 +96,9 @@ private: GrGpu* gpu() override; // Bind vertex and index buffers - void bindGeometry(const GrBuffer* indexBuffer, - const GrBuffer* vertexBuffer, - const GrBuffer* instanceBuffer); + void bindGeometry(const GrGpuBuffer* indexBuffer, + const GrGpuBuffer* vertexBuffer, + const GrGpuBuffer* instanceBuffer); GrVkPipelineState* prepareDrawState(const GrPrimitiveProcessor&, const GrPipeline&, diff --git a/src/gpu/vk/GrVkIndexBuffer.cpp b/src/gpu/vk/GrVkIndexBuffer.cpp index ec2e11ef94..f5a7bbc72f 100644 --- a/src/gpu/vk/GrVkIndexBuffer.cpp +++ b/src/gpu/vk/GrVkIndexBuffer.cpp @@ -50,7 +50,7 @@ void GrVkIndexBuffer::onAbandon() { void GrVkIndexBuffer::onMap() { if (!this->wasDestroyed()) { - this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu()); + this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu()); } } diff --git a/src/gpu/vk/GrVkIndexBuffer.h b/src/gpu/vk/GrVkIndexBuffer.h index ab5c349c82..c35ef1242c 100644 --- a/src/gpu/vk/GrVkIndexBuffer.h +++ b/src/gpu/vk/GrVkIndexBuffer.h @@ -8,13 +8,12 @@ #ifndef GrVkIndexBuffer_DEFINED #define GrVkIndexBuffer_DEFINED -#include "GrBuffer.h" +#include "GrGpuBuffer.h" #include "GrVkBuffer.h" class GrVkGpu; -class GrVkIndexBuffer : public GrBuffer, public GrVkBuffer { - +class GrVkIndexBuffer : public GrGpuBuffer, public GrVkBuffer { public: static sk_sp Make(GrVkGpu* gpu, size_t size, bool dynamic); @@ -32,7
+31,7 @@ private: GrVkGpu* getVkGpu() const; - typedef GrBuffer INHERITED; + typedef GrGpuBuffer INHERITED; }; #endif diff --git a/src/gpu/vk/GrVkTransferBuffer.h b/src/gpu/vk/GrVkTransferBuffer.h index 036f4f7ff3..22f72861b6 100644 --- a/src/gpu/vk/GrVkTransferBuffer.h +++ b/src/gpu/vk/GrVkTransferBuffer.h @@ -8,14 +8,13 @@ #ifndef GrVkTransferBuffer_DEFINED #define GrVkTransferBuffer_DEFINED -#include "GrBuffer.h" +#include "GrGpuBuffer.h" #include "GrVkBuffer.h" #include "vk/GrVkTypes.h" class GrVkGpu; -class GrVkTransferBuffer : public GrBuffer, public GrVkBuffer { - +class GrVkTransferBuffer : public GrGpuBuffer, public GrVkBuffer { public: static sk_sp Make(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type); @@ -31,7 +30,7 @@ private: void onMap() override { if (!this->wasDestroyed()) { - this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu()); + this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu()); } } @@ -51,7 +50,7 @@ private: return reinterpret_cast(this->getGpu()); } - typedef GrBuffer INHERITED; + typedef GrGpuBuffer INHERITED; }; #endif diff --git a/src/gpu/vk/GrVkVertexBuffer.cpp b/src/gpu/vk/GrVkVertexBuffer.cpp index f8e55a823e..af22cc1832 100644 --- a/src/gpu/vk/GrVkVertexBuffer.cpp +++ b/src/gpu/vk/GrVkVertexBuffer.cpp @@ -49,7 +49,7 @@ void GrVkVertexBuffer::onAbandon() { void GrVkVertexBuffer::onMap() { if (!this->wasDestroyed()) { - this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu()); + this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu()); } } diff --git a/src/gpu/vk/GrVkVertexBuffer.h b/src/gpu/vk/GrVkVertexBuffer.h index af65a7eed0..9497fd31f3 100644 --- a/src/gpu/vk/GrVkVertexBuffer.h +++ b/src/gpu/vk/GrVkVertexBuffer.h @@ -8,12 +8,12 @@ #ifndef GrVkVertexBuffer_DEFINED #define GrVkVertexBuffer_DEFINED -#include "GrBuffer.h" +#include "GrGpuBuffer.h" #include "GrVkBuffer.h" class GrVkGpu; -class GrVkVertexBuffer : public GrBuffer, public GrVkBuffer { +class GrVkVertexBuffer : public GrGpuBuffer, public GrVkBuffer { public: 
static sk_sp Make(GrVkGpu* gpu, size_t size, bool dynamic); @@ -31,7 +31,7 @@ private: GrVkGpu* getVkGpu() const; - typedef GrBuffer INHERITED; + typedef GrGpuBuffer INHERITED; }; #endif diff --git a/tests/GrMeshTest.cpp b/tests/GrMeshTest.cpp index 309a29b13d..9dd5f68043 100644 --- a/tests/GrMeshTest.cpp +++ b/tests/GrMeshTest.cpp @@ -375,8 +375,7 @@ GrGLSLPrimitiveProcessor* GrMeshTestProcessor::createGLSLInstance(const GrShader template sk_sp DrawMeshHelper::makeVertexBuffer(const T* data, int count) { return sk_sp(fState->resourceProvider()->createBuffer( - count * sizeof(T), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern, - GrResourceProvider::Flags::kRequireGpuMemory, data)); + count * sizeof(T), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern, data)); } sk_sp DrawMeshHelper::getIndexBuffer() { diff --git a/tests/GrPipelineDynamicStateTest.cpp b/tests/GrPipelineDynamicStateTest.cpp index 5e2d36c207..5d51bfa333 100644 --- a/tests/GrPipelineDynamicStateTest.cpp +++ b/tests/GrPipelineDynamicStateTest.cpp @@ -194,9 +194,8 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrPipelineDynamicStateTest, reporter, ctxInfo {d, d, kMeshColors[3]} }; - sk_sp vbuff( - rp->createBuffer(sizeof(vdata), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern, - GrResourceProvider::Flags::kRequireGpuMemory, vdata)); + sk_sp vbuff(rp->createBuffer(sizeof(vdata), GrGpuBufferType::kVertex, + kDynamic_GrAccessPattern, vdata)); if (!vbuff) { ERRORF(reporter, "vbuff is null."); return; diff --git a/tests/ProcessorTest.cpp b/tests/ProcessorTest.cpp index 6a5ce8cc73..0c854d1df1 100644 --- a/tests/ProcessorTest.cpp +++ b/tests/ProcessorTest.cpp @@ -77,7 +77,7 @@ public: return std::unique_ptr(new TestFP(std::move(child))); } static std::unique_ptr Make(const SkTArray>& proxies, - const SkTArray>& buffers) { + const SkTArray>& buffers) { return std::unique_ptr(new TestFP(proxies, buffers)); } @@ -93,7 +93,8 @@ public: } private: - TestFP(const SkTArray>& proxies, const SkTArray>& buffers) + 
TestFP(const SkTArray>& proxies, + const SkTArray>& buffers) : INHERITED(kTestFP_ClassID, kNone_OptimizationFlags), fSamplers(4) { for (const auto& proxy : proxies) { fSamplers.emplace_back(proxy); @@ -185,7 +186,7 @@ DEF_GPUTEST_FOR_ALL_CONTEXTS(ProcessorRefTest, reporter, ctxInfo) { SkBudgeted::kYes); { SkTArray> proxies; - SkTArray> buffers; + SkTArray> buffers; proxies.push_back(proxy1); auto fp = TestFP::Make(std::move(proxies), std::move(buffers)); for (int i = 0; i < parentCnt; ++i) { diff --git a/tests/TransferPixelsTest.cpp b/tests/TransferPixelsTest.cpp index dfc5a048b3..c0b3947735 100644 --- a/tests/TransferPixelsTest.cpp +++ b/tests/TransferPixelsTest.cpp @@ -83,9 +83,8 @@ void basic_transfer_test(skiatest::Reporter* reporter, GrContext* context, GrCol // create and fill transfer buffer size_t size = rowBytes*kBufferHeight; - auto bufferFlags = GrResourceProvider::Flags::kNoPendingIO; - sk_sp buffer(resourceProvider->createBuffer(size, GrGpuBufferType::kXferCpuToGpu, - kDynamic_GrAccessPattern, bufferFlags)); + sk_sp buffer(resourceProvider->createBuffer(size, GrGpuBufferType::kXferCpuToGpu, + kDynamic_GrAccessPattern)); if (!buffer) { return; }