Use different classes for client side arrays and GPU buffer objects.

GrBuffer is a base class for GrGpuBuffer and GrCpuBuffer. GrGpuBuffer is a
GrGpuResource and the others are not. This allows GrCpuBuffers to exist
outside of the GrGpuResourceCache.

Also removes flags from GrResourceProvider buffer factory function. The
only flag still in use was kRequireGpuMemory. Now CPU buffers are made
without using GrResourceProvider.

Change-Id: I82670d1316e28fd6331ca36b26c8c4ead33846f9
Reviewed-on: https://skia-review.googlesource.com/c/188823
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
This commit is contained in:
Brian Salomon 2019-02-07 11:31:24 -05:00 committed by Skia Commit-Bot
parent f4766758aa
commit dbf7072a59
66 changed files with 569 additions and 535 deletions

View File

@ -116,8 +116,7 @@ private:
{100, fY+100}, {100, fY+100},
}; };
sk_sp<const GrBuffer> vertexBuffer(flushState->resourceProvider()->createBuffer( sk_sp<const GrBuffer> vertexBuffer(flushState->resourceProvider()->createBuffer(
sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern, sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern, vertices));
GrResourceProvider::Flags::kNone, vertices));
if (!vertexBuffer) { if (!vertexBuffer) {
return; return;
} }

View File

@ -141,8 +141,7 @@ private:
{+1, +1}, {+1, +1},
}; };
sk_sp<const GrBuffer> vertexBuffer(flushState->resourceProvider()->createBuffer( sk_sp<const GrBuffer> vertexBuffer(flushState->resourceProvider()->createBuffer(
sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern, sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern, vertices));
GrResourceProvider::Flags::kNone, vertices));
if (!vertexBuffer) { if (!vertexBuffer) {
return; return;
} }

View File

@ -64,7 +64,6 @@ skia_gpu_sources = [
"$_src/gpu/GrBitmapTextureMaker.h", "$_src/gpu/GrBitmapTextureMaker.h",
"$_src/gpu/GrBlurUtils.cpp", "$_src/gpu/GrBlurUtils.cpp",
"$_src/gpu/GrBlurUtils.h", "$_src/gpu/GrBlurUtils.h",
"$_src/gpu/GrBuffer.cpp",
"$_src/gpu/GrBuffer.h", "$_src/gpu/GrBuffer.h",
"$_src/gpu/GrBufferAllocPool.cpp", "$_src/gpu/GrBufferAllocPool.cpp",
"$_src/gpu/GrBufferAllocPool.h", "$_src/gpu/GrBufferAllocPool.h",
@ -83,6 +82,7 @@ skia_gpu_sources = [
"$_src/gpu/GrContextThreadSafeProxy.cpp", "$_src/gpu/GrContextThreadSafeProxy.cpp",
"$_src/gpu/GrContextThreadSafeProxyPriv.h", "$_src/gpu/GrContextThreadSafeProxyPriv.h",
"$_src/gpu/GrCoordTransform.h", "$_src/gpu/GrCoordTransform.h",
"$_src/gpu/GrCpuBuffer.h",
"$_src/gpu/GrDDLContext.cpp", "$_src/gpu/GrDDLContext.cpp",
"$_src/gpu/GrDefaultGeoProcFactory.cpp", "$_src/gpu/GrDefaultGeoProcFactory.cpp",
"$_src/gpu/GrDefaultGeoProcFactory.h", "$_src/gpu/GrDefaultGeoProcFactory.h",
@ -108,6 +108,8 @@ skia_gpu_sources = [
"$_src/gpu/GrGlyph.h", "$_src/gpu/GrGlyph.h",
"$_src/gpu/GrGpu.cpp", "$_src/gpu/GrGpu.cpp",
"$_src/gpu/GrGpu.h", "$_src/gpu/GrGpu.h",
"$_src/gpu/GrGpuBuffer.cpp",
"$_src/gpu/GrGpuBuffer.h",
"$_src/gpu/GrGpuResourceCacheAccess.h", "$_src/gpu/GrGpuResourceCacheAccess.h",
"$_src/gpu/GrGpuCommandBuffer.cpp", "$_src/gpu/GrGpuCommandBuffer.cpp",
"$_src/gpu/GrGpuCommandBuffer.h", "$_src/gpu/GrGpuCommandBuffer.h",

View File

@ -93,7 +93,6 @@ protected:
bool internalHasUniqueRef() const { return fRefCnt == 1; } bool internalHasUniqueRef() const { return fRefCnt == 1; }
private: private:
friend class GrIORefProxy; // needs to forward on wrapped IO calls
// This is for a unit test. // This is for a unit test.
template <typename T> template <typename T>
friend void testingOnly_getIORefCnts(const T*, int* refCnt, int* readCnt, int* writeCnt); friend void testingOnly_getIORefCnts(const T*, int* refCnt, int* readCnt, int* writeCnt);
@ -120,7 +119,6 @@ private:
this->didRemoveRefOrPendingIO(kPendingWrite_CntType); this->didRemoveRefOrPendingIO(kPendingWrite_CntType);
} }
private:
void didRemoveRefOrPendingIO(CntType cntTypeRemoved) const { void didRemoveRefOrPendingIO(CntType cntTypeRemoved) const {
if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) { if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) {
static_cast<const DERIVED*>(this)->notifyAllCntsAreZero(cntTypeRemoved); static_cast<const DERIVED*>(this)->notifyAllCntsAreZero(cntTypeRemoved);
@ -131,6 +129,7 @@ private:
mutable int32_t fPendingReads; mutable int32_t fPendingReads;
mutable int32_t fPendingWrites; mutable int32_t fPendingWrites;
friend class GrIORefProxy; // needs to forward on wrapped IO calls
friend class GrResourceCache; // to check IO ref counts. friend class GrResourceCache; // to check IO ref counts.
template <typename, GrIOType> friend class GrPendingIOResource; template <typename, GrIOType> friend class GrPendingIOResource;

View File

@ -829,19 +829,6 @@ enum class GrGpuBufferType {
}; };
static const int kGrGpuBufferTypeCount = static_cast<int>(GrGpuBufferType::kXferGpuToCpu) + 1; static const int kGrGpuBufferTypeCount = static_cast<int>(GrGpuBufferType::kXferGpuToCpu) + 1;
static inline bool GrBufferTypeIsVertexOrIndex(GrGpuBufferType type) {
switch (type) {
case GrGpuBufferType::kVertex:
case GrGpuBufferType::kIndex:
return true;
case GrGpuBufferType::kXferCpuToGpu:
case GrGpuBufferType::kXferGpuToCpu:
return false;
}
SK_ABORT("Unexpected GrGpuBufferType.");
return false;
}
/** /**
* Provides a performance hint regarding the frequency at which a data store will be accessed. * Provides a performance hint regarding the frequency at which a data store will be accessed.
*/ */

View File

@ -342,19 +342,17 @@ void CCPRGeometryView::DrawCoverageCountOp::onExecute(GrOpFlushState* state,
SkSTArray<1, GrMesh> mesh; SkSTArray<1, GrMesh> mesh;
if (PrimitiveType::kCubics == fView->fPrimitiveType || if (PrimitiveType::kCubics == fView->fPrimitiveType ||
PrimitiveType::kConics == fView->fPrimitiveType) { PrimitiveType::kConics == fView->fPrimitiveType) {
sk_sp<GrBuffer> instBuff( sk_sp<GrGpuBuffer> instBuff(
rp->createBuffer(fView->fQuadPointInstances.count() * sizeof(QuadPointInstance), rp->createBuffer(fView->fQuadPointInstances.count() * sizeof(QuadPointInstance),
GrGpuBufferType::kVertex, kDynamic_GrAccessPattern, GrGpuBufferType::kVertex, kDynamic_GrAccessPattern,
GrResourceProvider::Flags::kRequireGpuMemory,
fView->fQuadPointInstances.begin())); fView->fQuadPointInstances.begin()));
if (!fView->fQuadPointInstances.empty() && instBuff) { if (!fView->fQuadPointInstances.empty() && instBuff) {
proc.appendMesh(std::move(instBuff), fView->fQuadPointInstances.count(), 0, &mesh); proc.appendMesh(std::move(instBuff), fView->fQuadPointInstances.count(), 0, &mesh);
} }
} else { } else {
sk_sp<GrBuffer> instBuff( sk_sp<GrGpuBuffer> instBuff(
rp->createBuffer(fView->fTriPointInstances.count() * sizeof(TriPointInstance), rp->createBuffer(fView->fTriPointInstances.count() * sizeof(TriPointInstance),
GrGpuBufferType::kVertex, kDynamic_GrAccessPattern, GrGpuBufferType::kVertex, kDynamic_GrAccessPattern,
GrResourceProvider::Flags::kRequireGpuMemory,
fView->fTriPointInstances.begin())); fView->fTriPointInstances.begin()));
if (!fView->fTriPointInstances.empty() && instBuff) { if (!fView->fTriPointInstances.empty() && instBuff) {
proc.appendMesh(std::move(instBuff), fView->fTriPointInstances.count(), 0, &mesh); proc.appendMesh(std::move(instBuff), fView->fTriPointInstances.count(), 0, &mesh);

View File

@ -1,72 +0,0 @@
/*
* Copyright 2016 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrBuffer.h"
#include "GrGpu.h"
#include "GrCaps.h"
sk_sp<GrBuffer> GrBuffer::MakeCPUBacked(GrGpu* gpu, size_t sizeInBytes,
GrGpuBufferType intendedType, const void* data) {
SkASSERT(GrBufferTypeIsVertexOrIndex(intendedType));
void* cpuData;
if (gpu->caps()->mustClearUploadedBufferData()) {
cpuData = sk_calloc_throw(sizeInBytes);
} else {
cpuData = sk_malloc_throw(sizeInBytes);
}
if (data) {
memcpy(cpuData, data, sizeInBytes);
}
return sk_sp<GrBuffer>(new GrBuffer(gpu, sizeInBytes, intendedType, cpuData));
}
GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, void* cpuData)
: INHERITED(gpu)
, fMapPtr(nullptr)
, fSizeInBytes(sizeInBytes)
, fAccessPattern(kDynamic_GrAccessPattern)
, fCPUData(cpuData)
, fIntendedType(type) {
this->registerWithCache(SkBudgeted::kNo);
}
GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, GrAccessPattern pattern)
: INHERITED(gpu)
, fMapPtr(nullptr)
, fSizeInBytes(sizeInBytes)
, fAccessPattern(pattern)
, fCPUData(nullptr)
, fIntendedType(type) {
// Subclass registers with cache.
}
void GrBuffer::ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType intendedType,
GrScratchKey* key) {
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
// TODO: There's not always reason to cache a buffer by type. In some (all?) APIs it's just
// a chunk of memory we can use/reuse for any type of data. We really only need to
// differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types.
builder[0] = SkToU32(intendedType);
builder[1] = (uint32_t)size;
if (sizeof(size_t) > 4) {
builder[2] = (uint32_t)((uint64_t)size >> 32);
}
}
bool GrBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
SkASSERT(this->isCPUBacked());
memcpy(fCPUData, src, srcSizeInBytes);
return true;
}
void GrBuffer::computeScratchKey(GrScratchKey* key) const {
if (!this->isCPUBacked() && SkIsPow2(fSizeInBytes) &&
kDynamic_GrAccessPattern == fAccessPattern) {
ComputeScratchKeyForDynamicVBO(fSizeInBytes, fIntendedType, key);
}
}

View File

@ -1,5 +1,5 @@
/* /*
* Copyright 2016 Google Inc. * Copyright 2019 Google Inc.
* *
* Use of this source code is governed by a BSD-style license that can be * Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file. * found in the LICENSE file.
@ -8,124 +8,29 @@
#ifndef GrBuffer_DEFINED #ifndef GrBuffer_DEFINED
#define GrBuffer_DEFINED #define GrBuffer_DEFINED
#include "GrGpuResource.h" #include "GrTypes.h"
class GrGpu; /** Base class for a GPU buffer object or a client side arrays. */
class GrBuffer {
class GrBuffer : public GrGpuResource {
public: public:
/** GrBuffer(const GrBuffer&) = delete;
* Creates a client-side buffer. GrBuffer& operator=(const GrBuffer&) = delete;
*/
static SK_WARN_UNUSED_RESULT sk_sp<GrBuffer> MakeCPUBacked(GrGpu*, size_t sizeInBytes,
GrGpuBufferType,
const void* data = nullptr);
/** virtual ~GrBuffer() = default;
* Computes a scratch key for a GPU-side buffer with a "dynamic" access pattern. (Buffers with
* "static" and "stream" patterns are disqualified by nature from being cached and reused.)
*/
static void ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType, GrScratchKey*);
GrAccessPattern accessPattern() const { return fAccessPattern; } // Our subclasses derive from different ref counting base classes. In order to use base
size_t sizeInBytes() const { return fSizeInBytes; } // class pointers with sk_sp we virtualize ref() and unref().
virtual void ref() const = 0;
virtual void unref() const = 0;
/** /** Size of the buffer in bytes. */
* Returns true if the buffer is a wrapper around a CPU array. If true it virtual size_t size() const = 0;
* indicates that map will always succeed and will be free.
*/
bool isCPUBacked() const { return SkToBool(fCPUData); }
size_t baseOffset() const { return reinterpret_cast<size_t>(fCPUData); }
/** /** Is this an instance of GrCpuBuffer? Otherwise, an instance of GrGpuBuffer. */
* Maps the buffer to be written by the CPU. virtual bool isCpuBuffer() const = 0;
*
* The previous content of the buffer is invalidated. It is an error
* to draw from the buffer while it is mapped. It may fail if the backend
* doesn't support mapping the buffer. If the buffer is CPU backed then
* it will always succeed and is a free operation. Once a buffer is mapped,
* subsequent calls to map() are ignored.
*
* Note that buffer mapping does not go through GrContext and therefore is
* not serialized with other operations.
*
* @return a pointer to the data or nullptr if the map fails.
*/
void* map() {
if (!fMapPtr) {
this->onMap();
}
return fMapPtr;
}
/**
* Unmaps the buffer.
*
* The pointer returned by the previous map call will no longer be valid.
*/
void unmap() {
SkASSERT(fMapPtr);
this->onUnmap();
fMapPtr = nullptr;
}
/**
Queries whether the buffer has been mapped.
@return true if the buffer is mapped, false otherwise.
*/
bool isMapped() const { return SkToBool(fMapPtr); }
/**
* Updates the buffer data.
*
* The size of the buffer will be preserved. The src data will be
* placed at the beginning of the buffer and any remaining contents will
* be undefined. srcSizeInBytes must be <= to the buffer size.
*
* The buffer must not be mapped.
*
* Note that buffer updates do not go through GrContext and therefore are
* not serialized with other operations.
*
* @return returns true if the update succeeds, false otherwise.
*/
bool updateData(const void* src, size_t srcSizeInBytes) {
SkASSERT(!this->isMapped());
SkASSERT(srcSizeInBytes <= fSizeInBytes);
return this->onUpdateData(src, srcSizeInBytes);
}
~GrBuffer() override {
sk_free(fCPUData);
}
protected: protected:
GrBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, GrAccessPattern); GrBuffer() = default;
GrGpuBufferType intendedType() const { return fIntendedType; }
void* fMapPtr;
private:
/**
* Internal constructor to make a CPU-backed buffer.
*/
GrBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, void* cpuData);
virtual void onMap() { SkASSERT(this->isCPUBacked()); fMapPtr = fCPUData; }
virtual void onUnmap() { SkASSERT(this->isCPUBacked()); }
virtual bool onUpdateData(const void* src, size_t srcSizeInBytes);
size_t onGpuMemorySize() const override { return fSizeInBytes; } // TODO: zero for cpu backed?
const char* getResourceType() const override { return "Buffer Object"; }
void computeScratchKey(GrScratchKey* key) const override;
size_t fSizeInBytes;
GrAccessPattern fAccessPattern;
void* fCPUData;
GrGpuBufferType fIntendedType;
typedef GrGpuResource INHERITED;
}; };
#endif #endif

View File

@ -6,12 +6,12 @@
*/ */
#include "GrBufferAllocPool.h" #include "GrBufferAllocPool.h"
#include "GrBuffer.h"
#include "GrCaps.h" #include "GrCaps.h"
#include "GrContext.h" #include "GrContext.h"
#include "GrContextPriv.h" #include "GrContextPriv.h"
#include "GrCpuBuffer.h"
#include "GrGpu.h" #include "GrGpu.h"
#include "GrGpuBuffer.h"
#include "GrResourceProvider.h" #include "GrResourceProvider.h"
#include "GrTypes.h" #include "GrTypes.h"
#include "SkMacros.h" #include "SkMacros.h"
@ -24,15 +24,14 @@
static void VALIDATE(bool = false) {} static void VALIDATE(bool = false) {}
#endif #endif
#define UNMAP_BUFFER(block) \ #define UNMAP_BUFFER(block) \
do { \ do { \
TRACE_EVENT_INSTANT1("skia.gpu", \ TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer", \
"GrBufferAllocPool Unmapping Buffer", \ TRACE_EVENT_SCOPE_THREAD, "percent_unwritten", \
TRACE_EVENT_SCOPE_THREAD, \ (float)((block).fBytesFree) / (block).fBuffer->size()); \
"percent_unwritten", \ SkASSERT(!block.fBuffer->isCpuBuffer()); \
(float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \ static_cast<GrGpuBuffer*>(block.fBuffer.get())->unmap(); \
(block).fBuffer->unmap(); \ } while (false)
} while (false)
constexpr size_t GrBufferAllocPool::kDefaultBufferSize; constexpr size_t GrBufferAllocPool::kDefaultBufferSize;
@ -47,7 +46,7 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType, voi
void GrBufferAllocPool::deleteBlocks() { void GrBufferAllocPool::deleteBlocks() {
if (fBlocks.count()) { if (fBlocks.count()) {
GrBuffer* buffer = fBlocks.back().fBuffer.get(); GrBuffer* buffer = fBlocks.back().fBuffer.get();
if (buffer->isMapped()) { if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
UNMAP_BUFFER(fBlocks.back()); UNMAP_BUFFER(fBlocks.back());
} }
} }
@ -78,11 +77,14 @@ void GrBufferAllocPool::unmap() {
if (fBufferPtr) { if (fBufferPtr) {
BufferBlock& block = fBlocks.back(); BufferBlock& block = fBlocks.back();
if (block.fBuffer->isMapped()) { GrBuffer* buffer = block.fBuffer.get();
UNMAP_BUFFER(block); if (!buffer->isCpuBuffer()) {
} else { if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree; UNMAP_BUFFER(block);
this->flushCpuData(fBlocks.back(), flushSize); } else {
size_t flushSize = block.fBuffer->size() - block.fBytesFree;
this->flushCpuData(fBlocks.back(), flushSize);
}
} }
fBufferPtr = nullptr; fBufferPtr = nullptr;
} }
@ -94,21 +96,25 @@ void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
bool wasDestroyed = false; bool wasDestroyed = false;
if (fBufferPtr) { if (fBufferPtr) {
SkASSERT(!fBlocks.empty()); SkASSERT(!fBlocks.empty());
if (!fBlocks.back().fBuffer->isMapped()) { const GrBuffer* buffer = fBlocks.back().fBuffer.get();
if (!buffer->isCpuBuffer() && !static_cast<const GrGpuBuffer*>(buffer)->isMapped()) {
SkASSERT(fCpuData == fBufferPtr); SkASSERT(fCpuData == fBufferPtr);
} }
} else { } else if (!fBlocks.empty()) {
SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped()); const GrBuffer* buffer = fBlocks.back().fBuffer.get();
SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
} }
size_t bytesInUse = 0; size_t bytesInUse = 0;
for (int i = 0; i < fBlocks.count() - 1; ++i) { for (int i = 0; i < fBlocks.count() - 1; ++i) {
SkASSERT(!fBlocks[i].fBuffer->isMapped()); const GrBuffer* buffer = fBlocks[i].fBuffer.get();
SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
} }
for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) { for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
if (fBlocks[i].fBuffer->wasDestroyed()) { GrBuffer* buffer = fBlocks[i].fBuffer.get();
if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->wasDestroyed()) {
wasDestroyed = true; wasDestroyed = true;
} else { } else {
size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree; size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree;
bytesInUse += bytes; bytesInUse += bytes;
SkASSERT(bytes || unusedBlockAllowed); SkASSERT(bytes || unusedBlockAllowed);
} }
@ -137,7 +143,7 @@ void* GrBufferAllocPool::makeSpace(size_t size,
if (fBufferPtr) { if (fBufferPtr) {
BufferBlock& back = fBlocks.back(); BufferBlock& back = fBlocks.back();
size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree; size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
size_t pad = GrSizeAlignUpPad(usedBytes, alignment); size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
SkSafeMath safeMath; SkSafeMath safeMath;
size_t alignedSize = safeMath.add(pad, size); size_t alignedSize = safeMath.add(pad, size);
@ -192,7 +198,7 @@ void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
if (fBufferPtr) { if (fBufferPtr) {
BufferBlock& back = fBlocks.back(); BufferBlock& back = fBlocks.back();
size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree; size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
size_t pad = GrSizeAlignUpPad(usedBytes, alignment); size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
if ((minSize + pad) <= back.fBytesFree) { if ((minSize + pad) <= back.fBytesFree) {
// Consume padding first, to make subsequent alignment math easier // Consume padding first, to make subsequent alignment math easier
@ -250,13 +256,14 @@ void GrBufferAllocPool::putBack(size_t bytes) {
// caller shouldn't try to put back more than they've taken // caller shouldn't try to put back more than they've taken
SkASSERT(!fBlocks.empty()); SkASSERT(!fBlocks.empty());
BufferBlock& block = fBlocks.back(); BufferBlock& block = fBlocks.back();
size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree; size_t bytesUsed = block.fBuffer->size() - block.fBytesFree;
if (bytes >= bytesUsed) { if (bytes >= bytesUsed) {
bytes -= bytesUsed; bytes -= bytesUsed;
fBytesInUse -= bytesUsed; fBytesInUse -= bytesUsed;
// if we locked a vb to satisfy the make space and we're releasing // if we locked a vb to satisfy the make space and we're releasing
// beyond it, then unmap it. // beyond it, then unmap it.
if (block.fBuffer->isMapped()) { GrBuffer* buffer = block.fBuffer.get();
if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
UNMAP_BUFFER(block); UNMAP_BUFFER(block);
} }
this->destroyBlock(); this->destroyBlock();
@ -284,32 +291,35 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
return false; return false;
} }
block.fBytesFree = block.fBuffer->gpuMemorySize(); block.fBytesFree = block.fBuffer->size();
if (fBufferPtr) { if (fBufferPtr) {
SkASSERT(fBlocks.count() > 1); SkASSERT(fBlocks.count() > 1);
BufferBlock& prev = fBlocks.fromBack(1); BufferBlock& prev = fBlocks.fromBack(1);
if (prev.fBuffer->isMapped()) { GrBuffer* buffer = prev.fBuffer.get();
UNMAP_BUFFER(prev); if (!buffer->isCpuBuffer()) {
} else { if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree); UNMAP_BUFFER(prev);
} else {
this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree);
}
} }
fBufferPtr = nullptr; fBufferPtr = nullptr;
} }
SkASSERT(!fBufferPtr); SkASSERT(!fBufferPtr);
// If the buffer is CPU-backed we map it because it is free to do so and saves a copy. // If the buffer is CPU-backed we "map" it because it is free to do so and saves a copy.
// Otherwise when buffer mapping is supported we map if the buffer size is greater than the // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
// threshold. // threshold.
bool attemptMap = block.fBuffer->isCPUBacked(); if (block.fBuffer->isCpuBuffer()) {
if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) { fBufferPtr = static_cast<GrCpuBuffer*>(block.fBuffer.get())->data();
attemptMap = size > fGpu->caps()->bufferMapThreshold(); SkASSERT(fBufferPtr);
} else {
if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
size > fGpu->caps()->bufferMapThreshold()) {
fBufferPtr = static_cast<GrGpuBuffer*>(block.fBuffer.get())->map();
}
} }
if (attemptMap) {
fBufferPtr = block.fBuffer->map();
}
if (!fBufferPtr) { if (!fBufferPtr) {
fBufferPtr = this->resetCpuData(block.fBytesFree); fBufferPtr = this->resetCpuData(block.fBytesFree);
} }
@ -321,7 +331,8 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
void GrBufferAllocPool::destroyBlock() { void GrBufferAllocPool::destroyBlock() {
SkASSERT(!fBlocks.empty()); SkASSERT(!fBlocks.empty());
SkASSERT(!fBlocks.back().fBuffer->isMapped()); SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() ||
!static_cast<GrGpuBuffer*>(fBlocks.back().fBuffer.get())->isMapped());
fBlocks.pop_back(); fBlocks.pop_back();
fBufferPtr = nullptr; fBufferPtr = nullptr;
} }
@ -345,11 +356,12 @@ void* GrBufferAllocPool::resetCpuData(size_t newSize) {
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) { void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
GrBuffer* buffer = block.fBuffer.get(); SkASSERT(block.fBuffer.get());
SkASSERT(buffer); SkASSERT(!block.fBuffer.get()->isCpuBuffer());
GrGpuBuffer* buffer = static_cast<GrGpuBuffer*>(block.fBuffer.get());
SkASSERT(!buffer->isMapped()); SkASSERT(!buffer->isMapped());
SkASSERT(fCpuData == fBufferPtr); SkASSERT(fCpuData == fBufferPtr);
SkASSERT(flushSize <= buffer->gpuMemorySize()); SkASSERT(flushSize <= buffer->size());
VALIDATE(true); VALIDATE(true);
if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() && if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
@ -368,8 +380,10 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) { sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
auto resourceProvider = fGpu->getContext()->priv().resourceProvider(); auto resourceProvider = fGpu->getContext()->priv().resourceProvider();
return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern, if (fGpu->caps()->preferClientSideDynamicBuffers()) {
GrResourceProvider::Flags::kNone); return GrCpuBuffer::Make(size);
}
return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern);
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////

View File

@ -8,13 +8,14 @@
#ifndef GrBufferAllocPool_DEFINED #ifndef GrBufferAllocPool_DEFINED
#define GrBufferAllocPool_DEFINED #define GrBufferAllocPool_DEFINED
#include "GrGpuBuffer.h"
#include "GrTypesPriv.h" #include "GrTypesPriv.h"
#include "SkNoncopyable.h" #include "SkNoncopyable.h"
#include "SkTArray.h" #include "SkTArray.h"
#include "SkTDArray.h" #include "SkTDArray.h"
#include "SkTypes.h" #include "SkTypes.h"
class GrBuffer;
class GrGpu; class GrGpu;
/** /**

36
src/gpu/GrCpuBuffer.h Normal file
View File

@ -0,0 +1,36 @@
/*
* Copyright 2019 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrCpuBuffer_DEFINED
#define GrCpuBuffer_DEFINED
#include "GrBuffer.h"
#include "GrNonAtomicRef.h"
class GrCpuBuffer final : public GrNonAtomicRef<GrCpuBuffer>, public GrBuffer {
public:
static sk_sp<GrCpuBuffer> Make(size_t size) {
SkASSERT(size > 0);
auto mem = ::operator new(sizeof(GrCpuBuffer) + size);
return sk_sp<GrCpuBuffer>(new (mem) GrCpuBuffer((char*)mem + sizeof(GrCpuBuffer), size));
}
void ref() const override { GrNonAtomicRef<GrCpuBuffer>::ref(); }
void unref() const override { GrNonAtomicRef<GrCpuBuffer>::unref(); }
size_t size() const override { return fSize; }
bool isCpuBuffer() const override { return true; }
char* data() { return reinterpret_cast<char*>(fData); }
const char* data() const { return reinterpret_cast<const char*>(fData); }
private:
GrCpuBuffer(void* data, size_t size) : fData(data), fSize(size) {}
void* fData;
size_t fSize;
};
#endif

View File

@ -10,7 +10,6 @@
#include "GrBackendSemaphore.h" #include "GrBackendSemaphore.h"
#include "GrBackendSurface.h" #include "GrBackendSurface.h"
#include "GrBuffer.h"
#include "GrCaps.h" #include "GrCaps.h"
#include "GrContext.h" #include "GrContext.h"
#include "GrContextPriv.h" #include "GrContextPriv.h"
@ -217,10 +216,10 @@ sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImage
return nullptr; return nullptr;
} }
sk_sp<GrBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType, sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) { GrAccessPattern accessPattern, const void* data) {
this->handleDirtyContext(); this->handleDirtyContext();
sk_sp<GrBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data); sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
if (!this->caps()->reuseScratchBuffers()) { if (!this->caps()->reuseScratchBuffers()) {
buffer->resourcePriv().removeScratchKey(); buffer->resourcePriv().removeScratchKey();
} }
@ -303,7 +302,7 @@ bool GrGpu::writePixels(GrSurface* surface, int left, int top, int width, int he
} }
bool GrGpu::transferPixels(GrTexture* texture, int left, int top, int width, int height, bool GrGpu::transferPixels(GrTexture* texture, int left, int top, int width, int height,
GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset, GrColorType bufferColorType, GrGpuBuffer* transferBuffer, size_t offset,
size_t rowBytes) { size_t rowBytes) {
SkASSERT(texture); SkASSERT(texture);
SkASSERT(transferBuffer); SkASSERT(transferBuffer);

View File

@ -22,7 +22,7 @@
class GrBackendRenderTarget; class GrBackendRenderTarget;
class GrBackendSemaphore; class GrBackendSemaphore;
class GrBuffer; class GrGpuBuffer;
class GrContext; class GrContext;
struct GrContextOptions; struct GrContextOptions;
class GrGLContext; class GrGLContext;
@ -140,8 +140,8 @@ public:
* *
* @return the buffer if successful, otherwise nullptr. * @return the buffer if successful, otherwise nullptr.
*/ */
sk_sp<GrBuffer> createBuffer(size_t size, GrGpuBufferType intendedType, sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data = nullptr); GrAccessPattern accessPattern, const void* data = nullptr);
/** /**
* Resolves MSAA. * Resolves MSAA.
@ -217,7 +217,7 @@ public:
* means rows are tightly packed. * means rows are tightly packed.
*/ */
bool transferPixels(GrTexture* texture, int left, int top, int width, int height, bool transferPixels(GrTexture* texture, int left, int top, int width, int height,
GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset, GrColorType bufferColorType, GrGpuBuffer* transferBuffer, size_t offset,
size_t rowBytes); size_t rowBytes);
// After the client interacts directly with the 3D context state the GrGpu // After the client interacts directly with the 3D context state the GrGpu
@ -472,8 +472,8 @@ private:
virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&, virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
const GrVkDrawableInfo&); const GrVkDrawableInfo&);
virtual sk_sp<GrBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType, virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern, const void* data) = 0; GrAccessPattern, const void* data) = 0;
// overridden by backend-specific derived class to perform the surface read // overridden by backend-specific derived class to perform the surface read
virtual bool onReadPixels(GrSurface*, int left, int top, int width, int height, GrColorType, virtual bool onReadPixels(GrSurface*, int left, int top, int width, int height, GrColorType,
@ -485,7 +485,7 @@ private:
// overridden by backend-specific derived class to perform the texture transfer // overridden by backend-specific derived class to perform the texture transfer
virtual bool onTransferPixels(GrTexture*, int left, int top, int width, int height, virtual bool onTransferPixels(GrTexture*, int left, int top, int width, int height,
GrColorType colorType, GrBuffer* transferBuffer, size_t offset, GrColorType colorType, GrGpuBuffer* transferBuffer, size_t offset,
size_t rowBytes) = 0; size_t rowBytes) = 0;
// overridden by backend-specific derived class to perform the resolve // overridden by backend-specific derived class to perform the resolve

38
src/gpu/GrGpuBuffer.cpp Normal file
View File

@ -0,0 +1,38 @@
/*
* Copyright 2019 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrGpuBuffer.h"
#include "GrCaps.h"
#include "GrGpu.h"
GrGpuBuffer::GrGpuBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type,
GrAccessPattern pattern)
: GrGpuResource(gpu)
, fMapPtr(nullptr)
, fSizeInBytes(sizeInBytes)
, fAccessPattern(pattern)
, fIntendedType(type) {}
void GrGpuBuffer::ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType intendedType,
GrScratchKey* key) {
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
// TODO: There's not always reason to cache a buffer by type. In some (all?) APIs it's just
// a chunk of memory we can use/reuse for any type of data. We really only need to
// differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types.
builder[0] = SkToU32(intendedType);
builder[1] = (uint32_t)size;
if (sizeof(size_t) > 4) {
builder[2] = (uint32_t)((uint64_t)size >> 32);
}
}
// GrGpuResource hook: only power-of-two-sized dynamic buffers are given a
// scratch key (and are therefore eligible for reuse from the cache). All
// other buffers leave the key invalid and are never recycled.
void GrGpuBuffer::computeScratchKey(GrScratchKey* key) const {
if (kDynamic_GrAccessPattern != fAccessPattern || !SkIsPow2(fSizeInBytes)) {
return;
}
ComputeScratchKeyForDynamicVBO(fSizeInBytes, fIntendedType, key);
}

113
src/gpu/GrGpuBuffer.h Normal file
View File

@ -0,0 +1,113 @@
/*
* Copyright 2019 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrGpuBuffer_DEFINED
#define GrGpuBuffer_DEFINED
#include "GrBuffer.h"
#include "GrGpuResource.h"
class GrGpu;
// A buffer object that lives in GPU-accessible memory and participates in the
// GrGpuResource lifecycle (and thus the resource cache). Contrast with a CPU
// buffer, for which isCpuBuffer() returns true on the GrBuffer interface.
class GrGpuBuffer : public GrGpuResource, public GrBuffer {
public:
/**
* Computes a scratch key for a GPU-side buffer with a "dynamic" access pattern. (Buffers with
* "static" and "stream" patterns are disqualified by nature from being cached and reused.)
*/
static void ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType, GrScratchKey*);
// The access pattern (static/dynamic/stream) this buffer was created with.
GrAccessPattern accessPattern() const { return fAccessPattern; }
// Size in bytes requested at creation time.
size_t size() const final { return fSizeInBytes; }
// GrBuffer's ref/unref are forwarded to GrGpuResource's ref counting so the
// resource cache sees every reference.
void ref() const final { GrGpuResource::ref(); }
void unref() const final { GrGpuResource::unref(); }
/**
* Maps the buffer to be written by the CPU.
*
* The previous content of the buffer is invalidated. It is an error
* to draw from the buffer while it is mapped. It may fail if the backend
* doesn't support mapping the buffer. If the buffer is CPU backed then
* it will always succeed and is a free operation. Once a buffer is mapped,
* subsequent calls to map() are ignored.
*
* Note that buffer mapping does not go through GrContext and therefore is
* not serialized with other operations.
*
* @return a pointer to the data or nullptr if the map fails.
*/
void* map() {
if (!fMapPtr) {
this->onMap();
}
return fMapPtr;
}
/**
* Unmaps the buffer.
*
* The pointer returned by the previous map call will no longer be valid.
*/
void unmap() {
SkASSERT(fMapPtr);
this->onUnmap();
fMapPtr = nullptr;
}
/**
Queries whether the buffer has been mapped.
@return true if the buffer is mapped, false otherwise.
*/
bool isMapped() const { return SkToBool(fMapPtr); }
// This class always represents GPU memory; CPU-backed buffers implement
// GrBuffer separately.
bool isCpuBuffer() const final { return false; }
/**
* Updates the buffer data.
*
* The size of the buffer will be preserved. The src data will be
* placed at the beginning of the buffer and any remaining contents will
* be undefined. srcSizeInBytes must be <= to the buffer size.
*
* The buffer must not be mapped.
*
* Note that buffer updates do not go through GrContext and therefore are
* not serialized with other operations.
*
* @return returns true if the update succeeds, false otherwise.
*/
bool updateData(const void* src, size_t srcSizeInBytes) {
SkASSERT(!this->isMapped());
SkASSERT(srcSizeInBytes <= fSizeInBytes);
return this->onUpdateData(src, srcSizeInBytes);
}
protected:
GrGpuBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, GrAccessPattern);
// The usage hint the buffer was created for; available to subclasses.
GrGpuBufferType intendedType() const { return fIntendedType; }
// Set by subclasses in onMap(); non-null iff the buffer is currently mapped.
void* fMapPtr;
private:
// Backend hooks: set fMapPtr (or leave it null on failure), release the
// mapping, and upload new contents, respectively.
virtual void onMap() = 0;
virtual void onUnmap() = 0;
virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0;
// GrGpuResource overrides: report the buffer's GPU memory footprint,
// its human-readable resource type, and its cache scratch key.
size_t onGpuMemorySize() const override { return fSizeInBytes; }
const char* getResourceType() const override { return "Buffer Object"; }
void computeScratchKey(GrScratchKey* key) const override;
size_t fSizeInBytes;
GrAccessPattern fAccessPattern;
GrGpuBufferType fIntendedType;
};
#endif

View File

@ -10,6 +10,7 @@
#include "GrBuffer.h" #include "GrBuffer.h"
#include "GrPendingIOResource.h" #include "GrPendingIOResource.h"
#include "GrGpuBuffer.h"
class GrPrimitiveProcessor; class GrPrimitiveProcessor;
@ -42,8 +43,9 @@ public:
void setInstanced(sk_sp<const GrBuffer> instanceBuffer, int instanceCount, int baseInstance, void setInstanced(sk_sp<const GrBuffer> instanceBuffer, int instanceCount, int baseInstance,
int vertexCount); int vertexCount);
void setIndexedInstanced(sk_sp<const GrBuffer>, int indexCount, sk_sp<const GrBuffer>, void setIndexedInstanced(sk_sp<const GrBuffer> indexBuffer, int indexCount,
int instanceCount, int baseInstance, GrPrimitiveRestart); sk_sp<const GrBuffer> instanceBuffer, int instanceCount,
int baseInstance, GrPrimitiveRestart);
void setVertexData(sk_sp<const GrBuffer> vertexBuffer, int baseVertex = 0); void setVertexData(sk_sp<const GrBuffer> vertexBuffer, int baseVertex = 0);
@ -127,8 +129,8 @@ private:
}; };
inline void GrMesh::setNonIndexedNonInstanced(int vertexCount) { inline void GrMesh::setNonIndexedNonInstanced(int vertexCount) {
fIndexBuffer.reset(nullptr); fIndexBuffer.reset();
fInstanceBuffer.reset(nullptr); fInstanceBuffer.reset();
fNonIndexNonInstanceData.fVertexCount = vertexCount; fNonIndexNonInstanceData.fVertexCount = vertexCount;
fPrimitiveRestart = GrPrimitiveRestart::kNo; fPrimitiveRestart = GrPrimitiveRestart::kNo;
} }

View File

@ -73,20 +73,18 @@ bool GrOnFlushResourceProvider::instatiateProxy(GrSurfaceProxy* proxy) {
return proxy->instantiate(resourceProvider); return proxy->instantiate(resourceProvider);
} }
sk_sp<GrBuffer> GrOnFlushResourceProvider::makeBuffer(GrGpuBufferType intendedType, size_t size, sk_sp<GrGpuBuffer> GrOnFlushResourceProvider::makeBuffer(GrGpuBufferType intendedType, size_t size,
const void* data) { const void* data) {
auto resourceProvider = fDrawingMgr->getContext()->priv().resourceProvider(); auto resourceProvider = fDrawingMgr->getContext()->priv().resourceProvider();
return sk_sp<GrBuffer>(resourceProvider->createBuffer(size, intendedType, return sk_sp<GrGpuBuffer>(
kDynamic_GrAccessPattern, resourceProvider->createBuffer(size, intendedType, kDynamic_GrAccessPattern, data));
GrResourceProvider::Flags::kNone,
data));
} }
sk_sp<const GrBuffer> GrOnFlushResourceProvider::findOrMakeStaticBuffer( sk_sp<const GrGpuBuffer> GrOnFlushResourceProvider::findOrMakeStaticBuffer(
GrGpuBufferType intendedType, size_t size, const void* data, const GrUniqueKey& key) { GrGpuBufferType intendedType, size_t size, const void* data, const GrUniqueKey& key) {
auto resourceProvider = fDrawingMgr->getContext()->priv().resourceProvider(); auto resourceProvider = fDrawingMgr->getContext()->priv().resourceProvider();
sk_sp<const GrBuffer> buffer = resourceProvider->findOrMakeStaticBuffer(intendedType, size, sk_sp<const GrGpuBuffer> buffer =
data, key); resourceProvider->findOrMakeStaticBuffer(intendedType, size, data, key);
// Static buffers should never have pending IO. // Static buffers should never have pending IO.
SkASSERT(!buffer || !buffer->resourcePriv().hasPendingIO_debugOnly()); SkASSERT(!buffer || !buffer->resourcePriv().hasPendingIO_debugOnly());
return buffer; return buffer;

View File

@ -86,11 +86,11 @@ public:
bool instatiateProxy(GrSurfaceProxy*); bool instatiateProxy(GrSurfaceProxy*);
// Creates a GPU buffer with a "dynamic" access pattern. // Creates a GPU buffer with a "dynamic" access pattern.
sk_sp<GrBuffer> makeBuffer(GrGpuBufferType, size_t, const void* data = nullptr); sk_sp<GrGpuBuffer> makeBuffer(GrGpuBufferType, size_t, const void* data = nullptr);
// Either finds and refs, or creates a static GPU buffer with the given data. // Either finds and refs, or creates a static GPU buffer with the given data.
sk_sp<const GrBuffer> findOrMakeStaticBuffer(GrGpuBufferType, size_t, const void* data, sk_sp<const GrGpuBuffer> findOrMakeStaticBuffer(GrGpuBufferType, size_t, const void* data,
const GrUniqueKey&); const GrUniqueKey&);
uint32_t contextID() const; uint32_t contextID() const;
const GrCaps* caps() const; const GrCaps* caps() const;

View File

@ -8,8 +8,8 @@
#ifndef GrProcessor_DEFINED #ifndef GrProcessor_DEFINED
#define GrProcessor_DEFINED #define GrProcessor_DEFINED
#include "GrBuffer.h"
#include "GrColor.h" #include "GrColor.h"
#include "GrGpuBuffer.h"
#include "GrProcessorUnitTest.h" #include "GrProcessorUnitTest.h"
#include "GrSamplerState.h" #include "GrSamplerState.h"
#include "GrShaderVar.h" #include "GrShaderVar.h"

View File

@ -6,13 +6,13 @@
*/ */
#include "GrResourceProvider.h" #include "GrResourceProvider.h"
#include "../private/GrSingleOwner.h"
#include "GrBackendSemaphore.h" #include "GrBackendSemaphore.h"
#include "GrBuffer.h"
#include "GrCaps.h" #include "GrCaps.h"
#include "GrContext.h" #include "GrContext.h"
#include "GrContextPriv.h" #include "GrContextPriv.h"
#include "GrGpu.h" #include "GrGpu.h"
#include "GrGpuBuffer.h"
#include "GrPath.h" #include "GrPath.h"
#include "GrPathRendering.h" #include "GrPathRendering.h"
#include "GrProxyProvider.h" #include "GrProxyProvider.h"
@ -22,7 +22,6 @@
#include "GrSemaphore.h" #include "GrSemaphore.h"
#include "GrStencilAttachment.h" #include "GrStencilAttachment.h"
#include "GrTexturePriv.h" #include "GrTexturePriv.h"
#include "../private/GrSingleOwner.h"
#include "SkGr.h" #include "SkGr.h"
#include "SkMathPriv.h" #include "SkMathPriv.h"
@ -285,35 +284,34 @@ sk_sp<GrGpuResource> GrResourceProvider::findResourceByUniqueKey(const GrUniqueK
: sk_sp<GrGpuResource>(fCache->findAndRefUniqueResource(key)); : sk_sp<GrGpuResource>(fCache->findAndRefUniqueResource(key));
} }
sk_sp<const GrBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferType intendedType, sk_sp<const GrGpuBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferType intendedType,
size_t size, size_t size,
const void* data, const void* data,
const GrUniqueKey& key) { const GrUniqueKey& key) {
if (auto buffer = this->findByUniqueKey<GrBuffer>(key)) { if (auto buffer = this->findByUniqueKey<GrGpuBuffer>(key)) {
return std::move(buffer); return std::move(buffer);
} }
if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, Flags::kNone, if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, data)) {
data)) {
// We shouldn't bin and/or cache static buffers. // We shouldn't bin and/or cache static buffers.
SkASSERT(buffer->sizeInBytes() == size); SkASSERT(buffer->size() == size);
SkASSERT(!buffer->resourcePriv().getScratchKey().isValid()); SkASSERT(!buffer->resourcePriv().getScratchKey().isValid());
SkASSERT(!buffer->resourcePriv().hasPendingIO_debugOnly()); SkASSERT(!buffer->resourcePriv().hasPendingIO_debugOnly());
buffer->resourcePriv().setUniqueKey(key); buffer->resourcePriv().setUniqueKey(key);
return sk_sp<const GrBuffer>(buffer); return sk_sp<const GrGpuBuffer>(buffer);
} }
return nullptr; return nullptr;
} }
sk_sp<const GrBuffer> GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern, sk_sp<const GrGpuBuffer> GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern,
int patternSize, int patternSize,
int reps, int reps,
int vertCount, int vertCount,
const GrUniqueKey& key) { const GrUniqueKey& key) {
size_t bufferSize = patternSize * reps * sizeof(uint16_t); size_t bufferSize = patternSize * reps * sizeof(uint16_t);
// This is typically used in GrMeshDrawOps, so we assume kNoPendingIO. // This is typically used in GrMeshDrawOps, so we assume kNoPendingIO.
sk_sp<GrBuffer> buffer(this->createBuffer(bufferSize, GrGpuBufferType::kIndex, sk_sp<GrGpuBuffer> buffer(
kStatic_GrAccessPattern, Flags::kNone)); this->createBuffer(bufferSize, GrGpuBufferType::kIndex, kStatic_GrAccessPattern));
if (!buffer) { if (!buffer) {
return nullptr; return nullptr;
} }
@ -343,7 +341,7 @@ sk_sp<const GrBuffer> GrResourceProvider::createPatternedIndexBuffer(const uint1
static constexpr int kMaxQuads = 1 << 12; // max possible: (1 << 14) - 1; static constexpr int kMaxQuads = 1 << 12; // max possible: (1 << 14) - 1;
sk_sp<const GrBuffer> GrResourceProvider::createQuadIndexBuffer() { sk_sp<const GrGpuBuffer> GrResourceProvider::createQuadIndexBuffer() {
GR_STATIC_ASSERT(4 * kMaxQuads <= 65535); GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
static const uint16_t kPattern[] = { 0, 1, 2, 2, 1, 3 }; static const uint16_t kPattern[] = { 0, 1, 2, 2, 1, 3 };
return this->createPatternedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey); return this->createPatternedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey);
@ -360,36 +358,24 @@ sk_sp<GrPath> GrResourceProvider::createPath(const SkPath& path, const GrStyle&
return this->gpu()->pathRendering()->createPath(path, style); return this->gpu()->pathRendering()->createPath(path, style);
} }
sk_sp<GrBuffer> GrResourceProvider::createBuffer(size_t size, GrGpuBufferType intendedType, sk_sp<GrGpuBuffer> GrResourceProvider::createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, Flags flags, GrAccessPattern accessPattern,
const void* data) { const void* data) {
if (this->isAbandoned()) { if (this->isAbandoned()) {
return nullptr; return nullptr;
} }
if (kDynamic_GrAccessPattern != accessPattern) { if (kDynamic_GrAccessPattern != accessPattern) {
return this->gpu()->createBuffer(size, intendedType, accessPattern, data); return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
} }
if (!(flags & Flags::kRequireGpuMemory) &&
this->gpu()->caps()->preferClientSideDynamicBuffers() &&
GrBufferTypeIsVertexOrIndex(intendedType) &&
kDynamic_GrAccessPattern == accessPattern) {
return GrBuffer::MakeCPUBacked(this->gpu(), size, intendedType, data);
}
// bin by pow2 with a reasonable min // bin by pow2 with a reasonable min
static const size_t MIN_SIZE = 1 << 12; static const size_t MIN_SIZE = 1 << 12;
size_t allocSize = SkTMax(MIN_SIZE, GrNextSizePow2(size)); size_t allocSize = SkTMax(MIN_SIZE, GrNextSizePow2(size));
GrScratchKey key; GrScratchKey key;
GrBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key); GrGpuBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
auto scratchFlags = GrResourceCache::ScratchFlags::kNone; auto buffer =
if (flags & Flags::kNoPendingIO) { sk_sp<GrGpuBuffer>(static_cast<GrGpuBuffer*>(this->cache()->findAndRefScratchResource(
scratchFlags = GrResourceCache::ScratchFlags::kRequireNoPendingIO; key, allocSize, GrResourceCache::ScratchFlags::kNone)));
} else {
scratchFlags = GrResourceCache::ScratchFlags::kPreferNoPendingIO;
}
auto buffer = sk_sp<GrBuffer>(static_cast<GrBuffer*>(
this->cache()->findAndRefScratchResource(key, allocSize, scratchFlags)));
if (!buffer) { if (!buffer) {
buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern); buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern);
if (!buffer) { if (!buffer) {
@ -399,7 +385,6 @@ sk_sp<GrBuffer> GrResourceProvider::createBuffer(size_t size, GrGpuBufferType in
if (data) { if (data) {
buffer->updateData(data, size); buffer->updateData(data, size);
} }
SkASSERT(!buffer->isCPUBacked()); // We should only cache real VBOs.
return buffer; return buffer;
} }

View File

@ -8,8 +8,8 @@
#ifndef GrResourceProvider_DEFINED #ifndef GrResourceProvider_DEFINED
#define GrResourceProvider_DEFINED #define GrResourceProvider_DEFINED
#include "GrBuffer.h"
#include "GrContextOptions.h" #include "GrContextOptions.h"
#include "GrGpuBuffer.h"
#include "GrResourceCache.h" #include "GrResourceCache.h"
#include "SkImageInfoPriv.h" #include "SkImageInfoPriv.h"
#include "SkScalerContext.h" #include "SkScalerContext.h"
@ -51,11 +51,6 @@ public:
* Make this automatic: https://bug.skia.org/4156 * Make this automatic: https://bug.skia.org/4156
*/ */
kNoPendingIO = 0x1, kNoPendingIO = 0x1,
/** Normally the caps may indicate a preference for client-side buffers. Set this flag when
* creating a buffer to guarantee it resides in GPU memory.
*/
kRequireGpuMemory = 0x2,
}; };
GrResourceProvider(GrGpu*, GrResourceCache*, GrSingleOwner*, GrResourceProvider(GrGpu*, GrResourceCache*, GrSingleOwner*,
@ -66,7 +61,9 @@ public:
* must be sure that if a resource of exists in the cache with the given unique key then it is * must be sure that if a resource of exists in the cache with the given unique key then it is
* of type T. * of type T.
*/ */
template <typename T = GrGpuResource> sk_sp<T> findByUniqueKey(const GrUniqueKey& key) { template <typename T = GrGpuResource>
typename std::enable_if<std::is_base_of<GrGpuResource, T>::value, sk_sp<T>>::type
findByUniqueKey(const GrUniqueKey& key) {
return sk_sp<T>(static_cast<T*>(this->findResourceByUniqueKey(key).release())); return sk_sp<T>(static_cast<T*>(this->findResourceByUniqueKey(key).release()));
} }
@ -145,8 +142,8 @@ public:
* *
* @return The buffer if successful, otherwise nullptr. * @return The buffer if successful, otherwise nullptr.
*/ */
sk_sp<const GrBuffer> findOrMakeStaticBuffer(GrGpuBufferType intendedType, size_t size, sk_sp<const GrGpuBuffer> findOrMakeStaticBuffer(GrGpuBufferType intendedType, size_t size,
const void* data, const GrUniqueKey& key); const void* data, const GrUniqueKey& key);
/** /**
* Either finds and refs, or creates an index buffer with a repeating pattern for drawing * Either finds and refs, or creates an index buffer with a repeating pattern for drawing
@ -161,12 +158,12 @@ public:
* *
* @return The index buffer if successful, otherwise nullptr. * @return The index buffer if successful, otherwise nullptr.
*/ */
sk_sp<const GrBuffer> findOrCreatePatternedIndexBuffer(const uint16_t* pattern, sk_sp<const GrGpuBuffer> findOrCreatePatternedIndexBuffer(const uint16_t* pattern,
int patternSize, int patternSize,
int reps, int reps,
int vertCount, int vertCount,
const GrUniqueKey& key) { const GrUniqueKey& key) {
if (auto buffer = this->findByUniqueKey<GrBuffer>(key)) { if (auto buffer = this->findByUniqueKey<GrGpuBuffer>(key)) {
return std::move(buffer); return std::move(buffer);
} }
return this->createPatternedIndexBuffer(pattern, patternSize, reps, vertCount, key); return this->createPatternedIndexBuffer(pattern, patternSize, reps, vertCount, key);
@ -179,8 +176,8 @@ public:
* Draw with GrPrimitiveType::kTriangles * Draw with GrPrimitiveType::kTriangles
* @ return the quad index buffer * @ return the quad index buffer
*/ */
sk_sp<const GrBuffer> refQuadIndexBuffer() { sk_sp<const GrGpuBuffer> refQuadIndexBuffer() {
if (auto buffer = this->findByUniqueKey<const GrBuffer>(fQuadIndexBufferKey)) { if (auto buffer = this->findByUniqueKey<const GrGpuBuffer>(fQuadIndexBufferKey)) {
return buffer; return buffer;
} }
return this->createQuadIndexBuffer(); return this->createQuadIndexBuffer();
@ -205,8 +202,8 @@ public:
* *
* @return the buffer if successful, otherwise nullptr. * @return the buffer if successful, otherwise nullptr.
*/ */
sk_sp<GrBuffer> createBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern, Flags, sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
const void* data = nullptr); const void* data = nullptr);
/** /**
* If passed in render target already has a stencil buffer, return true. Otherwise attempt to * If passed in render target already has a stencil buffer, return true. Otherwise attempt to
@ -286,13 +283,13 @@ private:
return !SkToBool(fCache); return !SkToBool(fCache);
} }
sk_sp<const GrBuffer> createPatternedIndexBuffer(const uint16_t* pattern, sk_sp<const GrGpuBuffer> createPatternedIndexBuffer(const uint16_t* pattern,
int patternSize, int patternSize,
int reps, int reps,
int vertCount, int vertCount,
const GrUniqueKey& key); const GrUniqueKey& key);
sk_sp<const GrBuffer> createQuadIndexBuffer(); sk_sp<const GrGpuBuffer> createQuadIndexBuffer();
GrResourceCache* fCache; GrResourceCache* fCache;
GrGpu* fGpu; GrGpu* fGpu;

View File

@ -100,7 +100,7 @@ public:
// Appends a GrMesh that will draw the provided instances. The instanceBuffer must be an array // Appends a GrMesh that will draw the provided instances. The instanceBuffer must be an array
// of either TriPointInstance or QuadPointInstance, depending on this processor's RendererPass, // of either TriPointInstance or QuadPointInstance, depending on this processor's RendererPass,
// with coordinates in the desired shape's final atlas-space position. // with coordinates in the desired shape's final atlas-space position.
void appendMesh(sk_sp<GrBuffer> instanceBuffer, int instanceCount, int baseInstance, void appendMesh(sk_sp<GrGpuBuffer> instanceBuffer, int instanceCount, int baseInstance,
SkTArray<GrMesh>* out) const { SkTArray<GrMesh>* out) const {
if (Impl::kGeometryShader == fImpl) { if (Impl::kGeometryShader == fImpl) {
this->appendGSMesh(std::move(instanceBuffer), instanceCount, baseInstance, out); this->appendGSMesh(std::move(instanceBuffer), instanceCount, baseInstance, out);
@ -250,9 +250,9 @@ private:
void initGS(); void initGS();
void initVS(GrResourceProvider*); void initVS(GrResourceProvider*);
void appendGSMesh(sk_sp<const GrBuffer> instanceBuffer, int instanceCount, int baseInstance, void appendGSMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount, int baseInstance,
SkTArray<GrMesh>* out) const; SkTArray<GrMesh>* out) const;
void appendVSMesh(sk_sp<const GrBuffer> instanceBuffer, int instanceCount, int baseInstance, void appendVSMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount, int baseInstance,
SkTArray<GrMesh>* out) const; SkTArray<GrMesh>* out) const;
GrGLSLPrimitiveProcessor* createGSImpl(std::unique_ptr<Shader>) const; GrGLSLPrimitiveProcessor* createGSImpl(std::unique_ptr<Shader>) const;
@ -269,8 +269,8 @@ private:
// Used by VSImpl. // Used by VSImpl.
Attribute fInstanceAttributes[2]; Attribute fInstanceAttributes[2];
sk_sp<const GrBuffer> fVSVertexBuffer; sk_sp<const GrGpuBuffer> fVSVertexBuffer;
sk_sp<const GrBuffer> fVSIndexBuffer; sk_sp<const GrGpuBuffer> fVSIndexBuffer;
int fVSNumIndicesPerInstance; int fVSNumIndicesPerInstance;
GrPrimitiveType fVSTriangleType; GrPrimitiveType fVSTriangleType;

View File

@ -396,7 +396,7 @@ void GrCCCoverageProcessor::initGS() {
this->setWillUseGeoShader(); this->setWillUseGeoShader();
} }
void GrCCCoverageProcessor::appendGSMesh(sk_sp<const GrBuffer> instanceBuffer, int instanceCount, void GrCCCoverageProcessor::appendGSMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount,
int baseInstance, SkTArray<GrMesh>* out) const { int baseInstance, SkTArray<GrMesh>* out) const {
// GSImpl doesn't actually make instanced draw calls. Instead, we feed transposed x,y point // GSImpl doesn't actually make instanced draw calls. Instead, we feed transposed x,y point
// values to the GPU in a regular vertex array and draw kLines (see initGS). Then, each vertex // values to the GPU in a regular vertex array and draw kLines (see initGS). Then, each vertex

View File

@ -528,7 +528,7 @@ void GrCCCoverageProcessor::initVS(GrResourceProvider* rp) {
} }
} }
void GrCCCoverageProcessor::appendVSMesh(sk_sp<const GrBuffer> instanceBuffer, int instanceCount, void GrCCCoverageProcessor::appendVSMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount,
int baseInstance, SkTArray<GrMesh>* out) const { int baseInstance, SkTArray<GrMesh>* out) const {
SkASSERT(Impl::kVertexShader == fImpl); SkASSERT(Impl::kVertexShader == fImpl);
GrMesh& mesh = out->emplace_back(fVSTriangleType); GrMesh& mesh = out->emplace_back(fVSTriangleType);

View File

@ -106,7 +106,7 @@ private:
PrimitiveTallies fTotalPrimitiveCounts[kNumScissorModes]; PrimitiveTallies fTotalPrimitiveCounts[kNumScissorModes];
int fMaxMeshesPerDraw = 0; int fMaxMeshesPerDraw = 0;
sk_sp<GrBuffer> fInstanceBuffer; sk_sp<GrGpuBuffer> fInstanceBuffer;
PrimitiveTallies fBaseInstances[kNumScissorModes]; PrimitiveTallies fBaseInstances[kNumScissorModes];
mutable SkSTArray<32, GrMesh> fMeshesScratchBuffer; mutable SkSTArray<32, GrMesh> fMeshesScratchBuffer;
mutable SkSTArray<32, SkIRect> fScissorRectScratchBuffer; mutable SkSTArray<32, SkIRect> fScissorRectScratchBuffer;

View File

@ -34,7 +34,7 @@ static constexpr float kOctoEdgeNorms[8 * 4] = {
GR_DECLARE_STATIC_UNIQUE_KEY(gVertexBufferKey); GR_DECLARE_STATIC_UNIQUE_KEY(gVertexBufferKey);
sk_sp<const GrBuffer> GrCCPathProcessor::FindVertexBuffer(GrOnFlushResourceProvider* onFlushRP) { sk_sp<const GrGpuBuffer> GrCCPathProcessor::FindVertexBuffer(GrOnFlushResourceProvider* onFlushRP) {
GR_DEFINE_STATIC_UNIQUE_KEY(gVertexBufferKey); GR_DEFINE_STATIC_UNIQUE_KEY(gVertexBufferKey);
return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kVertex, sizeof(kOctoEdgeNorms), return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kVertex, sizeof(kOctoEdgeNorms),
kOctoEdgeNorms, gVertexBufferKey); kOctoEdgeNorms, gVertexBufferKey);
@ -64,7 +64,7 @@ GR_DECLARE_STATIC_UNIQUE_KEY(gIndexBufferKey);
constexpr GrPrimitiveProcessor::Attribute GrCCPathProcessor::kInstanceAttribs[]; constexpr GrPrimitiveProcessor::Attribute GrCCPathProcessor::kInstanceAttribs[];
constexpr GrPrimitiveProcessor::Attribute GrCCPathProcessor::kEdgeNormsAttrib; constexpr GrPrimitiveProcessor::Attribute GrCCPathProcessor::kEdgeNormsAttrib;
sk_sp<const GrBuffer> GrCCPathProcessor::FindIndexBuffer(GrOnFlushResourceProvider* onFlushRP) { sk_sp<const GrGpuBuffer> GrCCPathProcessor::FindIndexBuffer(GrOnFlushResourceProvider* onFlushRP) {
GR_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey); GR_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey);
if (onFlushRP->caps()->usePrimitiveRestart()) { if (onFlushRP->caps()->usePrimitiveRestart()) {
return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kIndex, return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,

View File

@ -66,8 +66,8 @@ public:
GR_STATIC_ASSERT(4 * 12 == sizeof(Instance)); GR_STATIC_ASSERT(4 * 12 == sizeof(Instance));
static sk_sp<const GrBuffer> FindVertexBuffer(GrOnFlushResourceProvider*); static sk_sp<const GrGpuBuffer> FindVertexBuffer(GrOnFlushResourceProvider*);
static sk_sp<const GrBuffer> FindIndexBuffer(GrOnFlushResourceProvider*); static sk_sp<const GrGpuBuffer> FindIndexBuffer(GrOnFlushResourceProvider*);
GrCCPathProcessor(const GrTextureProxy* atlas, GrCCPathProcessor(const GrTextureProxy* atlas,
const SkMatrix& viewMatrixIfUsingLocalCoords = SkMatrix::I()); const SkMatrix& viewMatrixIfUsingLocalCoords = SkMatrix::I());

View File

@ -105,15 +105,15 @@ public:
// Accessors used by draw calls, once the resources have been finalized. // Accessors used by draw calls, once the resources have been finalized.
const GrCCFiller& filler() const { SkASSERT(!this->isMapped()); return fFiller; } const GrCCFiller& filler() const { SkASSERT(!this->isMapped()); return fFiller; }
const GrCCStroker& stroker() const { SkASSERT(!this->isMapped()); return fStroker; } const GrCCStroker& stroker() const { SkASSERT(!this->isMapped()); return fStroker; }
sk_sp<const GrBuffer> refIndexBuffer() const { sk_sp<const GrGpuBuffer> refIndexBuffer() const {
SkASSERT(!this->isMapped()); SkASSERT(!this->isMapped());
return fIndexBuffer; return fIndexBuffer;
} }
sk_sp<const GrBuffer> refVertexBuffer() const { sk_sp<const GrGpuBuffer> refVertexBuffer() const {
SkASSERT(!this->isMapped()); SkASSERT(!this->isMapped());
return fVertexBuffer; return fVertexBuffer;
} }
sk_sp<const GrBuffer> refInstanceBuffer() const { sk_sp<const GrGpuBuffer> refInstanceBuffer() const {
SkASSERT(!this->isMapped()); SkASSERT(!this->isMapped());
return fInstanceBuffer; return fInstanceBuffer;
} }
@ -131,9 +131,9 @@ private:
GrCCAtlasStack fCopyAtlasStack; GrCCAtlasStack fCopyAtlasStack;
GrCCAtlasStack fRenderedAtlasStack; GrCCAtlasStack fRenderedAtlasStack;
const sk_sp<const GrBuffer> fIndexBuffer; const sk_sp<const GrGpuBuffer> fIndexBuffer;
const sk_sp<const GrBuffer> fVertexBuffer; const sk_sp<const GrGpuBuffer> fVertexBuffer;
const sk_sp<GrBuffer> fInstanceBuffer; const sk_sp<GrGpuBuffer> fInstanceBuffer;
GrCCPathProcessor::Instance* fPathInstanceData = nullptr; GrCCPathProcessor::Instance* fPathInstanceData = nullptr;
int fNextCopyInstanceIdx; int fNextCopyInstanceIdx;

View File

@ -497,7 +497,7 @@ public:
} }
} }
sk_sp<GrBuffer> finish() { sk_sp<GrGpuBuffer> finish() {
SkASSERT(this->isMapped()); SkASSERT(this->isMapped());
SkASSERT(!memcmp(fNextInstances, fEndInstances, sizeof(fNextInstances))); SkASSERT(!memcmp(fNextInstances, fEndInstances, sizeof(fNextInstances)));
fInstanceBuffer->unmap(); fInstanceBuffer->unmap();
@ -543,7 +543,7 @@ private:
InstanceTallies* fCurrNextInstances; InstanceTallies* fCurrNextInstances;
SkDEBUGCODE(const InstanceTallies* fCurrEndInstances); SkDEBUGCODE(const InstanceTallies* fCurrEndInstances);
sk_sp<GrBuffer> fInstanceBuffer; sk_sp<GrGpuBuffer> fInstanceBuffer;
void* fInstanceBufferData = nullptr; void* fInstanceBufferData = nullptr;
InstanceTallies fNextInstances[2]; InstanceTallies fNextInstances[2];
SkDEBUGCODE(InstanceTallies fEndInstances[2]); SkDEBUGCODE(InstanceTallies fEndInstances[2]);

View File

@ -13,7 +13,7 @@
#include "SkNx.h" #include "SkNx.h"
#include "ccpr/GrCCStrokeGeometry.h" #include "ccpr/GrCCStrokeGeometry.h"
class GrBuffer; class GrGpuBuffer;
class GrCCCoverageProcessor; class GrCCCoverageProcessor;
class GrOnFlushResourceProvider; class GrOnFlushResourceProvider;
class GrOpFlushState; class GrOpFlushState;
@ -116,7 +116,7 @@ private:
GrSTAllocator<128, InstanceTallies> fTalliesAllocator; GrSTAllocator<128, InstanceTallies> fTalliesAllocator;
const InstanceTallies* fInstanceCounts[kNumScissorModes] = {&fZeroTallies, &fZeroTallies}; const InstanceTallies* fInstanceCounts[kNumScissorModes] = {&fZeroTallies, &fZeroTallies};
sk_sp<GrBuffer> fInstanceBuffer; sk_sp<GrGpuBuffer> fInstanceBuffer;
// The indices stored in batches are relative to these base instances. // The indices stored in batches are relative to these base instances.
InstanceTallies fBaseInstances[kNumScissorModes]; InstanceTallies fBaseInstances[kNumScissorModes];

View File

@ -176,8 +176,8 @@ void GrGLBuffer::onMap() {
case GrGLCaps::kMapBuffer_MapBufferType: { case GrGLCaps::kMapBuffer_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Let driver know it can discard the old data // Let driver know it can discard the old data
if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->sizeInBytes()) { if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
} }
GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
break; break;
@ -185,30 +185,30 @@ void GrGLBuffer::onMap() {
case GrGLCaps::kMapBufferRange_MapBufferType: { case GrGLCaps::kMapBufferRange_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Make sure the GL buffer size agrees with fDesc before mapping. // Make sure the GL buffer size agrees with fDesc before mapping.
if (fGLSizeInBytes != this->sizeInBytes()) { if (fGLSizeInBytes != this->size()) {
GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
} }
GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT; GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) { if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
// TODO: Make this a function parameter. // TODO: Make this a function parameter.
writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT; writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
} }
GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->sizeInBytes(), GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(),
readOnly ? GR_GL_MAP_READ_BIT : writeAccess)); readOnly ? GR_GL_MAP_READ_BIT : writeAccess));
break; break;
} }
case GrGLCaps::kChromium_MapBufferType: { case GrGLCaps::kChromium_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Make sure the GL buffer size agrees with fDesc before mapping. // Make sure the GL buffer size agrees with fDesc before mapping.
if (fGLSizeInBytes != this->sizeInBytes()) { if (fGLSizeInBytes != this->size()) {
GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
} }
GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->sizeInBytes(), GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY)); readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
break; break;
} }
} }
fGLSizeInBytes = this->sizeInBytes(); fGLSizeInBytes = this->size();
VALIDATE(); VALIDATE();
} }
@ -251,15 +251,15 @@ bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
SkASSERT(!this->isMapped()); SkASSERT(!this->isMapped());
VALIDATE(); VALIDATE();
if (srcSizeInBytes > this->sizeInBytes()) { if (srcSizeInBytes > this->size()) {
return false; return false;
} }
SkASSERT(srcSizeInBytes <= this->sizeInBytes()); SkASSERT(srcSizeInBytes <= this->size());
// bindbuffer handles dirty context // bindbuffer handles dirty context
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this); GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
if (this->glCaps().useBufferDataNullHint()) { if (this->glCaps().useBufferDataNullHint()) {
if (this->sizeInBytes() == srcSizeInBytes) { if (this->size() == srcSizeInBytes) {
GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage)); GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
} else { } else {
// Before we call glBufferSubData we give the driver a hint using // Before we call glBufferSubData we give the driver a hint using
@ -269,10 +269,10 @@ bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
// assign a different allocation for the new contents to avoid // assign a different allocation for the new contents to avoid
// flushing the gpu past draws consuming the old contents. // flushing the gpu past draws consuming the old contents.
// TODO I think we actually want to try calling bufferData here // TODO I think we actually want to try calling bufferData here
GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage)); GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src)); GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
} }
fGLSizeInBytes = this->sizeInBytes(); fGLSizeInBytes = this->size();
} else { } else {
// Note that we're cheating on the size here. Currently no methods // Note that we're cheating on the size here. Currently no methods
// allow a partial update that preserves contents of non-updated // allow a partial update that preserves contents of non-updated
@ -296,7 +296,7 @@ void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
void GrGLBuffer::validate() const { void GrGLBuffer::validate() const {
SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes); SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->sizeInBytes()); SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->size());
} }
#endif #endif

View File

@ -8,13 +8,13 @@
#ifndef GrGLBuffer_DEFINED #ifndef GrGLBuffer_DEFINED
#define GrGLBuffer_DEFINED #define GrGLBuffer_DEFINED
#include "GrBuffer.h" #include "GrGpuBuffer.h"
#include "gl/GrGLTypes.h" #include "gl/GrGLTypes.h"
class GrGLGpu; class GrGLGpu;
class GrGLCaps; class GrGLCaps;
class GrGLBuffer : public GrBuffer { class GrGLBuffer : public GrGpuBuffer {
public: public:
static sk_sp<GrGLBuffer> Make(GrGLGpu*, size_t size, GrGpuBufferType intendedType, static sk_sp<GrGLBuffer> Make(GrGLGpu*, size_t size, GrGpuBufferType intendedType,
GrAccessPattern, const void* data = nullptr); GrAccessPattern, const void* data = nullptr);
@ -28,7 +28,7 @@ public:
/** /**
* Returns the actual size of the underlying GL buffer object. In certain cases we may make this * Returns the actual size of the underlying GL buffer object. In certain cases we may make this
* smaller than the size reported by GrBuffer. * smaller than the size reported by GrGpuBuffer.
*/ */
size_t glSizeInBytes() const { return fGLSizeInBytes; } size_t glSizeInBytes() const { return fGLSizeInBytes; }
@ -62,7 +62,7 @@ private:
size_t fGLSizeInBytes; size_t fGLSizeInBytes;
bool fHasAttachedToTexture; bool fHasAttachedToTexture;
typedef GrBuffer INHERITED; typedef GrGpuBuffer INHERITED;
}; };
#endif #endif

View File

@ -8,6 +8,7 @@
#include "GrGLGpu.h" #include "GrGLGpu.h"
#include "GrBackendSemaphore.h" #include "GrBackendSemaphore.h"
#include "GrBackendSurface.h" #include "GrBackendSurface.h"
#include "GrCpuBuffer.h"
#include "GrFixedClip.h" #include "GrFixedClip.h"
#include "GrGLBuffer.h" #include "GrGLBuffer.h"
#include "GrGLGpuCommandBuffer.h" #include "GrGLGpuCommandBuffer.h"
@ -842,8 +843,8 @@ static inline GrGLint config_alignment(GrPixelConfig config) {
} }
bool GrGLGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height, bool GrGLGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height,
GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset, GrColorType bufferColorType, GrGpuBuffer* transferBuffer,
size_t rowBytes) { size_t offset, size_t rowBytes) {
GrGLTexture* glTex = static_cast<GrGLTexture*>(texture); GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
GrPixelConfig texConfig = glTex->config(); GrPixelConfig texConfig = glTex->config();
SkASSERT(this->caps()->isConfigTexturable(texConfig)); SkASSERT(this->caps()->isConfigTexturable(texConfig));
@ -864,7 +865,7 @@ bool GrGLGpu::onTransferPixels(GrTexture* texture, int left, int top, int width,
GL_CALL(BindTexture(glTex->target(), glTex->textureID())); GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
SkASSERT(!transferBuffer->isMapped()); SkASSERT(!transferBuffer->isMapped());
SkASSERT(!transferBuffer->isCPUBacked()); SkASSERT(!transferBuffer->isCpuBuffer());
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer); const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer); this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer);
@ -1847,8 +1848,8 @@ GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRen
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
sk_sp<GrBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType, sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) { GrAccessPattern accessPattern, const void* data) {
return GrGLBuffer::Make(this, size, intendedType, accessPattern, data); return GrGLBuffer::Make(this, size, intendedType, accessPattern, data);
} }
@ -2063,7 +2064,8 @@ void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer,
GrGLAttribArrayState* attribState; GrGLAttribArrayState* attribState;
if (indexBuffer) { if (indexBuffer) {
SkASSERT(indexBuffer && !indexBuffer->isMapped()); SkASSERT(indexBuffer->isCpuBuffer() ||
!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());
attribState = fHWVertexArrayState.bindInternalVertexArray(this, indexBuffer); attribState = fHWVertexArrayState.bindInternalVertexArray(this, indexBuffer);
} else { } else {
attribState = fHWVertexArrayState.bindInternalVertexArray(this); attribState = fHWVertexArrayState.bindInternalVertexArray(this);
@ -2073,9 +2075,10 @@ void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer,
attribState->enableVertexArrays(this, numAttribs, enablePrimitiveRestart); attribState->enableVertexArrays(this, numAttribs, enablePrimitiveRestart);
if (int vertexStride = fHWProgram->vertexStride()) { if (int vertexStride = fHWProgram->vertexStride()) {
SkASSERT(vertexBuffer && !vertexBuffer->isMapped()); SkASSERT(vertexBuffer);
size_t bufferOffset = vertexBuffer->baseOffset(); SkASSERT(vertexBuffer->isCpuBuffer() ||
bufferOffset += baseVertex * static_cast<size_t>(vertexStride); !static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());
size_t bufferOffset = baseVertex * static_cast<size_t>(vertexStride);
for (int i = 0; i < fHWProgram->numVertexAttributes(); ++i) { for (int i = 0; i < fHWProgram->numVertexAttributes(); ++i) {
const auto& attrib = fHWProgram->vertexAttribute(i); const auto& attrib = fHWProgram->vertexAttribute(i);
static constexpr int kDivisor = 0; static constexpr int kDivisor = 0;
@ -2084,9 +2087,10 @@ void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer,
} }
} }
if (int instanceStride = fHWProgram->instanceStride()) { if (int instanceStride = fHWProgram->instanceStride()) {
SkASSERT(instanceBuffer && !instanceBuffer->isMapped()); SkASSERT(instanceBuffer);
size_t bufferOffset = instanceBuffer->baseOffset(); SkASSERT(instanceBuffer->isCpuBuffer() ||
bufferOffset += baseInstance * static_cast<size_t>(instanceStride); !static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());
size_t bufferOffset = baseInstance * static_cast<size_t>(instanceStride);
int attribIdx = fHWProgram->numVertexAttributes(); int attribIdx = fHWProgram->numVertexAttributes();
for (int i = 0; i < fHWProgram->numInstanceAttributes(); ++i, ++attribIdx) { for (int i = 0; i < fHWProgram->numInstanceAttributes(); ++i, ++attribIdx) {
const auto& attrib = fHWProgram->instanceAttribute(i); const auto& attrib = fHWProgram->instanceAttribute(i);
@ -2107,13 +2111,14 @@ GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) {
} }
auto* bufferState = this->hwBufferState(type); auto* bufferState = this->hwBufferState(type);
if (buffer->isCPUBacked()) { if (buffer->isCpuBuffer()) {
if (!bufferState->fBufferZeroKnownBound) { if (!bufferState->fBufferZeroKnownBound) {
GL_CALL(BindBuffer(bufferState->fGLTarget, 0)); GL_CALL(BindBuffer(bufferState->fGLTarget, 0));
bufferState->fBufferZeroKnownBound = true; bufferState->fBufferZeroKnownBound = true;
bufferState->fBoundBufferUniqueID.makeInvalid(); bufferState->fBoundBufferUniqueID.makeInvalid();
} }
} else if (buffer->uniqueID() != bufferState->fBoundBufferUniqueID) { } else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() !=
bufferState->fBoundBufferUniqueID) {
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer); const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID())); GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID()));
bufferState->fBufferZeroKnownBound = false; bufferState->fBufferZeroKnownBound = false;
@ -2608,21 +2613,29 @@ void GrGLGpu::sendMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* verte
fStats.incNumDraws(); fStats.incNumDraws();
} }
static const GrGLvoid* element_ptr(const GrBuffer* indexBuffer, int baseIndex) {
size_t baseOffset = baseIndex * sizeof(uint16_t);
if (indexBuffer->isCpuBuffer()) {
return static_cast<const GrCpuBuffer*>(indexBuffer)->data() + baseOffset;
} else {
return reinterpret_cast<const GrGLvoid*>(baseOffset);
}
}
void GrGLGpu::sendIndexedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* indexBuffer, void GrGLGpu::sendIndexedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* indexBuffer,
int indexCount, int baseIndex, uint16_t minIndexValue, int indexCount, int baseIndex, uint16_t minIndexValue,
uint16_t maxIndexValue, const GrBuffer* vertexBuffer, uint16_t maxIndexValue, const GrBuffer* vertexBuffer,
int baseVertex, GrPrimitiveRestart enablePrimitiveRestart) { int baseVertex, GrPrimitiveRestart enablePrimitiveRestart) {
const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType); const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
GrGLvoid* const indices = reinterpret_cast<void*>(indexBuffer->baseOffset() + const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex);
sizeof(uint16_t) * baseIndex);
this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, nullptr, 0, enablePrimitiveRestart); this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, nullptr, 0, enablePrimitiveRestart);
if (this->glCaps().drawRangeElementsSupport()) { if (this->glCaps().drawRangeElementsSupport()) {
GL_CALL(DrawRangeElements(glPrimType, minIndexValue, maxIndexValue, indexCount, GL_CALL(DrawRangeElements(glPrimType, minIndexValue, maxIndexValue, indexCount,
GR_GL_UNSIGNED_SHORT, indices)); GR_GL_UNSIGNED_SHORT, elementPtr));
} else { } else {
GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices)); GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr));
} }
fStats.incNumDraws(); fStats.incNumDraws();
} }
@ -2649,13 +2662,12 @@ void GrGLGpu::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType,
int instanceCount, int baseInstance, int instanceCount, int baseInstance,
GrPrimitiveRestart enablePrimitiveRestart) { GrPrimitiveRestart enablePrimitiveRestart) {
const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType); const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
GrGLvoid* indices = reinterpret_cast<void*>(indexBuffer->baseOffset() + const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex);
sizeof(uint16_t) * baseIndex);
int maxInstances = this->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount); int maxInstances = this->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount);
for (int i = 0; i < instanceCount; i += maxInstances) { for (int i = 0; i < instanceCount; i += maxInstances) {
this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, instanceBuffer, baseInstance + i, this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, instanceBuffer, baseInstance + i,
enablePrimitiveRestart); enablePrimitiveRestart);
GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices, GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr,
SkTMin(instanceCount - i, maxInstances))); SkTMin(instanceCount - i, maxInstances)));
fStats.incNumDraws(); fStats.incNumDraws();
} }

View File

@ -187,8 +187,8 @@ private:
sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted, sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const GrMipLevel texels[], int mipLevelCount) override; const GrMipLevel texels[], int mipLevelCount) override;
sk_sp<GrBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern, sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
const void* data) override; const void* data) override;
sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership, GrWrapCacheable, sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership, GrWrapCacheable,
GrIOType) override; GrIOType) override;
@ -233,7 +233,7 @@ private:
const GrMipLevel texels[], int mipLevelCount) override; const GrMipLevel texels[], int mipLevelCount) override;
bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType, bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType,
GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override; GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override;
// Before calling any variation of TexImage, TexSubImage, etc..., call this to ensure that the // Before calling any variation of TexImage, TexSubImage, etc..., call this to ensure that the
// PIXEL_UNPACK_BUFFER is unbound. // PIXEL_UNPACK_BUFFER is unbound.

View File

@ -6,6 +6,7 @@
*/ */
#include "GrGLVertexArray.h" #include "GrGLVertexArray.h"
#include "GrCpuBuffer.h"
#include "GrGLBuffer.h" #include "GrGLBuffer.h"
#include "GrGLGpu.h" #include "GrGLGpu.h"
@ -89,14 +90,32 @@ void GrGLAttribArrayState::set(GrGLGpu* gpu,
SkASSERT(index >= 0 && index < fAttribArrayStates.count()); SkASSERT(index >= 0 && index < fAttribArrayStates.count());
SkASSERT(0 == divisor || gpu->caps()->instanceAttribSupport()); SkASSERT(0 == divisor || gpu->caps()->instanceAttribSupport());
AttribArrayState* array = &fAttribArrayStates[index]; AttribArrayState* array = &fAttribArrayStates[index];
if (array->fVertexBufferUniqueID != vertexBuffer->uniqueID() || const char* offsetAsPtr;
bool bufferChanged = false;
if (vertexBuffer->isCpuBuffer()) {
if (!array->fUsingCpuBuffer) {
bufferChanged = true;
array->fUsingCpuBuffer = true;
}
offsetAsPtr = static_cast<const GrCpuBuffer*>(vertexBuffer)->data() + offsetInBytes;
} else {
auto gpuBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
if (array->fUsingCpuBuffer || array->fVertexBufferUniqueID != gpuBuffer->uniqueID()) {
bufferChanged = true;
array->fVertexBufferUniqueID = gpuBuffer->uniqueID();
}
offsetAsPtr = reinterpret_cast<const char*>(offsetInBytes);
}
if (bufferChanged ||
array->fCPUType != cpuType || array->fCPUType != cpuType ||
array->fGPUType != gpuType || array->fGPUType != gpuType ||
array->fStride != stride || array->fStride != stride ||
array->fOffset != offsetInBytes) { array->fOffset != offsetAsPtr) {
// We always have to call this if we're going to change the array pointer. 'array' is
// tracking the last buffer used to setup attrib pointers, not the last buffer bound.
// GrGLGpu will avoid redundant binds.
gpu->bindBuffer(GrGpuBufferType::kVertex, vertexBuffer); gpu->bindBuffer(GrGpuBufferType::kVertex, vertexBuffer);
const AttribLayout& layout = attrib_layout(cpuType); const AttribLayout& layout = attrib_layout(cpuType);
const GrGLvoid* offsetAsPtr = reinterpret_cast<const GrGLvoid*>(offsetInBytes);
if (GrSLTypeIsFloatType(gpuType)) { if (GrSLTypeIsFloatType(gpuType)) {
GR_GL_CALL(gpu->glInterface(), VertexAttribPointer(index, GR_GL_CALL(gpu->glInterface(), VertexAttribPointer(index,
layout.fCount, layout.fCount,
@ -113,11 +132,10 @@ void GrGLAttribArrayState::set(GrGLGpu* gpu,
stride, stride,
offsetAsPtr)); offsetAsPtr));
} }
array->fVertexBufferUniqueID = vertexBuffer->uniqueID();
array->fCPUType = cpuType; array->fCPUType = cpuType;
array->fGPUType = gpuType; array->fGPUType = gpuType;
array->fStride = stride; array->fStride = stride;
array->fOffset = offsetInBytes; array->fOffset = offsetAsPtr;
} }
if (gpu->caps()->instanceAttribSupport() && array->fDivisor != divisor) { if (gpu->caps()->instanceAttribSupport() && array->fDivisor != divisor) {
SkASSERT(0 == divisor || 1 == divisor); // not necessarily a requirement but what we expect. SkASSERT(0 == divisor || 1 == divisor); // not necessarily a requirement but what we expect.
@ -179,15 +197,19 @@ GrGLAttribArrayState* GrGLVertexArray::bind(GrGLGpu* gpu) {
GrGLAttribArrayState* GrGLVertexArray::bindWithIndexBuffer(GrGLGpu* gpu, const GrBuffer* ibuff) { GrGLAttribArrayState* GrGLVertexArray::bindWithIndexBuffer(GrGLGpu* gpu, const GrBuffer* ibuff) {
GrGLAttribArrayState* state = this->bind(gpu); GrGLAttribArrayState* state = this->bind(gpu);
if (state && fIndexBufferUniqueID != ibuff->uniqueID()) { if (!state) {
if (ibuff->isCPUBacked()) { return nullptr;
GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0)); }
} else { if (ibuff->isCpuBuffer()) {
GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0));
} else {
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(ibuff);
if (fIndexBufferUniqueID != glBuffer->uniqueID()) {
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(ibuff); const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(ibuff);
GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, GR_GL_CALL(gpu->glInterface(),
glBuffer->bufferID())); BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, glBuffer->bufferID()));
fIndexBufferUniqueID = glBuffer->uniqueID();
} }
fIndexBufferUniqueID = ibuff->uniqueID();
} }
return state; return state;
} }

View File

@ -75,13 +75,15 @@ private:
void invalidate() { void invalidate() {
fVertexBufferUniqueID.makeInvalid(); fVertexBufferUniqueID.makeInvalid();
fDivisor = kInvalidDivisor; fDivisor = kInvalidDivisor;
fUsingCpuBuffer = false;
} }
GrGpuResource::UniqueID fVertexBufferUniqueID; GrGpuResource::UniqueID fVertexBufferUniqueID;
bool fUsingCpuBuffer;
GrVertexAttribType fCPUType; GrVertexAttribType fCPUType;
GrSLType fGPUType; GrSLType fGPUType;
GrGLsizei fStride; GrGLsizei fStride;
size_t fOffset; const GrGLvoid* fOffset;
int fDivisor; int fDivisor;
}; };

View File

@ -8,11 +8,11 @@
#ifndef GrMockBuffer_DEFINED #ifndef GrMockBuffer_DEFINED
#define GrMockBuffer_DEFINED #define GrMockBuffer_DEFINED
#include "GrBuffer.h"
#include "GrCaps.h" #include "GrCaps.h"
#include "GrGpuBuffer.h"
#include "GrMockGpu.h" #include "GrMockGpu.h"
class GrMockBuffer : public GrBuffer { class GrMockBuffer : public GrGpuBuffer {
public: public:
GrMockBuffer(GrMockGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, GrMockBuffer(GrMockGpu* gpu, size_t sizeInBytes, GrGpuBufferType type,
GrAccessPattern accessPattern) GrAccessPattern accessPattern)
@ -23,13 +23,13 @@ public:
private: private:
void onMap() override { void onMap() override {
if (GrCaps::kNone_MapFlags != this->getGpu()->caps()->mapBufferFlags()) { if (GrCaps::kNone_MapFlags != this->getGpu()->caps()->mapBufferFlags()) {
fMapPtr = sk_malloc_throw(this->sizeInBytes()); fMapPtr = sk_malloc_throw(this->size());
} }
} }
void onUnmap() override { sk_free(fMapPtr); } void onUnmap() override { sk_free(fMapPtr); }
bool onUpdateData(const void* src, size_t srcSizeInBytes) override { return true; } bool onUpdateData(const void* src, size_t srcSizeInBytes) override { return true; }
typedef GrBuffer INHERITED; typedef GrGpuBuffer INHERITED;
}; };
#endif #endif

View File

@ -183,9 +183,9 @@ sk_sp<GrRenderTarget> GrMockGpu::onWrapBackendTextureAsRenderTarget(const GrBack
new GrMockRenderTarget(this, GrMockRenderTarget::kWrapped, desc, rtInfo)); new GrMockRenderTarget(this, GrMockRenderTarget::kWrapped, desc, rtInfo));
} }
sk_sp<GrBuffer> GrMockGpu::onCreateBuffer(size_t sizeInBytes, GrGpuBufferType type, sk_sp<GrGpuBuffer> GrMockGpu::onCreateBuffer(size_t sizeInBytes, GrGpuBufferType type,
GrAccessPattern accessPattern, const void*) { GrAccessPattern accessPattern, const void*) {
return sk_sp<GrBuffer>(new GrMockBuffer(this, sizeInBytes, type, accessPattern)); return sk_sp<GrGpuBuffer>(new GrMockBuffer(this, sizeInBytes, type, accessPattern));
} }
GrStencilAttachment* GrMockGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt, GrStencilAttachment* GrMockGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,

View File

@ -72,8 +72,8 @@ private:
sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&, sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
int sampleCnt) override; int sampleCnt) override;
sk_sp<GrBuffer> onCreateBuffer(size_t sizeInBytes, GrGpuBufferType, GrAccessPattern, sk_sp<GrGpuBuffer> onCreateBuffer(size_t sizeInBytes, GrGpuBufferType, GrAccessPattern,
const void*) override; const void*) override;
bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType, bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
void* buffer, size_t rowBytes) override { void* buffer, size_t rowBytes) override {
@ -86,7 +86,7 @@ private:
} }
bool onTransferPixels(GrTexture* texture, int left, int top, int width, int height, GrColorType, bool onTransferPixels(GrTexture* texture, int left, int top, int width, int height, GrColorType,
GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override { GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override {
return true; return true;
} }

View File

@ -8,14 +8,14 @@
#ifndef GrMtlBuffer_DEFINED #ifndef GrMtlBuffer_DEFINED
#define GrMtlBuffer_DEFINED #define GrMtlBuffer_DEFINED
#include "GrBuffer.h" #include "GrGpuBuffer.h"
#import <metal/metal.h> #import <metal/metal.h>
class GrMtlCaps; class GrMtlCaps;
class GrMtlGpu; class GrMtlGpu;
class GrMtlBuffer: public GrBuffer { class GrMtlBuffer: public GrGpuBuffer {
public: public:
static sk_sp<GrMtlBuffer> Make(GrMtlGpu*, size_t size, GrGpuBufferType intendedType, static sk_sp<GrMtlBuffer> Make(GrMtlGpu*, size_t size, GrGpuBufferType intendedType,
GrAccessPattern, const void* data = nullptr); GrAccessPattern, const void* data = nullptr);
@ -48,7 +48,7 @@ private:
id<MTLBuffer> fMtlBuffer; id<MTLBuffer> fMtlBuffer;
id<MTLBuffer> fMappedBuffer; id<MTLBuffer> fMappedBuffer;
typedef GrBuffer INHERITED; typedef GrGpuBuffer INHERITED;
}; };
#endif #endif

View File

@ -143,7 +143,8 @@ private:
sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&, sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
int sampleCnt) override; int sampleCnt) override;
sk_sp<GrBuffer> onCreateBuffer(size_t, GrGpuBufferType, GrAccessPattern, const void*) override; sk_sp<GrGpuBuffer> onCreateBuffer(size_t, GrGpuBufferType, GrAccessPattern,
const void*) override;
bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType, bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
void* buffer, size_t rowBytes) override; void* buffer, size_t rowBytes) override;
@ -153,7 +154,7 @@ private:
bool onTransferPixels(GrTexture*, bool onTransferPixels(GrTexture*,
int left, int top, int width, int height, int left, int top, int width, int height,
GrColorType, GrBuffer*, GrColorType, GrGpuBuffer*,
size_t offset, size_t rowBytes) override { size_t offset, size_t rowBytes) override {
return false; return false;
} }

View File

@ -131,8 +131,8 @@ void GrMtlGpu::submitCommandBuffer(SyncQueue sync) {
fCmdBuffer = [fQueue commandBuffer]; fCmdBuffer = [fQueue commandBuffer];
} }
sk_sp<GrBuffer> GrMtlGpu::onCreateBuffer(size_t size, GrGpuBufferType type, sk_sp<GrGpuBuffer> GrMtlGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
GrAccessPattern accessPattern, const void* data) { GrAccessPattern accessPattern, const void* data) {
return GrMtlBuffer::Make(this, size, type, accessPattern, data); return GrMtlBuffer::Make(this, size, type, accessPattern, data);
} }

View File

@ -274,8 +274,8 @@ void GrMtlGpuRTCommandBuffer::bindGeometry(const GrBuffer* vertexBuffer,
const GrBuffer* instanceBuffer) { const GrBuffer* instanceBuffer) {
size_t bufferIndex = GrMtlUniformHandler::kLastUniformBinding + 1; size_t bufferIndex = GrMtlUniformHandler::kLastUniformBinding + 1;
if (vertexBuffer) { if (vertexBuffer) {
SkASSERT(!vertexBuffer->isCPUBacked()); SkASSERT(!vertexBuffer->isCpuBuffer());
SkASSERT(!vertexBuffer->isMapped()); SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());
auto mtlVertexBuffer = static_cast<const GrMtlBuffer*>(vertexBuffer)->mtlBuffer(); auto mtlVertexBuffer = static_cast<const GrMtlBuffer*>(vertexBuffer)->mtlBuffer();
SkASSERT(mtlVertexBuffer); SkASSERT(mtlVertexBuffer);
@ -284,8 +284,8 @@ void GrMtlGpuRTCommandBuffer::bindGeometry(const GrBuffer* vertexBuffer,
atIndex: bufferIndex++]; atIndex: bufferIndex++];
} }
if (instanceBuffer) { if (instanceBuffer) {
SkASSERT(!instanceBuffer->isCPUBacked()); SkASSERT(!instanceBuffer->isCpuBuffer());
SkASSERT(!instanceBuffer->isMapped()); SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());
auto mtlInstanceBuffer = static_cast<const GrMtlBuffer*>(instanceBuffer)->mtlBuffer(); auto mtlInstanceBuffer = static_cast<const GrMtlBuffer*>(instanceBuffer)->mtlBuffer();
SkASSERT(mtlInstanceBuffer); SkASSERT(mtlInstanceBuffer);
@ -327,8 +327,8 @@ void GrMtlGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType prim
SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported. SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
id<MTLBuffer> mtlIndexBuffer; id<MTLBuffer> mtlIndexBuffer;
if (indexBuffer) { if (indexBuffer) {
SkASSERT(!indexBuffer->isCPUBacked()); SkASSERT(!indexBuffer->isCpuBuffer());
SkASSERT(!indexBuffer->isMapped()); SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());
mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer(); mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer();
SkASSERT(mtlIndexBuffer); SkASSERT(mtlIndexBuffer);

View File

@ -55,8 +55,8 @@ GrMtlPipelineState::GrMtlPipelineState(
, fXferProcessor(std::move(xferProcessor)) , fXferProcessor(std::move(xferProcessor))
, fFragmentProcessors(std::move(fragmentProcessors)) , fFragmentProcessors(std::move(fragmentProcessors))
, fFragmentProcessorCnt(fragmentProcessorCnt) , fFragmentProcessorCnt(fragmentProcessorCnt)
, fDataManager(uniforms, fGeometryUniformBuffer->sizeInBytes(), , fDataManager(uniforms, fGeometryUniformBuffer->size(),
fFragmentUniformBuffer->sizeInBytes()) { fFragmentUniformBuffer->size()) {
(void) fPixelFormat; // Suppress unused-var warning. (void) fPixelFormat; // Suppress unused-var warning.
} }

View File

@ -423,8 +423,7 @@ void GrAtlasTextOp::flush(GrMeshDrawOp::Target* target, FlushInfo* flushInfo) co
samplerState); samplerState);
} }
} }
int maxGlyphsPerDraw = int maxGlyphsPerDraw = static_cast<int>(flushInfo->fIndexBuffer->size() / sizeof(uint16_t) / 6);
static_cast<int>(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6);
GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles); GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
mesh->setIndexedPatterned(flushInfo->fIndexBuffer, kIndicesPerGlyph, kVerticesPerGlyph, mesh->setIndexedPatterned(flushInfo->fIndexBuffer, kIndicesPerGlyph, kVerticesPerGlyph,
flushInfo->fGlyphsToFlush, maxGlyphsPerDraw); flushInfo->fGlyphsToFlush, maxGlyphsPerDraw);

View File

@ -227,7 +227,7 @@ void GrDrawVerticesOp::drawVolatile(Target* target) {
// Allocate buffers. // Allocate buffers.
size_t vertexStride = gp->vertexStride(); size_t vertexStride = gp->vertexStride();
sk_sp<const GrBuffer> vertexBuffer = nullptr; sk_sp<const GrBuffer> vertexBuffer;
int firstVertex = 0; int firstVertex = 0;
void* verts = target->makeVertexSpace(vertexStride, fVertexCount, &vertexBuffer, &firstVertex); void* verts = target->makeVertexSpace(vertexStride, fVertexCount, &vertexBuffer, &firstVertex);
if (!verts) { if (!verts) {
@ -235,7 +235,7 @@ void GrDrawVerticesOp::drawVolatile(Target* target) {
return; return;
} }
sk_sp<const GrBuffer> indexBuffer = nullptr; sk_sp<const GrBuffer> indexBuffer;
int firstIndex = 0; int firstIndex = 0;
uint16_t* indices = nullptr; uint16_t* indices = nullptr;
if (this->isIndexed()) { if (this->isIndexed()) {
@ -286,10 +286,9 @@ void GrDrawVerticesOp::drawNonVolatile(Target* target) {
indexKeyBuilder.finish(); indexKeyBuilder.finish();
// Try to grab data from the cache. // Try to grab data from the cache.
sk_sp<GrBuffer> vertexBuffer = rp->findByUniqueKey<GrBuffer>(vertexKey); sk_sp<GrGpuBuffer> vertexBuffer = rp->findByUniqueKey<GrGpuBuffer>(vertexKey);
sk_sp<GrBuffer> indexBuffer = this->isIndexed() ? sk_sp<GrGpuBuffer> indexBuffer =
rp->findByUniqueKey<GrBuffer>(indexKey) : this->isIndexed() ? rp->findByUniqueKey<GrGpuBuffer>(indexKey) : nullptr;
nullptr;
// Draw using the cached buffers if possible. // Draw using the cached buffers if possible.
if (vertexBuffer && (!this->isIndexed() || indexBuffer)) { if (vertexBuffer && (!this->isIndexed() || indexBuffer)) {
@ -300,10 +299,8 @@ void GrDrawVerticesOp::drawNonVolatile(Target* target) {
// Allocate vertex buffer. // Allocate vertex buffer.
size_t vertexStride = gp->vertexStride(); size_t vertexStride = gp->vertexStride();
vertexBuffer = rp->createBuffer(fVertexCount * vertexStride, vertexBuffer = rp->createBuffer(
GrGpuBufferType::kVertex, fVertexCount * vertexStride, GrGpuBufferType::kVertex, kStatic_GrAccessPattern);
kStatic_GrAccessPattern,
GrResourceProvider::Flags::kNone);
void* verts = vertexBuffer ? vertexBuffer->map() : nullptr; void* verts = vertexBuffer ? vertexBuffer->map() : nullptr;
if (!verts) { if (!verts) {
SkDebugf("Could not allocate vertices\n"); SkDebugf("Could not allocate vertices\n");
@ -313,10 +310,8 @@ void GrDrawVerticesOp::drawNonVolatile(Target* target) {
// Allocate index buffer. // Allocate index buffer.
uint16_t* indices = nullptr; uint16_t* indices = nullptr;
if (this->isIndexed()) { if (this->isIndexed()) {
indexBuffer = rp->createBuffer(fIndexCount * sizeof(uint16_t), indexBuffer = rp->createBuffer(
GrGpuBufferType::kIndex, fIndexCount * sizeof(uint16_t), GrGpuBufferType::kIndex, kStatic_GrAccessPattern);
kStatic_GrAccessPattern,
GrResourceProvider::Flags::kNone);
indices = indexBuffer ? static_cast<uint16_t*>(indexBuffer->map()) : nullptr; indices = indexBuffer ? static_cast<uint16_t*>(indexBuffer->map()) : nullptr;
if (!indices) { if (!indices) {
SkDebugf("Could not allocate indices\n"); SkDebugf("Could not allocate indices\n");

View File

@ -45,10 +45,10 @@ void GrMeshDrawOp::PatternHelper::init(Target* target, GrPrimitiveType primitive
return; return;
} }
SkASSERT(vertexBuffer); SkASSERT(vertexBuffer);
size_t ibSize = indexBuffer->gpuMemorySize(); size_t ibSize = indexBuffer->size();
int maxRepetitions = static_cast<int>(ibSize / (sizeof(uint16_t) * indicesPerRepetition)); int maxRepetitions = static_cast<int>(ibSize / (sizeof(uint16_t) * indicesPerRepetition));
fMesh = target->allocMesh(primitiveType); fMesh = target->allocMesh(primitiveType);
fMesh->setIndexedPatterned(indexBuffer, indicesPerRepetition, verticesPerRepetition, fMesh->setIndexedPatterned(std::move(indexBuffer), indicesPerRepetition, verticesPerRepetition,
repeatCount, maxRepetitions); repeatCount, maxRepetitions);
fMesh->setVertexData(std::move(vertexBuffer), firstVertex); fMesh->setVertexData(std::move(vertexBuffer), firstVertex);
} }
@ -62,7 +62,7 @@ void GrMeshDrawOp::PatternHelper::recordDraw(
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
GrMeshDrawOp::QuadHelper::QuadHelper(Target* target, size_t vertexStride, int quadsToDraw) { GrMeshDrawOp::QuadHelper::QuadHelper(Target* target, size_t vertexStride, int quadsToDraw) {
sk_sp<const GrBuffer> quadIndexBuffer = target->resourceProvider()->refQuadIndexBuffer(); sk_sp<const GrGpuBuffer> quadIndexBuffer = target->resourceProvider()->refQuadIndexBuffer();
if (!quadIndexBuffer) { if (!quadIndexBuffer) {
SkDebugf("Could not get quad index buffer."); SkDebugf("Could not get quad index buffer.");
return; return;

View File

@ -34,8 +34,9 @@ protected:
space for the vertices and flushes the draws to the GrMeshDrawOp::Target. */ space for the vertices and flushes the draws to the GrMeshDrawOp::Target. */
class PatternHelper { class PatternHelper {
public: public:
PatternHelper(Target*, GrPrimitiveType, size_t vertexStride, sk_sp<const GrBuffer>, PatternHelper(Target*, GrPrimitiveType, size_t vertexStride,
int verticesPerRepetition, int indicesPerRepetition, int repeatCount); sk_sp<const GrBuffer> indexBuffer, int verticesPerRepetition,
int indicesPerRepetition, int repeatCount);
/** Called to issue draws to the GrMeshDrawOp::Target.*/ /** Called to issue draws to the GrMeshDrawOp::Target.*/
void recordDraw(Target*, sk_sp<const GrGeometryProcessor>, const GrPipeline*, void recordDraw(Target*, sk_sp<const GrGeometryProcessor>, const GrPipeline*,
@ -45,7 +46,7 @@ protected:
protected: protected:
PatternHelper() = default; PatternHelper() = default;
void init(Target*, GrPrimitiveType, size_t vertexStride, sk_sp<const GrBuffer>, void init(Target*, GrPrimitiveType, size_t vertexStride, sk_sp<const GrBuffer> indexBuffer,
int verticesPerRepetition, int indicesPerRepetition, int repeatCount); int verticesPerRepetition, int indicesPerRepetition, int repeatCount);
private: private:

View File

@ -374,7 +374,7 @@ GR_DECLARE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey);
static const int kVertsPerAAFillRect = 8; static const int kVertsPerAAFillRect = 8;
static const int kIndicesPerAAFillRect = 30; static const int kIndicesPerAAFillRect = 30;
static sk_sp<const GrBuffer> get_index_buffer(GrResourceProvider* resourceProvider) { static sk_sp<const GrGpuBuffer> get_index_buffer(GrResourceProvider* resourceProvider) {
GR_DEFINE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey); GR_DEFINE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey);
// clang-format off // clang-format off
@ -467,7 +467,7 @@ bool ConfigureMeshIndices(GrMeshDrawOp::Target* target, GrMesh* mesh, const Vert
int quadCount) { int quadCount) {
if (spec.usesCoverageAA()) { if (spec.usesCoverageAA()) {
// AA quads use 8 vertices, basically nested rectangles // AA quads use 8 vertices, basically nested rectangles
sk_sp<const GrBuffer> ibuffer = get_index_buffer(target->resourceProvider()); sk_sp<const GrGpuBuffer> ibuffer = get_index_buffer(target->resourceProvider());
if (!ibuffer) { if (!ibuffer) {
return false; return false;
} }
@ -478,7 +478,7 @@ bool ConfigureMeshIndices(GrMeshDrawOp::Target* target, GrMesh* mesh, const Vert
} else { } else {
// Non-AA quads use 4 vertices, and regular triangle strip layout // Non-AA quads use 4 vertices, and regular triangle strip layout
if (quadCount > 1) { if (quadCount > 1) {
sk_sp<const GrBuffer> ibuffer = target->resourceProvider()->refQuadIndexBuffer(); sk_sp<const GrGpuBuffer> ibuffer = target->resourceProvider()->refQuadIndexBuffer();
if (!ibuffer) { if (!ibuffer) {
return false; return false;
} }

View File

@ -109,7 +109,7 @@ private:
if (!numRects) { if (!numRects) {
return; return;
} }
sk_sp<const GrBuffer> indexBuffer = target->resourceProvider()->refQuadIndexBuffer(); sk_sp<const GrGpuBuffer> indexBuffer = target->resourceProvider()->refQuadIndexBuffer();
if (!indexBuffer) { if (!indexBuffer) {
SkDebugf("Could not allocate indices\n"); SkDebugf("Could not allocate indices\n");
return; return;

View File

@ -794,7 +794,7 @@ private:
if (flushInfo->fInstancesToFlush) { if (flushInfo->fInstancesToFlush) {
GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles); GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
int maxInstancesPerDraw = int maxInstancesPerDraw =
static_cast<int>(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6); static_cast<int>(flushInfo->fIndexBuffer->size() / sizeof(uint16_t) / 6);
mesh->setIndexedPatterned(flushInfo->fIndexBuffer, kIndicesPerQuad, kVerticesPerQuad, mesh->setIndexedPatterned(flushInfo->fIndexBuffer, kIndicesPerQuad, kVerticesPerQuad,
flushInfo->fInstancesToFlush, maxInstancesPerDraw); flushInfo->fInstancesToFlush, maxInstancesPerDraw);
mesh->setVertexData(flushInfo->fVertexBuffer, flushInfo->fVertexOffset); mesh->setVertexData(flushInfo->fVertexBuffer, flushInfo->fVertexOffset);

View File

@ -420,7 +420,7 @@ private:
static const int kBevelVertexCnt = 24; static const int kBevelVertexCnt = 24;
static const int kNumBevelRectsInIndexBuffer = 256; static const int kNumBevelRectsInIndexBuffer = 256;
static sk_sp<const GrBuffer> GetIndexBuffer(GrResourceProvider*, bool miterStroke); static sk_sp<const GrGpuBuffer> GetIndexBuffer(GrResourceProvider*, bool miterStroke);
const SkMatrix& viewMatrix() const { return fViewMatrix; } const SkMatrix& viewMatrix() const { return fViewMatrix; }
bool miterStroke() const { return fMiterStroke; } bool miterStroke() const { return fMiterStroke; }
@ -472,7 +472,7 @@ void AAStrokeRectOp::onPrepareDraws(Target* target) {
int indicesPerInstance = this->miterStroke() ? kMiterIndexCnt : kBevelIndexCnt; int indicesPerInstance = this->miterStroke() ? kMiterIndexCnt : kBevelIndexCnt;
int instanceCount = fRects.count(); int instanceCount = fRects.count();
sk_sp<const GrBuffer> indexBuffer = sk_sp<const GrGpuBuffer> indexBuffer =
GetIndexBuffer(target->resourceProvider(), this->miterStroke()); GetIndexBuffer(target->resourceProvider(), this->miterStroke());
if (!indexBuffer) { if (!indexBuffer) {
SkDebugf("Could not allocate indices\n"); SkDebugf("Could not allocate indices\n");
@ -503,8 +503,8 @@ void AAStrokeRectOp::onPrepareDraws(Target* target) {
helper.recordDraw(target, std::move(gp), pipe.fPipeline, pipe.fFixedDynamicState); helper.recordDraw(target, std::move(gp), pipe.fPipeline, pipe.fFixedDynamicState);
} }
sk_sp<const GrBuffer> AAStrokeRectOp::GetIndexBuffer(GrResourceProvider* resourceProvider, sk_sp<const GrGpuBuffer> AAStrokeRectOp::GetIndexBuffer(GrResourceProvider* resourceProvider,
bool miterStroke) { bool miterStroke) {
if (miterStroke) { if (miterStroke) {
// clang-format off // clang-format off
static const uint16_t gMiterIndices[] = { static const uint16_t gMiterIndices[] = {

View File

@ -53,7 +53,7 @@ private:
} }
}; };
bool cache_match(GrBuffer* vertexBuffer, SkScalar tol, int* actualCount) { bool cache_match(GrGpuBuffer* vertexBuffer, SkScalar tol, int* actualCount) {
if (!vertexBuffer) { if (!vertexBuffer) {
return false; return false;
} }
@ -78,8 +78,7 @@ public:
void* lock(int vertexCount) override { void* lock(int vertexCount) override {
size_t size = vertexCount * stride(); size_t size = vertexCount * stride();
fVertexBuffer = fResourceProvider->createBuffer(size, GrGpuBufferType::kVertex, fVertexBuffer = fResourceProvider->createBuffer(size, GrGpuBufferType::kVertex,
kStatic_GrAccessPattern, kStatic_GrAccessPattern);
GrResourceProvider::Flags::kNone);
if (!fVertexBuffer.get()) { if (!fVertexBuffer.get()) {
return nullptr; return nullptr;
} }
@ -99,10 +98,10 @@ public:
} }
fVertices = nullptr; fVertices = nullptr;
} }
sk_sp<GrBuffer> detachVertexBuffer() { return std::move(fVertexBuffer); } sk_sp<GrGpuBuffer> detachVertexBuffer() { return std::move(fVertexBuffer); }
private: private:
sk_sp<GrBuffer> fVertexBuffer; sk_sp<GrGpuBuffer> fVertexBuffer;
GrResourceProvider* fResourceProvider; GrResourceProvider* fResourceProvider;
bool fCanMapVB; bool fCanMapVB;
void* fVertices; void* fVertices;
@ -261,7 +260,7 @@ private:
memset(&builder[shapeKeyDataCnt], 0, sizeof(fDevClipBounds)); memset(&builder[shapeKeyDataCnt], 0, sizeof(fDevClipBounds));
} }
builder.finish(); builder.finish();
sk_sp<GrBuffer> cachedVertexBuffer(rp->findByUniqueKey<GrBuffer>(key)); sk_sp<GrGpuBuffer> cachedVertexBuffer(rp->findByUniqueKey<GrGpuBuffer>(key));
int actualCount; int actualCount;
SkScalar tol = GrPathUtils::kDefaultTolerance; SkScalar tol = GrPathUtils::kDefaultTolerance;
tol = GrPathUtils::scaleToleranceToSrc(tol, fViewMatrix, fShape.bounds()); tol = GrPathUtils::scaleToleranceToSrc(tol, fViewMatrix, fShape.bounds());
@ -286,7 +285,7 @@ private:
if (count == 0) { if (count == 0) {
return; return;
} }
sk_sp<GrBuffer> vb = allocator.detachVertexBuffer(); sk_sp<GrGpuBuffer> vb = allocator.detachVertexBuffer();
TessInfo info; TessInfo info;
info.fTolerance = isLinear ? 0 : tol; info.fTolerance = isLinear ? 0 : tol;
info.fCount = count; info.fCount = count;

View File

@ -343,9 +343,9 @@ void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
} }
/////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////
sk_sp<GrBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type, sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
GrAccessPattern accessPattern, const void* data) { GrAccessPattern accessPattern, const void* data) {
sk_sp<GrBuffer> buff; sk_sp<GrGpuBuffer> buff;
switch (type) { switch (type) {
case GrGpuBufferType::kVertex: case GrGpuBufferType::kVertex:
SkASSERT(kDynamic_GrAccessPattern == accessPattern || SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
@ -419,7 +419,7 @@ bool GrVkGpu::onWritePixels(GrSurface* surface, int left, int top, int width, in
} }
bool GrVkGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height, bool GrVkGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height,
GrColorType bufferColorType, GrBuffer* transferBuffer, GrColorType bufferColorType, GrGpuBuffer* transferBuffer,
size_t bufferOffset, size_t rowBytes) { size_t bufferOffset, size_t rowBytes) {
// Can't transfer compressed data // Can't transfer compressed data
SkASSERT(!GrPixelConfigIsCompressed(texture->config())); SkASSERT(!GrPixelConfigIsCompressed(texture->config()));

View File

@ -198,8 +198,8 @@ private:
sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&, sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
const GrVkDrawableInfo&) override; const GrVkDrawableInfo&) override;
sk_sp<GrBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern, sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
const void* data) override; const void* data) override;
bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType, bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
void* buffer, size_t rowBytes) override; void* buffer, size_t rowBytes) override;
@ -208,7 +208,7 @@ private:
const GrMipLevel texels[], int mipLevelCount) override; const GrMipLevel texels[], int mipLevelCount) override;
bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType, bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType,
GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override; GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override;
bool onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin, GrSurface* src, bool onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin, GrSurface* src,
GrSurfaceOrigin srcOrigin, const SkIRect& srcRect, GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,

View File

@ -593,9 +593,9 @@ void GrVkGpuRTCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin, con
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer, void GrVkGpuRTCommandBuffer::bindGeometry(const GrGpuBuffer* indexBuffer,
const GrBuffer* vertexBuffer, const GrGpuBuffer* vertexBuffer,
const GrBuffer* instanceBuffer) { const GrGpuBuffer* instanceBuffer) {
GrVkSecondaryCommandBuffer* currCmdBuf = fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf(); GrVkSecondaryCommandBuffer* currCmdBuf = fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf();
// There is no need to put any memory barriers to make sure host writes have finished here. // There is no need to put any memory barriers to make sure host writes have finished here.
// When a command buffer is submitted to a queue, there is an implicit memory barrier that // When a command buffer is submitted to a queue, there is an implicit memory barrier that
@ -608,7 +608,6 @@ void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer,
if (vertexBuffer) { if (vertexBuffer) {
SkASSERT(vertexBuffer); SkASSERT(vertexBuffer);
SkASSERT(!vertexBuffer->isCPUBacked());
SkASSERT(!vertexBuffer->isMapped()); SkASSERT(!vertexBuffer->isMapped());
currCmdBuf->bindInputBuffer(fGpu, binding++, currCmdBuf->bindInputBuffer(fGpu, binding++,
@ -617,7 +616,6 @@ void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer,
if (instanceBuffer) { if (instanceBuffer) {
SkASSERT(instanceBuffer); SkASSERT(instanceBuffer);
SkASSERT(!instanceBuffer->isCPUBacked());
SkASSERT(!instanceBuffer->isMapped()); SkASSERT(!instanceBuffer->isMapped());
currCmdBuf->bindInputBuffer(fGpu, binding++, currCmdBuf->bindInputBuffer(fGpu, binding++,
@ -626,7 +624,6 @@ void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer,
if (indexBuffer) { if (indexBuffer) {
SkASSERT(indexBuffer); SkASSERT(indexBuffer);
SkASSERT(!indexBuffer->isMapped()); SkASSERT(!indexBuffer->isMapped());
SkASSERT(!indexBuffer->isCPUBacked());
currCmdBuf->bindIndexBuffer(fGpu, static_cast<const GrVkIndexBuffer*>(indexBuffer)); currCmdBuf->bindIndexBuffer(fGpu, static_cast<const GrVkIndexBuffer*>(indexBuffer));
} }
@ -807,7 +804,11 @@ void GrVkGpuRTCommandBuffer::sendInstancedMeshToGpu(GrPrimitiveType,
int instanceCount, int instanceCount,
int baseInstance) { int baseInstance) {
CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo]; CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
this->bindGeometry(nullptr, vertexBuffer, instanceBuffer); SkASSERT(!vertexBuffer || !vertexBuffer->isCpuBuffer());
SkASSERT(!instanceBuffer || !instanceBuffer->isCpuBuffer());
auto gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
auto gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer);
this->bindGeometry(nullptr, gpuVertexBuffer, gpuInstanceBuffer);
cbInfo.currentCmdBuf()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance); cbInfo.currentCmdBuf()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
fGpu->stats()->incNumDraws(); fGpu->stats()->incNumDraws();
} }
@ -824,7 +825,13 @@ void GrVkGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType,
GrPrimitiveRestart restart) { GrPrimitiveRestart restart) {
SkASSERT(restart == GrPrimitiveRestart::kNo); SkASSERT(restart == GrPrimitiveRestart::kNo);
CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo]; CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
this->bindGeometry(indexBuffer, vertexBuffer, instanceBuffer); SkASSERT(!vertexBuffer || !vertexBuffer->isCpuBuffer());
SkASSERT(!instanceBuffer || !instanceBuffer->isCpuBuffer());
SkASSERT(!indexBuffer->isCpuBuffer());
auto gpuIndexxBuffer = static_cast<const GrGpuBuffer*>(indexBuffer);
auto gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
auto gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer);
this->bindGeometry(gpuIndexxBuffer, gpuVertexBuffer, gpuInstanceBuffer);
cbInfo.currentCmdBuf()->drawIndexed(fGpu, indexCount, instanceCount, cbInfo.currentCmdBuf()->drawIndexed(fGpu, indexCount, instanceCount,
baseIndex, baseVertex, baseInstance); baseIndex, baseVertex, baseInstance);
fGpu->stats()->incNumDraws(); fGpu->stats()->incNumDraws();

View File

@ -96,9 +96,9 @@ private:
GrGpu* gpu() override; GrGpu* gpu() override;
// Bind vertex and index buffers // Bind vertex and index buffers
void bindGeometry(const GrBuffer* indexBuffer, void bindGeometry(const GrGpuBuffer* indexBuffer,
const GrBuffer* vertexBuffer, const GrGpuBuffer* vertexBuffer,
const GrBuffer* instanceBuffer); const GrGpuBuffer* instanceBuffer);
GrVkPipelineState* prepareDrawState(const GrPrimitiveProcessor&, GrVkPipelineState* prepareDrawState(const GrPrimitiveProcessor&,
const GrPipeline&, const GrPipeline&,

View File

@ -50,7 +50,7 @@ void GrVkIndexBuffer::onAbandon() {
void GrVkIndexBuffer::onMap() { void GrVkIndexBuffer::onMap() {
if (!this->wasDestroyed()) { if (!this->wasDestroyed()) {
this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu()); this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu());
} }
} }

View File

@ -8,13 +8,12 @@
#ifndef GrVkIndexBuffer_DEFINED #ifndef GrVkIndexBuffer_DEFINED
#define GrVkIndexBuffer_DEFINED #define GrVkIndexBuffer_DEFINED
#include "GrBuffer.h" #include "GrGpuBuffer.h"
#include "GrVkBuffer.h" #include "GrVkBuffer.h"
class GrVkGpu; class GrVkGpu;
class GrVkIndexBuffer : public GrBuffer, public GrVkBuffer { class GrVkIndexBuffer : public GrGpuBuffer, public GrVkBuffer {
public: public:
static sk_sp<GrVkIndexBuffer> Make(GrVkGpu* gpu, size_t size, bool dynamic); static sk_sp<GrVkIndexBuffer> Make(GrVkGpu* gpu, size_t size, bool dynamic);
@ -32,7 +31,7 @@ private:
GrVkGpu* getVkGpu() const; GrVkGpu* getVkGpu() const;
typedef GrBuffer INHERITED; typedef GrGpuBuffer INHERITED;
}; };
#endif #endif

View File

@ -8,14 +8,13 @@
#ifndef GrVkTransferBuffer_DEFINED #ifndef GrVkTransferBuffer_DEFINED
#define GrVkTransferBuffer_DEFINED #define GrVkTransferBuffer_DEFINED
#include "GrBuffer.h" #include "GrGpuBuffer.h"
#include "GrVkBuffer.h" #include "GrVkBuffer.h"
#include "vk/GrVkTypes.h" #include "vk/GrVkTypes.h"
class GrVkGpu; class GrVkGpu;
class GrVkTransferBuffer : public GrBuffer, public GrVkBuffer { class GrVkTransferBuffer : public GrGpuBuffer, public GrVkBuffer {
public: public:
static sk_sp<GrVkTransferBuffer> Make(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type); static sk_sp<GrVkTransferBuffer> Make(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type);
@ -31,7 +30,7 @@ private:
void onMap() override { void onMap() override {
if (!this->wasDestroyed()) { if (!this->wasDestroyed()) {
this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu()); this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu());
} }
} }
@ -51,7 +50,7 @@ private:
return reinterpret_cast<GrVkGpu*>(this->getGpu()); return reinterpret_cast<GrVkGpu*>(this->getGpu());
} }
typedef GrBuffer INHERITED; typedef GrGpuBuffer INHERITED;
}; };
#endif #endif

View File

@ -49,7 +49,7 @@ void GrVkVertexBuffer::onAbandon() {
void GrVkVertexBuffer::onMap() { void GrVkVertexBuffer::onMap() {
if (!this->wasDestroyed()) { if (!this->wasDestroyed()) {
this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu()); this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu());
} }
} }

View File

@ -8,12 +8,12 @@
#ifndef GrVkVertexBuffer_DEFINED #ifndef GrVkVertexBuffer_DEFINED
#define GrVkVertexBuffer_DEFINED #define GrVkVertexBuffer_DEFINED
#include "GrBuffer.h" #include "GrGpuBuffer.h"
#include "GrVkBuffer.h" #include "GrVkBuffer.h"
class GrVkGpu; class GrVkGpu;
class GrVkVertexBuffer : public GrBuffer, public GrVkBuffer { class GrVkVertexBuffer : public GrGpuBuffer, public GrVkBuffer {
public: public:
static sk_sp<GrVkVertexBuffer> Make(GrVkGpu* gpu, size_t size, bool dynamic); static sk_sp<GrVkVertexBuffer> Make(GrVkGpu* gpu, size_t size, bool dynamic);
@ -31,7 +31,7 @@ private:
GrVkGpu* getVkGpu() const; GrVkGpu* getVkGpu() const;
typedef GrBuffer INHERITED; typedef GrGpuBuffer INHERITED;
}; };
#endif #endif

View File

@ -375,8 +375,7 @@ GrGLSLPrimitiveProcessor* GrMeshTestProcessor::createGLSLInstance(const GrShader
template<typename T> template<typename T>
sk_sp<const GrBuffer> DrawMeshHelper::makeVertexBuffer(const T* data, int count) { sk_sp<const GrBuffer> DrawMeshHelper::makeVertexBuffer(const T* data, int count) {
return sk_sp<const GrBuffer>(fState->resourceProvider()->createBuffer( return sk_sp<const GrBuffer>(fState->resourceProvider()->createBuffer(
count * sizeof(T), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern, count * sizeof(T), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern, data));
GrResourceProvider::Flags::kRequireGpuMemory, data));
} }
sk_sp<const GrBuffer> DrawMeshHelper::getIndexBuffer() { sk_sp<const GrBuffer> DrawMeshHelper::getIndexBuffer() {

View File

@ -194,9 +194,8 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrPipelineDynamicStateTest, reporter, ctxInfo
{d, d, kMeshColors[3]} {d, d, kMeshColors[3]}
}; };
sk_sp<const GrBuffer> vbuff( sk_sp<const GrBuffer> vbuff(rp->createBuffer(sizeof(vdata), GrGpuBufferType::kVertex,
rp->createBuffer(sizeof(vdata), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern, kDynamic_GrAccessPattern, vdata));
GrResourceProvider::Flags::kRequireGpuMemory, vdata));
if (!vbuff) { if (!vbuff) {
ERRORF(reporter, "vbuff is null."); ERRORF(reporter, "vbuff is null.");
return; return;

View File

@ -77,7 +77,7 @@ public:
return std::unique_ptr<GrFragmentProcessor>(new TestFP(std::move(child))); return std::unique_ptr<GrFragmentProcessor>(new TestFP(std::move(child)));
} }
static std::unique_ptr<GrFragmentProcessor> Make(const SkTArray<sk_sp<GrTextureProxy>>& proxies, static std::unique_ptr<GrFragmentProcessor> Make(const SkTArray<sk_sp<GrTextureProxy>>& proxies,
const SkTArray<sk_sp<GrBuffer>>& buffers) { const SkTArray<sk_sp<GrGpuBuffer>>& buffers) {
return std::unique_ptr<GrFragmentProcessor>(new TestFP(proxies, buffers)); return std::unique_ptr<GrFragmentProcessor>(new TestFP(proxies, buffers));
} }
@ -93,7 +93,8 @@ public:
} }
private: private:
TestFP(const SkTArray<sk_sp<GrTextureProxy>>& proxies, const SkTArray<sk_sp<GrBuffer>>& buffers) TestFP(const SkTArray<sk_sp<GrTextureProxy>>& proxies,
const SkTArray<sk_sp<GrGpuBuffer>>& buffers)
: INHERITED(kTestFP_ClassID, kNone_OptimizationFlags), fSamplers(4) { : INHERITED(kTestFP_ClassID, kNone_OptimizationFlags), fSamplers(4) {
for (const auto& proxy : proxies) { for (const auto& proxy : proxies) {
fSamplers.emplace_back(proxy); fSamplers.emplace_back(proxy);
@ -185,7 +186,7 @@ DEF_GPUTEST_FOR_ALL_CONTEXTS(ProcessorRefTest, reporter, ctxInfo) {
SkBudgeted::kYes); SkBudgeted::kYes);
{ {
SkTArray<sk_sp<GrTextureProxy>> proxies; SkTArray<sk_sp<GrTextureProxy>> proxies;
SkTArray<sk_sp<GrBuffer>> buffers; SkTArray<sk_sp<GrGpuBuffer>> buffers;
proxies.push_back(proxy1); proxies.push_back(proxy1);
auto fp = TestFP::Make(std::move(proxies), std::move(buffers)); auto fp = TestFP::Make(std::move(proxies), std::move(buffers));
for (int i = 0; i < parentCnt; ++i) { for (int i = 0; i < parentCnt; ++i) {

View File

@ -83,9 +83,8 @@ void basic_transfer_test(skiatest::Reporter* reporter, GrContext* context, GrCol
// create and fill transfer buffer // create and fill transfer buffer
size_t size = rowBytes*kBufferHeight; size_t size = rowBytes*kBufferHeight;
auto bufferFlags = GrResourceProvider::Flags::kNoPendingIO; sk_sp<GrGpuBuffer> buffer(resourceProvider->createBuffer(size, GrGpuBufferType::kXferCpuToGpu,
sk_sp<GrBuffer> buffer(resourceProvider->createBuffer(size, GrGpuBufferType::kXferCpuToGpu, kDynamic_GrAccessPattern));
kDynamic_GrAccessPattern, bufferFlags));
if (!buffer) { if (!buffer) {
return; return;
} }