Use different classes for client-side arrays and GPU buffer objects.

GrBuffer is a base class for GrGpuBuffer and GrCpuBuffer. GrGpuBuffer is a
GrGpuResource; GrCpuBuffer is not. This allows GrCpuBuffers to exist
outside of the GrGpuResourceCache.

Also removes the flags from the GrResourceProvider buffer factory
function. The only flag still in use was kRequireGpuMemory. CPU buffers
are now created without going through GrResourceProvider.
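
For orientation, a minimal sketch of the resulting hierarchy, condensed from
the headers added in this change (not the verbatim declarations):

    class GrBuffer {
    public:
        // Subclasses derive from different ref-counting bases, so sk_sp
        // support is provided by virtualizing ref()/unref().
        virtual void ref() const = 0;
        virtual void unref() const = 0;
        virtual size_t size() const = 0;
        virtual bool isCpuBuffer() const = 0;  // GrCpuBuffer: true, GrGpuBuffer: false
    };

    class GrCpuBuffer final : public GrNonAtomicRef<GrCpuBuffer>, public GrBuffer { /*...*/ };
    class GrGpuBuffer : public GrGpuResource, public GrBuffer { /*...*/ };  // cached resource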

Change-Id: I82670d1316e28fd6331ca36b26c8c4ead33846f9
Reviewed-on: https://skia-review.googlesource.com/c/188823
Commit-Queue: Brian Salomon <bsalomon@google.com>
Reviewed-by: Robert Phillips <robertphillips@google.com>
Author: Brian Salomon <bsalomon@google.com>
Date: 2019-02-07 11:31:24 -05:00
Committed-by: Skia Commit-Bot
parent f4766758aa
commit dbf7072a59
66 changed files with 569 additions and 535 deletions


@ -116,8 +116,7 @@ private:
{100, fY+100},
};
sk_sp<const GrBuffer> vertexBuffer(flushState->resourceProvider()->createBuffer(
sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern,
GrResourceProvider::Flags::kNone, vertices));
sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern, vertices));
if (!vertexBuffer) {
return;
}


@ -141,8 +141,7 @@ private:
{+1, +1},
};
sk_sp<const GrBuffer> vertexBuffer(flushState->resourceProvider()->createBuffer(
sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern,
GrResourceProvider::Flags::kNone, vertices));
sizeof(vertices), GrGpuBufferType::kVertex, kStatic_GrAccessPattern, vertices));
if (!vertexBuffer) {
return;
}


@ -64,7 +64,6 @@ skia_gpu_sources = [
"$_src/gpu/GrBitmapTextureMaker.h",
"$_src/gpu/GrBlurUtils.cpp",
"$_src/gpu/GrBlurUtils.h",
"$_src/gpu/GrBuffer.cpp",
"$_src/gpu/GrBuffer.h",
"$_src/gpu/GrBufferAllocPool.cpp",
"$_src/gpu/GrBufferAllocPool.h",
@ -83,6 +82,7 @@ skia_gpu_sources = [
"$_src/gpu/GrContextThreadSafeProxy.cpp",
"$_src/gpu/GrContextThreadSafeProxyPriv.h",
"$_src/gpu/GrCoordTransform.h",
"$_src/gpu/GrCpuBuffer.h",
"$_src/gpu/GrDDLContext.cpp",
"$_src/gpu/GrDefaultGeoProcFactory.cpp",
"$_src/gpu/GrDefaultGeoProcFactory.h",
@ -108,6 +108,8 @@ skia_gpu_sources = [
"$_src/gpu/GrGlyph.h",
"$_src/gpu/GrGpu.cpp",
"$_src/gpu/GrGpu.h",
"$_src/gpu/GrGpuBuffer.cpp",
"$_src/gpu/GrGpuBuffer.h",
"$_src/gpu/GrGpuResourceCacheAccess.h",
"$_src/gpu/GrGpuCommandBuffer.cpp",
"$_src/gpu/GrGpuCommandBuffer.h",


@ -93,7 +93,6 @@ protected:
bool internalHasUniqueRef() const { return fRefCnt == 1; }
private:
friend class GrIORefProxy; // needs to forward on wrapped IO calls
// This is for a unit test.
template <typename T>
friend void testingOnly_getIORefCnts(const T*, int* refCnt, int* readCnt, int* writeCnt);
@ -120,7 +119,6 @@ private:
this->didRemoveRefOrPendingIO(kPendingWrite_CntType);
}
private:
void didRemoveRefOrPendingIO(CntType cntTypeRemoved) const {
if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) {
static_cast<const DERIVED*>(this)->notifyAllCntsAreZero(cntTypeRemoved);
@ -131,6 +129,7 @@ private:
mutable int32_t fPendingReads;
mutable int32_t fPendingWrites;
friend class GrIORefProxy; // needs to forward on wrapped IO calls
friend class GrResourceCache; // to check IO ref counts.
template <typename, GrIOType> friend class GrPendingIOResource;


@ -829,19 +829,6 @@ enum class GrGpuBufferType {
};
static const int kGrGpuBufferTypeCount = static_cast<int>(GrGpuBufferType::kXferGpuToCpu) + 1;
static inline bool GrBufferTypeIsVertexOrIndex(GrGpuBufferType type) {
switch (type) {
case GrGpuBufferType::kVertex:
case GrGpuBufferType::kIndex:
return true;
case GrGpuBufferType::kXferCpuToGpu:
case GrGpuBufferType::kXferGpuToCpu:
return false;
}
SK_ABORT("Unexpected GrGpuBufferType.");
return false;
}
/**
* Provides a performance hint regarding the frequency at which a data store will be accessed.
*/


@ -342,19 +342,17 @@ void CCPRGeometryView::DrawCoverageCountOp::onExecute(GrOpFlushState* state,
SkSTArray<1, GrMesh> mesh;
if (PrimitiveType::kCubics == fView->fPrimitiveType ||
PrimitiveType::kConics == fView->fPrimitiveType) {
sk_sp<GrBuffer> instBuff(
sk_sp<GrGpuBuffer> instBuff(
rp->createBuffer(fView->fQuadPointInstances.count() * sizeof(QuadPointInstance),
GrGpuBufferType::kVertex, kDynamic_GrAccessPattern,
GrResourceProvider::Flags::kRequireGpuMemory,
fView->fQuadPointInstances.begin()));
if (!fView->fQuadPointInstances.empty() && instBuff) {
proc.appendMesh(std::move(instBuff), fView->fQuadPointInstances.count(), 0, &mesh);
}
} else {
sk_sp<GrBuffer> instBuff(
sk_sp<GrGpuBuffer> instBuff(
rp->createBuffer(fView->fTriPointInstances.count() * sizeof(TriPointInstance),
GrGpuBufferType::kVertex, kDynamic_GrAccessPattern,
GrResourceProvider::Flags::kRequireGpuMemory,
fView->fTriPointInstances.begin()));
if (!fView->fTriPointInstances.empty() && instBuff) {
proc.appendMesh(std::move(instBuff), fView->fTriPointInstances.count(), 0, &mesh);


@ -1,72 +0,0 @@
/*
* Copyright 2016 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrBuffer.h"
#include "GrGpu.h"
#include "GrCaps.h"
sk_sp<GrBuffer> GrBuffer::MakeCPUBacked(GrGpu* gpu, size_t sizeInBytes,
GrGpuBufferType intendedType, const void* data) {
SkASSERT(GrBufferTypeIsVertexOrIndex(intendedType));
void* cpuData;
if (gpu->caps()->mustClearUploadedBufferData()) {
cpuData = sk_calloc_throw(sizeInBytes);
} else {
cpuData = sk_malloc_throw(sizeInBytes);
}
if (data) {
memcpy(cpuData, data, sizeInBytes);
}
return sk_sp<GrBuffer>(new GrBuffer(gpu, sizeInBytes, intendedType, cpuData));
}
GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, void* cpuData)
: INHERITED(gpu)
, fMapPtr(nullptr)
, fSizeInBytes(sizeInBytes)
, fAccessPattern(kDynamic_GrAccessPattern)
, fCPUData(cpuData)
, fIntendedType(type) {
this->registerWithCache(SkBudgeted::kNo);
}
GrBuffer::GrBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type, GrAccessPattern pattern)
: INHERITED(gpu)
, fMapPtr(nullptr)
, fSizeInBytes(sizeInBytes)
, fAccessPattern(pattern)
, fCPUData(nullptr)
, fIntendedType(type) {
// Subclass registers with cache.
}
void GrBuffer::ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType intendedType,
GrScratchKey* key) {
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
// TODO: There's not always a reason to cache a buffer by type. In some (all?) APIs it's just
// a chunk of memory we can use/reuse for any type of data. We really only need to
// differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types.
builder[0] = SkToU32(intendedType);
builder[1] = (uint32_t)size;
if (sizeof(size_t) > 4) {
builder[2] = (uint32_t)((uint64_t)size >> 32);
}
}
bool GrBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
SkASSERT(this->isCPUBacked());
memcpy(fCPUData, src, srcSizeInBytes);
return true;
}
void GrBuffer::computeScratchKey(GrScratchKey* key) const {
if (!this->isCPUBacked() && SkIsPow2(fSizeInBytes) &&
kDynamic_GrAccessPattern == fAccessPattern) {
ComputeScratchKeyForDynamicVBO(fSizeInBytes, fIntendedType, key);
}
}


@ -1,5 +1,5 @@
/*
* Copyright 2016 Google Inc.
* Copyright 2019 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
@ -8,124 +8,29 @@
#ifndef GrBuffer_DEFINED
#define GrBuffer_DEFINED
#include "GrGpuResource.h"
#include "GrTypes.h"
class GrGpu;
class GrBuffer : public GrGpuResource {
/** Base class for a GPU buffer object or a client-side array. */
class GrBuffer {
public:
/**
* Creates a client-side buffer.
*/
static SK_WARN_UNUSED_RESULT sk_sp<GrBuffer> MakeCPUBacked(GrGpu*, size_t sizeInBytes,
GrGpuBufferType,
const void* data = nullptr);
GrBuffer(const GrBuffer&) = delete;
GrBuffer& operator=(const GrBuffer&) = delete;
/**
* Computes a scratch key for a GPU-side buffer with a "dynamic" access pattern. (Buffers with
* "static" and "stream" patterns are disqualified by nature from being cached and reused.)
*/
static void ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType, GrScratchKey*);
virtual ~GrBuffer() = default;
GrAccessPattern accessPattern() const { return fAccessPattern; }
size_t sizeInBytes() const { return fSizeInBytes; }
// Our subclasses derive from different ref counting base classes. In order to use base
// class pointers with sk_sp we virtualize ref() and unref().
virtual void ref() const = 0;
virtual void unref() const = 0;
/**
* Returns true if the buffer is a wrapper around a CPU array. If true it
* indicates that map will always succeed and will be free.
*/
bool isCPUBacked() const { return SkToBool(fCPUData); }
size_t baseOffset() const { return reinterpret_cast<size_t>(fCPUData); }
/** Size of the buffer in bytes. */
virtual size_t size() const = 0;
/**
* Maps the buffer to be written by the CPU.
*
* The previous content of the buffer is invalidated. It is an error
* to draw from the buffer while it is mapped. It may fail if the backend
* doesn't support mapping the buffer. If the buffer is CPU backed then
* it will always succeed and is a free operation. Once a buffer is mapped,
* subsequent calls to map() are ignored.
*
* Note that buffer mapping does not go through GrContext and therefore is
* not serialized with other operations.
*
* @return a pointer to the data or nullptr if the map fails.
*/
void* map() {
if (!fMapPtr) {
this->onMap();
}
return fMapPtr;
}
/**
* Unmaps the buffer.
*
* The pointer returned by the previous map call will no longer be valid.
*/
void unmap() {
SkASSERT(fMapPtr);
this->onUnmap();
fMapPtr = nullptr;
}
/**
Queries whether the buffer has been mapped.
@return true if the buffer is mapped, false otherwise.
*/
bool isMapped() const { return SkToBool(fMapPtr); }
/**
* Updates the buffer data.
*
* The size of the buffer will be preserved. The src data will be
* placed at the beginning of the buffer and any remaining contents will
* be undefined. srcSizeInBytes must be <= the buffer size.
*
* The buffer must not be mapped.
*
* Note that buffer updates do not go through GrContext and therefore are
* not serialized with other operations.
*
* @return returns true if the update succeeds, false otherwise.
*/
bool updateData(const void* src, size_t srcSizeInBytes) {
SkASSERT(!this->isMapped());
SkASSERT(srcSizeInBytes <= fSizeInBytes);
return this->onUpdateData(src, srcSizeInBytes);
}
~GrBuffer() override {
sk_free(fCPUData);
}
/** Is this an instance of GrCpuBuffer? Otherwise, an instance of GrGpuBuffer. */
virtual bool isCpuBuffer() const = 0;
protected:
GrBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, GrAccessPattern);
GrGpuBufferType intendedType() const { return fIntendedType; }
void* fMapPtr;
private:
/**
* Internal constructor to make a CPU-backed buffer.
*/
GrBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, void* cpuData);
virtual void onMap() { SkASSERT(this->isCPUBacked()); fMapPtr = fCPUData; }
virtual void onUnmap() { SkASSERT(this->isCPUBacked()); }
virtual bool onUpdateData(const void* src, size_t srcSizeInBytes);
size_t onGpuMemorySize() const override { return fSizeInBytes; } // TODO: zero for cpu backed?
const char* getResourceType() const override { return "Buffer Object"; }
void computeScratchKey(GrScratchKey* key) const override;
size_t fSizeInBytes;
GrAccessPattern fAccessPattern;
void* fCPUData;
GrGpuBufferType fIntendedType;
typedef GrGpuResource INHERITED;
GrBuffer() = default;
};
#endif
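
Because ref() and unref() are virtualized, base-class pointers still work with
sk_sp. A hedged usage sketch (the variable names are illustrative):

    sk_sp<const GrBuffer> buffer = GrCpuBuffer::Make(64);  // see GrCpuBuffer.h, below
    if (!buffer->isCpuBuffer()) {
        // Only GrGpuBuffer has map state and cache keys; the downcast is safe here.
        const GrGpuBuffer* gpuBuffer = static_cast<const GrGpuBuffer*>(buffer.get());
    }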


@ -6,12 +6,12 @@
*/
#include "GrBufferAllocPool.h"
#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrCpuBuffer.h"
#include "GrGpu.h"
#include "GrGpuBuffer.h"
#include "GrResourceProvider.h"
#include "GrTypes.h"
#include "SkMacros.h"
@ -24,15 +24,14 @@
static void VALIDATE(bool = false) {}
#endif
#define UNMAP_BUFFER(block) \
do { \
TRACE_EVENT_INSTANT1("skia.gpu", \
"GrBufferAllocPool Unmapping Buffer", \
TRACE_EVENT_SCOPE_THREAD, \
"percent_unwritten", \
(float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
(block).fBuffer->unmap(); \
} while (false)
#define UNMAP_BUFFER(block) \
do { \
TRACE_EVENT_INSTANT1("skia.gpu", "GrBufferAllocPool Unmapping Buffer", \
TRACE_EVENT_SCOPE_THREAD, "percent_unwritten", \
(float)((block).fBytesFree) / (block).fBuffer->size()); \
SkASSERT(!block.fBuffer->isCpuBuffer()); \
static_cast<GrGpuBuffer*>(block.fBuffer.get())->unmap(); \
} while (false)
constexpr size_t GrBufferAllocPool::kDefaultBufferSize;
@ -47,7 +46,7 @@ GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu, GrGpuBufferType bufferType, voi
void GrBufferAllocPool::deleteBlocks() {
if (fBlocks.count()) {
GrBuffer* buffer = fBlocks.back().fBuffer.get();
if (buffer->isMapped()) {
if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
UNMAP_BUFFER(fBlocks.back());
}
}
@ -78,11 +77,14 @@ void GrBufferAllocPool::unmap() {
if (fBufferPtr) {
BufferBlock& block = fBlocks.back();
if (block.fBuffer->isMapped()) {
UNMAP_BUFFER(block);
} else {
size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
this->flushCpuData(fBlocks.back(), flushSize);
GrBuffer* buffer = block.fBuffer.get();
if (!buffer->isCpuBuffer()) {
if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
UNMAP_BUFFER(block);
} else {
size_t flushSize = block.fBuffer->size() - block.fBytesFree;
this->flushCpuData(fBlocks.back(), flushSize);
}
}
fBufferPtr = nullptr;
}
@ -94,21 +96,25 @@ void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
bool wasDestroyed = false;
if (fBufferPtr) {
SkASSERT(!fBlocks.empty());
if (!fBlocks.back().fBuffer->isMapped()) {
const GrBuffer* buffer = fBlocks.back().fBuffer.get();
if (!buffer->isCpuBuffer() && !static_cast<const GrGpuBuffer*>(buffer)->isMapped()) {
SkASSERT(fCpuData == fBufferPtr);
}
} else {
SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
} else if (!fBlocks.empty()) {
const GrBuffer* buffer = fBlocks.back().fBuffer.get();
SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
}
size_t bytesInUse = 0;
for (int i = 0; i < fBlocks.count() - 1; ++i) {
SkASSERT(!fBlocks[i].fBuffer->isMapped());
const GrBuffer* buffer = fBlocks[i].fBuffer.get();
SkASSERT(buffer->isCpuBuffer() || !static_cast<const GrGpuBuffer*>(buffer)->isMapped());
}
for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
if (fBlocks[i].fBuffer->wasDestroyed()) {
GrBuffer* buffer = fBlocks[i].fBuffer.get();
if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->wasDestroyed()) {
wasDestroyed = true;
} else {
size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
size_t bytes = fBlocks[i].fBuffer->size() - fBlocks[i].fBytesFree;
bytesInUse += bytes;
SkASSERT(bytes || unusedBlockAllowed);
}
@ -137,7 +143,7 @@ void* GrBufferAllocPool::makeSpace(size_t size,
if (fBufferPtr) {
BufferBlock& back = fBlocks.back();
size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
SkSafeMath safeMath;
size_t alignedSize = safeMath.add(pad, size);
@ -192,7 +198,7 @@ void* GrBufferAllocPool::makeSpaceAtLeast(size_t minSize,
if (fBufferPtr) {
BufferBlock& back = fBlocks.back();
size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
size_t usedBytes = back.fBuffer->size() - back.fBytesFree;
size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
if ((minSize + pad) <= back.fBytesFree) {
// Consume padding first, to make subsequent alignment math easier
@ -250,13 +256,14 @@ void GrBufferAllocPool::putBack(size_t bytes) {
// caller shouldn't try to put back more than they've taken
SkASSERT(!fBlocks.empty());
BufferBlock& block = fBlocks.back();
size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
size_t bytesUsed = block.fBuffer->size() - block.fBytesFree;
if (bytes >= bytesUsed) {
bytes -= bytesUsed;
fBytesInUse -= bytesUsed;
// if we locked a vb to satisfy the make space and we're releasing
// beyond it, then unmap it.
if (block.fBuffer->isMapped()) {
GrBuffer* buffer = block.fBuffer.get();
if (!buffer->isCpuBuffer() && static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
UNMAP_BUFFER(block);
}
this->destroyBlock();
@ -284,32 +291,35 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
return false;
}
block.fBytesFree = block.fBuffer->gpuMemorySize();
block.fBytesFree = block.fBuffer->size();
if (fBufferPtr) {
SkASSERT(fBlocks.count() > 1);
BufferBlock& prev = fBlocks.fromBack(1);
if (prev.fBuffer->isMapped()) {
UNMAP_BUFFER(prev);
} else {
this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
GrBuffer* buffer = prev.fBuffer.get();
if (!buffer->isCpuBuffer()) {
if (static_cast<GrGpuBuffer*>(buffer)->isMapped()) {
UNMAP_BUFFER(prev);
} else {
this->flushCpuData(prev, prev.fBuffer->size() - prev.fBytesFree);
}
}
fBufferPtr = nullptr;
}
SkASSERT(!fBufferPtr);
// If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
// If the buffer is CPU-backed we "map" it because it is free to do so and saves a copy.
// Otherwise when buffer mapping is supported we map if the buffer size is greater than the
// threshold.
bool attemptMap = block.fBuffer->isCPUBacked();
if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
attemptMap = size > fGpu->caps()->bufferMapThreshold();
if (block.fBuffer->isCpuBuffer()) {
fBufferPtr = static_cast<GrCpuBuffer*>(block.fBuffer.get())->data();
SkASSERT(fBufferPtr);
} else {
if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
size > fGpu->caps()->bufferMapThreshold()) {
fBufferPtr = static_cast<GrGpuBuffer*>(block.fBuffer.get())->map();
}
}
if (attemptMap) {
fBufferPtr = block.fBuffer->map();
}
if (!fBufferPtr) {
fBufferPtr = this->resetCpuData(block.fBytesFree);
}
@ -321,7 +331,8 @@ bool GrBufferAllocPool::createBlock(size_t requestSize) {
void GrBufferAllocPool::destroyBlock() {
SkASSERT(!fBlocks.empty());
SkASSERT(!fBlocks.back().fBuffer->isMapped());
SkASSERT(fBlocks.back().fBuffer->isCpuBuffer() ||
!static_cast<GrGpuBuffer*>(fBlocks.back().fBuffer.get())->isMapped());
fBlocks.pop_back();
fBufferPtr = nullptr;
}
@ -345,11 +356,12 @@ void* GrBufferAllocPool::resetCpuData(size_t newSize) {
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
GrBuffer* buffer = block.fBuffer.get();
SkASSERT(buffer);
SkASSERT(block.fBuffer.get());
SkASSERT(!block.fBuffer.get()->isCpuBuffer());
GrGpuBuffer* buffer = static_cast<GrGpuBuffer*>(block.fBuffer.get());
SkASSERT(!buffer->isMapped());
SkASSERT(fCpuData == fBufferPtr);
SkASSERT(flushSize <= buffer->gpuMemorySize());
SkASSERT(flushSize <= buffer->size());
VALIDATE(true);
if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
@ -368,8 +380,10 @@ void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize)
sk_sp<GrBuffer> GrBufferAllocPool::getBuffer(size_t size) {
auto resourceProvider = fGpu->getContext()->priv().resourceProvider();
return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern,
GrResourceProvider::Flags::kNone);
if (fGpu->caps()->preferClientSideDynamicBuffers()) {
return GrCpuBuffer::Make(size);
}
return resourceProvider->createBuffer(size, fBufferType, kDynamic_GrAccessPattern);
}
////////////////////////////////////////////////////////////////////////////////
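
The pool now distinguishes the two buffer kinds at every use site. A minimal
sketch of the recurring dispatch pattern (a hypothetical helper; the diff
inlines this check instead):

    static bool is_mapped(const GrBuffer* buffer) {
        // GrCpuBuffer has no map state; only a GrGpuBuffer can be mapped.
        return !buffer->isCpuBuffer() &&
               static_cast<const GrGpuBuffer*>(buffer)->isMapped();
    }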


@ -8,13 +8,14 @@
#ifndef GrBufferAllocPool_DEFINED
#define GrBufferAllocPool_DEFINED
#include "GrGpuBuffer.h"
#include "GrTypesPriv.h"
#include "SkNoncopyable.h"
#include "SkTArray.h"
#include "SkTDArray.h"
#include "SkTypes.h"
class GrBuffer;
class GrGpu;
/**

src/gpu/GrCpuBuffer.h (new file, 36 lines)

@ -0,0 +1,36 @@
/*
* Copyright 2019 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrCpuBuffer_DEFINED
#define GrCpuBuffer_DEFINED
#include "GrBuffer.h"
#include "GrNonAtomicRef.h"
class GrCpuBuffer final : public GrNonAtomicRef<GrCpuBuffer>, public GrBuffer {
public:
static sk_sp<GrCpuBuffer> Make(size_t size) {
SkASSERT(size > 0);
auto mem = ::operator new(sizeof(GrCpuBuffer) + size);
return sk_sp<GrCpuBuffer>(new (mem) GrCpuBuffer((char*)mem + sizeof(GrCpuBuffer), size));
}
void ref() const override { GrNonAtomicRef<GrCpuBuffer>::ref(); }
void unref() const override { GrNonAtomicRef<GrCpuBuffer>::unref(); }
size_t size() const override { return fSize; }
bool isCpuBuffer() const override { return true; }
char* data() { return reinterpret_cast<char*>(fData); }
const char* data() const { return reinterpret_cast<const char*>(fData); }
private:
GrCpuBuffer(void* data, size_t size) : fData(data), fSize(size) {}
void* fData;
size_t fSize;
};
#endif
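
GrCpuBuffer::Make performs one allocation holding the object followed by its
storage, so data() points just past the object header. A hedged usage sketch:

    sk_sp<GrCpuBuffer> cpuBuffer = GrCpuBuffer::Make(256);  // one malloc: object + 256 bytes
    memset(cpuBuffer->data(), 0, cpuBuffer->size());        // data() is the trailing storage
    SkASSERT(cpuBuffer->isCpuBuffer());                     // never owned by the resource cache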


@ -10,7 +10,6 @@
#include "GrBackendSemaphore.h"
#include "GrBackendSurface.h"
#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrContextPriv.h"
@ -217,10 +216,10 @@ sk_sp<GrRenderTarget> GrGpu::onWrapVulkanSecondaryCBAsRenderTarget(const SkImage
return nullptr;
}
sk_sp<GrBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
sk_sp<GrGpuBuffer> GrGpu::createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
this->handleDirtyContext();
sk_sp<GrBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
sk_sp<GrGpuBuffer> buffer = this->onCreateBuffer(size, intendedType, accessPattern, data);
if (!this->caps()->reuseScratchBuffers()) {
buffer->resourcePriv().removeScratchKey();
}
@ -303,7 +302,7 @@ bool GrGpu::writePixels(GrSurface* surface, int left, int top, int width, int he
}
bool GrGpu::transferPixels(GrTexture* texture, int left, int top, int width, int height,
GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset,
GrColorType bufferColorType, GrGpuBuffer* transferBuffer, size_t offset,
size_t rowBytes) {
SkASSERT(texture);
SkASSERT(transferBuffer);


@ -22,7 +22,7 @@
class GrBackendRenderTarget;
class GrBackendSemaphore;
class GrBuffer;
class GrGpuBuffer;
class GrContext;
struct GrContextOptions;
class GrGLContext;
@ -140,8 +140,8 @@ public:
*
* @return the buffer if successful, otherwise nullptr.
*/
sk_sp<GrBuffer> createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data = nullptr);
sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data = nullptr);
/**
* Resolves MSAA.
@ -217,7 +217,7 @@ public:
* means rows are tightly packed.
*/
bool transferPixels(GrTexture* texture, int left, int top, int width, int height,
GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset,
GrColorType bufferColorType, GrGpuBuffer* transferBuffer, size_t offset,
size_t rowBytes);
// After the client interacts directly with the 3D context state the GrGpu
@ -472,8 +472,8 @@ private:
virtual sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
const GrVkDrawableInfo&);
virtual sk_sp<GrBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern, const void* data) = 0;
virtual sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern, const void* data) = 0;
// overridden by backend-specific derived class to perform the surface read
virtual bool onReadPixels(GrSurface*, int left, int top, int width, int height, GrColorType,
@ -485,7 +485,7 @@ private:
// overridden by backend-specific derived class to perform the texture transfer
virtual bool onTransferPixels(GrTexture*, int left, int top, int width, int height,
GrColorType colorType, GrBuffer* transferBuffer, size_t offset,
GrColorType colorType, GrGpuBuffer* transferBuffer, size_t offset,
size_t rowBytes) = 0;
// overridden by backend-specific derived class to perform the resolve

src/gpu/GrGpuBuffer.cpp (new file, 38 lines)

@ -0,0 +1,38 @@
/*
* Copyright 2019 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "GrGpuBuffer.h"
#include "GrCaps.h"
#include "GrGpu.h"
GrGpuBuffer::GrGpuBuffer(GrGpu* gpu, size_t sizeInBytes, GrGpuBufferType type,
GrAccessPattern pattern)
: GrGpuResource(gpu)
, fMapPtr(nullptr)
, fSizeInBytes(sizeInBytes)
, fAccessPattern(pattern)
, fIntendedType(type) {}
void GrGpuBuffer::ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType intendedType,
GrScratchKey* key) {
static const GrScratchKey::ResourceType kType = GrScratchKey::GenerateResourceType();
GrScratchKey::Builder builder(key, kType, 1 + (sizeof(size_t) + 3) / 4);
// TODO: There's not always a reason to cache a buffer by type. In some (all?) APIs it's just
// a chunk of memory we can use/reuse for any type of data. We really only need to
// differentiate between the "read" types (e.g. kGpuToCpu_BufferType) and "draw" types.
builder[0] = SkToU32(intendedType);
builder[1] = (uint32_t)size;
if (sizeof(size_t) > 4) {
builder[2] = (uint32_t)((uint64_t)size >> 32);
}
}
void GrGpuBuffer::computeScratchKey(GrScratchKey* key) const {
if (SkIsPow2(fSizeInBytes) && kDynamic_GrAccessPattern == fAccessPattern) {
ComputeScratchKeyForDynamicVBO(fSizeInBytes, fIntendedType, key);
}
}

src/gpu/GrGpuBuffer.h (new file, 113 lines)

@ -0,0 +1,113 @@
/*
* Copyright 2019 Google Inc.
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrGpuBuffer_DEFINED
#define GrGpuBuffer_DEFINED
#include "GrBuffer.h"
#include "GrGpuResource.h"
class GrGpu;
class GrGpuBuffer : public GrGpuResource, public GrBuffer {
public:
/**
* Computes a scratch key for a GPU-side buffer with a "dynamic" access pattern. (Buffers with
* "static" and "stream" patterns are disqualified by nature from being cached and reused.)
*/
static void ComputeScratchKeyForDynamicVBO(size_t size, GrGpuBufferType, GrScratchKey*);
GrAccessPattern accessPattern() const { return fAccessPattern; }
size_t size() const final { return fSizeInBytes; }
void ref() const final { GrGpuResource::ref(); }
void unref() const final { GrGpuResource::unref(); }
/**
* Maps the buffer to be written by the CPU.
*
* The previous content of the buffer is invalidated. It is an error
* to draw from the buffer while it is mapped. It may fail if the backend
* doesn't support mapping the buffer. If the buffer is CPU backed then
* it will always succeed and is a free operation. Once a buffer is mapped,
* subsequent calls to map() are ignored.
*
* Note that buffer mapping does not go through GrContext and therefore is
* not serialized with other operations.
*
* @return a pointer to the data or nullptr if the map fails.
*/
void* map() {
if (!fMapPtr) {
this->onMap();
}
return fMapPtr;
}
/**
* Unmaps the buffer.
*
* The pointer returned by the previous map call will no longer be valid.
*/
void unmap() {
SkASSERT(fMapPtr);
this->onUnmap();
fMapPtr = nullptr;
}
/**
Queries whether the buffer has been mapped.
@return true if the buffer is mapped, false otherwise.
*/
bool isMapped() const { return SkToBool(fMapPtr); }
bool isCpuBuffer() const final { return false; }
/**
* Updates the buffer data.
*
* The size of the buffer will be preserved. The src data will be
* placed at the beginning of the buffer and any remaining contents will
* be undefined. srcSizeInBytes must be <= the buffer size.
*
* The buffer must not be mapped.
*
* Note that buffer updates do not go through GrContext and therefore are
* not serialized with other operations.
*
* @return returns true if the update succeeds, false otherwise.
*/
bool updateData(const void* src, size_t srcSizeInBytes) {
SkASSERT(!this->isMapped());
SkASSERT(srcSizeInBytes <= fSizeInBytes);
return this->onUpdateData(src, srcSizeInBytes);
}
protected:
GrGpuBuffer(GrGpu*, size_t sizeInBytes, GrGpuBufferType, GrAccessPattern);
GrGpuBufferType intendedType() const { return fIntendedType; }
void* fMapPtr;
private:
virtual void onMap() = 0;
virtual void onUnmap() = 0;
virtual bool onUpdateData(const void* src, size_t srcSizeInBytes) = 0;
size_t onGpuMemorySize() const override { return fSizeInBytes; }
const char* getResourceType() const override { return "Buffer Object"; }
void computeScratchKey(GrScratchKey* key) const override;
size_t fSizeInBytes;
GrAccessPattern fAccessPattern;
GrGpuBufferType fIntendedType;
};
#endif
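
A minimal sketch of the mapping contract documented above (gpu, vertices, and
kSize are placeholders; error handling elided):

    sk_sp<GrGpuBuffer> vbo = gpu->createBuffer(kSize, GrGpuBufferType::kVertex,
                                               kDynamic_GrAccessPattern);
    if (void* ptr = vbo->map()) {            // may return nullptr if the backend can't map
        memcpy(ptr, vertices, kSize);        // map() invalidates the previous contents
        vbo->unmap();                        // ptr is no longer valid after this
    } else {
        vbo->updateData(vertices, kSize);    // fallback when mapping fails
    }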


@ -10,6 +10,7 @@
#include "GrBuffer.h"
#include "GrPendingIOResource.h"
#include "GrGpuBuffer.h"
class GrPrimitiveProcessor;
@ -42,8 +43,9 @@ public:
void setInstanced(sk_sp<const GrBuffer> instanceBuffer, int instanceCount, int baseInstance,
int vertexCount);
void setIndexedInstanced(sk_sp<const GrBuffer>, int indexCount, sk_sp<const GrBuffer>,
int instanceCount, int baseInstance, GrPrimitiveRestart);
void setIndexedInstanced(sk_sp<const GrBuffer> indexBuffer, int indexCount,
sk_sp<const GrBuffer> instanceBuffer, int instanceCount,
int baseInstance, GrPrimitiveRestart);
void setVertexData(sk_sp<const GrBuffer> vertexBuffer, int baseVertex = 0);
@ -127,8 +129,8 @@ private:
};
inline void GrMesh::setNonIndexedNonInstanced(int vertexCount) {
fIndexBuffer.reset(nullptr);
fInstanceBuffer.reset(nullptr);
fIndexBuffer.reset();
fInstanceBuffer.reset();
fNonIndexNonInstanceData.fVertexCount = vertexCount;
fPrimitiveRestart = GrPrimitiveRestart::kNo;
}


@ -73,20 +73,18 @@ bool GrOnFlushResourceProvider::instatiateProxy(GrSurfaceProxy* proxy) {
return proxy->instantiate(resourceProvider);
}
sk_sp<GrBuffer> GrOnFlushResourceProvider::makeBuffer(GrGpuBufferType intendedType, size_t size,
const void* data) {
sk_sp<GrGpuBuffer> GrOnFlushResourceProvider::makeBuffer(GrGpuBufferType intendedType, size_t size,
const void* data) {
auto resourceProvider = fDrawingMgr->getContext()->priv().resourceProvider();
return sk_sp<GrBuffer>(resourceProvider->createBuffer(size, intendedType,
kDynamic_GrAccessPattern,
GrResourceProvider::Flags::kNone,
data));
return sk_sp<GrGpuBuffer>(
resourceProvider->createBuffer(size, intendedType, kDynamic_GrAccessPattern, data));
}
sk_sp<const GrBuffer> GrOnFlushResourceProvider::findOrMakeStaticBuffer(
sk_sp<const GrGpuBuffer> GrOnFlushResourceProvider::findOrMakeStaticBuffer(
GrGpuBufferType intendedType, size_t size, const void* data, const GrUniqueKey& key) {
auto resourceProvider = fDrawingMgr->getContext()->priv().resourceProvider();
sk_sp<const GrBuffer> buffer = resourceProvider->findOrMakeStaticBuffer(intendedType, size,
data, key);
sk_sp<const GrGpuBuffer> buffer =
resourceProvider->findOrMakeStaticBuffer(intendedType, size, data, key);
// Static buffers should never have pending IO.
SkASSERT(!buffer || !buffer->resourcePriv().hasPendingIO_debugOnly());
return buffer;


@ -86,11 +86,11 @@ public:
bool instatiateProxy(GrSurfaceProxy*);
// Creates a GPU buffer with a "dynamic" access pattern.
sk_sp<GrBuffer> makeBuffer(GrGpuBufferType, size_t, const void* data = nullptr);
sk_sp<GrGpuBuffer> makeBuffer(GrGpuBufferType, size_t, const void* data = nullptr);
// Either finds and refs, or creates a static GPU buffer with the given data.
sk_sp<const GrBuffer> findOrMakeStaticBuffer(GrGpuBufferType, size_t, const void* data,
const GrUniqueKey&);
sk_sp<const GrGpuBuffer> findOrMakeStaticBuffer(GrGpuBufferType, size_t, const void* data,
const GrUniqueKey&);
uint32_t contextID() const;
const GrCaps* caps() const;


@ -8,8 +8,8 @@
#ifndef GrProcessor_DEFINED
#define GrProcessor_DEFINED
#include "GrBuffer.h"
#include "GrColor.h"
#include "GrGpuBuffer.h"
#include "GrProcessorUnitTest.h"
#include "GrSamplerState.h"
#include "GrShaderVar.h"


@ -6,13 +6,13 @@
*/
#include "GrResourceProvider.h"
#include "../private/GrSingleOwner.h"
#include "GrBackendSemaphore.h"
#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrContextPriv.h"
#include "GrGpu.h"
#include "GrGpuBuffer.h"
#include "GrPath.h"
#include "GrPathRendering.h"
#include "GrProxyProvider.h"
@ -22,7 +22,6 @@
#include "GrSemaphore.h"
#include "GrStencilAttachment.h"
#include "GrTexturePriv.h"
#include "../private/GrSingleOwner.h"
#include "SkGr.h"
#include "SkMathPriv.h"
@ -285,35 +284,34 @@ sk_sp<GrGpuResource> GrResourceProvider::findResourceByUniqueKey(const GrUniqueK
: sk_sp<GrGpuResource>(fCache->findAndRefUniqueResource(key));
}
sk_sp<const GrBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferType intendedType,
size_t size,
const void* data,
const GrUniqueKey& key) {
if (auto buffer = this->findByUniqueKey<GrBuffer>(key)) {
sk_sp<const GrGpuBuffer> GrResourceProvider::findOrMakeStaticBuffer(GrGpuBufferType intendedType,
size_t size,
const void* data,
const GrUniqueKey& key) {
if (auto buffer = this->findByUniqueKey<GrGpuBuffer>(key)) {
return std::move(buffer);
}
if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, Flags::kNone,
data)) {
if (auto buffer = this->createBuffer(size, intendedType, kStatic_GrAccessPattern, data)) {
// We shouldn't bin and/or cache static buffers.
SkASSERT(buffer->sizeInBytes() == size);
SkASSERT(buffer->size() == size);
SkASSERT(!buffer->resourcePriv().getScratchKey().isValid());
SkASSERT(!buffer->resourcePriv().hasPendingIO_debugOnly());
buffer->resourcePriv().setUniqueKey(key);
return sk_sp<const GrBuffer>(buffer);
return sk_sp<const GrGpuBuffer>(buffer);
}
return nullptr;
}
sk_sp<const GrBuffer> GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern,
int patternSize,
int reps,
int vertCount,
const GrUniqueKey& key) {
sk_sp<const GrGpuBuffer> GrResourceProvider::createPatternedIndexBuffer(const uint16_t* pattern,
int patternSize,
int reps,
int vertCount,
const GrUniqueKey& key) {
size_t bufferSize = patternSize * reps * sizeof(uint16_t);
// This is typically used in GrMeshDrawOps, so we assume kNoPendingIO.
sk_sp<GrBuffer> buffer(this->createBuffer(bufferSize, GrGpuBufferType::kIndex,
kStatic_GrAccessPattern, Flags::kNone));
sk_sp<GrGpuBuffer> buffer(
this->createBuffer(bufferSize, GrGpuBufferType::kIndex, kStatic_GrAccessPattern));
if (!buffer) {
return nullptr;
}
@ -343,7 +341,7 @@ sk_sp<const GrBuffer> GrResourceProvider::createPatternedIndexBuffer(const uint1
static constexpr int kMaxQuads = 1 << 12; // max possible: (1 << 14) - 1;
sk_sp<const GrBuffer> GrResourceProvider::createQuadIndexBuffer() {
sk_sp<const GrGpuBuffer> GrResourceProvider::createQuadIndexBuffer() {
GR_STATIC_ASSERT(4 * kMaxQuads <= 65535);
static const uint16_t kPattern[] = { 0, 1, 2, 2, 1, 3 };
return this->createPatternedIndexBuffer(kPattern, 6, kMaxQuads, 4, fQuadIndexBufferKey);
@ -360,36 +358,24 @@ sk_sp<GrPath> GrResourceProvider::createPath(const SkPath& path, const GrStyle&
return this->gpu()->pathRendering()->createPath(path, style);
}
sk_sp<GrBuffer> GrResourceProvider::createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, Flags flags,
const void* data) {
sk_sp<GrGpuBuffer> GrResourceProvider::createBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern,
const void* data) {
if (this->isAbandoned()) {
return nullptr;
}
if (kDynamic_GrAccessPattern != accessPattern) {
return this->gpu()->createBuffer(size, intendedType, accessPattern, data);
}
if (!(flags & Flags::kRequireGpuMemory) &&
this->gpu()->caps()->preferClientSideDynamicBuffers() &&
GrBufferTypeIsVertexOrIndex(intendedType) &&
kDynamic_GrAccessPattern == accessPattern) {
return GrBuffer::MakeCPUBacked(this->gpu(), size, intendedType, data);
}
// bin by pow2 with a reasonable min
static const size_t MIN_SIZE = 1 << 12;
size_t allocSize = SkTMax(MIN_SIZE, GrNextSizePow2(size));
GrScratchKey key;
GrBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
auto scratchFlags = GrResourceCache::ScratchFlags::kNone;
if (flags & Flags::kNoPendingIO) {
scratchFlags = GrResourceCache::ScratchFlags::kRequireNoPendingIO;
} else {
scratchFlags = GrResourceCache::ScratchFlags::kPreferNoPendingIO;
}
auto buffer = sk_sp<GrBuffer>(static_cast<GrBuffer*>(
this->cache()->findAndRefScratchResource(key, allocSize, scratchFlags)));
GrGpuBuffer::ComputeScratchKeyForDynamicVBO(allocSize, intendedType, &key);
auto buffer =
sk_sp<GrGpuBuffer>(static_cast<GrGpuBuffer*>(this->cache()->findAndRefScratchResource(
key, allocSize, GrResourceCache::ScratchFlags::kNone)));
if (!buffer) {
buffer = this->gpu()->createBuffer(allocSize, intendedType, kDynamic_GrAccessPattern);
if (!buffer) {
@ -399,7 +385,6 @@ sk_sp<GrBuffer> GrResourceProvider::createBuffer(size_t size, GrGpuBufferType in
if (data) {
buffer->updateData(data, size);
}
SkASSERT(!buffer->isCPUBacked()); // We should only cache real VBOs.
return buffer;
}
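
For reference, the pow2 binning above rounds a dynamic-buffer request up before
the scratch-cache lookup; a worked example:

    size_t allocSize = SkTMax((size_t)(1 << 12), GrNextSizePow2(size));
    // size = 5000 -> allocSize = 8192: every request in (4096, 8192] shares one bin
    // size = 100  -> allocSize = 4096: the 4KB floor keeps tiny buffers from
    //                                  fragmenting the cache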


@ -8,8 +8,8 @@
#ifndef GrResourceProvider_DEFINED
#define GrResourceProvider_DEFINED
#include "GrBuffer.h"
#include "GrContextOptions.h"
#include "GrGpuBuffer.h"
#include "GrResourceCache.h"
#include "SkImageInfoPriv.h"
#include "SkScalerContext.h"
@ -51,11 +51,6 @@ public:
* Make this automatic: https://bug.skia.org/4156
*/
kNoPendingIO = 0x1,
/** Normally the caps may indicate a preference for client-side buffers. Set this flag when
* creating a buffer to guarantee it resides in GPU memory.
*/
kRequireGpuMemory = 0x2,
};
GrResourceProvider(GrGpu*, GrResourceCache*, GrSingleOwner*,
@ -66,7 +61,9 @@ public:
* must be sure that if a resource exists in the cache with the given unique key then it is
* of type T.
*/
template <typename T = GrGpuResource> sk_sp<T> findByUniqueKey(const GrUniqueKey& key) {
template <typename T = GrGpuResource>
typename std::enable_if<std::is_base_of<GrGpuResource, T>::value, sk_sp<T>>::type
findByUniqueKey(const GrUniqueKey& key) {
return sk_sp<T>(static_cast<T*>(this->findResourceByUniqueKey(key).release()));
}
@ -145,8 +142,8 @@ public:
*
* @return The buffer if successful, otherwise nullptr.
*/
sk_sp<const GrBuffer> findOrMakeStaticBuffer(GrGpuBufferType intendedType, size_t size,
const void* data, const GrUniqueKey& key);
sk_sp<const GrGpuBuffer> findOrMakeStaticBuffer(GrGpuBufferType intendedType, size_t size,
const void* data, const GrUniqueKey& key);
/**
* Either finds and refs, or creates an index buffer with a repeating pattern for drawing
@ -161,12 +158,12 @@ public:
*
* @return The index buffer if successful, otherwise nullptr.
*/
sk_sp<const GrBuffer> findOrCreatePatternedIndexBuffer(const uint16_t* pattern,
int patternSize,
int reps,
int vertCount,
const GrUniqueKey& key) {
if (auto buffer = this->findByUniqueKey<GrBuffer>(key)) {
sk_sp<const GrGpuBuffer> findOrCreatePatternedIndexBuffer(const uint16_t* pattern,
int patternSize,
int reps,
int vertCount,
const GrUniqueKey& key) {
if (auto buffer = this->findByUniqueKey<GrGpuBuffer>(key)) {
return std::move(buffer);
}
return this->createPatternedIndexBuffer(pattern, patternSize, reps, vertCount, key);
@ -179,8 +176,8 @@ public:
* Draw with GrPrimitiveType::kTriangles
* @ return the quad index buffer
*/
sk_sp<const GrBuffer> refQuadIndexBuffer() {
if (auto buffer = this->findByUniqueKey<const GrBuffer>(fQuadIndexBufferKey)) {
sk_sp<const GrGpuBuffer> refQuadIndexBuffer() {
if (auto buffer = this->findByUniqueKey<const GrGpuBuffer>(fQuadIndexBufferKey)) {
return buffer;
}
return this->createQuadIndexBuffer();
@ -205,8 +202,8 @@ public:
*
* @return the buffer if successful, otherwise nullptr.
*/
sk_sp<GrBuffer> createBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern, Flags,
const void* data = nullptr);
sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
const void* data = nullptr);
/**
* If passed in render target already has a stencil buffer, return true. Otherwise attempt to
@ -286,13 +283,13 @@ private:
return !SkToBool(fCache);
}
sk_sp<const GrBuffer> createPatternedIndexBuffer(const uint16_t* pattern,
int patternSize,
int reps,
int vertCount,
const GrUniqueKey& key);
sk_sp<const GrGpuBuffer> createPatternedIndexBuffer(const uint16_t* pattern,
int patternSize,
int reps,
int vertCount,
const GrUniqueKey& key);
sk_sp<const GrBuffer> createQuadIndexBuffer();
sk_sp<const GrGpuBuffer> createQuadIndexBuffer();
GrResourceCache* fCache;
GrGpu* fGpu;
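
The enable_if added to findByUniqueKey turns a wrong template argument into a
compile-time error; a brief sketch (provider is a GrResourceProvider*):

    auto vbo = provider->findByUniqueKey<GrGpuBuffer>(key);  // OK: a GrGpuResource
    // provider->findByUniqueKey<GrCpuBuffer>(key);          // no longer compiles:
    //                                                       // not a GrGpuResource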


@ -100,7 +100,7 @@ public:
// Appends a GrMesh that will draw the provided instances. The instanceBuffer must be an array
// of either TriPointInstance or QuadPointInstance, depending on this processor's RendererPass,
// with coordinates in the desired shape's final atlas-space position.
void appendMesh(sk_sp<GrBuffer> instanceBuffer, int instanceCount, int baseInstance,
void appendMesh(sk_sp<GrGpuBuffer> instanceBuffer, int instanceCount, int baseInstance,
SkTArray<GrMesh>* out) const {
if (Impl::kGeometryShader == fImpl) {
this->appendGSMesh(std::move(instanceBuffer), instanceCount, baseInstance, out);
@ -250,9 +250,9 @@ private:
void initGS();
void initVS(GrResourceProvider*);
void appendGSMesh(sk_sp<const GrBuffer> instanceBuffer, int instanceCount, int baseInstance,
void appendGSMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount, int baseInstance,
SkTArray<GrMesh>* out) const;
void appendVSMesh(sk_sp<const GrBuffer> instanceBuffer, int instanceCount, int baseInstance,
void appendVSMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount, int baseInstance,
SkTArray<GrMesh>* out) const;
GrGLSLPrimitiveProcessor* createGSImpl(std::unique_ptr<Shader>) const;
@ -269,8 +269,8 @@ private:
// Used by VSImpl.
Attribute fInstanceAttributes[2];
sk_sp<const GrBuffer> fVSVertexBuffer;
sk_sp<const GrBuffer> fVSIndexBuffer;
sk_sp<const GrGpuBuffer> fVSVertexBuffer;
sk_sp<const GrGpuBuffer> fVSIndexBuffer;
int fVSNumIndicesPerInstance;
GrPrimitiveType fVSTriangleType;


@ -396,7 +396,7 @@ void GrCCCoverageProcessor::initGS() {
this->setWillUseGeoShader();
}
void GrCCCoverageProcessor::appendGSMesh(sk_sp<const GrBuffer> instanceBuffer, int instanceCount,
void GrCCCoverageProcessor::appendGSMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount,
int baseInstance, SkTArray<GrMesh>* out) const {
// GSImpl doesn't actually make instanced draw calls. Instead, we feed transposed x,y point
// values to the GPU in a regular vertex array and draw kLines (see initGS). Then, each vertex


@ -528,7 +528,7 @@ void GrCCCoverageProcessor::initVS(GrResourceProvider* rp) {
}
}
void GrCCCoverageProcessor::appendVSMesh(sk_sp<const GrBuffer> instanceBuffer, int instanceCount,
void GrCCCoverageProcessor::appendVSMesh(sk_sp<const GrGpuBuffer> instanceBuffer, int instanceCount,
int baseInstance, SkTArray<GrMesh>* out) const {
SkASSERT(Impl::kVertexShader == fImpl);
GrMesh& mesh = out->emplace_back(fVSTriangleType);


@ -106,7 +106,7 @@ private:
PrimitiveTallies fTotalPrimitiveCounts[kNumScissorModes];
int fMaxMeshesPerDraw = 0;
sk_sp<GrBuffer> fInstanceBuffer;
sk_sp<GrGpuBuffer> fInstanceBuffer;
PrimitiveTallies fBaseInstances[kNumScissorModes];
mutable SkSTArray<32, GrMesh> fMeshesScratchBuffer;
mutable SkSTArray<32, SkIRect> fScissorRectScratchBuffer;


@ -34,7 +34,7 @@ static constexpr float kOctoEdgeNorms[8 * 4] = {
GR_DECLARE_STATIC_UNIQUE_KEY(gVertexBufferKey);
sk_sp<const GrBuffer> GrCCPathProcessor::FindVertexBuffer(GrOnFlushResourceProvider* onFlushRP) {
sk_sp<const GrGpuBuffer> GrCCPathProcessor::FindVertexBuffer(GrOnFlushResourceProvider* onFlushRP) {
GR_DEFINE_STATIC_UNIQUE_KEY(gVertexBufferKey);
return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kVertex, sizeof(kOctoEdgeNorms),
kOctoEdgeNorms, gVertexBufferKey);
@ -64,7 +64,7 @@ GR_DECLARE_STATIC_UNIQUE_KEY(gIndexBufferKey);
constexpr GrPrimitiveProcessor::Attribute GrCCPathProcessor::kInstanceAttribs[];
constexpr GrPrimitiveProcessor::Attribute GrCCPathProcessor::kEdgeNormsAttrib;
sk_sp<const GrBuffer> GrCCPathProcessor::FindIndexBuffer(GrOnFlushResourceProvider* onFlushRP) {
sk_sp<const GrGpuBuffer> GrCCPathProcessor::FindIndexBuffer(GrOnFlushResourceProvider* onFlushRP) {
GR_DEFINE_STATIC_UNIQUE_KEY(gIndexBufferKey);
if (onFlushRP->caps()->usePrimitiveRestart()) {
return onFlushRP->findOrMakeStaticBuffer(GrGpuBufferType::kIndex,


@ -66,8 +66,8 @@ public:
GR_STATIC_ASSERT(4 * 12 == sizeof(Instance));
static sk_sp<const GrBuffer> FindVertexBuffer(GrOnFlushResourceProvider*);
static sk_sp<const GrBuffer> FindIndexBuffer(GrOnFlushResourceProvider*);
static sk_sp<const GrGpuBuffer> FindVertexBuffer(GrOnFlushResourceProvider*);
static sk_sp<const GrGpuBuffer> FindIndexBuffer(GrOnFlushResourceProvider*);
GrCCPathProcessor(const GrTextureProxy* atlas,
const SkMatrix& viewMatrixIfUsingLocalCoords = SkMatrix::I());


@ -105,15 +105,15 @@ public:
// Accessors used by draw calls, once the resources have been finalized.
const GrCCFiller& filler() const { SkASSERT(!this->isMapped()); return fFiller; }
const GrCCStroker& stroker() const { SkASSERT(!this->isMapped()); return fStroker; }
sk_sp<const GrBuffer> refIndexBuffer() const {
sk_sp<const GrGpuBuffer> refIndexBuffer() const {
SkASSERT(!this->isMapped());
return fIndexBuffer;
}
sk_sp<const GrBuffer> refVertexBuffer() const {
sk_sp<const GrGpuBuffer> refVertexBuffer() const {
SkASSERT(!this->isMapped());
return fVertexBuffer;
}
sk_sp<const GrBuffer> refInstanceBuffer() const {
sk_sp<const GrGpuBuffer> refInstanceBuffer() const {
SkASSERT(!this->isMapped());
return fInstanceBuffer;
}
@ -131,9 +131,9 @@ private:
GrCCAtlasStack fCopyAtlasStack;
GrCCAtlasStack fRenderedAtlasStack;
const sk_sp<const GrBuffer> fIndexBuffer;
const sk_sp<const GrBuffer> fVertexBuffer;
const sk_sp<GrBuffer> fInstanceBuffer;
const sk_sp<const GrGpuBuffer> fIndexBuffer;
const sk_sp<const GrGpuBuffer> fVertexBuffer;
const sk_sp<GrGpuBuffer> fInstanceBuffer;
GrCCPathProcessor::Instance* fPathInstanceData = nullptr;
int fNextCopyInstanceIdx;


@ -497,7 +497,7 @@ public:
}
}
sk_sp<GrBuffer> finish() {
sk_sp<GrGpuBuffer> finish() {
SkASSERT(this->isMapped());
SkASSERT(!memcmp(fNextInstances, fEndInstances, sizeof(fNextInstances)));
fInstanceBuffer->unmap();
@ -543,7 +543,7 @@ private:
InstanceTallies* fCurrNextInstances;
SkDEBUGCODE(const InstanceTallies* fCurrEndInstances);
sk_sp<GrBuffer> fInstanceBuffer;
sk_sp<GrGpuBuffer> fInstanceBuffer;
void* fInstanceBufferData = nullptr;
InstanceTallies fNextInstances[2];
SkDEBUGCODE(InstanceTallies fEndInstances[2]);


@ -13,7 +13,7 @@
#include "SkNx.h"
#include "ccpr/GrCCStrokeGeometry.h"
class GrBuffer;
class GrGpuBuffer;
class GrCCCoverageProcessor;
class GrOnFlushResourceProvider;
class GrOpFlushState;
@ -116,7 +116,7 @@ private:
GrSTAllocator<128, InstanceTallies> fTalliesAllocator;
const InstanceTallies* fInstanceCounts[kNumScissorModes] = {&fZeroTallies, &fZeroTallies};
sk_sp<GrBuffer> fInstanceBuffer;
sk_sp<GrGpuBuffer> fInstanceBuffer;
// The indices stored in batches are relative to these base instances.
InstanceTallies fBaseInstances[kNumScissorModes];


@ -176,8 +176,8 @@ void GrGLBuffer::onMap() {
case GrGLCaps::kMapBuffer_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Let driver know it can discard the old data
if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->sizeInBytes()) {
GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
if (this->glCaps().useBufferDataNullHint() || fGLSizeInBytes != this->size()) {
GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
}
GL_CALL_RET(fMapPtr, MapBuffer(target, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
break;
@ -185,30 +185,30 @@ void GrGLBuffer::onMap() {
case GrGLCaps::kMapBufferRange_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Make sure the GL buffer size agrees with fDesc before mapping.
if (fGLSizeInBytes != this->sizeInBytes()) {
GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
if (fGLSizeInBytes != this->size()) {
GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
}
GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
if (GrGpuBufferType::kXferCpuToGpu != fIntendedType) {
// TODO: Make this a function parameter.
writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
}
GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->sizeInBytes(),
readOnly ? GR_GL_MAP_READ_BIT : writeAccess));
GL_CALL_RET(fMapPtr, MapBufferRange(target, 0, this->size(),
readOnly ? GR_GL_MAP_READ_BIT : writeAccess));
break;
}
case GrGLCaps::kChromium_MapBufferType: {
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
// Make sure the GL buffer size agrees with fDesc before mapping.
if (fGLSizeInBytes != this->sizeInBytes()) {
GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
if (fGLSizeInBytes != this->size()) {
GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
}
GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->sizeInBytes(),
readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
GL_CALL_RET(fMapPtr, MapBufferSubData(target, 0, this->size(),
readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
break;
}
}
fGLSizeInBytes = this->sizeInBytes();
fGLSizeInBytes = this->size();
VALIDATE();
}
@ -251,15 +251,15 @@ bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
SkASSERT(!this->isMapped());
VALIDATE();
if (srcSizeInBytes > this->sizeInBytes()) {
if (srcSizeInBytes > this->size()) {
return false;
}
SkASSERT(srcSizeInBytes <= this->sizeInBytes());
SkASSERT(srcSizeInBytes <= this->size());
// bindbuffer handles dirty context
GrGLenum target = this->glGpu()->bindBuffer(fIntendedType, this);
if (this->glCaps().useBufferDataNullHint()) {
if (this->sizeInBytes() == srcSizeInBytes) {
if (this->size() == srcSizeInBytes) {
GL_CALL(BufferData(target, (GrGLsizeiptr) srcSizeInBytes, src, fUsage));
} else {
// Before we call glBufferSubData we give the driver a hint using
@ -269,10 +269,10 @@ bool GrGLBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
// assign a different allocation for the new contents to avoid
// flushing the gpu past draws consuming the old contents.
// TODO I think we actually want to try calling bufferData here
GL_CALL(BufferData(target, this->sizeInBytes(), nullptr, fUsage));
GL_CALL(BufferData(target, this->size(), nullptr, fUsage));
GL_CALL(BufferSubData(target, 0, (GrGLsizeiptr) srcSizeInBytes, src));
}
fGLSizeInBytes = this->sizeInBytes();
fGLSizeInBytes = this->size();
} else {
// Note that we're cheating on the size here. Currently no methods
// allow a partial update that preserves contents of non-updated
@ -296,7 +296,7 @@ void GrGLBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
void GrGLBuffer::validate() const {
SkASSERT(0 != fBufferID || 0 == fGLSizeInBytes);
SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->sizeInBytes());
SkASSERT(nullptr == fMapPtr || fGLSizeInBytes <= this->size());
}
#endif


@ -8,13 +8,13 @@
#ifndef GrGLBuffer_DEFINED
#define GrGLBuffer_DEFINED
#include "GrBuffer.h"
#include "GrGpuBuffer.h"
#include "gl/GrGLTypes.h"
class GrGLGpu;
class GrGLCaps;
class GrGLBuffer : public GrBuffer {
class GrGLBuffer : public GrGpuBuffer {
public:
static sk_sp<GrGLBuffer> Make(GrGLGpu*, size_t size, GrGpuBufferType intendedType,
GrAccessPattern, const void* data = nullptr);
@ -28,7 +28,7 @@ public:
/**
* Returns the actual size of the underlying GL buffer object. In certain cases we may make this
* smaller than the size reported by GrBuffer.
* smaller than the size reported by GrGpuBuffer.
*/
size_t glSizeInBytes() const { return fGLSizeInBytes; }
@ -62,7 +62,7 @@ private:
size_t fGLSizeInBytes;
bool fHasAttachedToTexture;
typedef GrBuffer INHERITED;
typedef GrGpuBuffer INHERITED;
};
#endif


@ -8,6 +8,7 @@
#include "GrGLGpu.h"
#include "GrBackendSemaphore.h"
#include "GrBackendSurface.h"
#include "GrCpuBuffer.h"
#include "GrFixedClip.h"
#include "GrGLBuffer.h"
#include "GrGLGpuCommandBuffer.h"
@ -842,8 +843,8 @@ static inline GrGLint config_alignment(GrPixelConfig config) {
}
bool GrGLGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height,
GrColorType bufferColorType, GrBuffer* transferBuffer, size_t offset,
size_t rowBytes) {
GrColorType bufferColorType, GrGpuBuffer* transferBuffer,
size_t offset, size_t rowBytes) {
GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
GrPixelConfig texConfig = glTex->config();
SkASSERT(this->caps()->isConfigTexturable(texConfig));
@ -864,7 +865,7 @@ bool GrGLGpu::onTransferPixels(GrTexture* texture, int left, int top, int width,
GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
SkASSERT(!transferBuffer->isMapped());
SkASSERT(!transferBuffer->isCPUBacked());
SkASSERT(!transferBuffer->isCpuBuffer());
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
this->bindBuffer(GrGpuBufferType::kXferCpuToGpu, glBuffer);
@ -1847,8 +1848,8 @@ GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRen
////////////////////////////////////////////////////////////////////////////////
sk_sp<GrBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
sk_sp<GrGpuBuffer> GrGLGpu::onCreateBuffer(size_t size, GrGpuBufferType intendedType,
GrAccessPattern accessPattern, const void* data) {
return GrGLBuffer::Make(this, size, intendedType, accessPattern, data);
}
@ -2063,7 +2064,8 @@ void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer,
GrGLAttribArrayState* attribState;
if (indexBuffer) {
SkASSERT(indexBuffer && !indexBuffer->isMapped());
SkASSERT(indexBuffer->isCpuBuffer() ||
!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());
attribState = fHWVertexArrayState.bindInternalVertexArray(this, indexBuffer);
} else {
attribState = fHWVertexArrayState.bindInternalVertexArray(this);
@ -2073,9 +2075,10 @@ void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer,
attribState->enableVertexArrays(this, numAttribs, enablePrimitiveRestart);
if (int vertexStride = fHWProgram->vertexStride()) {
SkASSERT(vertexBuffer && !vertexBuffer->isMapped());
size_t bufferOffset = vertexBuffer->baseOffset();
bufferOffset += baseVertex * static_cast<size_t>(vertexStride);
SkASSERT(vertexBuffer);
SkASSERT(vertexBuffer->isCpuBuffer() ||
!static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());
size_t bufferOffset = baseVertex * static_cast<size_t>(vertexStride);
for (int i = 0; i < fHWProgram->numVertexAttributes(); ++i) {
const auto& attrib = fHWProgram->vertexAttribute(i);
static constexpr int kDivisor = 0;
@ -2084,9 +2087,10 @@ void GrGLGpu::setupGeometry(const GrBuffer* indexBuffer,
}
}
if (int instanceStride = fHWProgram->instanceStride()) {
SkASSERT(instanceBuffer && !instanceBuffer->isMapped());
size_t bufferOffset = instanceBuffer->baseOffset();
bufferOffset += baseInstance * static_cast<size_t>(instanceStride);
SkASSERT(instanceBuffer);
SkASSERT(instanceBuffer->isCpuBuffer() ||
!static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());
size_t bufferOffset = baseInstance * static_cast<size_t>(instanceStride);
int attribIdx = fHWProgram->numVertexAttributes();
for (int i = 0; i < fHWProgram->numInstanceAttributes(); ++i, ++attribIdx) {
const auto& attrib = fHWProgram->instanceAttribute(i);
@ -2107,13 +2111,14 @@ GrGLenum GrGLGpu::bindBuffer(GrGpuBufferType type, const GrBuffer* buffer) {
}
auto* bufferState = this->hwBufferState(type);
if (buffer->isCPUBacked()) {
if (buffer->isCpuBuffer()) {
if (!bufferState->fBufferZeroKnownBound) {
GL_CALL(BindBuffer(bufferState->fGLTarget, 0));
bufferState->fBufferZeroKnownBound = true;
bufferState->fBoundBufferUniqueID.makeInvalid();
}
} else if (buffer->uniqueID() != bufferState->fBoundBufferUniqueID) {
} else if (static_cast<const GrGpuBuffer*>(buffer)->uniqueID() !=
bufferState->fBoundBufferUniqueID) {
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
GL_CALL(BindBuffer(bufferState->fGLTarget, glBuffer->bufferID()));
bufferState->fBufferZeroKnownBound = false;
@ -2608,21 +2613,29 @@ void GrGLGpu::sendMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* verte
fStats.incNumDraws();
}
static const GrGLvoid* element_ptr(const GrBuffer* indexBuffer, int baseIndex) {
size_t baseOffset = baseIndex * sizeof(uint16_t);
if (indexBuffer->isCpuBuffer()) {
return static_cast<const GrCpuBuffer*>(indexBuffer)->data() + baseOffset;
} else {
return reinterpret_cast<const GrGLvoid*>(baseOffset);
}
}
void GrGLGpu::sendIndexedMeshToGpu(GrPrimitiveType primitiveType, const GrBuffer* indexBuffer,
int indexCount, int baseIndex, uint16_t minIndexValue,
uint16_t maxIndexValue, const GrBuffer* vertexBuffer,
int baseVertex, GrPrimitiveRestart enablePrimitiveRestart) {
const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
GrGLvoid* const indices = reinterpret_cast<void*>(indexBuffer->baseOffset() +
sizeof(uint16_t) * baseIndex);
const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex);
this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, nullptr, 0, enablePrimitiveRestart);
if (this->glCaps().drawRangeElementsSupport()) {
GL_CALL(DrawRangeElements(glPrimType, minIndexValue, maxIndexValue, indexCount,
GR_GL_UNSIGNED_SHORT, indices));
GR_GL_UNSIGNED_SHORT, elementPtr));
} else {
GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices));
GL_CALL(DrawElements(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr));
}
fStats.incNumDraws();
}
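
The element_ptr() helper above leans on a core GL convention: the last argument of glDrawElements is a real client-memory pointer when no element array buffer is bound, and a byte offset into the bound buffer otherwise. A minimal illustration in plain OpenGL, assuming a current context and that ibo and baseIndex are set up elsewhere:

// Client-side path: no element array buffer bound, so pass a real pointer.
GLushort cpuIndices[] = {0, 1, 2};
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_SHORT, cpuIndices);

// GPU path: with a buffer bound, the same argument is a byte offset.
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_SHORT,
               reinterpret_cast<const void*>(baseIndex * sizeof(GLushort)));
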
@ -2649,13 +2662,12 @@ void GrGLGpu::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType,
int instanceCount, int baseInstance,
GrPrimitiveRestart enablePrimitiveRestart) {
const GrGLenum glPrimType = gr_primitive_type_to_gl_mode(primitiveType);
GrGLvoid* indices = reinterpret_cast<void*>(indexBuffer->baseOffset() +
sizeof(uint16_t) * baseIndex);
const GrGLvoid* elementPtr = element_ptr(indexBuffer, baseIndex);
int maxInstances = this->glCaps().maxInstancesPerDrawWithoutCrashing(instanceCount);
for (int i = 0; i < instanceCount; i += maxInstances) {
this->setupGeometry(indexBuffer, vertexBuffer, baseVertex, instanceBuffer, baseInstance + i,
enablePrimitiveRestart);
GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, indices,
GL_CALL(DrawElementsInstanced(glPrimType, indexCount, GR_GL_UNSIGNED_SHORT, elementPtr,
SkTMin(instanceCount - i, maxInstances)));
fStats.incNumDraws();
}


@ -187,8 +187,8 @@ private:
sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
const GrMipLevel texels[], int mipLevelCount) override;
sk_sp<GrBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
const void* data) override;
sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
const void* data) override;
sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership, GrWrapCacheable,
GrIOType) override;
@ -233,7 +233,7 @@ private:
const GrMipLevel texels[], int mipLevelCount) override;
bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType,
GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override;
GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override;
// Before calling any variation of TexImage, TexSubImage, etc..., call this to ensure that the
// PIXEL_UNPACK_BUFFER is unbound.


@ -6,6 +6,7 @@
*/
#include "GrGLVertexArray.h"
#include "GrCpuBuffer.h"
#include "GrGLBuffer.h"
#include "GrGLGpu.h"
@ -89,14 +90,32 @@ void GrGLAttribArrayState::set(GrGLGpu* gpu,
SkASSERT(index >= 0 && index < fAttribArrayStates.count());
SkASSERT(0 == divisor || gpu->caps()->instanceAttribSupport());
AttribArrayState* array = &fAttribArrayStates[index];
if (array->fVertexBufferUniqueID != vertexBuffer->uniqueID() ||
const char* offsetAsPtr;
bool bufferChanged = false;
if (vertexBuffer->isCpuBuffer()) {
if (!array->fUsingCpuBuffer) {
bufferChanged = true;
array->fUsingCpuBuffer = true;
}
offsetAsPtr = static_cast<const GrCpuBuffer*>(vertexBuffer)->data() + offsetInBytes;
} else {
auto gpuBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
if (array->fUsingCpuBuffer || array->fVertexBufferUniqueID != gpuBuffer->uniqueID()) {
bufferChanged = true;
array->fVertexBufferUniqueID = gpuBuffer->uniqueID();
}
offsetAsPtr = reinterpret_cast<const char*>(offsetInBytes);
}
if (bufferChanged ||
array->fCPUType != cpuType ||
array->fGPUType != gpuType ||
array->fStride != stride ||
array->fOffset != offsetInBytes) {
array->fOffset != offsetAsPtr) {
// We always have to call this if we're going to change the array pointer. 'array' is
// tracking the last buffer used to setup attrib pointers, not the last buffer bound.
// GrGLGpu will avoid redundant binds.
gpu->bindBuffer(GrGpuBufferType::kVertex, vertexBuffer);
const AttribLayout& layout = attrib_layout(cpuType);
const GrGLvoid* offsetAsPtr = reinterpret_cast<const GrGLvoid*>(offsetInBytes);
if (GrSLTypeIsFloatType(gpuType)) {
GR_GL_CALL(gpu->glInterface(), VertexAttribPointer(index,
layout.fCount,
@ -113,11 +132,10 @@ void GrGLAttribArrayState::set(GrGLGpu* gpu,
stride,
offsetAsPtr));
}
array->fVertexBufferUniqueID = vertexBuffer->uniqueID();
array->fCPUType = cpuType;
array->fGPUType = gpuType;
array->fStride = stride;
array->fOffset = offsetInBytes;
array->fOffset = offsetAsPtr;
}
if (gpu->caps()->instanceAttribSupport() && array->fDivisor != divisor) {
SkASSERT(0 == divisor || 1 == divisor); // not necessarily a requirement but what we expect.
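
glVertexAttribPointer is overloaded the same way: its final parameter is a client pointer when no ARRAY_BUFFER is bound and a byte offset otherwise, which is why fOffset can now hold either form as a const GrGLvoid*. A plain-GL sketch, with vbo, stride, and offsetInBytes assumed to exist:

// Client-side array: attribute data is read directly from application memory.
float cpuVerts[] = {0.f, 0.f, 1.f, 0.f, 1.f, 1.f};
glBindBuffer(GL_ARRAY_BUFFER, 0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, stride, cpuVerts);

// GPU buffer: the pointer argument becomes an offset into the bound VBO.
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, stride,
                      reinterpret_cast<const void*>(offsetInBytes));
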
@ -179,15 +197,19 @@ GrGLAttribArrayState* GrGLVertexArray::bind(GrGLGpu* gpu) {
GrGLAttribArrayState* GrGLVertexArray::bindWithIndexBuffer(GrGLGpu* gpu, const GrBuffer* ibuff) {
GrGLAttribArrayState* state = this->bind(gpu);
if (state && fIndexBufferUniqueID != ibuff->uniqueID()) {
if (ibuff->isCPUBacked()) {
GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0));
} else {
if (!state) {
return nullptr;
}
if (ibuff->isCpuBuffer()) {
GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, 0));
} else {
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(ibuff);
if (fIndexBufferUniqueID != glBuffer->uniqueID()) {
const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(ibuff);
GR_GL_CALL(gpu->glInterface(), BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER,
glBuffer->bufferID()));
GR_GL_CALL(gpu->glInterface(),
BindBuffer(GR_GL_ELEMENT_ARRAY_BUFFER, glBuffer->bufferID()));
fIndexBufferUniqueID = glBuffer->uniqueID();
}
fIndexBufferUniqueID = ibuff->uniqueID();
}
return state;
}


@ -75,13 +75,15 @@ private:
void invalidate() {
fVertexBufferUniqueID.makeInvalid();
fDivisor = kInvalidDivisor;
fUsingCpuBuffer = false;
}
GrGpuResource::UniqueID fVertexBufferUniqueID;
bool fUsingCpuBuffer;
GrVertexAttribType fCPUType;
GrSLType fGPUType;
GrGLsizei fStride;
size_t fOffset;
const GrGLvoid* fOffset;
int fDivisor;
};


@ -8,11 +8,11 @@
#ifndef GrMockBuffer_DEFINED
#define GrMockBuffer_DEFINED
#include "GrBuffer.h"
#include "GrCaps.h"
#include "GrGpuBuffer.h"
#include "GrMockGpu.h"
class GrMockBuffer : public GrBuffer {
class GrMockBuffer : public GrGpuBuffer {
public:
GrMockBuffer(GrMockGpu* gpu, size_t sizeInBytes, GrGpuBufferType type,
GrAccessPattern accessPattern)
@ -23,13 +23,13 @@ public:
private:
void onMap() override {
if (GrCaps::kNone_MapFlags != this->getGpu()->caps()->mapBufferFlags()) {
fMapPtr = sk_malloc_throw(this->sizeInBytes());
fMapPtr = sk_malloc_throw(this->size());
}
}
void onUnmap() override { sk_free(fMapPtr); }
bool onUpdateData(const void* src, size_t srcSizeInBytes) override { return true; }
typedef GrBuffer INHERITED;
typedef GrGpuBuffer INHERITED;
};
#endif


@ -183,9 +183,9 @@ sk_sp<GrRenderTarget> GrMockGpu::onWrapBackendTextureAsRenderTarget(const GrBack
new GrMockRenderTarget(this, GrMockRenderTarget::kWrapped, desc, rtInfo));
}
sk_sp<GrBuffer> GrMockGpu::onCreateBuffer(size_t sizeInBytes, GrGpuBufferType type,
GrAccessPattern accessPattern, const void*) {
return sk_sp<GrBuffer>(new GrMockBuffer(this, sizeInBytes, type, accessPattern));
sk_sp<GrGpuBuffer> GrMockGpu::onCreateBuffer(size_t sizeInBytes, GrGpuBufferType type,
GrAccessPattern accessPattern, const void*) {
return sk_sp<GrGpuBuffer>(new GrMockBuffer(this, sizeInBytes, type, accessPattern));
}
GrStencilAttachment* GrMockGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,


@ -72,8 +72,8 @@ private:
sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
int sampleCnt) override;
sk_sp<GrBuffer> onCreateBuffer(size_t sizeInBytes, GrGpuBufferType, GrAccessPattern,
const void*) override;
sk_sp<GrGpuBuffer> onCreateBuffer(size_t sizeInBytes, GrGpuBufferType, GrAccessPattern,
const void*) override;
bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
void* buffer, size_t rowBytes) override {
@ -86,7 +86,7 @@ private:
}
bool onTransferPixels(GrTexture* texture, int left, int top, int width, int height, GrColorType,
GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override {
GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override {
return true;
}


@ -8,14 +8,14 @@
#ifndef GrMtlBuffer_DEFINED
#define GrMtlBuffer_DEFINED
#include "GrBuffer.h"
#include "GrGpuBuffer.h"
#import <Metal/Metal.h>
class GrMtlCaps;
class GrMtlGpu;
class GrMtlBuffer: public GrBuffer {
class GrMtlBuffer: public GrGpuBuffer {
public:
static sk_sp<GrMtlBuffer> Make(GrMtlGpu*, size_t size, GrGpuBufferType intendedType,
GrAccessPattern, const void* data = nullptr);
@ -48,7 +48,7 @@ private:
id<MTLBuffer> fMtlBuffer;
id<MTLBuffer> fMappedBuffer;
typedef GrBuffer INHERITED;
typedef GrGpuBuffer INHERITED;
};
#endif


@ -143,7 +143,8 @@ private:
sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
int sampleCnt) override;
sk_sp<GrBuffer> onCreateBuffer(size_t, GrGpuBufferType, GrAccessPattern, const void*) override;
sk_sp<GrGpuBuffer> onCreateBuffer(size_t, GrGpuBufferType, GrAccessPattern,
const void*) override;
bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
void* buffer, size_t rowBytes) override;
@ -153,7 +154,7 @@ private:
bool onTransferPixels(GrTexture*,
int left, int top, int width, int height,
GrColorType, GrBuffer*,
GrColorType, GrGpuBuffer*,
size_t offset, size_t rowBytes) override {
return false;
}


@ -131,8 +131,8 @@ void GrMtlGpu::submitCommandBuffer(SyncQueue sync) {
fCmdBuffer = [fQueue commandBuffer];
}
sk_sp<GrBuffer> GrMtlGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
GrAccessPattern accessPattern, const void* data) {
sk_sp<GrGpuBuffer> GrMtlGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
GrAccessPattern accessPattern, const void* data) {
return GrMtlBuffer::Make(this, size, type, accessPattern, data);
}


@ -274,8 +274,8 @@ void GrMtlGpuRTCommandBuffer::bindGeometry(const GrBuffer* vertexBuffer,
const GrBuffer* instanceBuffer) {
size_t bufferIndex = GrMtlUniformHandler::kLastUniformBinding + 1;
if (vertexBuffer) {
SkASSERT(!vertexBuffer->isCPUBacked());
SkASSERT(!vertexBuffer->isMapped());
SkASSERT(!vertexBuffer->isCpuBuffer());
SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());
auto mtlVertexBuffer = static_cast<const GrMtlBuffer*>(vertexBuffer)->mtlBuffer();
SkASSERT(mtlVertexBuffer);
@ -284,8 +284,8 @@ void GrMtlGpuRTCommandBuffer::bindGeometry(const GrBuffer* vertexBuffer,
atIndex: bufferIndex++];
}
if (instanceBuffer) {
SkASSERT(!instanceBuffer->isCPUBacked());
SkASSERT(!instanceBuffer->isMapped());
SkASSERT(!instanceBuffer->isCpuBuffer());
SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());
auto mtlInstanceBuffer = static_cast<const GrMtlBuffer*>(instanceBuffer)->mtlBuffer();
SkASSERT(mtlInstanceBuffer);
@ -327,8 +327,8 @@ void GrMtlGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType prim
SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
id<MTLBuffer> mtlIndexBuffer;
if (indexBuffer) {
SkASSERT(!indexBuffer->isCPUBacked());
SkASSERT(!indexBuffer->isMapped());
SkASSERT(!indexBuffer->isCpuBuffer());
SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());
mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer();
SkASSERT(mtlIndexBuffer);


@ -55,8 +55,8 @@ GrMtlPipelineState::GrMtlPipelineState(
, fXferProcessor(std::move(xferProcessor))
, fFragmentProcessors(std::move(fragmentProcessors))
, fFragmentProcessorCnt(fragmentProcessorCnt)
, fDataManager(uniforms, fGeometryUniformBuffer->sizeInBytes(),
fFragmentUniformBuffer->sizeInBytes()) {
, fDataManager(uniforms, fGeometryUniformBuffer->size(),
fFragmentUniformBuffer->size()) {
(void) fPixelFormat; // Suppress unused-var warning.
}


@ -423,8 +423,7 @@ void GrAtlasTextOp::flush(GrMeshDrawOp::Target* target, FlushInfo* flushInfo) co
samplerState);
}
}
int maxGlyphsPerDraw =
static_cast<int>(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6);
int maxGlyphsPerDraw = static_cast<int>(flushInfo->fIndexBuffer->size() / sizeof(uint16_t) / 6);
GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
mesh->setIndexedPatterned(flushInfo->fIndexBuffer, kIndicesPerGlyph, kVerticesPerGlyph,
flushInfo->fGlyphsToFlush, maxGlyphsPerDraw);
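
As a worked example of that arithmetic: each glyph quad costs 6 indices of 2 bytes apiece, so a hypothetical 98,304-byte index buffer holds 98304 / 2 = 49,152 uint16_t indices, i.e. 49152 / 6 = 8,192 glyphs per patterned draw.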


@ -227,7 +227,7 @@ void GrDrawVerticesOp::drawVolatile(Target* target) {
// Allocate buffers.
size_t vertexStride = gp->vertexStride();
sk_sp<const GrBuffer> vertexBuffer = nullptr;
sk_sp<const GrBuffer> vertexBuffer;
int firstVertex = 0;
void* verts = target->makeVertexSpace(vertexStride, fVertexCount, &vertexBuffer, &firstVertex);
if (!verts) {
@ -235,7 +235,7 @@ void GrDrawVerticesOp::drawVolatile(Target* target) {
return;
}
sk_sp<const GrBuffer> indexBuffer = nullptr;
sk_sp<const GrBuffer> indexBuffer;
int firstIndex = 0;
uint16_t* indices = nullptr;
if (this->isIndexed()) {
@ -286,10 +286,9 @@ void GrDrawVerticesOp::drawNonVolatile(Target* target) {
indexKeyBuilder.finish();
// Try to grab data from the cache.
sk_sp<GrBuffer> vertexBuffer = rp->findByUniqueKey<GrBuffer>(vertexKey);
sk_sp<GrBuffer> indexBuffer = this->isIndexed() ?
rp->findByUniqueKey<GrBuffer>(indexKey) :
nullptr;
sk_sp<GrGpuBuffer> vertexBuffer = rp->findByUniqueKey<GrGpuBuffer>(vertexKey);
sk_sp<GrGpuBuffer> indexBuffer =
this->isIndexed() ? rp->findByUniqueKey<GrGpuBuffer>(indexKey) : nullptr;
// Draw using the cached buffers if possible.
if (vertexBuffer && (!this->isIndexed() || indexBuffer)) {
@ -300,10 +299,8 @@ void GrDrawVerticesOp::drawNonVolatile(Target* target) {
// Allocate vertex buffer.
size_t vertexStride = gp->vertexStride();
vertexBuffer = rp->createBuffer(fVertexCount * vertexStride,
GrGpuBufferType::kVertex,
kStatic_GrAccessPattern,
GrResourceProvider::Flags::kNone);
vertexBuffer = rp->createBuffer(
fVertexCount * vertexStride, GrGpuBufferType::kVertex, kStatic_GrAccessPattern);
void* verts = vertexBuffer ? vertexBuffer->map() : nullptr;
if (!verts) {
SkDebugf("Could not allocate vertices\n");
@ -313,10 +310,8 @@ void GrDrawVerticesOp::drawNonVolatile(Target* target) {
// Allocate index buffer.
uint16_t* indices = nullptr;
if (this->isIndexed()) {
indexBuffer = rp->createBuffer(fIndexCount * sizeof(uint16_t),
GrGpuBufferType::kIndex,
kStatic_GrAccessPattern,
GrResourceProvider::Flags::kNone);
indexBuffer = rp->createBuffer(
fIndexCount * sizeof(uint16_t), GrGpuBufferType::kIndex, kStatic_GrAccessPattern);
indices = indexBuffer ? static_cast<uint16_t*>(indexBuffer->map()) : nullptr;
if (!indices) {
SkDebugf("Could not allocate indices\n");


@ -45,10 +45,10 @@ void GrMeshDrawOp::PatternHelper::init(Target* target, GrPrimitiveType primitive
return;
}
SkASSERT(vertexBuffer);
size_t ibSize = indexBuffer->gpuMemorySize();
size_t ibSize = indexBuffer->size();
int maxRepetitions = static_cast<int>(ibSize / (sizeof(uint16_t) * indicesPerRepetition));
fMesh = target->allocMesh(primitiveType);
fMesh->setIndexedPatterned(indexBuffer, indicesPerRepetition, verticesPerRepetition,
fMesh->setIndexedPatterned(std::move(indexBuffer), indicesPerRepetition, verticesPerRepetition,
repeatCount, maxRepetitions);
fMesh->setVertexData(std::move(vertexBuffer), firstVertex);
}
@ -62,7 +62,7 @@ void GrMeshDrawOp::PatternHelper::recordDraw(
//////////////////////////////////////////////////////////////////////////////
GrMeshDrawOp::QuadHelper::QuadHelper(Target* target, size_t vertexStride, int quadsToDraw) {
sk_sp<const GrBuffer> quadIndexBuffer = target->resourceProvider()->refQuadIndexBuffer();
sk_sp<const GrGpuBuffer> quadIndexBuffer = target->resourceProvider()->refQuadIndexBuffer();
if (!quadIndexBuffer) {
SkDebugf("Could not get quad index buffer.");
return;


@ -34,8 +34,9 @@ protected:
space for the vertices and flushes the draws to the GrMeshDrawOp::Target. */
class PatternHelper {
public:
PatternHelper(Target*, GrPrimitiveType, size_t vertexStride, sk_sp<const GrBuffer>,
int verticesPerRepetition, int indicesPerRepetition, int repeatCount);
PatternHelper(Target*, GrPrimitiveType, size_t vertexStride,
sk_sp<const GrBuffer> indexBuffer, int verticesPerRepetition,
int indicesPerRepetition, int repeatCount);
/** Called to issue draws to the GrMeshDrawOp::Target.*/
void recordDraw(Target*, sk_sp<const GrGeometryProcessor>, const GrPipeline*,
@ -45,7 +46,7 @@ protected:
protected:
PatternHelper() = default;
void init(Target*, GrPrimitiveType, size_t vertexStride, sk_sp<const GrBuffer>,
void init(Target*, GrPrimitiveType, size_t vertexStride, sk_sp<const GrBuffer> indexBuffer,
int verticesPerRepetition, int indicesPerRepetition, int repeatCount);
private:


@ -374,7 +374,7 @@ GR_DECLARE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey);
static const int kVertsPerAAFillRect = 8;
static const int kIndicesPerAAFillRect = 30;
static sk_sp<const GrBuffer> get_index_buffer(GrResourceProvider* resourceProvider) {
static sk_sp<const GrGpuBuffer> get_index_buffer(GrResourceProvider* resourceProvider) {
GR_DEFINE_STATIC_UNIQUE_KEY(gAAFillRectIndexBufferKey);
// clang-format off
@ -467,7 +467,7 @@ bool ConfigureMeshIndices(GrMeshDrawOp::Target* target, GrMesh* mesh, const Vert
int quadCount) {
if (spec.usesCoverageAA()) {
// AA quads use 8 vertices, basically nested rectangles
sk_sp<const GrBuffer> ibuffer = get_index_buffer(target->resourceProvider());
sk_sp<const GrGpuBuffer> ibuffer = get_index_buffer(target->resourceProvider());
if (!ibuffer) {
return false;
}
@ -478,7 +478,7 @@ bool ConfigureMeshIndices(GrMeshDrawOp::Target* target, GrMesh* mesh, const Vert
} else {
// Non-AA quads use 4 vertices, and regular triangle strip layout
if (quadCount > 1) {
sk_sp<const GrBuffer> ibuffer = target->resourceProvider()->refQuadIndexBuffer();
sk_sp<const GrGpuBuffer> ibuffer = target->resourceProvider()->refQuadIndexBuffer();
if (!ibuffer) {
return false;
}


@ -109,7 +109,7 @@ private:
if (!numRects) {
return;
}
sk_sp<const GrBuffer> indexBuffer = target->resourceProvider()->refQuadIndexBuffer();
sk_sp<const GrGpuBuffer> indexBuffer = target->resourceProvider()->refQuadIndexBuffer();
if (!indexBuffer) {
SkDebugf("Could not allocate indices\n");
return;


@ -794,7 +794,7 @@ private:
if (flushInfo->fInstancesToFlush) {
GrMesh* mesh = target->allocMesh(GrPrimitiveType::kTriangles);
int maxInstancesPerDraw =
static_cast<int>(flushInfo->fIndexBuffer->gpuMemorySize() / sizeof(uint16_t) / 6);
static_cast<int>(flushInfo->fIndexBuffer->size() / sizeof(uint16_t) / 6);
mesh->setIndexedPatterned(flushInfo->fIndexBuffer, kIndicesPerQuad, kVerticesPerQuad,
flushInfo->fInstancesToFlush, maxInstancesPerDraw);
mesh->setVertexData(flushInfo->fVertexBuffer, flushInfo->fVertexOffset);


@ -420,7 +420,7 @@ private:
static const int kBevelVertexCnt = 24;
static const int kNumBevelRectsInIndexBuffer = 256;
static sk_sp<const GrBuffer> GetIndexBuffer(GrResourceProvider*, bool miterStroke);
static sk_sp<const GrGpuBuffer> GetIndexBuffer(GrResourceProvider*, bool miterStroke);
const SkMatrix& viewMatrix() const { return fViewMatrix; }
bool miterStroke() const { return fMiterStroke; }
@ -472,7 +472,7 @@ void AAStrokeRectOp::onPrepareDraws(Target* target) {
int indicesPerInstance = this->miterStroke() ? kMiterIndexCnt : kBevelIndexCnt;
int instanceCount = fRects.count();
sk_sp<const GrBuffer> indexBuffer =
sk_sp<const GrGpuBuffer> indexBuffer =
GetIndexBuffer(target->resourceProvider(), this->miterStroke());
if (!indexBuffer) {
SkDebugf("Could not allocate indices\n");
@ -503,8 +503,8 @@ void AAStrokeRectOp::onPrepareDraws(Target* target) {
helper.recordDraw(target, std::move(gp), pipe.fPipeline, pipe.fFixedDynamicState);
}
sk_sp<const GrBuffer> AAStrokeRectOp::GetIndexBuffer(GrResourceProvider* resourceProvider,
bool miterStroke) {
sk_sp<const GrGpuBuffer> AAStrokeRectOp::GetIndexBuffer(GrResourceProvider* resourceProvider,
bool miterStroke) {
if (miterStroke) {
// clang-format off
static const uint16_t gMiterIndices[] = {


@ -53,7 +53,7 @@ private:
}
};
bool cache_match(GrBuffer* vertexBuffer, SkScalar tol, int* actualCount) {
bool cache_match(GrGpuBuffer* vertexBuffer, SkScalar tol, int* actualCount) {
if (!vertexBuffer) {
return false;
}
@ -78,8 +78,7 @@ public:
void* lock(int vertexCount) override {
size_t size = vertexCount * stride();
fVertexBuffer = fResourceProvider->createBuffer(size, GrGpuBufferType::kVertex,
kStatic_GrAccessPattern,
GrResourceProvider::Flags::kNone);
kStatic_GrAccessPattern);
if (!fVertexBuffer.get()) {
return nullptr;
}
@ -99,10 +98,10 @@ public:
}
fVertices = nullptr;
}
sk_sp<GrBuffer> detachVertexBuffer() { return std::move(fVertexBuffer); }
sk_sp<GrGpuBuffer> detachVertexBuffer() { return std::move(fVertexBuffer); }
private:
sk_sp<GrBuffer> fVertexBuffer;
sk_sp<GrGpuBuffer> fVertexBuffer;
GrResourceProvider* fResourceProvider;
bool fCanMapVB;
void* fVertices;
@ -261,7 +260,7 @@ private:
memset(&builder[shapeKeyDataCnt], 0, sizeof(fDevClipBounds));
}
builder.finish();
sk_sp<GrBuffer> cachedVertexBuffer(rp->findByUniqueKey<GrBuffer>(key));
sk_sp<GrGpuBuffer> cachedVertexBuffer(rp->findByUniqueKey<GrGpuBuffer>(key));
int actualCount;
SkScalar tol = GrPathUtils::kDefaultTolerance;
tol = GrPathUtils::scaleToleranceToSrc(tol, fViewMatrix, fShape.bounds());
@ -286,7 +285,7 @@ private:
if (count == 0) {
return;
}
sk_sp<GrBuffer> vb = allocator.detachVertexBuffer();
sk_sp<GrGpuBuffer> vb = allocator.detachVertexBuffer();
TessInfo info;
info.fTolerance = isLinear ? 0 : tol;
info.fCount = count;
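
The allocator's intended flow is lock-to-write, unlock, detach: map space for vertexCount vertices, write them, then take ownership of the resulting GrGpuBuffer so it can be cached by key. A hedged usage sketch; unlock's exact signature is not visible in this hunk, so actualCount is an assumption:

if (void* verts = allocator.lock(vertexCount)) {
    // ... write up to vertexCount * stride() bytes of vertex data ...
    allocator.unlock(actualCount);
}
sk_sp<GrGpuBuffer> vb = allocator.detachVertexBuffer();  // caller now owns it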


@ -343,9 +343,9 @@ void GrVkGpu::submitCommandBuffer(SyncQueue sync) {
}
///////////////////////////////////////////////////////////////////////////////
sk_sp<GrBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
GrAccessPattern accessPattern, const void* data) {
sk_sp<GrBuffer> buff;
sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
GrAccessPattern accessPattern, const void* data) {
sk_sp<GrGpuBuffer> buff;
switch (type) {
case GrGpuBufferType::kVertex:
SkASSERT(kDynamic_GrAccessPattern == accessPattern ||
@ -419,7 +419,7 @@ bool GrVkGpu::onWritePixels(GrSurface* surface, int left, int top, int width, in
}
bool GrVkGpu::onTransferPixels(GrTexture* texture, int left, int top, int width, int height,
GrColorType bufferColorType, GrBuffer* transferBuffer,
GrColorType bufferColorType, GrGpuBuffer* transferBuffer,
size_t bufferOffset, size_t rowBytes) {
// Can't transfer compressed data
SkASSERT(!GrPixelConfigIsCompressed(texture->config()));


@ -198,8 +198,8 @@ private:
sk_sp<GrRenderTarget> onWrapVulkanSecondaryCBAsRenderTarget(const SkImageInfo&,
const GrVkDrawableInfo&) override;
sk_sp<GrBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
const void* data) override;
sk_sp<GrGpuBuffer> onCreateBuffer(size_t size, GrGpuBufferType type, GrAccessPattern,
const void* data) override;
bool onReadPixels(GrSurface* surface, int left, int top, int width, int height, GrColorType,
void* buffer, size_t rowBytes) override;
@ -208,7 +208,7 @@ private:
const GrMipLevel texels[], int mipLevelCount) override;
bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType,
GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override;
GrGpuBuffer* transferBuffer, size_t offset, size_t rowBytes) override;
bool onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin, GrSurface* src,
GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,


@ -593,9 +593,9 @@ void GrVkGpuRTCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin, con
////////////////////////////////////////////////////////////////////////////////
void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer,
const GrBuffer* vertexBuffer,
const GrBuffer* instanceBuffer) {
void GrVkGpuRTCommandBuffer::bindGeometry(const GrGpuBuffer* indexBuffer,
const GrGpuBuffer* vertexBuffer,
const GrGpuBuffer* instanceBuffer) {
GrVkSecondaryCommandBuffer* currCmdBuf = fCommandBufferInfos[fCurrentCmdInfo].currentCmdBuf();
// There is no need to put any memory barriers to make sure host writes have finished here.
// When a command buffer is submitted to a queue, there is an implicit memory barrier that
@ -608,7 +608,6 @@ void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer,
if (vertexBuffer) {
SkASSERT(vertexBuffer);
SkASSERT(!vertexBuffer->isCPUBacked());
SkASSERT(!vertexBuffer->isMapped());
currCmdBuf->bindInputBuffer(fGpu, binding++,
@ -617,7 +616,6 @@ void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer,
if (instanceBuffer) {
SkASSERT(instanceBuffer);
SkASSERT(!instanceBuffer->isCPUBacked());
SkASSERT(!instanceBuffer->isMapped());
currCmdBuf->bindInputBuffer(fGpu, binding++,
@ -626,7 +624,6 @@ void GrVkGpuRTCommandBuffer::bindGeometry(const GrBuffer* indexBuffer,
if (indexBuffer) {
SkASSERT(indexBuffer);
SkASSERT(!indexBuffer->isMapped());
SkASSERT(!indexBuffer->isCPUBacked());
currCmdBuf->bindIndexBuffer(fGpu, static_cast<const GrVkIndexBuffer*>(indexBuffer));
}
@ -807,7 +804,11 @@ void GrVkGpuRTCommandBuffer::sendInstancedMeshToGpu(GrPrimitiveType,
int instanceCount,
int baseInstance) {
CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
this->bindGeometry(nullptr, vertexBuffer, instanceBuffer);
SkASSERT(!vertexBuffer || !vertexBuffer->isCpuBuffer());
SkASSERT(!instanceBuffer || !instanceBuffer->isCpuBuffer());
auto gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
auto gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer);
this->bindGeometry(nullptr, gpuVertexBuffer, gpuInstanceBuffer);
cbInfo.currentCmdBuf()->draw(fGpu, vertexCount, instanceCount, baseVertex, baseInstance);
fGpu->stats()->incNumDraws();
}
@ -824,7 +825,13 @@ void GrVkGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType,
GrPrimitiveRestart restart) {
SkASSERT(restart == GrPrimitiveRestart::kNo);
CommandBufferInfo& cbInfo = fCommandBufferInfos[fCurrentCmdInfo];
this->bindGeometry(indexBuffer, vertexBuffer, instanceBuffer);
SkASSERT(!vertexBuffer || !vertexBuffer->isCpuBuffer());
SkASSERT(!instanceBuffer || !instanceBuffer->isCpuBuffer());
SkASSERT(!indexBuffer->isCpuBuffer());
auto gpuIndexBuffer = static_cast<const GrGpuBuffer*>(indexBuffer);
auto gpuVertexBuffer = static_cast<const GrGpuBuffer*>(vertexBuffer);
auto gpuInstanceBuffer = static_cast<const GrGpuBuffer*>(instanceBuffer);
this->bindGeometry(gpuIndexBuffer, gpuVertexBuffer, gpuInstanceBuffer);
cbInfo.currentCmdBuf()->drawIndexed(fGpu, indexCount, instanceCount,
baseIndex, baseVertex, baseInstance);
fGpu->stats()->incNumDraws();
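
The repeated assert-and-downcast is one idiom: on this backend every buffer that reaches a draw must be GPU-backed, so the static_cast is legal exactly when isCpuBuffer() returns false. A hypothetical helper capturing that, not part of this change:

static const GrGpuBuffer* as_gpu_buffer(const GrBuffer* buffer) {
    SkASSERT(!buffer || !buffer->isCpuBuffer());  // CPU buffers never get here
    return static_cast<const GrGpuBuffer*>(buffer);
}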


@ -96,9 +96,9 @@ private:
GrGpu* gpu() override;
// Bind vertex and index buffers
void bindGeometry(const GrBuffer* indexBuffer,
const GrBuffer* vertexBuffer,
const GrBuffer* instanceBuffer);
void bindGeometry(const GrGpuBuffer* indexBuffer,
const GrGpuBuffer* vertexBuffer,
const GrGpuBuffer* instanceBuffer);
GrVkPipelineState* prepareDrawState(const GrPrimitiveProcessor&,
const GrPipeline&,


@ -50,7 +50,7 @@ void GrVkIndexBuffer::onAbandon() {
void GrVkIndexBuffer::onMap() {
if (!this->wasDestroyed()) {
this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu());
}
}


@ -8,13 +8,12 @@
#ifndef GrVkIndexBuffer_DEFINED
#define GrVkIndexBuffer_DEFINED
#include "GrBuffer.h"
#include "GrGpuBuffer.h"
#include "GrVkBuffer.h"
class GrVkGpu;
class GrVkIndexBuffer : public GrBuffer, public GrVkBuffer {
class GrVkIndexBuffer : public GrGpuBuffer, public GrVkBuffer {
public:
static sk_sp<GrVkIndexBuffer> Make(GrVkGpu* gpu, size_t size, bool dynamic);
@ -32,7 +31,7 @@ private:
GrVkGpu* getVkGpu() const;
typedef GrBuffer INHERITED;
typedef GrGpuBuffer INHERITED;
};
#endif


@ -8,14 +8,13 @@
#ifndef GrVkTransferBuffer_DEFINED
#define GrVkTransferBuffer_DEFINED
#include "GrBuffer.h"
#include "GrGpuBuffer.h"
#include "GrVkBuffer.h"
#include "vk/GrVkTypes.h"
class GrVkGpu;
class GrVkTransferBuffer : public GrBuffer, public GrVkBuffer {
class GrVkTransferBuffer : public GrGpuBuffer, public GrVkBuffer {
public:
static sk_sp<GrVkTransferBuffer> Make(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type);
@ -31,7 +30,7 @@ private:
void onMap() override {
if (!this->wasDestroyed()) {
this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu());
}
}
@ -51,7 +50,7 @@ private:
return reinterpret_cast<GrVkGpu*>(this->getGpu());
}
typedef GrBuffer INHERITED;
typedef GrGpuBuffer INHERITED;
};
#endif


@ -49,7 +49,7 @@ void GrVkVertexBuffer::onAbandon() {
void GrVkVertexBuffer::onMap() {
if (!this->wasDestroyed()) {
this->GrBuffer::fMapPtr = this->vkMap(this->getVkGpu());
this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu());
}
}


@ -8,12 +8,12 @@
#ifndef GrVkVertexBuffer_DEFINED
#define GrVkVertexBuffer_DEFINED
#include "GrBuffer.h"
#include "GrGpuBuffer.h"
#include "GrVkBuffer.h"
class GrVkGpu;
class GrVkVertexBuffer : public GrBuffer, public GrVkBuffer {
class GrVkVertexBuffer : public GrGpuBuffer, public GrVkBuffer {
public:
static sk_sp<GrVkVertexBuffer> Make(GrVkGpu* gpu, size_t size, bool dynamic);
@ -31,7 +31,7 @@ private:
GrVkGpu* getVkGpu() const;
typedef GrBuffer INHERITED;
typedef GrGpuBuffer INHERITED;
};
#endif


@ -375,8 +375,7 @@ GrGLSLPrimitiveProcessor* GrMeshTestProcessor::createGLSLInstance(const GrShader
template<typename T>
sk_sp<const GrBuffer> DrawMeshHelper::makeVertexBuffer(const T* data, int count) {
return sk_sp<const GrBuffer>(fState->resourceProvider()->createBuffer(
count * sizeof(T), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern,
GrResourceProvider::Flags::kRequireGpuMemory, data));
count * sizeof(T), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern, data));
}
sk_sp<const GrBuffer> DrawMeshHelper::getIndexBuffer() {


@ -194,9 +194,8 @@ DEF_GPUTEST_FOR_RENDERING_CONTEXTS(GrPipelineDynamicStateTest, reporter, ctxInfo
{d, d, kMeshColors[3]}
};
sk_sp<const GrBuffer> vbuff(
rp->createBuffer(sizeof(vdata), GrGpuBufferType::kVertex, kDynamic_GrAccessPattern,
GrResourceProvider::Flags::kRequireGpuMemory, vdata));
sk_sp<const GrBuffer> vbuff(rp->createBuffer(sizeof(vdata), GrGpuBufferType::kVertex,
kDynamic_GrAccessPattern, vdata));
if (!vbuff) {
ERRORF(reporter, "vbuff is null.");
return;


@ -77,7 +77,7 @@ public:
return std::unique_ptr<GrFragmentProcessor>(new TestFP(std::move(child)));
}
static std::unique_ptr<GrFragmentProcessor> Make(const SkTArray<sk_sp<GrTextureProxy>>& proxies,
const SkTArray<sk_sp<GrBuffer>>& buffers) {
const SkTArray<sk_sp<GrGpuBuffer>>& buffers) {
return std::unique_ptr<GrFragmentProcessor>(new TestFP(proxies, buffers));
}
@ -93,7 +93,8 @@ public:
}
private:
TestFP(const SkTArray<sk_sp<GrTextureProxy>>& proxies, const SkTArray<sk_sp<GrBuffer>>& buffers)
TestFP(const SkTArray<sk_sp<GrTextureProxy>>& proxies,
const SkTArray<sk_sp<GrGpuBuffer>>& buffers)
: INHERITED(kTestFP_ClassID, kNone_OptimizationFlags), fSamplers(4) {
for (const auto& proxy : proxies) {
fSamplers.emplace_back(proxy);
@ -185,7 +186,7 @@ DEF_GPUTEST_FOR_ALL_CONTEXTS(ProcessorRefTest, reporter, ctxInfo) {
SkBudgeted::kYes);
{
SkTArray<sk_sp<GrTextureProxy>> proxies;
SkTArray<sk_sp<GrBuffer>> buffers;
SkTArray<sk_sp<GrGpuBuffer>> buffers;
proxies.push_back(proxy1);
auto fp = TestFP::Make(std::move(proxies), std::move(buffers));
for (int i = 0; i < parentCnt; ++i) {


@ -83,9 +83,8 @@ void basic_transfer_test(skiatest::Reporter* reporter, GrContext* context, GrCol
// create and fill transfer buffer
size_t size = rowBytes*kBufferHeight;
auto bufferFlags = GrResourceProvider::Flags::kNoPendingIO;
sk_sp<GrBuffer> buffer(resourceProvider->createBuffer(size, GrGpuBufferType::kXferCpuToGpu,
kDynamic_GrAccessPattern, bufferFlags));
sk_sp<GrGpuBuffer> buffer(resourceProvider->createBuffer(size, GrGpuBufferType::kXferCpuToGpu,
kDynamic_GrAccessPattern));
if (!buffer) {
return;
}