Remove old GrVkBuffer class and rename Buffer2 to Buffer.

Bug: skia:11226
Change-Id: I5f507a0d3ffe0a2698b10b0535486986c2a8b5b7
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/367977
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Greg Daniel, 2021-02-08 13:55:26 -05:00 (committed by Skia Commit-Bot)
parent 38d92ef668
commit af1d193d46
14 changed files with 291 additions and 788 deletions
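For orientation, a minimal usage sketch (editorial note, not part of the diff): after this change every Vulkan-backend buffer goes through the single GrVkBuffer class, created with GrVkBuffer::Make() and filled through the GrGpuBuffer interface. The helper name, the data/size parameters, and the choice of kStatic_GrAccessPattern below are assumptions for illustration only.

// Sketch only: how a caller might create and fill a vertex buffer with the
// renamed class. "gpu", "data", and "size" are assumed inputs.
#include "src/gpu/vk/GrVkBuffer.h"

static sk_sp<GrGpuBuffer> make_vertex_buffer(GrVkGpu* gpu, const void* data, size_t size) {
    sk_sp<GrVkBuffer> buffer = GrVkBuffer::Make(gpu, size,
                                                GrGpuBufferType::kVertex,
                                                kStatic_GrAccessPattern);
    if (!buffer) {
        return nullptr;
    }
    if (data) {
        // updateData() comes from GrGpuBuffer and lands in GrVkBuffer::onUpdateData(),
        // which either maps the allocation or copies through a transfer buffer.
        buffer->updateData(data, size);
    }
    // The raw Vulkan handle for command recording is exposed via vkBuffer().
    SkASSERT(buffer->vkBuffer() != VK_NULL_HANDLE);
    return buffer;
}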


@@ -713,8 +713,6 @@ skia_vk_sources = [
"$_src/gpu/vk/GrVkAttachment.h",
"$_src/gpu/vk/GrVkBuffer.cpp",
"$_src/gpu/vk/GrVkBuffer.h",
"$_src/gpu/vk/GrVkBuffer2.cpp",
"$_src/gpu/vk/GrVkBuffer2.h",
"$_src/gpu/vk/GrVkCaps.cpp",
"$_src/gpu/vk/GrVkCaps.h",
"$_src/gpu/vk/GrVkCommandBuffer.cpp",


@@ -1,5 +1,5 @@
/*
* Copyright 2015 Google Inc.
* Copyright 2021 Google LLC
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
@@ -9,72 +9,116 @@
#include "include/gpu/GrDirectContext.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/vk/GrVkBuffer2.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/vk/GrVkDescriptorSet.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkUtil.h"
#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif
using BufferUsage = GrVkMemoryAllocator::BufferUsage;
static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
switch (type) {
case GrVkBuffer::kVertex_Type: // fall through
case GrVkBuffer::kIndex_Type: // fall through
case GrVkBuffer::kIndirect_Type: // fall through
case GrVkBuffer::kTexel_Type:
return dynamic ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
case GrVkBuffer::kUniform_Type: // fall through
case GrVkBuffer::kCopyRead_Type:
SkASSERT(dynamic);
return BufferUsage::kCpuWritesGpuReads;
case GrVkBuffer::kCopyWrite_Type:
return BufferUsage::kGpuWritesCpuReads;
}
SK_ABORT("Invalid GrVkBuffer::Type");
GrVkBuffer::GrVkBuffer(GrVkGpu* gpu,
size_t sizeInBytes,
GrGpuBufferType bufferType,
GrAccessPattern accessPattern,
VkBuffer buffer,
const GrVkAlloc& alloc,
const GrVkDescriptorSet* uniformDescriptorSet)
: GrGpuBuffer(gpu, sizeInBytes, bufferType, accessPattern)
, fBuffer(buffer)
, fAlloc(alloc)
, fUniformDescriptorSet(uniformDescriptorSet) {
// We always require dynamic buffers to be mappable
SkASSERT(accessPattern != kDynamic_GrAccessPattern || this->isVkMappable());
SkASSERT(bufferType != GrGpuBufferType::kUniform || uniformDescriptorSet);
this->registerWithCache(SkBudgeted::kYes);
}
const GrVkBuffer::Resource* GrVkBuffer::Create(GrVkGpu* gpu, const Desc& desc) {
SkASSERT(!gpu->protectedContext() || (gpu->protectedContext() == desc.fDynamic));
VkBuffer buffer;
GrVkAlloc alloc;
static const GrVkDescriptorSet* make_uniform_desc_set(GrVkGpu* gpu, VkBuffer buffer, size_t size) {
const GrVkDescriptorSet* descriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
if (!descriptorSet) {
return nullptr;
}
VkDescriptorBufferInfo bufferInfo;
memset(&bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
bufferInfo.buffer = buffer;
bufferInfo.offset = 0;
bufferInfo.range = size;
VkWriteDescriptorSet descriptorWrite;
memset(&descriptorWrite, 0, sizeof(VkWriteDescriptorSet));
descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrite.pNext = nullptr;
descriptorWrite.dstSet = *descriptorSet->descriptorSet();
descriptorWrite.dstBinding = GrVkUniformHandler::kUniformBinding;
descriptorWrite.dstArrayElement = 0;
descriptorWrite.descriptorCount = 1;
descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrite.pImageInfo = nullptr;
descriptorWrite.pBufferInfo = &bufferInfo;
descriptorWrite.pTexelBufferView = nullptr;
GR_VK_CALL(gpu->vkInterface(),
UpdateDescriptorSets(gpu->device(), 1, &descriptorWrite, 0, nullptr));
return descriptorSet;
}
sk_sp<GrVkBuffer> GrVkBuffer::Make(GrVkGpu* gpu,
size_t size,
GrGpuBufferType bufferType,
GrAccessPattern accessPattern) {
VkBuffer buffer;
GrVkAlloc alloc;
// The only time we don't require mappable buffers is when we have a static access pattern and
// we're on a device where gpu only memory has faster reads on the gpu than memory that is also
// mappable on the cpu. Protected memory always uses mappable buffers.
bool requiresMappable = gpu->protectedContext() ||
accessPattern == kDynamic_GrAccessPattern ||
accessPattern == kStream_GrAccessPattern ||
!gpu->vkCaps().gpuOnlyBuffersMorePerformant();
using BufferUsage = GrVkMemoryAllocator::BufferUsage;
BufferUsage allocUsage;
// create the buffer object
VkBufferCreateInfo bufInfo;
memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufInfo.flags = 0;
bufInfo.size = desc.fSizeInBytes;
switch (desc.fType) {
case kVertex_Type:
bufInfo.size = size;
switch (bufferType) {
case GrGpuBufferType::kVertex:
bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
break;
case kIndex_Type:
case GrGpuBufferType::kIndex:
bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
break;
case kIndirect_Type:
case GrGpuBufferType::kDrawIndirect:
bufInfo.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
break;
case kUniform_Type:
case GrGpuBufferType::kUniform:
bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
allocUsage = BufferUsage::kCpuWritesGpuReads;
break;
case kCopyRead_Type:
case GrGpuBufferType::kXferCpuToGpu:
bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
allocUsage = BufferUsage::kCpuWritesGpuReads;
break;
case kCopyWrite_Type:
case GrGpuBufferType::kXferGpuToCpu:
bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
allocUsage = BufferUsage::kGpuWritesCpuReads;
break;
case kTexel_Type:
bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT;
}
if (!desc.fDynamic) {
// We may not always get a mappable buffer for non dynamic access buffers. Thus we set the
// transfer dst usage bit in case we need to do a copy to write data.
// TODO: It doesn't really hurt setting this extra usage flag, but maybe we can narrow the scope
// of buffers we set it on more than just not dynamic.
if (!requiresMappable) {
bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
}
@@ -88,125 +132,79 @@ const GrVkBuffer::Resource* GrVkBuffer::Create(GrVkGpu* gpu, const Desc& desc) {
return nullptr;
}
if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
buffer,
get_buffer_usage(desc.fType, desc.fDynamic),
&alloc)) {
if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, allocUsage, &alloc)) {
VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
return nullptr;
}
const GrVkBuffer::Resource* resource = new GrVkBuffer::Resource(gpu, buffer, alloc);
if (!resource) {
VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
GrVkMemory::FreeBufferMemory(gpu, alloc);
return nullptr;
}
return resource;
}
void GrVkBuffer::addMemoryBarrier(const GrVkGpu* gpu,
VkAccessFlags srcAccessMask,
VkAccessFlags dstAccesMask,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
bool byRegion) const {
VkBufferMemoryBarrier bufferMemoryBarrier = {
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
nullptr, // pNext
srcAccessMask, // srcAccessMask
dstAccesMask, // dstAccessMask
VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
this->buffer(), // buffer
0, // offset
fDesc.fSizeInBytes, // size
};
// TODO: restrict to area of buffer we're interested in
gpu->addBufferMemoryBarrier(this->resource(), srcStageMask, dstStageMask, byRegion,
&bufferMemoryBarrier);
}
void GrVkBuffer::Resource::freeGPUData() const {
SkASSERT(fBuffer);
SkASSERT(fAlloc.fMemory);
VK_CALL(fGpu, DestroyBuffer(fGpu->device(), fBuffer, nullptr));
GrVkMemory::FreeBufferMemory(fGpu, fAlloc);
}
void GrVkBuffer::vkRelease(GrVkGpu* gpu) {
VALIDATE();
if (this->vkIsMapped()) {
// Only unmap resources that are not backed by a CPU buffer. Otherwise we may end up
// creating a new transfer buffer resources that sends us into a spiral of creating and
// destroying resources if we are at our budget limit. Also there really isn't a need to
// upload the CPU data if we are deleting this buffer.
if (fDesc.fDynamic) {
this->vkUnmap(gpu);
// If this is a uniform buffer we must setup a descriptor set
const GrVkDescriptorSet* uniformDescSet = nullptr;
if (bufferType == GrGpuBufferType::kUniform) {
uniformDescSet = make_uniform_desc_set(gpu, buffer, size);
if (!uniformDescSet) {
VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
GrVkMemory::FreeBufferMemory(gpu, alloc);
return nullptr;
}
}
fResource->recycle();
fResource = nullptr;
if (!fDesc.fDynamic) {
delete[] (unsigned char*)fMapPtr;
}
fMapPtr = nullptr;
VALIDATE();
return sk_sp<GrVkBuffer>(new GrVkBuffer(gpu, size, bufferType, accessPattern, buffer, alloc,
uniformDescSet));
}
VkAccessFlags buffer_type_to_access_flags(GrVkBuffer::Type type) {
void GrVkBuffer::vkMap(size_t size) {
SkASSERT(!fMapPtr);
if (this->isVkMappable()) {
// Not every buffer will use command buffer usage refs and instead the command buffer just
// holds normal refs. Systems higher up in Ganesh should be making sure not to reuse a
// buffer that currently has a ref held by something else. However, we do need to make sure
// there isn't a buffer with just a command buffer usage that is trying to be mapped.
SkASSERT(this->internalHasNoCommandBufferUsages());
SkASSERT(fAlloc.fSize > 0);
SkASSERT(fAlloc.fSize >= size);
fMapPtr = GrVkMemory::MapAlloc(this->getVkGpu(), fAlloc);
if (fMapPtr && this->intendedType() == GrGpuBufferType::kXferGpuToCpu) {
GrVkMemory::InvalidateMappedAlloc(this->getVkGpu(), fAlloc, 0, size);
}
}
}
void GrVkBuffer::vkUnmap(size_t size) {
SkASSERT(fMapPtr && this->isVkMappable());
SkASSERT(fAlloc.fSize > 0);
SkASSERT(fAlloc.fSize >= size);
GrVkGpu* gpu = this->getVkGpu();
GrVkMemory::FlushMappedAlloc(gpu, fAlloc, 0, size);
GrVkMemory::UnmapAlloc(gpu, fAlloc);
}
static VkAccessFlags buffer_type_to_access_flags(GrGpuBufferType type) {
switch (type) {
case GrVkBuffer::kIndex_Type:
case GrGpuBufferType::kIndex:
return VK_ACCESS_INDEX_READ_BIT;
case GrVkBuffer::kVertex_Type:
case GrGpuBufferType::kVertex:
return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
default:
// This helper is only called for static buffers so we should only ever see index or
// vertex buffers types
SkASSERT(false);
return 0;
SkUNREACHABLE;
}
}
void GrVkBuffer::internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer) {
VALIDATE();
SkASSERT(!this->vkIsMapped());
if (!fResource->unique()) {
SkASSERT(fDesc.fDynamic);
// in use by the command buffer, so we need to create a new one
fResource->recycle();
fResource = this->createResource(gpu, fDesc);
if (createdNewBuffer) {
*createdNewBuffer = true;
}
}
if (fDesc.fDynamic) {
const GrVkAlloc& alloc = this->alloc();
SkASSERT(alloc.fSize > 0);
SkASSERT(alloc.fSize >= size);
SkASSERT(0 == fOffset);
fMapPtr = GrVkMemory::MapAlloc(gpu, alloc);
} else {
SkASSERT(!fMapPtr);
fMapPtr = new unsigned char[this->size()];
}
VALIDATE();
}
void GrVkBuffer::copyCpuDataToGpuBuffer(GrVkGpu* gpu, const void* src, size_t size) {
void GrVkBuffer::copyCpuDataToGpuBuffer(const void* src, size_t size) {
SkASSERT(src);
GrVkGpu* gpu = this->getVkGpu();
// We should never call this method in protected contexts.
SkASSERT(!gpu->protectedContext());
// The vulkan api restricts the use of vkCmdUpdateBuffer to updates that are less than or equal
// to 65536 bytes and a size that is 4 byte aligned.
if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
gpu->updateBuffer(this, src, this->offset(), size);
gpu->updateBuffer(sk_ref_sp(this), src, /*offset=*/0, size);
} else {
GrResourceProvider* resourceProvider = gpu->getContext()->priv().resourceProvider();
sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
@@ -215,66 +213,116 @@ void GrVkBuffer::copyCpuDataToGpuBuffer(GrVkGpu* gpu, const void* src, size_t si
return;
}
gpu->copyBuffer(std::move(transferBuffer), this, 0, this->offset(), size);
gpu->copyBuffer(std::move(transferBuffer), sk_ref_sp(this), /*srcOffset=*/0,
/*dstOffset=*/0, size);
}
this->addMemoryBarrier(gpu,
VK_ACCESS_TRANSFER_WRITE_BIT,
buffer_type_to_access_flags(fDesc.fType),
this->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
buffer_type_to_access_flags(this->intendedType()),
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
false);
/*byRegion=*/false);
}
void GrVkBuffer::internalUnmap(GrVkGpu* gpu, size_t size) {
VALIDATE();
SkASSERT(this->vkIsMapped());
void GrVkBuffer::addMemoryBarrier(VkAccessFlags srcAccessMask,
VkAccessFlags dstAccesMask,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
bool byRegion) const {
VkBufferMemoryBarrier bufferMemoryBarrier = {
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
nullptr, // pNext
srcAccessMask, // srcAccessMask
dstAccesMask, // dstAccessMask
VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
fBuffer, // buffer
0, // offset
this->size(), // size
};
if (fDesc.fDynamic) {
const GrVkAlloc& alloc = this->alloc();
SkASSERT(alloc.fSize > 0);
SkASSERT(alloc.fSize >= size);
// We currently don't use fOffset
SkASSERT(0 == fOffset);
// TODO: restrict to area of buffer we're interested in
this->getVkGpu()->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion,
&bufferMemoryBarrier);
}
GrVkMemory::FlushMappedAlloc(gpu, alloc, 0, size);
GrVkMemory::UnmapAlloc(gpu, alloc);
void GrVkBuffer::vkRelease() {
if (this->wasDestroyed()) {
return;
}
if (fMapPtr) {
this->vkUnmap(this->size());
fMapPtr = nullptr;
} else {
SkASSERT(fMapPtr);
this->copyCpuDataToGpuBuffer(gpu, fMapPtr, size);
}
if (fUniformDescriptorSet) {
fUniformDescriptorSet->recycle();
fUniformDescriptorSet = nullptr;
}
SkASSERT(fBuffer);
SkASSERT(fAlloc.fMemory && fAlloc.fBackendMemory);
VK_CALL(this->getVkGpu(), DestroyBuffer(this->getVkGpu()->device(), fBuffer, nullptr));
fBuffer = VK_NULL_HANDLE;
GrVkMemory::FreeBufferMemory(this->getVkGpu(), fAlloc);
fAlloc.fMemory = VK_NULL_HANDLE;
fAlloc.fBackendMemory = 0;
}
void GrVkBuffer::onRelease() {
this->vkRelease();
this->GrGpuBuffer::onRelease();
}
void GrVkBuffer::onAbandon() {
this->vkRelease();
this->GrGpuBuffer::onAbandon();
}
void GrVkBuffer::onMap() {
if (!this->wasDestroyed()) {
this->vkMap(this->size());
}
}
bool GrVkBuffer::vkIsMapped() const {
VALIDATE();
return SkToBool(fMapPtr);
void GrVkBuffer::onUnmap() {
if (!this->wasDestroyed()) {
this->vkUnmap(this->size());
}
}
bool GrVkBuffer::vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
bool* createdNewBuffer) {
if (srcSizeInBytes > fDesc.fSizeInBytes) {
bool GrVkBuffer::onUpdateData(const void* src, size_t srcSizeInBytes) {
if (this->wasDestroyed()) {
return false;
}
if (fDesc.fDynamic) {
this->internalMap(gpu, srcSizeInBytes, createdNewBuffer);
if (srcSizeInBytes > this->size()) {
return false;
}
if (this->isVkMappable()) {
this->vkMap(srcSizeInBytes);
if (!fMapPtr) {
return false;
}
memcpy(fMapPtr, src, srcSizeInBytes);
this->internalUnmap(gpu, srcSizeInBytes);
this->vkUnmap(srcSizeInBytes);
fMapPtr = nullptr;
} else {
this->copyCpuDataToGpuBuffer(gpu, src, srcSizeInBytes);
this->copyCpuDataToGpuBuffer(src, srcSizeInBytes);
}
return true;
}
void GrVkBuffer::validate() const {
SkASSERT(!fResource || kVertex_Type == fDesc.fType || kIndex_Type == fDesc.fType ||
kIndirect_Type == fDesc.fType || kTexel_Type == fDesc.fType ||
kCopyRead_Type == fDesc.fType || kCopyWrite_Type == fDesc.fType ||
kUniform_Type == fDesc.fType);
GrVkGpu* GrVkBuffer::getVkGpu() const {
SkASSERT(!this->wasDestroyed());
return static_cast<GrVkGpu*>(this->getGpu());
}
const VkDescriptorSet* GrVkBuffer::uniformDescriptorSet() const {
SkASSERT(fUniformDescriptorSet);
return fUniformDescriptorSet->descriptorSet();
}


@@ -1,5 +1,5 @@
/*
* Copyright 2015 Google Inc.
* Copyright 2021 Google LLC
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
@@ -9,114 +9,64 @@
#define GrVkBuffer_DEFINED
#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/vk/GrVkManagedResource.h"
#include "src/gpu/GrGpuBuffer.h"
class GrVkDescriptorSet;
class GrVkGpu;
/**
* This class serves as the base of GrVk*Buffer classes. It was written to avoid code
* duplication in those classes.
*/
class GrVkBuffer : public SkNoncopyable {
class GrVkBuffer : public GrGpuBuffer {
public:
virtual ~GrVkBuffer() {
// release should have been called by the owner of this object.
SkASSERT(!fResource);
delete [] (unsigned char*)fMapPtr;
}
static sk_sp<GrVkBuffer> Make(GrVkGpu* gpu,
size_t size,
GrGpuBufferType bufferType,
GrAccessPattern accessPattern);
VkBuffer buffer() const { return fResource->fBuffer; }
const GrVkAlloc& alloc() const { return fResource->fAlloc; }
const GrVkRecycledResource* resource() const { return fResource; }
size_t size() const { return fDesc.fSizeInBytes; }
VkDeviceSize offset() const { return fOffset; }
VkBuffer vkBuffer() const { return fBuffer; }
void addMemoryBarrier(const GrVkGpu* gpu,
VkAccessFlags srcAccessMask,
VkAccessFlags dstAccessMask,
void addMemoryBarrier(VkAccessFlags srcAccessMask,
VkAccessFlags dstAccesMask,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
bool byRegion) const;
enum Type {
kVertex_Type,
kIndex_Type,
kIndirect_Type,
kUniform_Type,
kTexel_Type,
kCopyRead_Type,
kCopyWrite_Type,
};
protected:
struct Desc {
size_t fSizeInBytes;
Type fType; // vertex buffer, index buffer, etc.
bool fDynamic;
};
class Resource : public GrVkRecycledResource {
public:
Resource(GrVkGpu* gpu, VkBuffer buf, const GrVkAlloc& alloc)
: GrVkRecycledResource(gpu), fBuffer(buf), fAlloc(alloc) {}
#ifdef SK_TRACE_MANAGED_RESOURCES
void dumpInfo() const override {
SkDebugf("GrVkBuffer: %d (%d refs)\n", fBuffer, this->getRefCnt());
}
#endif
VkBuffer fBuffer;
GrVkAlloc fAlloc;
protected:
void freeGPUData() const override;
private:
void onRecycle() const override { this->unref(); }
using INHERITED = GrVkRecycledResource;
};
// convenience routine for raw buffer creation
static const Resource* Create(GrVkGpu* gpu,
const Desc& descriptor);
GrVkBuffer(const Desc& desc, const GrVkBuffer::Resource* resource)
: fDesc(desc), fResource(resource), fOffset(0), fMapPtr(nullptr) {
}
void* vkMap(GrVkGpu* gpu) {
this->internalMap(gpu, fDesc.fSizeInBytes);
return fMapPtr;
}
void vkUnmap(GrVkGpu* gpu) { this->internalUnmap(gpu, this->size()); }
// If the caller passes in a non null createdNewBuffer, this function will set the bool to true
// if it creates a new VkBuffer to upload the data to.
bool vkUpdateData(GrVkGpu* gpu, const void* src, size_t srcSizeInBytes,
bool* createdNewBuffer = nullptr);
void vkRelease(GrVkGpu* gpu);
// If the buffer is a uniform buffer, return the descriptor set for that buffer. It is not valid
// to call this on non uniform buffers.
const VkDescriptorSet* uniformDescriptorSet() const;
private:
virtual const Resource* createResource(GrVkGpu* gpu,
const Desc& descriptor) {
return Create(gpu, descriptor);
}
GrVkBuffer(GrVkGpu* gpu,
size_t sizeInBytes,
GrGpuBufferType bufferType,
GrAccessPattern accessPattern,
VkBuffer buffer,
const GrVkAlloc& alloc,
const GrVkDescriptorSet* uniformDescriptorSet);
void internalMap(GrVkGpu* gpu, size_t size, bool* createdNewBuffer = nullptr);
void internalUnmap(GrVkGpu* gpu, size_t size);
void copyCpuDataToGpuBuffer(GrVkGpu* gpu, const void* srcData, size_t size);
bool isVkMappable() const { return fAlloc.fFlags & GrVkAlloc::kMappable_Flag; }
void validate() const;
bool vkIsMapped() const;
bool vkIsMapped() const { return SkToBool(fMapPtr); }
void vkMap(size_t size);
void vkUnmap(size_t size);
void copyCpuDataToGpuBuffer(const void* srcData, size_t size);
Desc fDesc;
const Resource* fResource;
VkDeviceSize fOffset;
void* fMapPtr;
using INHERITED = SkNoncopyable;
void onMap() override;
void onUnmap() override;
bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
void vkRelease();
void onAbandon() override;
void onRelease() override;
GrVkGpu* getVkGpu() const;
VkBuffer fBuffer;
GrVkAlloc fAlloc;
const GrVkDescriptorSet* fUniformDescriptorSet;
using INHERITED = GrGpuBuffer;
};
#endif
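Beyond the wholesale deletion of the old GrVkBuffer2.cpp/.h sources below, the remaining files in this commit are call-site updates that follow one pattern: code that previously downcast a GrGpuBuffer to GrVkBuffer2 now downcasts to the renamed GrVkBuffer. A minimal sketch of that pattern (editorial, not part of the diff; "buffer" is assumed to be an sk_sp<GrGpuBuffer> created by the Vulkan backend):

// Sketch of the recurring call-site change.
const GrVkBuffer* vkBuffer = static_cast<const GrVkBuffer*>(buffer.get());
VkBuffer handle = vkBuffer->vkBuffer();   // raw VkBuffer for vkCmd* recording
SkASSERT(handle != VK_NULL_HANDLE);
// Uniform buffers additionally expose a pre-written descriptor set:
//     const VkDescriptorSet* ds = vkBuffer->uniformDescriptorSet();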


@@ -1,328 +0,0 @@
/*
* Copyright 2021 Google LLC
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#include "src/gpu/vk/GrVkBuffer2.h"
#include "include/gpu/GrDirectContext.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/GrResourceProvider.h"
#include "src/gpu/vk/GrVkDescriptorSet.h"
#include "src/gpu/vk/GrVkGpu.h"
#include "src/gpu/vk/GrVkMemory.h"
#include "src/gpu/vk/GrVkUtil.h"
#define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
GrVkBuffer2::GrVkBuffer2(GrVkGpu* gpu,
size_t sizeInBytes,
GrGpuBufferType bufferType,
GrAccessPattern accessPattern,
VkBuffer buffer,
const GrVkAlloc& alloc,
const GrVkDescriptorSet* uniformDescriptorSet)
: GrGpuBuffer(gpu, sizeInBytes, bufferType, accessPattern)
, fBuffer(buffer)
, fAlloc(alloc)
, fUniformDescriptorSet(uniformDescriptorSet) {
// We always require dynamic buffers to be mappable
SkASSERT(accessPattern != kDynamic_GrAccessPattern || this->isVkMappable());
SkASSERT(bufferType != GrGpuBufferType::kUniform || uniformDescriptorSet);
this->registerWithCache(SkBudgeted::kYes);
}
static const GrVkDescriptorSet* make_uniform_desc_set(GrVkGpu* gpu, VkBuffer buffer, size_t size) {
const GrVkDescriptorSet* descriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
if (!descriptorSet) {
return nullptr;
}
VkDescriptorBufferInfo bufferInfo;
memset(&bufferInfo, 0, sizeof(VkDescriptorBufferInfo));
bufferInfo.buffer = buffer;
bufferInfo.offset = 0;
bufferInfo.range = size;
VkWriteDescriptorSet descriptorWrite;
memset(&descriptorWrite, 0, sizeof(VkWriteDescriptorSet));
descriptorWrite.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrite.pNext = nullptr;
descriptorWrite.dstSet = *descriptorSet->descriptorSet();
descriptorWrite.dstBinding = GrVkUniformHandler::kUniformBinding;
descriptorWrite.dstArrayElement = 0;
descriptorWrite.descriptorCount = 1;
descriptorWrite.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrite.pImageInfo = nullptr;
descriptorWrite.pBufferInfo = &bufferInfo;
descriptorWrite.pTexelBufferView = nullptr;
GR_VK_CALL(gpu->vkInterface(),
UpdateDescriptorSets(gpu->device(), 1, &descriptorWrite, 0, nullptr));
return descriptorSet;
}
sk_sp<GrVkBuffer2> GrVkBuffer2::Make(GrVkGpu* gpu,
size_t size,
GrGpuBufferType bufferType,
GrAccessPattern accessPattern) {
VkBuffer buffer;
GrVkAlloc alloc;
// The only time we don't require mappable buffers is when we have a static access pattern and
// we're on a device where gpu only memory has faster reads on the gpu than memory that is also
// mappable on the cpu. Protected memory always uses mappable buffers.
bool requiresMappable = gpu->protectedContext() ||
accessPattern == kDynamic_GrAccessPattern ||
accessPattern == kStream_GrAccessPattern ||
!gpu->vkCaps().gpuOnlyBuffersMorePerformant();
using BufferUsage = GrVkMemoryAllocator::BufferUsage;
BufferUsage allocUsage;
// create the buffer object
VkBufferCreateInfo bufInfo;
memset(&bufInfo, 0, sizeof(VkBufferCreateInfo));
bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufInfo.flags = 0;
bufInfo.size = size;
switch (bufferType) {
case GrGpuBufferType::kVertex:
bufInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
break;
case GrGpuBufferType::kIndex:
bufInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
break;
case GrGpuBufferType::kDrawIndirect:
bufInfo.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
allocUsage = requiresMappable ? BufferUsage::kCpuWritesGpuReads : BufferUsage::kGpuOnly;
break;
case GrGpuBufferType::kUniform:
bufInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
allocUsage = BufferUsage::kCpuWritesGpuReads;
break;
case GrGpuBufferType::kXferCpuToGpu:
bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
allocUsage = BufferUsage::kCpuWritesGpuReads;
break;
case GrGpuBufferType::kXferGpuToCpu:
bufInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
allocUsage = BufferUsage::kGpuWritesCpuReads;
break;
}
// We may not always get a mappable buffer for non dynamic access buffers. Thus we set the
// transfer dst usage bit in case we need to do a copy to write data.
// TODO: It doesn't really hurt setting this extra usage flag, but maybe we can narrow the scope
// of buffers we set it on more than just not dynamic.
if (!requiresMappable) {
bufInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
}
bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
bufInfo.queueFamilyIndexCount = 0;
bufInfo.pQueueFamilyIndices = nullptr;
VkResult err;
err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
if (err) {
return nullptr;
}
if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, allocUsage, &alloc)) {
VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
return nullptr;
}
// If this is a uniform buffer we must setup a descriptor set
const GrVkDescriptorSet* uniformDescSet = nullptr;
if (bufferType == GrGpuBufferType::kUniform) {
uniformDescSet = make_uniform_desc_set(gpu, buffer, size);
if (!uniformDescSet) {
VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
GrVkMemory::FreeBufferMemory(gpu, alloc);
return nullptr;
}
}
return sk_sp<GrVkBuffer2>(new GrVkBuffer2(gpu, size, bufferType, accessPattern, buffer, alloc,
uniformDescSet));
}
void GrVkBuffer2::vkMap(size_t size) {
SkASSERT(!fMapPtr);
if (this->isVkMappable()) {
// Not every buffer will use command buffer usage refs and instead the command buffer just
// holds normal refs. Systems higher up in Ganesh should be making sure not to reuse a
// buffer that currently has a ref held by something else. However, we do need to make sure
// there isn't a buffer with just a command buffer usage that is trying to be mapped.
SkASSERT(this->internalHasNoCommandBufferUsages());
SkASSERT(fAlloc.fSize > 0);
SkASSERT(fAlloc.fSize >= size);
fMapPtr = GrVkMemory::MapAlloc(this->getVkGpu(), fAlloc);
if (fMapPtr && this->intendedType() == GrGpuBufferType::kXferGpuToCpu) {
GrVkMemory::InvalidateMappedAlloc(this->getVkGpu(), fAlloc, 0, size);
}
}
}
void GrVkBuffer2::vkUnmap(size_t size) {
SkASSERT(fMapPtr && this->isVkMappable());
SkASSERT(fAlloc.fSize > 0);
SkASSERT(fAlloc.fSize >= size);
GrVkGpu* gpu = this->getVkGpu();
GrVkMemory::FlushMappedAlloc(gpu, fAlloc, 0, size);
GrVkMemory::UnmapAlloc(gpu, fAlloc);
}
static VkAccessFlags buffer_type_to_access_flags(GrGpuBufferType type) {
switch (type) {
case GrGpuBufferType::kIndex:
return VK_ACCESS_INDEX_READ_BIT;
case GrGpuBufferType::kVertex:
return VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT;
default:
// This helper is only called for static buffers so we should only ever see index or
// vertex buffers types
SkUNREACHABLE;
}
}
void GrVkBuffer2::copyCpuDataToGpuBuffer(const void* src, size_t size) {
SkASSERT(src);
GrVkGpu* gpu = this->getVkGpu();
// We should never call this method in protected contexts.
SkASSERT(!gpu->protectedContext());
// The vulkan api restricts the use of vkCmdUpdateBuffer to updates that are less than or equal
// to 65536 bytes and a size that is 4 byte aligned.
if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
gpu->updateBuffer(sk_ref_sp(this), src, /*offset=*/0, size);
} else {
GrResourceProvider* resourceProvider = gpu->getContext()->priv().resourceProvider();
sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
size, GrGpuBufferType::kXferCpuToGpu, kDynamic_GrAccessPattern, src);
if (!transferBuffer) {
return;
}
gpu->copyBuffer(std::move(transferBuffer), sk_ref_sp(this), /*srcOffset=*/0,
/*dstOffset=*/0, size);
}
this->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
buffer_type_to_access_flags(this->intendedType()),
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
/*byRegion=*/false);
}
void GrVkBuffer2::addMemoryBarrier(VkAccessFlags srcAccessMask,
VkAccessFlags dstAccesMask,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
bool byRegion) const {
VkBufferMemoryBarrier bufferMemoryBarrier = {
VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
nullptr, // pNext
srcAccessMask, // srcAccessMask
dstAccesMask, // dstAccessMask
VK_QUEUE_FAMILY_IGNORED, // srcQueueFamilyIndex
VK_QUEUE_FAMILY_IGNORED, // dstQueueFamilyIndex
fBuffer, // buffer
0, // offset
this->size(), // size
};
// TODO: restrict to area of buffer we're interested in
this->getVkGpu()->addBufferMemoryBarrier(srcStageMask, dstStageMask, byRegion,
&bufferMemoryBarrier);
}
void GrVkBuffer2::vkRelease() {
if (this->wasDestroyed()) {
return;
}
if (fMapPtr) {
this->vkUnmap(this->size());
fMapPtr = nullptr;
}
if (fUniformDescriptorSet) {
fUniformDescriptorSet->recycle();
fUniformDescriptorSet = nullptr;
}
SkASSERT(fBuffer);
SkASSERT(fAlloc.fMemory && fAlloc.fBackendMemory);
VK_CALL(this->getVkGpu(), DestroyBuffer(this->getVkGpu()->device(), fBuffer, nullptr));
fBuffer = VK_NULL_HANDLE;
GrVkMemory::FreeBufferMemory(this->getVkGpu(), fAlloc);
fAlloc.fMemory = VK_NULL_HANDLE;
fAlloc.fBackendMemory = 0;
}
void GrVkBuffer2::onRelease() {
this->vkRelease();
this->GrGpuBuffer::onRelease();
}
void GrVkBuffer2::onAbandon() {
this->vkRelease();
this->GrGpuBuffer::onAbandon();
}
void GrVkBuffer2::onMap() {
if (!this->wasDestroyed()) {
this->vkMap(this->size());
}
}
void GrVkBuffer2::onUnmap() {
if (!this->wasDestroyed()) {
this->vkUnmap(this->size());
}
}
bool GrVkBuffer2::onUpdateData(const void* src, size_t srcSizeInBytes) {
if (this->wasDestroyed()) {
return false;
}
if (srcSizeInBytes > this->size()) {
return false;
}
if (this->isVkMappable()) {
this->vkMap(srcSizeInBytes);
if (!fMapPtr) {
return false;
}
memcpy(fMapPtr, src, srcSizeInBytes);
this->vkUnmap(srcSizeInBytes);
fMapPtr = nullptr;
} else {
this->copyCpuDataToGpuBuffer(src, srcSizeInBytes);
}
return true;
}
GrVkGpu* GrVkBuffer2::getVkGpu() const {
SkASSERT(!this->wasDestroyed());
return static_cast<GrVkGpu*>(this->getGpu());
}
const VkDescriptorSet* GrVkBuffer2::uniformDescriptorSet() const {
SkASSERT(fUniformDescriptorSet);
return fUniformDescriptorSet->descriptorSet();
}


@@ -1,72 +0,0 @@
/*
* Copyright 2021 Google LLC
*
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file.
*/
#ifndef GrVkBuffer2_DEFINED
#define GrVkBuffer2_DEFINED
#include "include/gpu/vk/GrVkTypes.h"
#include "src/gpu/GrGpuBuffer.h"
class GrVkDescriptorSet;
class GrVkGpu;
class GrVkBuffer2 : public GrGpuBuffer {
public:
static sk_sp<GrVkBuffer2> Make(GrVkGpu* gpu,
size_t size,
GrGpuBufferType bufferType,
GrAccessPattern accessPattern);
VkBuffer vkBuffer() const { return fBuffer; }
void addMemoryBarrier(VkAccessFlags srcAccessMask,
VkAccessFlags dstAccesMask,
VkPipelineStageFlags srcStageMask,
VkPipelineStageFlags dstStageMask,
bool byRegion) const;
// If the buffer is a uniform buffer, return the descriptor set for that buffer. It is not valid
// to call this on non uniform buffers.
const VkDescriptorSet* uniformDescriptorSet() const;
private:
GrVkBuffer2(GrVkGpu* gpu,
size_t sizeInBytes,
GrGpuBufferType bufferType,
GrAccessPattern accessPattern,
VkBuffer buffer,
const GrVkAlloc& alloc,
const GrVkDescriptorSet* uniformDescriptorSet);
bool isVkMappable() const { return fAlloc.fFlags & GrVkAlloc::kMappable_Flag; }
bool vkIsMapped() const { return SkToBool(fMapPtr); }
void vkMap(size_t size);
void vkUnmap(size_t size);
void copyCpuDataToGpuBuffer(const void* srcData, size_t size);
void onMap() override;
void onUnmap() override;
bool onUpdateData(const void* src, size_t srcSizeInBytes) override;
void vkRelease();
void onAbandon() override;
void onRelease() override;
GrVkGpu* getVkGpu() const;
VkBuffer fBuffer;
GrVkAlloc fAlloc;
const GrVkDescriptorSet* fUniformDescriptorSet;
using INHERITED = GrGpuBuffer;
};
#endif


@@ -8,7 +8,7 @@
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "include/core/SkRect.h"
#include "src/gpu/vk/GrVkBuffer2.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkFramebuffer.h"
#include "src/gpu/vk/GrVkGpu.h"
@@ -181,7 +181,7 @@ void GrVkCommandBuffer::submitPipelineBarriers(const GrVkGpu* gpu, bool forSelfD
void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
sk_sp<const GrBuffer> buffer) {
VkBuffer vkBuffer = static_cast<const GrVkBuffer2*>(buffer.get())->vkBuffer();
VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
SkASSERT(VK_NULL_HANDLE != vkBuffer);
SkASSERT(binding < kMaxInputBuffers);
// TODO: once vbuffer->offset() no longer always returns 0, we will need to track the offset
@@ -199,7 +199,7 @@ void GrVkCommandBuffer::bindInputBuffer(GrVkGpu* gpu, uint32_t binding,
}
void GrVkCommandBuffer::bindIndexBuffer(GrVkGpu* gpu, sk_sp<const GrBuffer> buffer) {
VkBuffer vkBuffer = static_cast<const GrVkBuffer2*>(buffer.get())->vkBuffer();
VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(buffer.get())->vkBuffer();
SkASSERT(VK_NULL_HANDLE != vkBuffer);
// TODO: once ibuffer->offset() no longer always returns 0, we will need to track the offset
// to know if we can skip binding or not.
@@ -325,7 +325,7 @@ void GrVkCommandBuffer::drawIndirect(const GrVkGpu* gpu,
SkASSERT(fActiveRenderPass);
SkASSERT(!indirectBuffer->isCpuBuffer());
this->addingWork(gpu);
VkBuffer vkBuffer = static_cast<const GrVkBuffer2*>(indirectBuffer.get())->vkBuffer();
VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
GR_VK_CALL(gpu->vkInterface(), CmdDrawIndirect(fCmdBuffer,
vkBuffer,
offset,
@@ -343,7 +343,7 @@ void GrVkCommandBuffer::drawIndexedIndirect(const GrVkGpu* gpu,
SkASSERT(fActiveRenderPass);
SkASSERT(!indirectBuffer->isCpuBuffer());
this->addingWork(gpu);
VkBuffer vkBuffer = static_cast<const GrVkBuffer2*>(indirectBuffer.get())->vkBuffer();
VkBuffer vkBuffer = static_cast<const GrVkBuffer*>(indirectBuffer.get())->vkBuffer();
GR_VK_CALL(gpu->vkInterface(), CmdDrawIndexedIndirect(fCmdBuffer,
vkBuffer,
offset,
@@ -765,7 +765,7 @@ void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
SkASSERT(fIsActive);
SkASSERT(!fActiveRenderPass);
this->addingWork(gpu);
GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(dstBuffer.get());
GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(dstBuffer.get());
GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
srcImage->image(),
srcLayout,
@@ -795,36 +795,6 @@ void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
this->addResource(dstImage->resource());
}
void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
sk_sp<GrGpuBuffer> srcBuffer,
GrVkBuffer* dstBuffer,
uint32_t regionCount,
const VkBufferCopy* regions) {
SkASSERT(fIsActive);
SkASSERT(!fActiveRenderPass);
this->addingWork(gpu);
#ifdef SK_DEBUG
for (uint32_t i = 0; i < regionCount; ++i) {
const VkBufferCopy& region = regions[i];
SkASSERT(region.size > 0);
SkASSERT(region.srcOffset < srcBuffer->size());
SkASSERT(region.dstOffset < dstBuffer->size());
SkASSERT(region.srcOffset + region.size <= srcBuffer->size());
SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
}
#endif
const GrVkBuffer2* srcVk = static_cast<GrVkBuffer2*>(srcBuffer.get());
GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
srcVk->vkBuffer(),
dstBuffer->buffer(),
regionCount,
regions));
this->addGrBuffer(std::move(srcBuffer));
this->addResource(dstBuffer->resource());
}
void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
sk_sp<GrGpuBuffer> srcBuffer,
sk_sp<GrGpuBuffer> dstBuffer,
@@ -844,8 +814,8 @@ void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
}
#endif
const GrVkBuffer2* srcVk = static_cast<GrVkBuffer2*>(srcBuffer.get());
const GrVkBuffer2* dstVk = static_cast<GrVkBuffer2*>(dstBuffer.get());
const GrVkBuffer* srcVk = static_cast<GrVkBuffer*>(srcBuffer.get());
const GrVkBuffer* dstVk = static_cast<GrVkBuffer*>(dstBuffer.get());
GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
srcVk->vkBuffer(),
@@ -857,27 +827,7 @@ void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
}
void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
GrVkBuffer* dstBuffer,
VkDeviceSize dstOffset,
VkDeviceSize dataSize,
const void* data) {
SkASSERT(fIsActive);
SkASSERT(!fActiveRenderPass);
SkASSERT(0 == (dstOffset & 0x03)); // four byte aligned
// TODO: handle larger transfer sizes
SkASSERT(dataSize <= 65536);
SkASSERT(0 == (dataSize & 0x03)); // four byte aligned
this->addingWork(gpu);
this->addResource(dstBuffer->resource());
GR_VK_CALL(gpu->vkInterface(), CmdUpdateBuffer(fCmdBuffer,
dstBuffer->buffer(),
dstOffset,
dataSize,
(const uint32_t*) data));
}
void GrVkPrimaryCommandBuffer::updateBuffer(GrVkGpu* gpu,
sk_sp<GrVkBuffer2> dstBuffer,
sk_sp<GrVkBuffer> dstBuffer,
VkDeviceSize dstOffset,
VkDeviceSize dataSize,
const void* data) {


@@ -15,7 +15,6 @@
#include "src/gpu/vk/GrVkSemaphore.h"
#include "src/gpu/vk/GrVkUtil.h"
class GrVkBuffer;
class GrVkFramebuffer;
class GrVkImage;
class GrVkPipeline;
@@ -287,12 +286,6 @@ public:
uint32_t copyRegionCount,
const VkBufferImageCopy* copyRegions);
void copyBuffer(GrVkGpu* gpu,
sk_sp<GrGpuBuffer> srcBuffer,
GrVkBuffer* dstBuffer,
uint32_t regionCount,
const VkBufferCopy* regions);
void copyBuffer(GrVkGpu* gpu,
sk_sp<GrGpuBuffer> srcBuffer,
sk_sp<GrGpuBuffer> dstBuffer,
@@ -300,13 +293,7 @@ public:
const VkBufferCopy* regions);
void updateBuffer(GrVkGpu* gpu,
GrVkBuffer* dstBuffer,
VkDeviceSize dstOffset,
VkDeviceSize dataSize,
const void* data);
void updateBuffer(GrVkGpu* gpu,
sk_sp<GrVkBuffer2> dstBuffer,
sk_sp<GrVkBuffer> dstBuffer,
VkDeviceSize dstOffset,
VkDeviceSize dataSize,
const void* data);


@@ -28,7 +28,7 @@
#include "src/gpu/SkGpuDevice.h"
#include "src/gpu/SkGr.h"
#include "src/gpu/vk/GrVkAMDMemoryAllocator.h"
#include "src/gpu/vk/GrVkBuffer2.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkImage.h"
@@ -411,7 +411,7 @@ sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
break;
}
#endif
sk_sp<GrGpuBuffer> buff = GrVkBuffer2::Make(this, size, type, accessPattern);
sk_sp<GrGpuBuffer> buff = GrVkBuffer::Make(this, size, type, accessPattern);
if (data && buff) {
buff->updateData(data, size);
@@ -524,7 +524,7 @@ bool GrVkGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int widt
VK_PIPELINE_STAGE_TRANSFER_BIT,
false);
const GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(transferBuffer.get());
const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
// Copy the buffer to the image.
this->currentCommandBuffer()->copyBufferToImage(this,
@@ -592,7 +592,7 @@ bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int wi
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
transferBuffer, 1, &region);
GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(transferBuffer.get());
GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
// Make sure the copy to buffer has finished.
vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
VK_ACCESS_HOST_READ_BIT,
@@ -941,7 +941,7 @@ bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int widt
// the buffer is coming from the staging manager and the staging manager will make sure the
// command buffer has a ref on the buffer. This avoids having to add and remove a ref for every
// upload in the frame.
GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(slice.fBuffer);
GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
this->currentCommandBuffer()->copyBufferToImage(this,
vkBuffer->vkBuffer(),
uploadTexture,
@@ -1016,7 +1016,7 @@ bool GrVkGpu::uploadTexDataCompressed(GrVkTexture* uploadTexture,
// the buffer is coming from the staging manager and the staging manager will make sure the
// command buffer has a ref on the buffer. This avoids having to add and remove a ref for every
// upload in the frame.
GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(slice.fBuffer);
GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
this->currentCommandBuffer()->copyBufferToImage(this,
vkBuffer->vkBuffer(),
uploadTexture,
@@ -1175,20 +1175,6 @@ sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,
////////////////////////////////////////////////////////////////////////////////
void GrVkGpu::copyBuffer(sk_sp<GrGpuBuffer> srcBuffer,
GrVkBuffer* dstBuffer,
VkDeviceSize srcOffset,
VkDeviceSize dstOffset, VkDeviceSize size) {
if (!this->currentCommandBuffer()) {
return;
}
VkBufferCopy copyRegion;
copyRegion.srcOffset = srcOffset;
copyRegion.dstOffset = dstOffset;
copyRegion.size = size;
this->currentCommandBuffer()->copyBuffer(this, std::move(srcBuffer), dstBuffer, 1, &copyRegion);
}
void GrVkGpu::copyBuffer(sk_sp<GrGpuBuffer> srcBuffer,
sk_sp<GrGpuBuffer> dstBuffer,
VkDeviceSize srcOffset,
@@ -1205,18 +1191,7 @@ void GrVkGpu::copyBuffer(sk_sp<GrGpuBuffer> srcBuffer,
&copyRegion);
}
bool GrVkGpu::updateBuffer(GrVkBuffer* buffer, const void* src,
VkDeviceSize offset, VkDeviceSize size) {
if (!this->currentCommandBuffer()) {
return false;
}
// Update the buffer
this->currentCommandBuffer()->updateBuffer(this, buffer, offset, size, src);
return true;
}
bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer2> buffer, const void* src,
bool GrVkGpu::updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src,
VkDeviceSize offset, VkDeviceSize size) {
if (!this->currentCommandBuffer()) {
return false;
@@ -1750,12 +1725,12 @@ bool GrVkGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
}
cmdBuffer->addGrSurface(texture);
const GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(slice.fBuffer);
// Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
// because we don't need the command buffer to ref the buffer here. The reason being is that
// the buffer is coming from the staging manager and the staging manager will make sure the
// command buffer has a ref on the buffer. This avoids having to add and remove a ref for
// every upload in the frame.
const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(slice.fBuffer);
cmdBuffer->copyBufferToImage(this, vkBuffer->vkBuffer(),
texture.get(), texture->currentLayout(), regions.count(),
regions.begin());
@@ -2570,7 +2545,7 @@ bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int
return false;
}
GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(transferBuffer.get());
GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(transferBuffer.get());
// Copy the image to a buffer so we can map it to cpu memory
region.bufferOffset = 0;


@@ -22,8 +22,7 @@
class GrDirectContext;
class GrPipeline;
class GrVkBuffer2;
class GrVkBufferImpl;
class GrVkBuffer;
class GrVkCommandPool;
class GrVkMemoryAllocator;
class GrVkPipeline;
@@ -170,12 +169,9 @@ public:
std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;
void copyBuffer(sk_sp<GrGpuBuffer> srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
VkDeviceSize dstOffset, VkDeviceSize size);
void copyBuffer(sk_sp<GrGpuBuffer> srcBuffer, sk_sp<GrGpuBuffer> dstBuffer,
VkDeviceSize srcOffset, VkDeviceSize dstOffset, VkDeviceSize size);
bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);
bool updateBuffer(sk_sp<GrVkBuffer2> buffer, const void* src, VkDeviceSize offset,
bool updateBuffer(sk_sp<GrVkBuffer> buffer, const void* src, VkDeviceSize offset,
VkDeviceSize size);
enum PersistentCacheKeyType : uint32_t {


@@ -10,7 +10,7 @@
#include "include/gpu/GrDirectContext.h"
#include "src/core/SkTraceEvent.h"
#include "src/gpu/GrDirectContextPriv.h"
#include "src/gpu/vk/GrVkBuffer2.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkDescriptorSet.h"
#include "src/gpu/vk/GrVkGpu.h"
@@ -199,7 +199,7 @@ bool GrVkMSAALoadManager::loadMSAAFromResolve(GrVkGpu* gpu,
if (!uniformBuffer) {
return false;
}
GrVkBuffer2* vkUniformBuffer = static_cast<GrVkBuffer2*>(uniformBuffer.get());
GrVkBuffer* vkUniformBuffer = static_cast<GrVkBuffer*>(uniformBuffer.get());
static_assert(GrVkUniformHandler::kUniformBufferDescSet < GrVkUniformHandler::kInputDescSet);
commandBuffer->bindDescriptorSets(gpu, fPipelineLayout,
GrVkUniformHandler::kUniformBufferDescSet,


@@ -11,7 +11,6 @@
#include "include/gpu/vk/GrVkMemoryAllocator.h"
#include "include/gpu/vk/GrVkTypes.h"
#include "include/private/SkTArray.h"
#include "src/gpu/vk/GrVkBuffer.h"
class GrVkGpu;


@@ -17,7 +17,7 @@
#include "src/gpu/GrPipeline.h"
#include "src/gpu/GrRenderTarget.h"
#include "src/gpu/vk/GrVkAttachment.h"
#include "src/gpu/vk/GrVkBuffer2.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkCommandPool.h"
#include "src/gpu/vk/GrVkGpu.h"


@@ -14,7 +14,7 @@
#include "src/gpu/glsl/GrGLSLFragmentProcessor.h"
#include "src/gpu/glsl/GrGLSLGeometryProcessor.h"
#include "src/gpu/glsl/GrGLSLXferProcessor.h"
#include "src/gpu/vk/GrVkBuffer2.h"
#include "src/gpu/vk/GrVkBuffer.h"
#include "src/gpu/vk/GrVkCommandBuffer.h"
#include "src/gpu/vk/GrVkDescriptorPool.h"
#include "src/gpu/vk/GrVkDescriptorSet.h"
@@ -93,7 +93,7 @@ bool GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
return false;
}
if (uniformBuffer) {
const GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(uniformBuffer.get());
const GrVkBuffer* vkBuffer = static_cast<GrVkBuffer*>(uniformBuffer.get());
static const int kUniformDSIdx = GrVkUniformHandler::kUniformBufferDescSet;
commandBuffer->bindDescriptorSets(gpu, fPipeline->layout(), kUniformDSIdx, /*setCount=*/1,
vkBuffer->uniformDescriptorSet(),


@@ -16,7 +16,7 @@
class GrPipeline;
class GrStencilSettings;
class GrVkBuffer2;
class GrVkBuffer;
class GrVkCommandBuffer;
class GrVkDescriptorPool;
class GrVkDescriptorSet;