Switch Vulkan transfer buffers to the new buffer class.
Bug: skia:11226
Change-Id: I58fc7e02dc9ea9a06e855107aad4fbbd7b98d347
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/366316
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Jim Van Verth <jvanverth@google.com>
parent f3ac2afbb6
commit 2d4c32987a
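The point of this change is ownership, not Vulkan plumbing: the dedicated GrVkTransferBuffer class is deleted, and transfer buffers are now ordinary GrGpuBuffer objects created through GrResourceProvider::createBuffer and backed by GrVkBuffer2. For readers coming to the diff from outside the Vulkan backend, the "transfer buffer" being reworked is just a Vulkan staging buffer: a VkBuffer with TRANSFER_SRC (uploads) or TRANSFER_DST (readbacks) usage bound to host-visible memory. The sketch below is illustrative only, not Skia code; it assumes an already-initialized VkDevice/VkPhysicalDevice and ignores the sub-allocation that Skia does through GrVkMemory.

// Standalone illustration (not Skia code): a minimal staging buffer at the Vulkan level.
#include <vulkan/vulkan.h>
#include <cstdint>

struct StagingBuffer {
    VkBuffer buffer = VK_NULL_HANDLE;
    VkDeviceMemory memory = VK_NULL_HANDLE;
};

static bool createStagingBuffer(VkDevice device, VkPhysicalDevice physDev,
                                VkDeviceSize size, VkBufferUsageFlags usage,
                                StagingBuffer* out) {
    VkBufferCreateInfo bufferInfo = {};
    bufferInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
    bufferInfo.size = size;
    bufferInfo.usage = usage;  // e.g. VK_BUFFER_USAGE_TRANSFER_SRC_BIT for CPU-to-GPU uploads
    bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
    if (vkCreateBuffer(device, &bufferInfo, nullptr, &out->buffer) != VK_SUCCESS) {
        return false;
    }

    VkMemoryRequirements reqs;
    vkGetBufferMemoryRequirements(device, out->buffer, &reqs);

    VkPhysicalDeviceMemoryProperties memProps;
    vkGetPhysicalDeviceMemoryProperties(physDev, &memProps);

    // Pick any memory type the buffer accepts that the host can map.
    uint32_t typeIndex = UINT32_MAX;
    for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i) {
        bool allowed = (reqs.memoryTypeBits & (1u << i)) != 0;
        bool hostVisible = (memProps.memoryTypes[i].propertyFlags &
                            VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
        if (allowed && hostVisible) {
            typeIndex = i;
            break;
        }
    }
    if (typeIndex == UINT32_MAX) {
        return false;
    }

    VkMemoryAllocateInfo allocInfo = {};
    allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
    allocInfo.allocationSize = reqs.size;
    allocInfo.memoryTypeIndex = typeIndex;
    if (vkAllocateMemory(device, &allocInfo, nullptr, &out->memory) != VK_SUCCESS) {
        return false;
    }
    return vkBindBufferMemory(device, out->buffer, out->memory, 0) == VK_SUCCESS;
}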
@@ -775,8 +775,6 @@ skia_vk_sources = [
   "$_src/gpu/vk/GrVkTexture.h",
   "$_src/gpu/vk/GrVkTextureRenderTarget.cpp",
   "$_src/gpu/vk/GrVkTextureRenderTarget.h",
-  "$_src/gpu/vk/GrVkTransferBuffer.cpp",
-  "$_src/gpu/vk/GrVkTransferBuffer.h",
   "$_src/gpu/vk/GrVkTypesPriv.cpp",
   "$_src/gpu/vk/GrVkUniformHandler.cpp",
   "$_src/gpu/vk/GrVkUniformHandler.h",
@@ -255,7 +255,9 @@ public:
     *
     * @return the buffer if successful, otherwise nullptr.
     */
-    sk_sp<GrGpuBuffer> createBuffer(size_t size, GrGpuBufferType intendedType, GrAccessPattern,
+    sk_sp<GrGpuBuffer> createBuffer(size_t size,
+                                    GrGpuBufferType intendedType,
+                                    GrAccessPattern,
                                     const void* data = nullptr);

    /**
@@ -1281,7 +1281,7 @@ GrSurfaceContext::PixelTransferResult GrSurfaceContext::transferPixels(GrColorTy
     size_t rowBytes = GrColorTypeBytesPerPixel(supportedRead.fColorType) * rect.width();
     size_t size = rowBytes * rect.height();
     auto buffer = direct->priv().resourceProvider()->createBuffer(
-            size, GrGpuBufferType::kXferGpuToCpu, GrAccessPattern::kStream_GrAccessPattern);
+            size, GrGpuBufferType::kXferGpuToCpu, GrAccessPattern::kDynamic_GrAccessPattern);
     if (!buffer) {
         return {};
     }
@@ -7,9 +7,11 @@

 #include "src/gpu/vk/GrVkBuffer.h"

+#include "include/gpu/GrDirectContext.h"
+#include "src/gpu/GrDirectContextPriv.h"
+#include "src/gpu/vk/GrVkBuffer2.h"
 #include "src/gpu/vk/GrVkGpu.h"
 #include "src/gpu/vk/GrVkMemory.h"
-#include "src/gpu/vk/GrVkTransferBuffer.h"
 #include "src/gpu/vk/GrVkUtil.h"

 #define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
@@ -206,18 +208,14 @@ void GrVkBuffer::copyCpuDataToGpuBuffer(GrVkGpu* gpu, const void* src, size_t si
     if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
         gpu->updateBuffer(this, src, this->offset(), size);
     } else {
-        sk_sp<GrVkTransferBuffer> transferBuffer =
-                GrVkTransferBuffer::Make(gpu, size, GrVkBuffer::kCopyRead_Type,
-                                         kStream_GrAccessPattern);
+        GrResourceProvider* resourceProvider = gpu->getContext()->priv().resourceProvider();
+        sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
+                size, GrGpuBufferType::kXferCpuToGpu, kDynamic_GrAccessPattern, src);
         if (!transferBuffer) {
             return;
         }

-        char* buffer = (char*) transferBuffer->map();
-        memcpy(buffer, src, size);
-        transferBuffer->unmap();
-
-        gpu->copyBuffer(transferBuffer.get(), this, 0, this->offset(), size);
+        gpu->copyBuffer(std::move(transferBuffer), this, 0, this->offset(), size);
     }
     this->addMemoryBarrier(gpu,
                            VK_ACCESS_TRANSFER_WRITE_BIT,
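The hunk above keeps the existing small-update fast path: writes of at most 65536 bytes whose size is a multiple of four go through gpu->updateBuffer (which records vkCmdUpdateBuffer), and everything else now goes through a staging buffer obtained from GrResourceProvider rather than GrVkTransferBuffer::Make. Those limits mirror Vulkan's own constraints on vkCmdUpdateBuffer. Below is a standalone sketch of the same dispatch in raw Vulkan, not Skia code; it assumes the command buffer is in the recording state and, for the large path, that the staging buffer already holds the source bytes.

// Standalone sketch (not Skia code) of the small-update vs. staging-copy decision.
#include <vulkan/vulkan.h>

void recordBufferUpload(VkCommandBuffer cmd,
                        VkBuffer dst, VkDeviceSize dstOffset,
                        const void* src, VkDeviceSize size,
                        VkBuffer staging) {
    // vkCmdUpdateBuffer is limited to 65536 bytes and sizes that are a multiple of 4,
    // which is exactly the check copyCpuDataToGpuBuffer performs.
    if (size <= 65536 && (size & 0x3) == 0) {
        vkCmdUpdateBuffer(cmd, dst, dstOffset, size, src);
    } else {
        // `src` has already been written into `staging` (in the new Skia path this happens
        // inside createBuffer, which is handed the source pointer directly).
        VkBufferCopy region = {};
        region.srcOffset = 0;
        region.dstOffset = dstOffset;
        region.size = size;
        vkCmdCopyBuffer(cmd, staging, dst, 1, &region);
    }
}

Note that the explicit map/memcpy/unmap disappears from this function in the new code because createBuffer takes the source data as its last argument.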
@@ -7,10 +7,12 @@

 #include "src/gpu/vk/GrVkBuffer2.h"

+#include "include/gpu/GrDirectContext.h"
+#include "src/gpu/GrDirectContextPriv.h"
+#include "src/gpu/GrResourceProvider.h"
 #include "src/gpu/vk/GrVkDescriptorSet.h"
 #include "src/gpu/vk/GrVkGpu.h"
 #include "src/gpu/vk/GrVkMemory.h"
-#include "src/gpu/vk/GrVkTransferBuffer.h"
 #include "src/gpu/vk/GrVkUtil.h"

 #define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
@@ -32,6 +34,14 @@ GrVkBuffer2::GrVkBuffer2(GrVkGpu* gpu,
     this->registerWithCache(SkBudgeted::kYes);
 }

+sk_sp<GrVkBuffer2> GrVkBuffer2::MakeTransferSrc(GrVkGpu* gpu, size_t size) {
+    return Make(gpu, size, GrGpuBufferType::kXferCpuToGpu, kDynamic_GrAccessPattern);
+}
+
+sk_sp<GrVkBuffer2> GrVkBuffer2::MakeTransferDst(GrVkGpu* gpu, size_t size) {
+    return Make(gpu, size, GrGpuBufferType::kXferGpuToCpu, kDynamic_GrAccessPattern);
+}
+
 sk_sp<GrVkBuffer2> GrVkBuffer2::MakeUniform(GrVkGpu* gpu, size_t size) {
     return Make(gpu, size, GrGpuBufferType::kUniform, kDynamic_GrAccessPattern);
 }
@@ -162,6 +172,9 @@ void GrVkBuffer2::vkMap(size_t size) {
         SkASSERT(fAlloc.fSize > 0);
         SkASSERT(fAlloc.fSize >= size);
         fMapPtr = GrVkMemory::MapAlloc(this->getVkGpu(), fAlloc);
+        if (fMapPtr && this->intendedType() == GrGpuBufferType::kXferGpuToCpu) {
+            GrVkMemory::InvalidateMappedAlloc(this->getVkGpu(), fAlloc, 0, size);
+        }
     }
 }
@@ -202,19 +215,15 @@ void GrVkBuffer2::copyCpuDataToGpuBuffer(const void* src, size_t size) {
     if ((size <= 65536) && (0 == (size & 0x3)) && !gpu->vkCaps().avoidUpdateBuffers()) {
         gpu->updateBuffer(sk_ref_sp(this), src, /*offset=*/0, size);
     } else {
-        sk_sp<GrVkTransferBuffer> transferBuffer =
-                GrVkTransferBuffer::Make(gpu, size, GrVkBuffer::kCopyRead_Type,
-                                         kStream_GrAccessPattern);
+        GrResourceProvider* resourceProvider = gpu->getContext()->priv().resourceProvider();
+        sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
+                size, GrGpuBufferType::kXferCpuToGpu, kDynamic_GrAccessPattern, src);
        if (!transferBuffer) {
            return;
        }

-        char* buffer = (char*)transferBuffer->map();
-        memcpy(buffer, src, size);
-        transferBuffer->unmap();
-
-        gpu->copyBuffer(transferBuffer.get(), sk_ref_sp(this), /*srcOffset=*/0, /*dstOffset=*/0,
-                        size);
+        gpu->copyBuffer(std::move(transferBuffer), sk_ref_sp(this), /*srcOffset=*/0,
+                        /*dstOffset=*/0, size);
     }

     this->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
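GrVkBuffer2::vkMap now invalidates the mapped allocation when the buffer's intended type is kXferGpuToCpu, matching what the deleted GrVkTransferBuffer::onMap did. The reason is a Vulkan rule: when the memory type is host-visible but not host-coherent, device writes are not guaranteed to be visible to the host until the mapped range is invalidated, and host writes need a flush in the other direction. A standalone sketch, not Skia code, assuming the buffer memory is already allocated and possibly non-coherent:

// Standalone sketch (not Skia code) of map-for-readback and unmap-after-write.
#include <vulkan/vulkan.h>

void* mapForReadback(VkDevice device, VkDeviceMemory memory, VkDeviceSize size) {
    void* ptr = nullptr;
    if (vkMapMemory(device, memory, /*offset=*/0, size, /*flags=*/0, &ptr) != VK_SUCCESS) {
        return nullptr;
    }
    VkMappedMemoryRange range = {};
    range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range.memory = memory;
    range.offset = 0;
    range.size = VK_WHOLE_SIZE;
    // Make the GPU's writes visible to the host before reading through `ptr`.
    vkInvalidateMappedMemoryRanges(device, 1, &range);
    return ptr;
}

void unmapAfterWrite(VkDevice device, VkDeviceMemory memory) {
    VkMappedMemoryRange range = {};
    range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
    range.memory = memory;
    range.offset = 0;
    range.size = VK_WHOLE_SIZE;
    // Make the host's writes visible to the GPU before the copy that reads them.
    vkFlushMappedMemoryRanges(device, 1, &range);
    vkUnmapMemory(device, memory);
}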
@@ -16,6 +16,8 @@ class GrVkGpu;

 class GrVkBuffer2 : public GrGpuBuffer {
 public:
+    static sk_sp<GrVkBuffer2> MakeTransferSrc(GrVkGpu* gpu, size_t);
+    static sk_sp<GrVkBuffer2> MakeTransferDst(GrVkGpu* gpu, size_t);
     static sk_sp<GrVkBuffer2> MakeUniform(GrVkGpu* gpu, size_t size);

     VkBuffer vkBuffer() const { return fBuffer; }
@@ -20,7 +20,6 @@
 #include "src/gpu/vk/GrVkPipelineState.h"
 #include "src/gpu/vk/GrVkRenderPass.h"
 #include "src/gpu/vk/GrVkRenderTarget.h"
-#include "src/gpu/vk/GrVkTransferBuffer.h"
 #include "src/gpu/vk/GrVkUtil.h"

 void GrVkCommandBuffer::invalidateState() {
@@ -440,13 +439,21 @@ void GrVkPrimaryCommandBuffer::begin(GrVkGpu* gpu) {
     fIsActive = true;
 }

-void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu) {
+void GrVkPrimaryCommandBuffer::end(GrVkGpu* gpu, bool abandoningBuffer) {
     SkASSERT(fIsActive);
     SkASSERT(!fActiveRenderPass);

-    this->submitPipelineBarriers(gpu);
+    // If we are in the process of abandoning the context then the GrResourceCache will have freed
+    // all resources before destroying the GrVkGpu. When we destroy the GrVkGpu we call end on the
+    // command buffer to keep all our state tracking consistent. However, the vulkan validation
+    // layers complain about calling end on a command buffer that contains resources that have
+    // already been deleted. From the vulkan API it isn't required to end the command buffer to
+    // delete it, so we just skip the vulkan API calls and update our own state tracking.
+    if (!abandoningBuffer) {
+        this->submitPipelineBarriers(gpu);

-    GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
+        GR_VK_CALL_ERRCHECK(gpu, EndCommandBuffer(fCmdBuffer));
+    }
     this->invalidateState();
     fIsActive = false;
     fHasWork = false;
@@ -755,24 +762,25 @@ void GrVkPrimaryCommandBuffer::blitImage(const GrVkGpu* gpu,
 void GrVkPrimaryCommandBuffer::copyImageToBuffer(const GrVkGpu* gpu,
                                                  GrVkImage* srcImage,
                                                  VkImageLayout srcLayout,
-                                                 GrVkTransferBuffer* dstBuffer,
+                                                 sk_sp<GrGpuBuffer> dstBuffer,
                                                  uint32_t copyRegionCount,
                                                  const VkBufferImageCopy* copyRegions) {
     SkASSERT(fIsActive);
     SkASSERT(!fActiveRenderPass);
     this->addingWork(gpu);
-    this->addResource(srcImage->resource());
-    this->addResource(dstBuffer->resource());
+    GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(dstBuffer.get());
     GR_VK_CALL(gpu->vkInterface(), CmdCopyImageToBuffer(fCmdBuffer,
                                                         srcImage->image(),
                                                         srcLayout,
-                                                        dstBuffer->buffer(),
+                                                        vkBuffer->vkBuffer(),
                                                         copyRegionCount,
                                                         copyRegions));
+    this->addResource(srcImage->resource());
+    this->addGrBuffer(std::move(dstBuffer));
 }

 void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
-                                                 GrVkTransferBuffer* srcBuffer,
+                                                 VkBuffer srcBuffer,
                                                  GrVkImage* dstImage,
                                                  VkImageLayout dstLayout,
                                                  uint32_t copyRegionCount,
@@ -780,19 +788,19 @@ void GrVkPrimaryCommandBuffer::copyBufferToImage(const GrVkGpu* gpu,
     SkASSERT(fIsActive);
     SkASSERT(!fActiveRenderPass);
     this->addingWork(gpu);
-    this->addResource(srcBuffer->resource());
-    this->addResource(dstImage->resource());

     GR_VK_CALL(gpu->vkInterface(), CmdCopyBufferToImage(fCmdBuffer,
-                                                        srcBuffer->buffer(),
+                                                        srcBuffer,
                                                         dstImage->image(),
                                                         dstLayout,
                                                         copyRegionCount,
                                                         copyRegions));
+    this->addResource(dstImage->resource());
 }


 void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
-                                          GrVkBuffer* srcBuffer,
+                                          sk_sp<GrGpuBuffer> srcBuffer,
                                           GrVkBuffer* dstBuffer,
                                           uint32_t regionCount,
                                           const VkBufferCopy* regions) {
@@ -809,18 +817,20 @@ void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
         SkASSERT(region.dstOffset + region.size <= dstBuffer->size());
     }
 #endif
-    this->addResource(srcBuffer->resource());
-    this->addResource(dstBuffer->resource());
+    const GrVkBuffer2* srcVk = static_cast<GrVkBuffer2*>(srcBuffer.get());
+
     GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
-                                                 srcBuffer->buffer(),
+                                                 srcVk->vkBuffer(),
                                                  dstBuffer->buffer(),
                                                  regionCount,
                                                  regions));
+    this->addGrBuffer(std::move(srcBuffer));
+    this->addResource(dstBuffer->resource());
 }

 void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
-                                          GrVkBuffer* srcBuffer,
-                                          sk_sp<GrVkBuffer2> dstBuffer,
+                                          sk_sp<GrGpuBuffer> srcBuffer,
+                                          sk_sp<GrGpuBuffer> dstBuffer,
                                           uint32_t regionCount,
                                           const VkBufferCopy* regions) {
     SkASSERT(fIsActive);
@@ -837,11 +847,15 @@ void GrVkPrimaryCommandBuffer::copyBuffer(GrVkGpu* gpu,
     }
 #endif

-    GR_VK_CALL(gpu->vkInterface(),
-               CmdCopyBuffer(
-                       fCmdBuffer, srcBuffer->buffer(), dstBuffer->vkBuffer(), regionCount,
-                       regions));
-    this->addResource(srcBuffer->resource());
+    const GrVkBuffer2* srcVk = static_cast<GrVkBuffer2*>(srcBuffer.get());
+    const GrVkBuffer2* dstVk = static_cast<GrVkBuffer2*>(dstBuffer.get());

+    GR_VK_CALL(gpu->vkInterface(), CmdCopyBuffer(fCmdBuffer,
+                                                 srcVk->vkBuffer(),
+                                                 dstVk->vkBuffer(),
+                                                 regionCount,
+                                                 regions));
+    this->addGrBuffer(std::move(srcBuffer));
+    this->addGrBuffer(std::move(dstBuffer));
 }
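The command-buffer copy entry points above now accept sk_sp<GrGpuBuffer>, downcast to GrVkBuffer2 to reach the raw VkBuffer handle, record the copy, and then pin the buffer with addGrBuffer so it outlives execution of the command buffer. Stripped of that bookkeeping, the readback copy they record is a single vkCmdCopyImageToBuffer. The sketch below is illustrative, not Skia code; it assumes a recording command buffer, an image already in TRANSFER_SRC_OPTIMAL layout, and a tightly packed destination buffer.

// Standalone sketch (not Skia code) of the Vulkan call the new copyImageToBuffer records.
#include <vulkan/vulkan.h>
#include <cstdint>

void recordImageReadback(VkCommandBuffer cmd, VkImage image, VkBuffer buffer,
                         uint32_t width, uint32_t height) {
    VkBufferImageCopy region = {};
    region.bufferOffset = 0;       // the new code always reads from offset 0
    region.bufferRowLength = 0;    // 0 => rows tightly packed at imageExtent.width
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, /*mipLevel=*/0,
                                /*baseArrayLayer=*/0, /*layerCount=*/1 };
    region.imageOffset = { 0, 0, 0 };
    region.imageExtent = { width, height, 1 };
    vkCmdCopyImageToBuffer(cmd, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
                           buffer, 1, &region);
}

Because the destination buffer is only a handle once recorded, something must keep the GrGpuBuffer alive until the GPU finishes; that is exactly what addGrBuffer provides in the new code.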
@@ -23,7 +23,6 @@ class GrVkPipeline;
 class GrVkPipelineState;
 class GrVkRenderPass;
 class GrVkRenderTarget;
-class GrVkTransferBuffer;

 class GrVkCommandBuffer {
 public:
@@ -213,7 +212,7 @@ public:
     static GrVkPrimaryCommandBuffer* Create(GrVkGpu* gpu, VkCommandPool cmdPool);

     void begin(GrVkGpu* gpu);
-    void end(GrVkGpu* gpu);
+    void end(GrVkGpu* gpu, bool abandoningBuffer = false);

     // Begins render pass on this command buffer. The framebuffer from GrVkRenderTarget will be used
     // in the render pass.
@@ -275,26 +274,29 @@ public:
     void copyImageToBuffer(const GrVkGpu* gpu,
                            GrVkImage* srcImage,
                            VkImageLayout srcLayout,
-                           GrVkTransferBuffer* dstBuffer,
+                           sk_sp<GrGpuBuffer> dstBuffer,
                            uint32_t copyRegionCount,
                            const VkBufferImageCopy* copyRegions);

+    // All uses of copyBufferToImage are done with buffers from our staging manager. The staging
+    // manager will handle making sure the command buffer refs the buffer. Thus we just pass in the
+    // raw VkBuffer here and don't worry about refs.
     void copyBufferToImage(const GrVkGpu* gpu,
-                           GrVkTransferBuffer* srcBuffer,
+                           VkBuffer srcBuffer,
                            GrVkImage* dstImage,
                            VkImageLayout dstLayout,
                            uint32_t copyRegionCount,
                            const VkBufferImageCopy* copyRegions);

     void copyBuffer(GrVkGpu* gpu,
-                    GrVkBuffer* srcBuffer,
+                    sk_sp<GrGpuBuffer> srcBuffer,
                     GrVkBuffer* dstBuffer,
                     uint32_t regionCount,
                     const VkBufferCopy* regions);

     void copyBuffer(GrVkGpu* gpu,
-                    GrVkBuffer* srcBuffer,
-                    sk_sp<GrVkBuffer2> dstBuffer,
+                    sk_sp<GrGpuBuffer> srcBuffer,
+                    sk_sp<GrGpuBuffer> dstBuffer,
                     uint32_t regionCount,
                     const VkBufferCopy* regions);
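The header comment above captures the ownership rule this change introduces: copyImageToBuffer and copyBuffer take sk_sp<GrGpuBuffer> and ref the buffer themselves via addGrBuffer, while copyBufferToImage takes a raw VkBuffer because its sources always come from the staging manager, which already holds the command buffer's reference. At the Vulkan level that upload is just vkCmdCopyBufferToImage; a standalone sketch, not Skia code, assuming a recording command buffer and an image in TRANSFER_DST_OPTIMAL layout:

// Standalone sketch (not Skia code) of the upload direction recorded by copyBufferToImage.
#include <vulkan/vulkan.h>
#include <cstdint>

void recordBufferToImageUpload(VkCommandBuffer cmd, VkBuffer src, VkImage image,
                               uint32_t width, uint32_t height,
                               VkDeviceSize bufferOffset) {
    VkBufferImageCopy region = {};
    region.bufferOffset = bufferOffset;  // offset of the staged data within the staging buffer
    region.bufferRowLength = 0;          // tightly packed rows
    region.bufferImageHeight = 0;
    region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
    region.imageOffset = { 0, 0, 0 };
    region.imageExtent = { width, height, 1 };
    vkCmdCopyBufferToImage(cmd, src, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                           1, &region);
}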
@@ -43,7 +43,6 @@
 #include "src/gpu/vk/GrVkSemaphore.h"
 #include "src/gpu/vk/GrVkTexture.h"
 #include "src/gpu/vk/GrVkTextureRenderTarget.h"
-#include "src/gpu/vk/GrVkTransferBuffer.h"
 #include "src/image/SkImage_Gpu.h"
 #include "src/image/SkSurface_Gpu.h"

@@ -247,7 +246,7 @@ GrVkGpu::GrVkGpu(GrDirectContext* direct, const GrVkBackendContext& backendConte

 void GrVkGpu::destroyResources() {
     if (fMainCmdPool) {
-        fMainCmdPool->getPrimaryCommandBuffer()->end(this);
+        fMainCmdPool->getPrimaryCommandBuffer()->end(this, /*abandoningBuffer=*/true);
         fMainCmdPool->close();
     }
@@ -404,14 +403,12 @@ sk_sp<GrGpuBuffer> GrVkGpu::onCreateBuffer(size_t size, GrGpuBufferType type,
                      accessPattern == kDynamic_GrAccessPattern);
             break;
         case GrGpuBufferType::kXferCpuToGpu:
-            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
-                     accessPattern == kStream_GrAccessPattern);
-            buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyRead_Type, accessPattern);
+            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
+            buff = GrVkBuffer2::MakeTransferSrc(this, size);
             break;
         case GrGpuBufferType::kXferGpuToCpu:
-            SkASSERT(accessPattern == kDynamic_GrAccessPattern ||
-                     accessPattern == kStream_GrAccessPattern);
-            buff = GrVkTransferBuffer::Make(this, size, GrVkBuffer::kCopyWrite_Type, accessPattern);
+            SkASSERT(accessPattern == kDynamic_GrAccessPattern);
+            buff = GrVkBuffer2::MakeTransferDst(this, size);
             break;
         case GrGpuBufferType::kUniform:
             SkASSERT(accessPattern == kDynamic_GrAccessPattern);
@@ -502,8 +499,7 @@ bool GrVkGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int widt
     // Can't transfer compressed data
     SkASSERT(!GrVkFormatIsCompressed(vkTex->imageFormat()));

-    GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer.get());
-    if (!vkBuffer) {
+    if (!transferBuffer) {
         return false;
     }

@@ -530,13 +526,16 @@ bool GrVkGpu::onTransferPixelsTo(GrTexture* texture, int left, int top, int widt
                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                           false);

-    // Copy the buffer to the image
+    const GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(transferBuffer.get());
+
+    // Copy the buffer to the image.
     this->currentCommandBuffer()->copyBufferToImage(this,
-                                                    vkBuffer,
+                                                    vkBuffer->vkBuffer(),
                                                     vkTex,
                                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                     1,
                                                     &region);
+    this->currentCommandBuffer()->addGrBuffer(std::move(transferBuffer));

     vkTex->markMipmapsDirty();
     return true;
@@ -557,8 +556,6 @@ bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int wi
         return false;
     }

-    GrVkTransferBuffer* vkBuffer = static_cast<GrVkTransferBuffer*>(transferBuffer.get());
-
     GrVkImage* srcImage;
     if (GrVkRenderTarget* rt = static_cast<GrVkRenderTarget*>(surface->asRenderTarget())) {
         // Reading from render targets that wrap a secondary command buffer is not allowed since
@@ -595,11 +592,11 @@ bool GrVkGpu::onTransferPixelsFrom(GrSurface* surface, int left, int top, int wi

     this->currentCommandBuffer()->copyImageToBuffer(this, srcImage,
                                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                                    vkBuffer, 1, &region);
+                                                    transferBuffer, 1, &region);

+    GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(transferBuffer.get());
     // Make sure the copy to buffer has finished.
-    vkBuffer->addMemoryBarrier(this,
-                               VK_ACCESS_TRANSFER_WRITE_BIT,
+    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
                                VK_ACCESS_HOST_READ_BIT,
                                VK_PIPELINE_STAGE_TRANSFER_BIT,
                                VK_PIPELINE_STAGE_HOST_BIT,
@@ -941,9 +938,14 @@ bool GrVkGpu::uploadTexDataOptimal(GrVkTexture* tex, int left, int top, int widt
                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                           false);

-    // Copy the buffer to the image
+    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
+    // because we don't need the command buffer to ref the buffer here. The reason is that
+    // the buffer is coming from the staging manager and the staging manager will make sure the
+    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
+    // every upload in the frame.
+    GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(slice.fBuffer);
     this->currentCommandBuffer()->copyBufferToImage(this,
-                                                    static_cast<GrVkTransferBuffer*>(slice.fBuffer),
+                                                    vkBuffer->vkBuffer(),
                                                     uploadTexture,
                                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                     regions.count(),
@@ -1011,9 +1013,14 @@ bool GrVkGpu::uploadTexDataCompressed(GrVkTexture* uploadTexture,
                           VK_PIPELINE_STAGE_TRANSFER_BIT,
                           false);

-    // Copy the buffer to the image
+    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
+    // because we don't need the command buffer to ref the buffer here. The reason is that
+    // the buffer is coming from the staging manager and the staging manager will make sure the
+    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
+    // every upload in the frame.
+    GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(slice.fBuffer);
     this->currentCommandBuffer()->copyBufferToImage(this,
-                                                    static_cast<GrVkTransferBuffer*>(slice.fBuffer),
+                                                    vkBuffer->vkBuffer(),
                                                     uploadTexture,
                                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                                                     regions.count(),
@@ -1170,7 +1177,9 @@ sk_sp<GrTexture> GrVkGpu::onCreateCompressedTexture(SkISize dimensions,

 ////////////////////////////////////////////////////////////////////////////////

-void GrVkGpu::copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
+void GrVkGpu::copyBuffer(sk_sp<GrGpuBuffer> srcBuffer,
+                         GrVkBuffer* dstBuffer,
+                         VkDeviceSize srcOffset,
                          VkDeviceSize dstOffset, VkDeviceSize size) {
     if (!this->currentCommandBuffer()) {
         return;
@@ -1179,11 +1188,14 @@ void GrVkGpu::copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceS
     copyRegion.srcOffset = srcOffset;
     copyRegion.dstOffset = dstOffset;
     copyRegion.size = size;
-    this->currentCommandBuffer()->copyBuffer(this, srcBuffer, dstBuffer, 1, &copyRegion);
+    this->currentCommandBuffer()->copyBuffer(this, std::move(srcBuffer), dstBuffer, 1, &copyRegion);
 }

-void GrVkGpu::copyBuffer(GrVkBuffer* srcBuffer, sk_sp<GrVkBuffer2> dstBuffer,
-                         VkDeviceSize srcOffset, VkDeviceSize dstOffset, VkDeviceSize size) {
+void GrVkGpu::copyBuffer(sk_sp<GrGpuBuffer> srcBuffer,
+                         sk_sp<GrGpuBuffer> dstBuffer,
+                         VkDeviceSize srcOffset,
+                         VkDeviceSize dstOffset,
+                         VkDeviceSize size) {
     if (!this->currentCommandBuffer()) {
         return;
     }
@@ -1191,7 +1203,8 @@ void GrVkGpu::copyBuffer(GrVkBuffer* srcBuffer, sk_sp<GrVkBuffer2> dstBuffer,
     copyRegion.srcOffset = srcOffset;
     copyRegion.dstOffset = dstOffset;
     copyRegion.size = size;
-    this->currentCommandBuffer()->copyBuffer(this, srcBuffer, std::move(dstBuffer), 1, &copyRegion);
+    this->currentCommandBuffer()->copyBuffer(this, std::move(srcBuffer), std::move(dstBuffer), 1,
+                                             &copyRegion);
 }

 bool GrVkGpu::updateBuffer(GrVkBuffer* buffer, const void* src,
@@ -1739,7 +1752,13 @@ bool GrVkGpu::onUpdateBackendTexture(const GrBackendTexture& backendTexture,
     }

     cmdBuffer->addGrSurface(texture);
-    cmdBuffer->copyBufferToImage(this, static_cast<GrVkTransferBuffer*>(slice.fBuffer),
+    const GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(slice.fBuffer);
+    // Copy the buffer to the image. This call takes the raw VkBuffer instead of a GrGpuBuffer
+    // because we don't need the command buffer to ref the buffer here. The reason is that
+    // the buffer is coming from the staging manager and the staging manager will make sure the
+    // command buffer has a ref on the buffer. This avoids having to add and remove a ref for
+    // every upload in the frame.
+    cmdBuffer->copyBufferToImage(this, vkBuffer->vkBuffer(),
                                  texture.get(), texture->currentLayout(), regions.count(),
                                  regions.begin());
 }
@@ -2544,14 +2563,19 @@ bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int

     size_t transBufferRowBytes = bpp * region.imageExtent.width;
     size_t imageRows = region.imageExtent.height;
-    auto transferBuffer = sk_sp<GrVkTransferBuffer>(
-            static_cast<GrVkTransferBuffer*>(this->createBuffer(transBufferRowBytes * imageRows,
-                                                                GrGpuBufferType::kXferGpuToCpu,
-                                                                kStream_GrAccessPattern)
-                                                     .release()));
+    GrResourceProvider* resourceProvider = this->getContext()->priv().resourceProvider();
+    sk_sp<GrGpuBuffer> transferBuffer = resourceProvider->createBuffer(
+            transBufferRowBytes * imageRows, GrGpuBufferType::kXferGpuToCpu,
+            kDynamic_GrAccessPattern);
+
+    if (!transferBuffer) {
+        return false;
+    }

+    GrVkBuffer2* vkBuffer = static_cast<GrVkBuffer2*>(transferBuffer.get());
+
     // Copy the image to a buffer so we can map it to cpu memory
-    region.bufferOffset = transferBuffer->offset();
+    region.bufferOffset = 0;
     region.bufferRowLength = 0;  // Forces RowLength to be width. We handle the rowBytes below.
     region.bufferImageHeight = 0;  // Forces height to be tightly packed. Only useful for 3d images.
     region.imageSubresource = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1 };
@@ -2559,17 +2583,16 @@ bool GrVkGpu::onReadPixels(GrSurface* surface, int left, int top, int width, int
     this->currentCommandBuffer()->copyImageToBuffer(this,
                                                     image,
                                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-                                                    transferBuffer.get(),
+                                                    transferBuffer,
                                                     1,
                                                     &region);

     // make sure the copy to buffer has finished
-    transferBuffer->addMemoryBarrier(this,
-                                     VK_ACCESS_TRANSFER_WRITE_BIT,
-                                     VK_ACCESS_HOST_READ_BIT,
-                                     VK_PIPELINE_STAGE_TRANSFER_BIT,
-                                     VK_PIPELINE_STAGE_HOST_BIT,
-                                     false);
+    vkBuffer->addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT,
+                               VK_ACCESS_HOST_READ_BIT,
+                               VK_PIPELINE_STAGE_TRANSFER_BIT,
+                               VK_PIPELINE_STAGE_HOST_BIT,
+                               false);

     // We need to submit the current command buffer to the Queue and make sure it finishes before
     // we can copy the data out of the buffer.
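The readback path above ends with addMemoryBarrier(VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, ...), which is what makes the GPU's copy visible to the CPU before the buffer is mapped. In raw Vulkan this is a buffer memory barrier between the transfer and host stages; the sketch below is illustrative only, not Skia code, and assumes a recording command buffer.

// Standalone sketch (not Skia code) of the transfer-write -> host-read barrier.
#include <vulkan/vulkan.h>

void recordReadbackBarrier(VkCommandBuffer cmd, VkBuffer buffer, VkDeviceSize size) {
    VkBufferMemoryBarrier barrier = {};
    barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
    barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
    barrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.buffer = buffer;
    barrier.offset = 0;
    barrier.size = size;
    vkCmdPipelineBarrier(cmd,
                         VK_PIPELINE_STAGE_TRANSFER_BIT,  // producer: the copy
                         VK_PIPELINE_STAGE_HOST_BIT,      // consumer: the mapped CPU read
                         /*dependencyFlags=*/0,
                         0, nullptr,
                         1, &barrier,
                         0, nullptr);
}

The barrier only orders the accesses within the command buffer; as the in-code comment notes, the command buffer still has to be submitted and waited on before the data can be copied out of the mapped buffer.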
@@ -171,10 +171,10 @@ public:

     std::unique_ptr<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

-    void copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
-                    VkDeviceSize dstOffset, VkDeviceSize size);
-    void copyBuffer(GrVkBuffer* srcBuffer, sk_sp<GrVkBuffer2> dstBuffer, VkDeviceSize srcOffset,
+    void copyBuffer(sk_sp<GrGpuBuffer> srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
                     VkDeviceSize dstOffset, VkDeviceSize size);
+    void copyBuffer(sk_sp<GrGpuBuffer> srcBuffer, sk_sp<GrGpuBuffer> dstBuffer,
+                    VkDeviceSize srcOffset, VkDeviceSize dstOffset, VkDeviceSize size);
     bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);
     bool updateBuffer(sk_sp<GrVkBuffer2> buffer, const void* src, VkDeviceSize offset,
                       VkDeviceSize size);
@@ -1,65 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#include "include/core/SkTraceMemoryDump.h"
-#include "src/gpu/vk/GrVkGpu.h"
-#include "src/gpu/vk/GrVkTransferBuffer.h"
-
-sk_sp<GrVkTransferBuffer> GrVkTransferBuffer::Make(GrVkGpu* gpu, size_t size,
-                                                   GrVkBuffer::Type type,
-                                                   GrAccessPattern access) {
-    GrVkBuffer::Desc desc;
-    desc.fDynamic = true;
-    SkASSERT(GrVkBuffer::kCopyRead_Type == type || GrVkBuffer::kCopyWrite_Type == type);
-    desc.fType = type;
-    desc.fSizeInBytes = size;
-
-    const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
-    if (!bufferResource) {
-        return nullptr;
-    }
-
-    GrVkTransferBuffer* buffer = new GrVkTransferBuffer(gpu, desc, access, bufferResource);
-    if (!buffer) {
-        bufferResource->unref();
-    }
-    return sk_sp<GrVkTransferBuffer>(buffer);
-}
-
-GrVkTransferBuffer::GrVkTransferBuffer(GrVkGpu* gpu,
-                                       const GrVkBuffer::Desc& desc,
-                                       GrAccessPattern access,
-                                       const GrVkBuffer::Resource* bufferResource)
-        : INHERITED(gpu, desc.fSizeInBytes,
-                    kCopyRead_Type == desc.fType ? GrGpuBufferType::kXferCpuToGpu
-                                                 : GrGpuBufferType::kXferGpuToCpu,
-                    access)
-        , GrVkBuffer(desc, bufferResource) {
-    this->registerWithCache(SkBudgeted::kYes);
-}
-
-void GrVkTransferBuffer::onRelease() {
-    if (!this->wasDestroyed()) {
-        this->vkRelease(this->getVkGpu());
-    }
-    INHERITED::onRelease();
-}
-
-void GrVkTransferBuffer::onAbandon() {
-    if (!this->wasDestroyed()) {
-        this->vkRelease(this->getVkGpu());
-    }
-    INHERITED::onAbandon();
-}
-
-void GrVkTransferBuffer::setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
-                                          const SkString& dumpName) const {
-    SkString buffer_id;
-    buffer_id.appendU64((uint64_t)this->buffer());
-    traceMemoryDump->setMemoryBacking(dumpName.c_str(), "vk_buffer",
-                                      buffer_id.c_str());
-}
@@ -1,55 +0,0 @@
-/*
- * Copyright 2015 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
-#ifndef GrVkTransferBuffer_DEFINED
-#define GrVkTransferBuffer_DEFINED
-
-#include "include/gpu/vk/GrVkTypes.h"
-#include "src/gpu/GrGpuBuffer.h"
-#include "src/gpu/vk/GrVkBuffer.h"
-
-class GrVkGpu;
-
-class GrVkTransferBuffer : public GrGpuBuffer, public GrVkBuffer {
-public:
-    static sk_sp<GrVkTransferBuffer> Make(GrVkGpu* gpu, size_t size, GrVkBuffer::Type type,
-                                          GrAccessPattern access);
-
-protected:
-    void onAbandon() override;
-    void onRelease() override;
-
-private:
-    GrVkTransferBuffer(GrVkGpu* gpu, const GrVkBuffer::Desc& desc, GrAccessPattern access,
-                       const GrVkBuffer::Resource* resource);
-    void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
-                          const SkString& dumpName) const override;
-
-    void onMap() override {
-        this->GrGpuBuffer::fMapPtr = this->vkMap(this->getVkGpu());
-        if (this->GrGpuBuffer::fMapPtr &&
-            this->intendedType() == GrGpuBufferType::kXferGpuToCpu) {
-            const GrVkAlloc& alloc = this->alloc();
-            GrVkMemory::InvalidateMappedAlloc(this->getVkGpu(), alloc, 0, alloc.fSize);
-        }
-    }
-
-    void onUnmap() override { this->vkUnmap(this->getVkGpu()); }
-
-    bool onUpdateData(const void* src, size_t srcSizeInBytes) override {
-        SK_ABORT("Not implemented for transfer buffers.");
-    }
-
-    GrVkGpu* getVkGpu() const {
-        SkASSERT(!this->wasDestroyed());
-        return reinterpret_cast<GrVkGpu*>(this->getGpu());
-    }
-
-    using INHERITED = GrGpuBuffer;
-};
-
-#endif