Update min Vulkan version to 1.0.8 and fix various bugs

Updating the SDK also caused the debug layers to flag multiple bugs,
which this CL fixes as well. These include:

1. Incorrect tracking of the descriptor sets allocated from the descriptor pools.

2. Use of MemoryBarriers inside render passes, which is not allowed.

3. The stencil image layout not being set whenever a render pass with a stencil attachment is used.

4. The wrong aspect mask being used for depth/stencil images in barriers (a sketch of the corrected mapping follows this list).
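
For item 4, the CL adds a vk_format_to_aspect_flags() helper to GrVkImage.cpp (see the diff below) so that an image barrier's subresourceRange.aspectMask matches the image's format. As a standalone illustration, here is a minimal sketch of the same mapping against plain Vulkan headers; the depth-only cases are added for completeness and are not part of this CL:

    #include <vulkan/vulkan.h>

    // Pick the aspect bits a VkImageMemoryBarrier must use for a given format.
    // Stencil-only, depth-only, and combined depth/stencil formats each need
    // different aspects; everything else is treated as a color image.
    VkImageAspectFlags aspect_flags_for_format(VkFormat format) {
        switch (format) {
            case VK_FORMAT_S8_UINT:
                return VK_IMAGE_ASPECT_STENCIL_BIT;
            case VK_FORMAT_D16_UNORM:             // depth-only (not in this CL)
            case VK_FORMAT_D32_SFLOAT:
                return VK_IMAGE_ASPECT_DEPTH_BIT;
            case VK_FORMAT_D24_UNORM_S8_UINT:     // combined depth/stencil
            case VK_FORMAT_D32_SFLOAT_S8_UINT:
                return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
            default:
                return VK_IMAGE_ASPECT_COLOR_BIT;
        }
    }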

TBR=bsalomon@google.com

BUG=skia:
GOLD_TRYBOT_URL= https://gold.skia.org/search2?unt=true&query=source_type%3Dgm&master=false&issue=1906623002

Review URL: https://codereview.chromium.org/1906623002
egdaniel, 2016-04-21 08:03:10 -07:00 (committed by Commit bot)
commit 58a8d9214a, parent 12d62a7d51
15 changed files with 134 additions and 76 deletions


@@ -35,6 +35,7 @@ struct GrVkTextureInfo {
     VkDeviceMemory fAlloc;       // this may be null iff the texture is an RT and uses borrow semantics
     VkImageTiling  fImageTiling;
     VkImageLayout  fImageLayout;
+    VkFormat       fFormat;
 };
 
 GR_STATIC_ASSERT(sizeof(GrBackendObject) >= sizeof(const GrVkTextureInfo*));


@@ -16,15 +16,14 @@
 #ifdef ENABLE_VK_LAYERS
 const char* kDebugLayerNames[] = {
     // elements of VK_LAYER_LUNARG_standard_validation
-    "VK_LAYER_LUNARG_threading",
-    "VK_LAYER_LUNARG_param_checker",
+    "VK_LAYER_GOOGLE_threading",
+    "VK_LAYER_LUNARG_parameter_validation",
     "VK_LAYER_LUNARG_device_limits",
     "VK_LAYER_LUNARG_object_tracker",
     "VK_LAYER_LUNARG_image",
-    "VK_LAYER_LUNARG_mem_tracker",
-    "VK_LAYER_LUNARG_draw_state",
+    "VK_LAYER_LUNARG_core_validation",
     "VK_LAYER_LUNARG_swapchain",
-    //"VK_LAYER_GOOGLE_unique_objects",
+    "VK_LAYER_GOOGLE_unique_objects",
     // not included in standard_validation
     //"VK_LAYER_LUNARG_api_dump",
     //"VK_LAYER_LUNARG_vktrace",
@@ -33,7 +32,7 @@ const char* kDebugLayerNames[] = {
 #endif
 
 // the minimum version of Vulkan supported
-const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 3);
+const uint32_t kGrVkMinimumVersion = VK_MAKE_VERSION(1, 0, 8);
 
 // Create the base Vulkan objects needed by the GrVkGpu object
 const GrVkBackendContext* GrVkBackendContext::Create() {


@@ -141,6 +141,7 @@ void GrVkCommandBuffer::submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu
     submitInfo.pNext = nullptr;
     submitInfo.waitSemaphoreCount = 0;
     submitInfo.pWaitSemaphores = nullptr;
+    submitInfo.pWaitDstStageMask = 0;
     submitInfo.commandBufferCount = 1;
     submitInfo.pCommandBuffers = &fCmdBuffer;
     submitInfo.signalSemaphoreCount = 0;
@@ -195,6 +196,11 @@ void GrVkCommandBuffer::pipelineBarrier(const GrVkGpu* gpu,
                                         BarrierType barrierType,
                                         void* barrier) const {
     SkASSERT(fIsActive);
+    // For images we can have barriers inside of render passes but they require us to add more
+    // support in subpasses which need self dependencies to have barriers inside them. Also, we can
+    // never have buffer barriers inside of a render pass. For now we will just assert that we are
+    // not in a render pass.
+    SkASSERT(!fActiveRenderPass);
     VkDependencyFlags dependencyFlags = byRegion ? VK_DEPENDENCY_BY_REGION_BIT : 0;
 
     switch (barrierType) {
@@ -390,7 +396,6 @@ void GrVkCommandBuffer::bindDescriptorSets(const GrVkGpu* gpu,
 
 void GrVkCommandBuffer::bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) {
     SkASSERT(fIsActive);
-    SkASSERT(fActiveRenderPass);
     GR_VK_CALL(gpu->vkInterface(), CmdBindPipeline(fCmdBuffer,
                                                    VK_PIPELINE_BIND_POINT_GRAPHICS,
                                                    pipeline->pipeline()));


@@ -82,6 +82,8 @@ public:
         }
     }
 
+    void bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline);
+
     void bindDescriptorSets(const GrVkGpu* gpu,
                             GrVkPipelineState*,
                             VkPipelineLayout layout,
@@ -154,9 +156,6 @@ public:
                     int numRects,
                     const VkClearRect* clearRects) const;
 
-    void bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline);
-
     void drawIndexed(const GrVkGpu* gpu,
                      uint32_t indexCount,
                      uint32_t instanceCount,


@@ -619,18 +619,15 @@ GrRenderTarget* GrVkGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDe
 
 void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
                            const GrNonInstancedMesh& mesh) {
+    // There is no need to put any memory barriers to make sure host writes have finished here.
+    // When a command buffer is submitted to a queue, there is an implicit memory barrier that
+    // occurs for all host writes. Additionally, BufferMemoryBarriers are not allowed inside of
+    // an active RenderPass.
     GrVkVertexBuffer* vbuf;
     vbuf = (GrVkVertexBuffer*)mesh.vertexBuffer();
     SkASSERT(vbuf);
     SkASSERT(!vbuf->isMapped());
 
-    vbuf->addMemoryBarrier(this,
-                           VK_ACCESS_HOST_WRITE_BIT,
-                           VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,
-                           VK_PIPELINE_STAGE_HOST_BIT,
-                           VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
-                           false);
-
     fCurrentCmdBuffer->bindVertexBuffer(this, vbuf);
 
     if (mesh.isIndexed()) {
@@ -638,13 +635,6 @@ void GrVkGpu::bindGeometry(const GrPrimitiveProcessor& primProc,
         SkASSERT(ibuf);
         SkASSERT(!ibuf->isMapped());
 
-        ibuf->addMemoryBarrier(this,
-                               VK_ACCESS_HOST_WRITE_BIT,
-                               VK_ACCESS_INDEX_READ_BIT,
-                               VK_PIPELINE_STAGE_HOST_BIT,
-                               VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
-                               false);
-
         fCurrentCmdBuffer->bindIndexBuffer(this, ibuf);
     }
 }
@@ -783,6 +773,7 @@ GrBackendObject GrVkGpu::createTestingOnlyBackendTexture(void* srcData, int w, i
     info->fAlloc = alloc;
     info->fImageTiling = imageTiling;
     info->fImageLayout = initialLayout;
+    info->fFormat = pixelFormat;
 
     return (GrBackendObject)info;
 }
@@ -933,8 +924,7 @@ void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bo
     VkImageLayout origDstLayout = vkStencil->currentLayout();
     VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
     VkAccessFlags dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
-    VkPipelineStageFlags srcStageMask =
-        GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
     VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
     vkStencil->setImageLayout(this,
                               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
@@ -944,6 +934,21 @@ void GrVkGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bo
                               dstStageMask,
                               false);
 
+    // Change layout of our render target so it can be used as the color attachment. This is what
+    // the render pass expects when it begins.
+    VkImageLayout layout = vkRT->currentLayout();
+    srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+    dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+    srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+    dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+    vkRT->setImageLayout(this,
+                         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
+                         srcAccessMask,
+                         dstAccessMask,
+                         srcStageMask,
+                         dstStageMask,
+                         false);
+
     VkClearRect clearRect;
     // Flip rect if necessary
     SkIRect vkRect = rect;
@@ -990,8 +995,7 @@ void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color
     if (rect.width() != target->width() || rect.height() != target->height()) {
         VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
         VkAccessFlags dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
-        VkPipelineStageFlags srcStageMask =
-            GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+        VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
         VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
         vkRT->setImageLayout(this,
                              VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
@@ -1001,6 +1005,25 @@ void GrVkGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color
                              dstStageMask,
                              false);
 
+        // If we are using a stencil attachment we also need to change its layout to what the render
+        // pass is expecting.
+        if (GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment()) {
+            GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
+            origDstLayout = vkStencil->currentLayout();
+            srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
+            dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+                            VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT;
+            srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(origDstLayout);
+            dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+            vkStencil->setImageLayout(this,
+                                      VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
+                                      srcAccessMask,
+                                      dstAccessMask,
+                                      srcStageMask,
+                                      dstStageMask,
+                                      false);
+        }
+
         VkClearRect clearRect;
         // Flip rect if necessary
         SkIRect vkRect = rect;
@@ -1483,7 +1506,6 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
     const GrVkRenderPass* renderPass = vkRT->simpleRenderPass();
     SkASSERT(renderPass);
 
-    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
 
     GrPrimitiveType primitiveType = meshes[0].primitiveType();
     sk_sp<GrVkPipelineState> pipelineState = this->prepareDrawState(pipeline,
@@ -1496,8 +1518,6 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
     // Change layout of our render target so it can be used as the color attachment
     VkImageLayout layout = vkRT->currentLayout();
-    // Our color attachment is purely a destination and won't be read so don't need to flush or
-    // invalidate any caches
     VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
     VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
     VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
@@ -1511,8 +1531,7 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
                          false);
 
     // If we are using a stencil attachment we also need to update its layout
-    if (!pipeline.getStencil().isDisabled()) {
-        GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment();
+    if (GrStencilAttachment* stencil = vkRT->renderTargetPriv().getStencilAttachment()) {
         GrVkStencilAttachment* vkStencil = (GrVkStencilAttachment*)stencil;
         VkImageLayout origDstLayout = vkStencil->currentLayout();
         VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(origDstLayout);
@@ -1530,12 +1549,9 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
                                   false);
     }
 
+    fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
+
     for (int i = 0; i < meshCount; ++i) {
-        if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
-            this->xferBarrier(pipeline.getRenderTarget(), barrierType);
-        }
-
         const GrMesh& mesh = meshes[i];
         GrMesh::Iterator iter;
         const GrNonInstancedMesh* nonIdxMesh = iter.init(mesh);
@@ -1547,6 +1563,10 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
                 pipelineState->freeTempResources(this);
                 SkDEBUGCODE(pipelineState = nullptr);
                 primitiveType = nonIdxMesh->primitiveType();
+                // It is illegal for us to have the necessary memory barriers for when we write and
+                // update the uniform buffers in prepareDrawState while in an active render pass.
+                // Thus we must end the current one and then start it up again.
+                fCurrentCmdBuffer->endRenderPass(this);
                 pipelineState = this->prepareDrawState(pipeline,
                                                        primProc,
                                                        primitiveType,
@@ -1554,6 +1574,7 @@ void GrVkGpu::onDraw(const GrPipeline& pipeline,
                 if (!pipelineState) {
                     return;
                 }
+                fCurrentCmdBuffer->beginRenderPass(this, renderPass, *vkRT);
             }
             SkASSERT(pipelineState);
             this->bindGeometry(primProc, *nonIdxMesh);
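
A note on the onDraw() changes above: beginRenderPass() now happens only after all layout transitions, and the pass is suspended around prepareDrawState() whenever the primitive type changes, because pipeline barriers may not be recorded inside an active render pass (absent subpass self-dependencies). A toy sketch of that control flow, with stub types standing in for the Skia and Vulkan ones:

    #include <cassert>

    // Stub command buffer (not Skia's GrVkCommandBuffer) that enforces the same
    // rule as the new SkASSERT(!fActiveRenderPass) in pipelineBarrier().
    struct CmdBufferSketch {
        bool activeRenderPass = false;
        void beginRenderPass() { activeRenderPass = true; }
        void endRenderPass()   { activeRenderPass = false; }
        void pipelineBarrier() { assert(!activeRenderPass); }
    };

    void draw_meshes_sketch(CmdBufferSketch* cb, int meshCount) {
        // Layout transitions (barriers) happen first, while no pass is active.
        cb->pipelineBarrier();
        cb->beginRenderPass();
        for (int i = 0; i < meshCount; ++i) {
            bool primitiveTypeChanged = (i % 2 == 1);  // arbitrary trigger
            if (primitiveTypeChanged) {
                // Uniform-buffer updates need barriers, so suspend the pass.
                cb->endRenderPass();
                cb->pipelineBarrier();  // stands in for prepareDrawState()
                cb->beginRenderPass();
            }
            // ... bind geometry and draw ...
        }
        cb->endRenderPass();
    }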


@@ -12,6 +12,19 @@
 
 #define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
 
+VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
+    switch (format) {
+        case VK_FORMAT_S8_UINT:
+            return VK_IMAGE_ASPECT_STENCIL_BIT;
+        case VK_FORMAT_D24_UNORM_S8_UINT: // fallthrough
+        case VK_FORMAT_D32_SFLOAT_S8_UINT:
+            return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
+        default:
+            SkASSERT(GrVkFormatToPixelConfig(format, nullptr));
+            return VK_IMAGE_ASPECT_COLOR_BIT;
+    }
+}
+
 void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
                                VkAccessFlags srcAccessMask,
                                VkAccessFlags dstAccessMask,
@@ -24,7 +37,7 @@ void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
     if (newLayout == fCurrentLayout) {
         return;
     }
-
+    VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fResource->fFormat);
     VkImageMemoryBarrier imageMemoryBarrier = {
         VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
         NULL,                                    // pNext
@@ -35,7 +48,7 @@ void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
         VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex
         VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex
         fResource->fImage,                       // image
-        { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 }  // subresourceRange
+        { aspectFlags, 0, 1, 0, 1 }              // subresourceRange
     };
 
     // TODO: restrict to area of image we're interested in
@@ -91,7 +104,7 @@ const GrVkImage::Resource* GrVkImage::CreateResource(const GrVkGpu* gpu,
                             (VK_IMAGE_TILING_LINEAR == imageDesc.fImageTiling) ? Resource::kLinearTiling_Flag
                                                                                : Resource::kNo_Flags;
 
-    return (new GrVkImage::Resource(image, alloc, flags));
+    return (new GrVkImage::Resource(image, alloc, flags, imageDesc.fFormat));
 }
 
 GrVkImage::~GrVkImage() {


@@ -28,11 +28,17 @@ public:
         VkImage        fImage;
         VkDeviceMemory fAlloc;
         Flags          fFlags;
+        VkFormat       fFormat;
 
-        Resource() : INHERITED(), fImage(VK_NULL_HANDLE), fAlloc(VK_NULL_HANDLE), fFlags(kNo_Flags) {}
+        Resource()
+            : INHERITED()
+            , fImage(VK_NULL_HANDLE)
+            , fAlloc(VK_NULL_HANDLE)
+            , fFlags(kNo_Flags)
+            , fFormat(VK_FORMAT_UNDEFINED) {}
 
-        Resource(VkImage image, VkDeviceMemory alloc, Flags flags)
-            : fImage(image), fAlloc(alloc), fFlags(flags) {}
+        Resource(VkImage image, VkDeviceMemory alloc, Flags flags, VkFormat format)
+            : fImage(image), fAlloc(alloc), fFlags(flags), fFormat(format) {}
 
         ~Resource() override {}
     private:
@@ -44,8 +50,8 @@ public:
     // for wrapped textures
     class BorrowedResource : public Resource {
     public:
-        BorrowedResource(VkImage image, VkDeviceMemory alloc, Flags flags)
-            : Resource(image, alloc, flags) {}
+        BorrowedResource(VkImage image, VkDeviceMemory alloc, Flags flags, VkFormat format)
+            : Resource(image, alloc, flags, format) {}
     private:
         void freeGPUData(const GrVkGpu* gpu) const override;
     };
@@ -70,7 +76,8 @@ public:
 
     VkImageLayout currentLayout() const { return fCurrentLayout; }
 
-    void setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
+    void setImageLayout(const GrVkGpu* gpu,
+                        VkImageLayout newLayout,
                         VkAccessFlags srcAccessMask,
                         VkAccessFlags dstAccessMask,
                         VkPipelineStageFlags srcStageMask,


@@ -408,30 +408,29 @@ void GrVkPipelineState::addUniformResources(GrVkCommandBuffer& commandBuffer) {
 void GrVkPipelineState::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
     if (fPool) {
         fPool->unref(gpu);
-        SkASSERT(fMaxDescriptorSets < (SK_MaxU32 >> 1));
-        if (fMaxDescriptorSets < kMaxDescSetLimit >> 1) {
-            fMaxDescriptorSets = fMaxDescriptorSets << 1;
+        if (fMaxDescriptors < kMaxDescLimit >> 1) {
+            fMaxDescriptors = fMaxDescriptors << 1;
         } else {
-            fMaxDescriptorSets = kMaxDescSetLimit;
+            fMaxDescriptors = kMaxDescLimit;
         }
     }
-    if (fMaxDescriptorSets) {
+    if (fMaxDescriptors) {
         fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
-                                                                             fMaxDescriptorSets);
+                                                                             fMaxDescriptors);
     }
-    SkASSERT(fPool || !fMaxDescriptorSets);
+    SkASSERT(fPool || !fMaxDescriptors);
 }
 
 void GrVkPipelineState::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu, VkDescriptorSet* ds) {
-    if (!fMaxDescriptorSets) {
+    if (!fMaxDescriptors) {
         return;
     }
-    if (fCurrentDescriptorSet == fMaxDescriptorSets) {
+    if (fCurrentDescriptorCount == fMaxDescriptors) {
         this->getNewPool(gpu);
-        fCurrentDescriptorSet = 0;
+        fCurrentDescriptorCount = 0;
     }
-    fCurrentDescriptorSet++;
+    fCurrentDescriptorCount += fDescCountPerSet;
 
     VkDescriptorSetAllocateInfo dsAllocateInfo;
     memset(&dsAllocateInfo, 0, sizeof(VkDescriptorSetAllocateInfo));


@@ -166,10 +166,11 @@ private:
                               uint32_t descCount, GrVkGpu* gpu)
             : fDescLayout(layout)
             , fDescType(type)
-            , fCurrentDescriptorSet(0)
+            , fDescCountPerSet(descCount)
+            , fCurrentDescriptorCount(0)
             , fPool(nullptr) {
-            SkASSERT(descCount < (kMaxDescSetLimit >> 2));
-            fMaxDescriptorSets = descCount << 2;
+            SkASSERT(descCount < kMaxDescLimit >> 2);
+            fMaxDescriptors = fDescCountPerSet << 2;
             this->getNewPool(gpu);
         }
 
@@ -185,12 +186,13 @@ private:
         VkDescriptorSetLayout  fDescLayout;
         VkDescriptorType       fDescType;
-        uint32_t               fMaxDescriptorSets;
-        uint32_t               fCurrentDescriptorSet;
+        uint32_t               fDescCountPerSet;
+        uint32_t               fMaxDescriptors;
+        uint32_t               fCurrentDescriptorCount;
         GrVkDescriptorPool*    fPool;
 
     private:
-        static const uint32_t kMaxDescSetLimit = 1 << 10;
+        static const uint32_t kMaxDescLimit = 1 << 10;
 
        void getNewPool(GrVkGpu* gpu);
    };
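
The DescriptorPoolManager changes above fix the descriptor accounting (item 1): the pool is now sized in descriptors rather than descriptor sets, each allocated set consumes fDescCountPerSet descriptors, and a full pool is replaced by one twice the size, capped at kMaxDescLimit. A self-contained sketch of just that growth logic, with illustrative names (not Skia code):

    #include <cstdint>
    #include <cstdio>

    // Mirrors the counting scheme in DescriptorPoolManager: track descriptors,
    // not sets, since each set may consume several descriptors.
    struct PoolManagerSketch {
        static constexpr uint32_t kMaxDescLimit = 1 << 10;
        uint32_t descCountPerSet;
        uint32_t maxDescriptors;
        uint32_t currentDescriptorCount = 0;

        explicit PoolManagerSketch(uint32_t descCount)
            : descCountPerSet(descCount)
            , maxDescriptors(descCount << 2) {}  // start with room for 4 sets

        void getNewPool() {
            // Double the pool until the cap; the real code also swaps in a
            // fresh VkDescriptorPool and unrefs the old one.
            maxDescriptors = (maxDescriptors < kMaxDescLimit >> 1)
                                     ? maxDescriptors << 1
                                     : kMaxDescLimit;
        }

        void getNewDescriptorSet() {
            if (currentDescriptorCount == maxDescriptors) {
                this->getNewPool();
                currentDescriptorCount = 0;
            }
            // The bug fix: advance by the descriptors a set consumes, not by 1.
            currentDescriptorCount += descCountPerSet;
        }
    };

    int main() {
        PoolManagerSketch mgr(4);  // e.g. 4 descriptors per set
        for (int i = 0; i < 20; ++i) {
            mgr.getNewDescriptorSet();
        }
        std::printf("pool size %u, in use %u\n",
                    mgr.maxDescriptors, mgr.currentDescriptorCount);
        return 0;
    }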


@@ -216,9 +216,12 @@ GrVkRenderTarget::CreateWrappedRenderTarget(GrVkGpu* gpu,
 
     const GrVkImage::Resource* imageResource;
     if (kBorrowed_LifeCycle == lifeCycle) {
-        imageResource = new GrVkImage::BorrowedResource(info->fImage, info->fAlloc, flags);
+        imageResource = new GrVkImage::BorrowedResource(info->fImage,
+                                                        info->fAlloc,
+                                                        flags,
+                                                        info->fFormat);
     } else {
-        imageResource = new GrVkImage::Resource(info->fImage, info->fAlloc, flags);
+        imageResource = new GrVkImage::Resource(info->fImage, info->fAlloc, flags, info->fFormat);
     }
     if (!imageResource) {
         return nullptr;


@@ -42,7 +42,8 @@ GrVkStencilAttachment* GrVkStencilAttachment::Create(GrVkGpu* gpu,
     imageDesc.fLevels = 1;
     imageDesc.fSamples = sampleCnt;
     imageDesc.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
-    imageDesc.fUsageFlags = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
+    imageDesc.fUsageFlags = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
+                            VK_IMAGE_USAGE_TRANSFER_DST_BIT;
     imageDesc.fMemProps = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
 
     const GrVkImage::Resource* imageResource = GrVkImage::CreateResource(gpu, imageDesc);


@@ -87,9 +87,12 @@ GrVkTexture* GrVkTexture::CreateWrappedTexture(GrVkGpu* gpu, const GrSurfaceDesc
 
     const GrVkImage::Resource* imageResource;
     if (kBorrowed_LifeCycle == lifeCycle) {
-        imageResource = new GrVkImage::BorrowedResource(info->fImage, info->fAlloc, flags);
+        imageResource = new GrVkImage::BorrowedResource(info->fImage,
+                                                        info->fAlloc,
+                                                        flags,
+                                                        info->fFormat);
     } else {
-        imageResource = new GrVkImage::Resource(info->fImage, info->fAlloc, flags);
+        imageResource = new GrVkImage::Resource(info->fImage, info->fAlloc, flags, info->fFormat);
     }
     if (!imageResource) {
         return nullptr;


@@ -152,9 +152,12 @@ GrVkTextureRenderTarget::CreateWrappedTextureRenderTarget(GrVkGpu* gpu,
 
     const GrVkImage::Resource* imageResource;
     if (kBorrowed_LifeCycle == lifeCycle) {
-        imageResource = new GrVkImage::BorrowedResource(info->fImage, info->fAlloc, flags);
+        imageResource = new GrVkImage::BorrowedResource(info->fImage,
+                                                        info->fAlloc,
+                                                        flags,
+                                                        info->fFormat);
     } else {
-        imageResource = new GrVkImage::Resource(info->fImage, info->fAlloc, flags);
+        imageResource = new GrVkImage::Resource(info->fImage, info->fAlloc, flags, info->fFormat);
     }
     if (!imageResource) {
         return nullptr;


@@ -240,14 +240,14 @@ bool VulkanTestContext::createSwapchain(uint32_t width, uint32_t height)
                                                   nullptr));
     }
 
-    GrVkFormatToPixelConfig(swapchainCreateInfo.imageFormat, &fPixelConfig);
-    this->createBuffers();
+    this->createBuffers(swapchainCreateInfo.imageFormat);
 
     return true;
 }
 
-void VulkanTestContext::createBuffers() {
+void VulkanTestContext::createBuffers(VkFormat format) {
+    GrVkFormatToPixelConfig(format, &fPixelConfig);
     GR_VK_CALL_ERRCHECK(fBackendContext->fInterface, GetSwapchainImagesKHR(fBackendContext->fDevice,
                                                                            fSwapchain,
                                                                            &fImageCount,
@@ -271,6 +271,7 @@ void VulkanTestContext::createBuffers() {
         info.fAlloc = nullptr;
         info.fImageLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
         info.fImageTiling = VK_IMAGE_TILING_OPTIMAL;
+        info.fFormat = format;
         desc.fWidth = fWidth;
         desc.fHeight = fHeight;
         desc.fConfig = fPixelConfig;
@@ -511,13 +512,14 @@ SkSurface* VulkanTestContext::getBackbufferSurface() {
     GR_VK_CALL_ERRCHECK(fBackendContext->fInterface,
                         EndCommandBuffer(backbuffer->fTransitionCmdBuffers[0]));
 
+    VkPipelineStageFlags waitDstStageFlags = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
     // insert the layout transfer into the queue and wait on the acquire
     VkSubmitInfo submitInfo;
     memset(&submitInfo, 0, sizeof(VkSubmitInfo));
     submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
     submitInfo.waitSemaphoreCount = 1;
     submitInfo.pWaitSemaphores = &backbuffer->fAcquireSemaphore;
-    submitInfo.pWaitDstStageMask = 0;
+    submitInfo.pWaitDstStageMask = &waitDstStageFlags;
     submitInfo.commandBufferCount = 1;
     submitInfo.pCommandBuffers = &backbuffer->fTransitionCmdBuffers[0];
     submitInfo.signalSemaphoreCount = 0;


@@ -64,7 +64,7 @@ private:
     BackbufferInfo* getAvailableBackbuffer();
     bool createSwapchain(uint32_t width, uint32_t height);
-    void createBuffers();
+    void createBuffers(VkFormat format);
     void destroyBuffers();
 
     SkAutoTUnref<const GrVkBackendContext> fBackendContext;