Move Vulkan layout helpers from GrVkMemory to GrVkImage.

Bug: skia:
Change-Id: Iebcf5844a0b469dea1e96e351f91239ff512f708
Reviewed-on: https://skia-review.googlesource.com/129934
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
Author: Greg Daniel, 2018-05-24 12:34:29 -04:00 (committed by Skia Commit-Bot)
parent 4ade54d81b · commit 6ddbafcc89
6 changed files with 65 additions and 65 deletions

src/gpu/vk/GrVkGpu.cpp

@@ -1391,7 +1391,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
     memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
     barrier.pNext = nullptr;
-    barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
+    barrier.srcAccessMask = GrVkImage::LayoutToSrcAccessMask(initialLayout);
     barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
     barrier.oldLayout = initialLayout;
     barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
@@ -1400,7 +1400,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
     barrier.image = image;
     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
-    VK_CALL(CmdPipelineBarrier(cmdBuffer, GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
+    VK_CALL(CmdPipelineBarrier(cmdBuffer, GrVkImage::LayoutToPipelineStageFlags(initialLayout),
                                VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, nullptr, 0, nullptr, 1,
                                &barrier));
     initialLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
@@ -1432,7 +1432,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
     memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
     barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
     barrier.pNext = nullptr;
-    barrier.srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(initialLayout);
+    barrier.srcAccessMask = GrVkImage::LayoutToSrcAccessMask(initialLayout);
     barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
     barrier.oldLayout = initialLayout;
     barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
@@ -1441,7 +1441,7 @@ bool GrVkGpu::createTestingOnlyVkImage(GrPixelConfig config, int w, int h, bool
     barrier.image = image;
     barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
     VK_CALL(CmdPipelineBarrier(cmdBuffer,
-                               GrVkMemory::LayoutToPipelineStageFlags(initialLayout),
+                               GrVkImage::LayoutToPipelineStageFlags(initialLayout),
                                VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
                                0,
                                0, nullptr,

src/gpu/vk/GrVkImage.cpp

@@ -12,6 +12,58 @@
 #define VK_CALL(GPU, X) GR_VK_CALL(GPU->vkInterface(), X)
 
+VkPipelineStageFlags GrVkImage::LayoutToPipelineStageFlags(const VkImageLayout layout) {
+    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
+               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+        return VK_PIPELINE_STAGE_TRANSFER_BIT;
+    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
+               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
+               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
+               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+        return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
+    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+        return VK_PIPELINE_STAGE_HOST_BIT;
+    }
+    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
+    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+}
+
+VkAccessFlags GrVkImage::LayoutToSrcAccessMask(const VkImageLayout layout) {
+    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
+    // color attachment or depth/stencil writes). So we will ignore the
+    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.
+    // We can only directly access the host memory if we are in preinitialized or general layout,
+    // and the image is linear.
+    // TODO: Add check for linear here so we are not always adding host to general, and we should
+    // only be in preinitialized if we are linear
+    VkAccessFlags flags = 0;
+    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
+        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
+                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
+                VK_ACCESS_TRANSFER_WRITE_BIT |
+                VK_ACCESS_TRANSFER_READ_BIT |
+                VK_ACCESS_SHADER_READ_BIT |
+                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
+    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
+        flags = VK_ACCESS_HOST_WRITE_BIT;
+    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
+        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
+    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
+        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
+    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
+        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
+    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
+        flags = VK_ACCESS_TRANSFER_READ_BIT;
+    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
+        flags = VK_ACCESS_SHADER_READ_BIT;
+    }
+    return flags;
+}
+
 VkImageAspectFlags vk_format_to_aspect_flags(VkFormat format) {
     switch (format) {
         case VK_FORMAT_S8_UINT:
@@ -42,8 +94,8 @@ void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout,
         return;
     }
 
-    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(currentLayout);
-    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(currentLayout);
+    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(currentLayout);
+    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineStageFlags(currentLayout);
 
     VkImageAspectFlags aspectFlags = vk_format_to_aspect_flags(fInfo.fFormat);
     VkImageMemoryBarrier imageMemoryBarrier = {

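Together the two helpers supply the source half of a layout transition: LayoutToSrcAccessMask names the prior writes that must be made visible, and LayoutToPipelineStageFlags names the stages the barrier must wait on. A hedged sketch of how a caller combines them into a complete barrier, modeled on the createTestingOnlyVkImage hunks above (cmdBuffer, image, initialLayout, and mipLevels stand in for state the surrounding code would own; plain vkCmdPipelineBarrier is used where Skia would go through VK_CALL):

    VkImageMemoryBarrier barrier;
    memset(&barrier, 0, sizeof(VkImageMemoryBarrier));
    barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
    barrier.pNext = nullptr;
    barrier.srcAccessMask = GrVkImage::LayoutToSrcAccessMask(initialLayout);
    barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
    barrier.oldLayout = initialLayout;
    barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
    barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    barrier.image = image;
    barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
    vkCmdPipelineBarrier(cmdBuffer,
                         GrVkImage::LayoutToPipelineStageFlags(initialLayout),  // wait on work in the old layout
                         VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,                    // before any graphics-stage reads
                         0,            // dependency flags
                         0, nullptr,   // no global memory barriers
                         0, nullptr,   // no buffer barriers
                         1, &barrier); // this image barrier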
src/gpu/vk/GrVkImage.h

@@ -100,6 +100,10 @@ public:
     void setResourceRelease(sk_sp<GrReleaseProcHelper> releaseHelper);
 
+    // Helpers to use for setting the layout of the VkImage
+    static VkPipelineStageFlags LayoutToPipelineStageFlags(const VkImageLayout layout);
+    static VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
+
 protected:
     void releaseImage(const GrVkGpu* gpu);
     void abandonImage();

src/gpu/vk/GrVkMemory.cpp

@@ -246,58 +246,6 @@ void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
     }
 }
 
-VkPipelineStageFlags GrVkMemory::LayoutToPipelineStageFlags(const VkImageLayout layout) {
-    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
-        return VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
-    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout ||
-               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
-        return VK_PIPELINE_STAGE_TRANSFER_BIT;
-    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout ||
-               VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout ||
-               VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL == layout ||
-               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
-        return VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT;
-    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
-        return VK_PIPELINE_STAGE_HOST_BIT;
-    }
-    SkASSERT(VK_IMAGE_LAYOUT_UNDEFINED == layout);
-    return VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
-}
-
-VkAccessFlags GrVkMemory::LayoutToSrcAccessMask(const VkImageLayout layout) {
-    // Currently we assume we will never be doing any explicit shader writes (this doesn't include
-    // color attachment or depth/stencil writes). So we will ignore the
-    // VK_MEMORY_OUTPUT_SHADER_WRITE_BIT.
-    // We can only directly access the host memory if we are in preinitialized or general layout,
-    // and the image is linear.
-    // TODO: Add check for linear here so we are not always adding host to general, and we should
-    // only be in preinitialized if we are linear
-    VkAccessFlags flags = 0;
-    if (VK_IMAGE_LAYOUT_GENERAL == layout) {
-        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
-                VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
-                VK_ACCESS_TRANSFER_WRITE_BIT |
-                VK_ACCESS_TRANSFER_READ_BIT |
-                VK_ACCESS_SHADER_READ_BIT |
-                VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_HOST_READ_BIT;
-    } else if (VK_IMAGE_LAYOUT_PREINITIALIZED == layout) {
-        flags = VK_ACCESS_HOST_WRITE_BIT;
-    } else if (VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL == layout) {
-        flags = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
-    } else if (VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL == layout) {
-        flags = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
-    } else if (VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL == layout) {
-        flags = VK_ACCESS_TRANSFER_WRITE_BIT;
-    } else if (VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL == layout) {
-        flags = VK_ACCESS_TRANSFER_READ_BIT;
-    } else if (VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL == layout) {
-        flags = VK_ACCESS_SHADER_READ_BIT;
-    }
-    return flags;
-}
-
 void GrVkMemory::FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                                   VkDeviceSize size) {
     if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {

src/gpu/vk/GrVkMemory.h

@@ -34,10 +34,6 @@ namespace GrVkMemory {
                              GrVkAlloc* alloc);
     void FreeImageMemory(const GrVkGpu* gpu, bool linearTiling, const GrVkAlloc& alloc);
 
-    VkPipelineStageFlags LayoutToPipelineStageFlags(const VkImageLayout layout);
-    VkAccessFlags LayoutToSrcAccessMask(const VkImageLayout layout);
-
     void FlushMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,
                           VkDeviceSize size);
     void InvalidateMappedAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc, VkDeviceSize offset,

tools/sk_app/VulkanWindowContext.cpp

@@ -12,8 +12,8 @@
 #include "SkSurface.h"
 #include "VulkanWindowContext.h"
 
+#include "vk/GrVkImage.h"
 #include "vk/GrVkInterface.h"
-#include "vk/GrVkMemory.h"
 #include "vk/GrVkUtil.h"
 #include "vk/GrVkTypes.h"
@@ -565,9 +565,9 @@ void VulkanWindowContext::swapBuffers() {
     SkASSERT(imageInfo.fImage == fImages[backbuffer->fImageIndex]);
     VkImageLayout layout = imageInfo.fImageLayout;
-    VkPipelineStageFlags srcStageMask = GrVkMemory::LayoutToPipelineStageFlags(layout);
+    VkPipelineStageFlags srcStageMask = GrVkImage::LayoutToPipelineStageFlags(layout);
     VkPipelineStageFlags dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT;
-    VkAccessFlags srcAccessMask = GrVkMemory::LayoutToSrcAccessMask(layout);
+    VkAccessFlags srcAccessMask = GrVkImage::LayoutToSrcAccessMask(layout);
     VkAccessFlags dstAccessMask = VK_ACCESS_MEMORY_READ_BIT;
     VkImageMemoryBarrier imageMemoryBarrier = {
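The hunk ends just as the barrier struct opens; the pattern matches the ones above, with the swapchain image headed for the presentation engine. A hedged completion of the truncated initializer (every field beyond those computed in the shown context, such as the queue family indices and the present layout target, is an assumption based on that pattern, not text from this commit):

    VkImageMemoryBarrier imageMemoryBarrier = {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,  // sType
        nullptr,                                 // pNext
        srcAccessMask,                           // from GrVkImage::LayoutToSrcAccessMask(layout)
        dstAccessMask,                           // VK_ACCESS_MEMORY_READ_BIT, from the shown context
        layout,                                  // oldLayout: the image's current layout
        VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,         // newLayout: hand off for presentation (assumed)
        VK_QUEUE_FAMILY_IGNORED,                 // srcQueueFamilyIndex (assumed)
        VK_QUEUE_FAMILY_IGNORED,                 // dstQueueFamilyIndex (assumed)
        fImages[backbuffer->fImageIndex],        // image, per the SkASSERT above
        {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}  // subresourceRange: single-mip color image (assumed)
    };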