Miscellaneous updates to handle more Vulkan creation failures.

Bug: skia:9603
Change-Id: Idc282775d75e1d136fdf83e98a2e7e3e871f9b08
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/253958
Reviewed-by: Chris Dalton <csmartdalton@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
This commit is contained in:
Greg Daniel 2019-11-11 13:42:30 -05:00 committed by Skia Commit-Bot
parent a8e985e87a
commit 95f0b1673f
12 changed files with 110 additions and 51 deletions

View File

@@ -65,7 +65,12 @@ void GrVkCommandPool::reset(GrVkGpu* gpu) {
SkASSERT(!fOpen);
fOpen = true;
fPrimaryCommandBuffer->recycleSecondaryCommandBuffers(gpu);
GR_VK_CALL_ERRCHECK(gpu, ResetCommandPool(gpu->device(), fCommandPool, 0));
// We can't use the normal result macro calls here because we may call reset on a different
// thread and we can't be modifying the lost state on the GrVkGpu. We just call
// vkResetCommandPool and assume the "next" vulkan call will catch the lost device.
SkDEBUGCODE(VkResult result = )GR_VK_CALL(gpu->vkInterface(),
ResetCommandPool(gpu->device(), fCommandPool, 0));
SkASSERT(result == VK_SUCCESS || result == VK_ERROR_DEVICE_LOST);
}
void GrVkCommandPool::releaseResources(GrVkGpu* gpu) {

View File

@@ -45,10 +45,6 @@ bool GrVkDescriptorPool::isCompatible(VkDescriptorType type, uint32_t count) con
return fType == type && count <= fCount;
}
void GrVkDescriptorPool::reset(GrVkGpu* gpu) {
GR_VK_CALL_ERRCHECK(gpu, ResetDescriptorPool(gpu->device(), fDescPool, 0));
}
void GrVkDescriptorPool::freeGPUData(GrVkGpu* gpu) const {
// Destroying the VkDescriptorPool will automatically free and delete any VkDescriptorSets
// allocated from the pool.

View File

@@ -24,8 +24,6 @@ public:
VkDescriptorPool descPool() const { return fDescPool; }
void reset(GrVkGpu* gpu);
// Returns whether or not this descriptor pool could be used, assuming it gets fully reset and
// not in use by another draw, to support the requested type and count.
bool isCompatible(VkDescriptorType type, uint32_t count) const;

View File

@@ -60,8 +60,7 @@
#endif
#define VK_CALL(X) GR_VK_CALL(this->vkInterface(), X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RET(this->vkInterface(), RET, X)
#define VK_CALL_ERRCHECK(X) GR_VK_CALL_ERRCHECK(this, X)
#define VK_CALL_RET(RET, X) GR_VK_CALL_RESULT(this, RET, X)
sk_sp<GrGpu> GrVkGpu::Make(const GrVkBackendContext& backendContext,
const GrContextOptions& options, GrContext* context) {
@@ -1498,14 +1497,13 @@ static void set_image_layout(const GrVkInterface* vkInterface, VkCommandBuffer c
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = info->fImage;
barrier.subresourceRange = {VK_IMAGE_ASPECT_COLOR_BIT, 0, mipLevels, 0, 1};
GR_VK_CALL(vkInterface, CmdPipelineBarrier(
cmdBuffer,
srcStageMask,
dstStageMask,
0,
0, nullptr,
0, nullptr,
1, &barrier));
GR_VK_CALL(vkInterface, CmdPipelineBarrier(cmdBuffer,
srcStageMask,
dstStageMask,
0,
0, nullptr,
0, nullptr,
1, &barrier));
info->fImageLayout = newLayout;
}
@@ -1583,7 +1581,7 @@ bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
};
VkCommandBuffer cmdBuffer;
err = VK_CALL(AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
VK_CALL_RET(err, AllocateCommandBuffers(fDevice, &cmdInfo, &cmdBuffer));
if (err) {
GrVkImage::DestroyImageInfo(this, info);
return false;
@@ -1596,8 +1594,12 @@ bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
cmdBufferBeginInfo.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
cmdBufferBeginInfo.pInheritanceInfo = nullptr;
err = VK_CALL(BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
SkASSERT(!err);
VK_CALL_RET(err, BeginCommandBuffer(cmdBuffer, &cmdBufferBeginInfo));
if (err) {
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
GrVkImage::DestroyImageInfo(this, info);
return false;
}
// Set image layout and add barrier
set_image_layout(this->vkInterface(), cmdBuffer, info, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
@@ -1621,7 +1623,7 @@ bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
bufInfo.queueFamilyIndexCount = 0;
bufInfo.pQueueFamilyIndices = nullptr;
err = VK_CALL(CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));
VK_CALL_RET(err, CreateBuffer(fDevice, &bufInfo, nullptr, &buffer));
if (err) {
GrVkImage::DestroyImageInfo(this, info);
@@ -1707,8 +1709,16 @@ bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
}
// End CommandBuffer
err = VK_CALL(EndCommandBuffer(cmdBuffer));
SkASSERT(!err);
VK_CALL_RET(err, EndCommandBuffer(cmdBuffer));
if (err) {
GrVkImage::DestroyImageInfo(this, info);
if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
}
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
return false;
}
// Create Fence for queue
VkFenceCreateInfo fenceInfo;
@@ -1718,8 +1728,16 @@ bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
fenceInfo.flags = 0;
VkFence fence = VK_NULL_HANDLE;
err = VK_CALL(CreateFence(fDevice, &fenceInfo, nullptr, &fence));
SkASSERT(!err);
VK_CALL_RET(err, CreateFence(fDevice, &fenceInfo, nullptr, &fence));
if (err) {
GrVkImage::DestroyImageInfo(this, info);
if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
GrVkMemory::FreeBufferMemory(this, GrVkBuffer::kCopyRead_Type, bufferAlloc);
VK_CALL(DestroyBuffer(fDevice, buffer, nullptr));
}
VK_CALL(FreeCommandBuffers(fDevice, fCmdPool->vkCommandPool(), 1, &cmdBuffer));
return false;
}
VkProtectedSubmitInfo protectedSubmitInfo;
if (fProtectedContext == GrProtected::kYes) {
@@ -1740,10 +1758,11 @@ bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
submitInfo.pCommandBuffers = &cmdBuffer;
submitInfo.signalSemaphoreCount = 0;
submitInfo.pSignalSemaphores = nullptr;
err = VK_CALL(QueueSubmit(this->queue(), 1, &submitInfo, fence));
SkASSERT(!err);
VK_CALL_RET(err, QueueSubmit(this->queue(), 1, &submitInfo, fence));
err = VK_CALL(WaitForFences(this->device(), 1, &fence, VK_TRUE, UINT64_MAX));
if (!err) {
VK_CALL_RET(err, WaitForFences(this->device(), 1, &fence, VK_TRUE, UINT64_MAX));
}
if (VK_SUCCESS != err) {
GrVkImage::DestroyImageInfo(this, info);
if (buffer != VK_NULL_HANDLE) { // workaround for an older NVidia driver crash
@@ -1756,7 +1775,7 @@ bool GrVkGpu::createVkImageForBackendSurface(VkFormat vkFormat,
SkDebugf("Fence failed to signal: %d\n", err);
SK_ABORT("failing");
} else {
SkDebugf("Fence failed: %d\n", err);
SkDebugf("Queue submit or fence wait failed: %d\n", err);
return false;
}
}
@@ -2451,9 +2470,17 @@ GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
createInfo.pNext = nullptr;
createInfo.flags = 0;
VkFence fence = VK_NULL_HANDLE;
VkResult result;
VK_CALL_ERRCHECK(CreateFence(this->device(), &createInfo, nullptr, &fence));
VK_CALL(QueueSubmit(this->queue(), 0, nullptr, fence));
VK_CALL_RET(result, CreateFence(this->device(), &createInfo, nullptr, &fence));
if (result != VK_SUCCESS) {
return 0;
}
VK_CALL_RET(result, QueueSubmit(this->queue(), 0, nullptr, fence));
if (result != VK_SUCCESS) {
VK_CALL(DestroyFence(this->device(), fence, nullptr));
return 0;
}
GR_STATIC_ASSERT(sizeof(GrFence) >= sizeof(VkFence));
return (GrFence)fence;
@@ -2462,7 +2489,8 @@ GrFence SK_WARN_UNUSED_RESULT GrVkGpu::insertFence() {
bool GrVkGpu::waitFence(GrFence fence, uint64_t timeout) {
SkASSERT(VK_NULL_HANDLE != (VkFence)fence);
VkResult result = VK_CALL(WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, timeout));
VkResult result;
VK_CALL_RET(result, WaitForFences(this->device(), 1, (VkFence*)&fence, VK_TRUE, timeout));
return (VK_SUCCESS == result);
}

View File

@@ -197,7 +197,11 @@ bool GrVkImage::InitImageInfo(GrVkGpu* gpu, const ImageDesc& imageDesc, GrVkImag
initialLayout // initialLayout
};
GR_VK_CALL_ERRCHECK(gpu, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
VkResult result;
GR_VK_CALL_RESULT(gpu, result, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
if (result != VK_SUCCESS) {
return false;
}
if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, isLinear, &alloc)) {
VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));

View File

@@ -485,14 +485,18 @@ GrVkPipelineState* GrVkOpsRenderPass::prepareDrawState(
// same place (i.e., the target renderTargetProxy) they had best agree.
SkASSERT(programInfo.origin() == fOrigin);
pipelineState->setAndBindUniforms(fGpu, fRenderTarget, programInfo, currentCB);
if (!pipelineState->setAndBindUniforms(fGpu, fRenderTarget, programInfo, currentCB)) {
return nullptr;
}
// Check whether we need to bind textures between each GrMesh. If not we can bind them all now.
if (!programInfo.hasDynamicPrimProcTextures()) {
auto proxies = programInfo.hasFixedPrimProcTextures() ? programInfo.fixedPrimProcTextures()
: nullptr;
pipelineState->setAndBindTextures(fGpu, programInfo.primProc(), programInfo.pipeline(),
proxies, currentCB);
if (!pipelineState->setAndBindTextures(fGpu, programInfo.primProc(), programInfo.pipeline(),
proxies, currentCB)) {
return nullptr;
}
}
if (!programInfo.pipeline().isScissorEnabled()) {
@@ -601,8 +605,15 @@ void GrVkOpsRenderPass::onDraw(const GrProgramInfo& programInfo,
}
if (hasDynamicTextures) {
auto meshProxies = programInfo.dynamicPrimProcTextures(i);
pipelineState->setAndBindTextures(fGpu, programInfo.primProc(), programInfo.pipeline(),
meshProxies, this->currentCommandBuffer());
if (!pipelineState->setAndBindTextures(fGpu, programInfo.primProc(),
programInfo.pipeline(), meshProxies,
this->currentCommandBuffer())) {
if (fGpu->isDeviceLost()) {
return;
} else {
continue;
}
}
}
SkASSERT(pipelineState);
mesh.sendToGpu(this);

View File

@@ -98,7 +98,7 @@ void GrVkPipelineState::abandonGPUResources() {
}
}
void GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
bool GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
const GrRenderTarget* renderTarget,
const GrProgramInfo& programInfo,
GrVkCommandBuffer* commandBuffer) {
@@ -133,6 +133,9 @@ void GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
fUniformDescriptorSet->recycle(gpu);
}
fUniformDescriptorSet = gpu->resourceProvider().getUniformDescriptorSet();
if (!fUniformDescriptorSet) {
return false;
}
this->writeUniformBuffers(gpu);
}
static const int kUniformDSIdx = GrVkUniformHandler::kUniformBufferDescSet;
@@ -142,9 +145,10 @@ void GrVkPipelineState::setAndBindUniforms(GrVkGpu* gpu,
commandBuffer->addRecycledResource(fUniformDescriptorSet);
commandBuffer->addRecycledResource(fUniformBuffer->resource());
}
return true;
}
void GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
bool GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
const GrPrimitiveProcessor& primProc,
const GrPipeline& pipeline,
const GrTextureProxy* const primProcTextures[],
@@ -199,13 +203,15 @@ void GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
commandBuffer->addRecycledResource(descriptorSet);
commandBuffer->bindDescriptorSets(gpu, this, fPipeline->layout(), kSamplerDSIdx, 1,
descriptorSet->descriptorSet(), 0, nullptr);
return;
return true;
}
}
const GrVkDescriptorSet* descriptorSet =
gpu->resourceProvider().getSamplerDescriptorSet(fSamplerDSHandle);
SkASSERT(descriptorSet);
if (!descriptorSet) {
return false;
}
for (int i = 0; i < fNumSamplers; ++i) {
const GrSamplerState& state = samplerBindings[i].fState;
@@ -260,6 +266,7 @@ void GrVkPipelineState::setAndBindTextures(GrVkGpu* gpu,
commandBuffer->addRecycledResource(descriptorSet);
descriptorSet->recycle(gpu);
}
return true;
}
void set_uniform_descriptor_writes(VkWriteDescriptorSet* descriptorWrite,

View File

@@ -53,13 +53,13 @@ public:
~GrVkPipelineState();
void setAndBindUniforms(GrVkGpu*, const GrRenderTarget*, const GrProgramInfo&,
bool setAndBindUniforms(GrVkGpu*, const GrRenderTarget*, const GrProgramInfo&,
GrVkCommandBuffer*);
/**
* This must be called after setAndBindUniforms() since that function invalidates texture
* bindings.
*/
void setAndBindTextures(GrVkGpu*, const GrPrimitiveProcessor&, const GrPipeline&,
bool setAndBindTextures(GrVkGpu*, const GrPrimitiveProcessor&, const GrPipeline&,
const GrTextureProxy* const primitiveProcessorTextures[],
GrVkCommandBuffer*);

View File

@@ -175,8 +175,12 @@ GrVkPipelineState* GrVkPipelineStateBuilder::finalize(VkRenderPass compatibleRen
layoutCreateInfo.pushConstantRangeCount = 0;
layoutCreateInfo.pPushConstantRanges = nullptr;
GR_VK_CALL_ERRCHECK(fGpu, CreatePipelineLayout(fGpu->device(), &layoutCreateInfo, nullptr,
&pipelineLayout));
VkResult result;
GR_VK_CALL_RESULT(fGpu, result, CreatePipelineLayout(fGpu->device(), &layoutCreateInfo, nullptr,
&pipelineLayout));
if (result != VK_SUCCESS) {
return nullptr;
}
// We need to enable the following extensions so that the compiler can correctly make spir-v
// from our glsl shaders.

View File

@@ -102,7 +102,12 @@ GrVkSampler* GrVkSampler::Create(GrVkGpu* gpu, const GrSamplerState& samplerStat
}
VkSampler sampler;
GR_VK_CALL_ERRCHECK(gpu, CreateSampler(gpu->device(), &createInfo, nullptr, &sampler));
VkResult result;
GR_VK_CALL_RESULT(gpu, result, CreateSampler(gpu->device(), &createInfo, nullptr, &sampler));
if (result != VK_SUCCESS) {
ycbcrConversion->unref(gpu);
return nullptr;
}
return new GrVkSampler(sampler, ycbcrConversion, GenerateKey(samplerState, ycbcrInfo));
}

View File

@@ -10,7 +10,7 @@
#include "src/gpu/vk/GrVkGpu.h"
GrVkSamplerYcbcrConversion* GrVkSamplerYcbcrConversion::Create(
const GrVkGpu* gpu, const GrVkYcbcrConversionInfo& info) {
GrVkGpu* gpu, const GrVkYcbcrConversionInfo& info) {
if (!gpu->vkCaps().supportsYcbcrConversion()) {
return nullptr;
}
@@ -71,9 +71,10 @@ GrVkSamplerYcbcrConversion* GrVkSamplerYcbcrConversion::Create(
}
VkSamplerYcbcrConversion conversion;
GR_VK_CALL(gpu->vkInterface(), CreateSamplerYcbcrConversion(gpu->device(), &ycbcrCreateInfo,
VkResult result;
GR_VK_CALL_RESULT(gpu, result, CreateSamplerYcbcrConversion(gpu->device(), &ycbcrCreateInfo,
nullptr, &conversion));
if (conversion == VK_NULL_HANDLE) {
if (result != VK_SUCCESS) {
return nullptr;
}

View File

@@ -17,7 +17,7 @@ class GrVkGpu;
class GrVkSamplerYcbcrConversion : public GrVkResource {
public:
static GrVkSamplerYcbcrConversion* Create(const GrVkGpu* gpu, const GrVkYcbcrConversionInfo&);
static GrVkSamplerYcbcrConversion* Create(GrVkGpu* gpu, const GrVkYcbcrConversionInfo&);
VkSamplerYcbcrConversion ycbcrConversion() const { return fYcbcrConversion; }