Have vulkan command and descriptor sets handle failed creation.

This requires switching to more factory functions that can fail,
creating the Vulkan objects in the factories (instead of in the
constructors), and making sure calling code handles the failed
creations.

Bug: skia:9603
Change-Id: I22f1852e4cbefe584857f3adccf486cb1326cb68
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/252928
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Chris Dalton <csmartdalton@google.com>
Greg Daniel authored on 2019-11-06 09:21:55 -05:00, committed by Skia Commit-Bot
parent da53dd5e17
commit 9b63dc852e
6 changed files with 184 additions and 129 deletions
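The pattern the commit message describes, as a minimal self-contained C++ sketch before the diff itself. The Widget, Handle, and createHandle names are illustrative stand-ins, not Skia types: all work that can fail moves out of the constructor into a static Create() factory that returns nullptr on failure, so the constructor itself can no longer fail.

#include <cstdio>

// Illustrative stand-ins (not Skia's): a handle plus a C-style API that can
// fail, the way vkCreateCommandPool can.
enum Result { kSuccess, kFailure };
using Handle = int;

static Result createHandle(bool succeed, Handle* out) {
    if (!succeed) {
        return kFailure;
    }
    *out = 42;
    return kSuccess;
}

class Widget {
public:
    // Fallible factory: everything that can fail happens here, so callers
    // get nullptr instead of a half-constructed object.
    static Widget* Create(bool succeed) {
        Handle handle;
        if (createHandle(succeed, &handle) != kSuccess) {
            return nullptr;
        }
        return new Widget(handle);
    }

private:
    // The private ctor only stores an already-created handle; it cannot fail.
    explicit Widget(Handle handle) : fHandle(handle) {}
    Handle fHandle;
};

int main() {
    Widget* ok = Widget::Create(/*succeed=*/true);
    Widget* bad = Widget::Create(/*succeed=*/false);
    std::printf("ok=%p bad=%p\n", (void*)ok, (void*)bad);
    delete ok;  // bad is nullptr; nothing to free
    return 0;
}

The private GrVkDescriptorPool and GrVkCommandPool constructors added in this change follow exactly this shape: they only store handles that the factory has already created successfully.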

src/gpu/vk/GrVkCommandPool.cpp

@@ -25,8 +25,12 @@ GrVkCommandPool* GrVkCommandPool::Create(GrVkGpu* gpu) {
         cmdPoolCreateFlags,                        // CmdPoolCreateFlags
         gpu->queueIndex(),                         // queueFamilyIndex
     };
+    VkResult result;
    VkCommandPool pool;
-    GR_VK_CALL_ERRCHECK(gpu, CreateCommandPool(gpu->device(), &cmdPoolInfo, nullptr, &pool));
+    GR_VK_CALL_RESULT(gpu, result, CreateCommandPool(gpu->device(), &cmdPoolInfo, nullptr, &pool));
+    if (result != VK_SUCCESS) {
+        return nullptr;
+    }
     return new GrVkCommandPool(gpu, pool);
 }

src/gpu/vk/GrVkDescriptorPool.cpp

@@ -11,10 +11,8 @@
 #include "src/gpu/vk/GrVkGpu.h"
 
-GrVkDescriptorPool::GrVkDescriptorPool(GrVkGpu* gpu, VkDescriptorType type, uint32_t count)
-        : INHERITED()
-        , fType (type)
-        , fCount(count) {
+GrVkDescriptorPool* GrVkDescriptorPool::Create(GrVkGpu* gpu, VkDescriptorType type,
+                                               uint32_t count) {
     VkDescriptorPoolSize poolSize;
     memset(&poolSize, 0, sizeof(VkDescriptorPoolSize));
     poolSize.descriptorCount = count;
@@ -30,9 +28,19 @@ GrVkDescriptorPool::GrVkDescriptorPool(GrVkGpu* gpu, VkDescriptorType type, uint
     createInfo.poolSizeCount = 1;
     createInfo.pPoolSizes = &poolSize;
 
-    GR_VK_CALL_ERRCHECK(gpu, CreateDescriptorPool(gpu->device(), &createInfo, nullptr, &fDescPool));
+    VkDescriptorPool pool;
+    VkResult result;
+    GR_VK_CALL_RESULT(gpu, result, CreateDescriptorPool(gpu->device(), &createInfo, nullptr,
+                                                        &pool));
+    if (result != VK_SUCCESS) {
+        return nullptr;
+    }
+    return new GrVkDescriptorPool(pool, type, count);
 }
 
+GrVkDescriptorPool::GrVkDescriptorPool(VkDescriptorPool pool, VkDescriptorType type, uint32_t count)
+        : INHERITED(), fType(type), fCount(count), fDescPool(pool) {}
+
 bool GrVkDescriptorPool::isCompatible(VkDescriptorType type, uint32_t count) const {
     return fType == type && count <= fCount;
 }
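The hunks above funnel every fallible Vulkan call through GR_VK_CALL_RESULT and bail out on any non-VK_SUCCESS result. For readers without the Skia macros, the same shape against the raw Vulkan 1.0 API looks roughly like the sketch below; create_pool_or_null is a hypothetical standalone helper, not part of this change, and it assumes a valid VkDevice (it compiles against vulkan.h but needs a live device to run):

#include <vulkan/vulkan.h>

// Create a descriptor pool, returning VK_NULL_HANDLE on any failure, in the
// same spirit as GrVkDescriptorPool::Create above.
VkDescriptorPool create_pool_or_null(VkDevice device, VkDescriptorType type, uint32_t count) {
    VkDescriptorPoolSize poolSize = {};
    poolSize.type = type;
    poolSize.descriptorCount = count;

    VkDescriptorPoolCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
    createInfo.maxSets = count;
    createInfo.poolSizeCount = 1;
    createInfo.pPoolSizes = &poolSize;

    VkDescriptorPool pool = VK_NULL_HANDLE;
    if (vkCreateDescriptorPool(device, &createInfo, /*pAllocator=*/nullptr, &pool) != VK_SUCCESS) {
        return VK_NULL_HANDLE;  // caller treats this as failed creation
    }
    return pool;
}

GrVkDescriptorPool::Create does the same thing through gpu->vkInterface(), returning nullptr instead of VK_NULL_HANDLE.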

src/gpu/vk/GrVkDescriptorPool.h

@@ -20,7 +20,7 @@ class GrVkGpu;
  */
 class GrVkDescriptorPool : public GrVkResource {
 public:
-    GrVkDescriptorPool(GrVkGpu* gpu, VkDescriptorType type, uint32_t count);
+    static GrVkDescriptorPool* Create(GrVkGpu* gpu, VkDescriptorType type, uint32_t count);
 
     VkDescriptorPool descPool() const { return fDescPool; }
 
@@ -38,6 +38,8 @@ public:
 #endif
 
 private:
+    GrVkDescriptorPool(VkDescriptorPool pool, VkDescriptorType type, uint32_t count);
+
     void freeGPUData(GrVkGpu* gpu) const override;
 
     VkDescriptorType fType;

src/gpu/vk/GrVkDescriptorSetManager.cpp

@@ -23,10 +23,8 @@ GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateUniformManager(GrVkGpu
         stages |= kGeometry_GrShaderFlag;
     }
     visibilities.push_back(stages);
     SkTArray<const GrVkSampler*> samplers;
-    return new GrVkDescriptorSetManager(gpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, visibilities,
-                                        samplers);
+    return Create(gpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, visibilities, samplers);
 }
 
 GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
@@ -38,7 +36,7 @@ GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
         visibilities.push_back(uniformHandler.samplerVisibility(i));
         immutableSamplers.push_back(uniformHandler.immutableSampler(i));
     }
-    return new GrVkDescriptorSetManager(gpu, type, visibilities, immutableSamplers);
+    return Create(gpu, type, visibilities, immutableSamplers);
 }
 
 GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
@@ -48,14 +46,119 @@ GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
     for (int i = 0 ; i < visibilities.count(); ++i) {
         immutableSamplers.push_back(nullptr);
     }
-    return new GrVkDescriptorSetManager(gpu, type, visibilities, immutableSamplers);
+    return Create(gpu, type, visibilities, immutableSamplers);
 }
 
-GrVkDescriptorSetManager::GrVkDescriptorSetManager(
+VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
+    VkShaderStageFlags flags = 0;
+
+    if (visibility & kVertex_GrShaderFlag) {
+        flags |= VK_SHADER_STAGE_VERTEX_BIT;
+    }
+    if (visibility & kGeometry_GrShaderFlag) {
+        flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
+    }
+    if (visibility & kFragment_GrShaderFlag) {
+        flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
+    }
+    return flags;
+}
+
+static bool get_layout_and_desc_count(GrVkGpu* gpu,
+                                      VkDescriptorType type,
+                                      const SkTArray<uint32_t>& visibilities,
+                                      const SkTArray<const GrVkSampler*>& immutableSamplers,
+                                      VkDescriptorSetLayout* descSetLayout,
+                                      uint32_t* descCountPerSet) {
+    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
+        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
+        uint32_t numBindings = visibilities.count();
+        std::unique_ptr<VkDescriptorSetLayoutBinding[]> dsSamplerBindings(
+                new VkDescriptorSetLayoutBinding[numBindings]);
+        for (uint32_t i = 0; i < numBindings; ++i) {
+            uint32_t visibility = visibilities[i];
+            dsSamplerBindings[i].binding = i;
+            dsSamplerBindings[i].descriptorType = type;
+            dsSamplerBindings[i].descriptorCount = 1;
+            dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
+            if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type) {
+                if (immutableSamplers[i]) {
+                    dsSamplerBindings[i].pImmutableSamplers = immutableSamplers[i]->samplerPtr();
+                } else {
+                    dsSamplerBindings[i].pImmutableSamplers = nullptr;
+                }
+            }
+        }
+
+        VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
+        memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+        dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+        dsSamplerLayoutCreateInfo.pNext = nullptr;
+        dsSamplerLayoutCreateInfo.flags = 0;
+        dsSamplerLayoutCreateInfo.bindingCount = numBindings;
+        // Setting to nullptr fixes an error in the param checker validation layer. Even though
+        // bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is
+        // null.
+        dsSamplerLayoutCreateInfo.pBindings = numBindings ? dsSamplerBindings.get() : nullptr;
+
+#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
+        // skia:8713
+        __lsan::ScopedDisabler lsanDisabler;
+#endif
+        VkResult result;
+        GR_VK_CALL_RESULT(gpu, result,
+                          CreateDescriptorSetLayout(gpu->device(),
+                                                    &dsSamplerLayoutCreateInfo,
+                                                    nullptr,
+                                                    descSetLayout));
+        if (result != VK_SUCCESS) {
+            return false;
+        }
+
+        *descCountPerSet = visibilities.count();
+    } else {
+        SkASSERT(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type);
+        static constexpr int kUniformDescPerSet = 1;
+        SkASSERT(kUniformDescPerSet == visibilities.count());
+        // Create Uniform Buffer Descriptor
+        VkDescriptorSetLayoutBinding dsUniBinding;
+        memset(&dsUniBinding, 0, sizeof(dsUniBinding));
+        dsUniBinding.binding = GrVkUniformHandler::kUniformBinding;
+        dsUniBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+        dsUniBinding.descriptorCount = 1;
+        dsUniBinding.stageFlags = visibility_to_vk_stage_flags(visibilities[0]);
+        dsUniBinding.pImmutableSamplers = nullptr;
+
+        VkDescriptorSetLayoutCreateInfo uniformLayoutCreateInfo;
+        memset(&uniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+        uniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+        uniformLayoutCreateInfo.pNext = nullptr;
+        uniformLayoutCreateInfo.flags = 0;
+        uniformLayoutCreateInfo.bindingCount = 1;
+        uniformLayoutCreateInfo.pBindings = &dsUniBinding;
+
+#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
+        // skia:8713
+        __lsan::ScopedDisabler lsanDisabler;
+#endif
+        VkResult result;
+        GR_VK_CALL_RESULT(gpu, result, CreateDescriptorSetLayout(gpu->device(),
+                                                                 &uniformLayoutCreateInfo,
+                                                                 nullptr,
+                                                                 descSetLayout));
+        if (result != VK_SUCCESS) {
+            return false;
+        }
+
+        *descCountPerSet = kUniformDescPerSet;
+    }
+    return true;
+}
+
+GrVkDescriptorSetManager* GrVkDescriptorSetManager::Create(
         GrVkGpu* gpu, VkDescriptorType type,
         const SkTArray<uint32_t>& visibilities,
-        const SkTArray<const GrVkSampler*>& immutableSamplers)
-        : fPoolManager(type, gpu, visibilities, immutableSamplers) {
+        const SkTArray<const GrVkSampler*>& immutableSamplers) {
 #ifdef SK_DEBUG
     if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
         SkASSERT(visibilities.count() == immutableSamplers.count());
@@ -63,6 +166,22 @@ GrVkDescriptorSetManager::GrVkDescriptorSetManager(
         SkASSERT(immutableSamplers.count() == 0);
     }
 #endif
+
+    VkDescriptorSetLayout descSetLayout;
+    uint32_t descCountPerSet;
+    if (!get_layout_and_desc_count(gpu, type, visibilities, immutableSamplers, &descSetLayout,
+                                   &descCountPerSet)) {
+        return nullptr;
+    }
+
+    return new GrVkDescriptorSetManager(gpu, type, descSetLayout, descCountPerSet, visibilities,
+                                        immutableSamplers);
+}
+
+GrVkDescriptorSetManager::GrVkDescriptorSetManager(
+        GrVkGpu* gpu, VkDescriptorType type, VkDescriptorSetLayout descSetLayout,
+        uint32_t descCountPerSet, const SkTArray<uint32_t>& visibilities,
+        const SkTArray<const GrVkSampler*>& immutableSamplers)
+        : fPoolManager(descSetLayout, type, descCountPerSet) {
     for (int i = 0; i < visibilities.count(); ++i) {
         fBindingVisibilities.push_back(visibilities[i]);
     }
@@ -84,7 +203,9 @@ const GrVkDescriptorSet* GrVkDescriptorSetManager::getDescriptorSet(GrVkGpu* gpu
         fFreeSets.removeShuffle(count - 1);
     } else {
         VkDescriptorSet vkDS;
-        fPoolManager.getNewDescriptorSet(gpu, &vkDS);
+        if (!fPoolManager.getNewDescriptorSet(gpu, &vkDS)) {
+            return nullptr;
+        }
 
         ds = new GrVkDescriptorSet(vkDS, fPoolManager.fPool, handle);
     }
@@ -171,110 +292,19 @@ bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
 ////////////////////////////////////////////////////////////////////////////////
 
-VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
-    VkShaderStageFlags flags = 0;
-
-    if (visibility & kVertex_GrShaderFlag) {
-        flags |= VK_SHADER_STAGE_VERTEX_BIT;
-    }
-    if (visibility & kGeometry_GrShaderFlag) {
-        flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
-    }
-    if (visibility & kFragment_GrShaderFlag) {
-        flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
-    }
-    return flags;
-}
-
 GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
+        VkDescriptorSetLayout layout,
         VkDescriptorType type,
-        GrVkGpu* gpu,
-        const SkTArray<uint32_t>& visibilities,
-        const SkTArray<const GrVkSampler*>& immutableSamplers)
-        : fDescType(type)
+        uint32_t descCountPerSet)
+        : fDescLayout(layout)
+        , fDescType(type)
+        , fDescCountPerSet(descCountPerSet)
+        , fMaxDescriptors(kStartNumDescriptors)
         , fCurrentDescriptorCount(0)
         , fPool(nullptr) {
-    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
-        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
-        uint32_t numBindings = visibilities.count();
-        std::unique_ptr<VkDescriptorSetLayoutBinding[]> dsSamplerBindings(
-                new VkDescriptorSetLayoutBinding[numBindings]);
-        for (uint32_t i = 0; i < numBindings; ++i) {
-            uint32_t visibility = visibilities[i];
-            dsSamplerBindings[i].binding = i;
-            dsSamplerBindings[i].descriptorType = type;
-            dsSamplerBindings[i].descriptorCount = 1;
-            dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
-            if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type) {
-                if (immutableSamplers[i]) {
-                    dsSamplerBindings[i].pImmutableSamplers = immutableSamplers[i]->samplerPtr();
-                } else {
-                    dsSamplerBindings[i].pImmutableSamplers = nullptr;
-                }
-            }
-        }
-
-        VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
-        memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
-        dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-        dsSamplerLayoutCreateInfo.pNext = nullptr;
-        dsSamplerLayoutCreateInfo.flags = 0;
-        dsSamplerLayoutCreateInfo.bindingCount = numBindings;
-        // Setting to nullptr fixes an error in the param checker validation layer. Even though
-        // bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is
-        // null.
-        dsSamplerLayoutCreateInfo.pBindings = numBindings ? dsSamplerBindings.get() : nullptr;
-
-#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
-        // skia:8713
-        __lsan::ScopedDisabler lsanDisabler;
-#endif
-        GR_VK_CALL_ERRCHECK(gpu, CreateDescriptorSetLayout(gpu->device(),
-                                                           &dsSamplerLayoutCreateInfo,
-                                                           nullptr,
-                                                           &fDescLayout));
-        fDescCountPerSet = visibilities.count();
-    } else {
-        SkASSERT(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type);
-        GR_STATIC_ASSERT(1 == kUniformDescPerSet);
-        SkASSERT(kUniformDescPerSet == visibilities.count());
-        // Create Uniform Buffer Descriptor
-        VkDescriptorSetLayoutBinding dsUniBinding;
-        memset(&dsUniBinding, 0, sizeof(dsUniBinding));
-        dsUniBinding.binding = GrVkUniformHandler::kUniformBinding;
-        dsUniBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
-        dsUniBinding.descriptorCount = 1;
-        dsUniBinding.stageFlags = visibility_to_vk_stage_flags(visibilities[0]);
-        dsUniBinding.pImmutableSamplers = nullptr;
-
-        VkDescriptorSetLayoutCreateInfo uniformLayoutCreateInfo;
-        memset(&uniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
-        uniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-        uniformLayoutCreateInfo.pNext = nullptr;
-        uniformLayoutCreateInfo.flags = 0;
-        uniformLayoutCreateInfo.bindingCount = 1;
-        uniformLayoutCreateInfo.pBindings = &dsUniBinding;
-
-#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
-        // skia:8713
-        __lsan::ScopedDisabler lsanDisabler;
-#endif
-        GR_VK_CALL_ERRCHECK(gpu, CreateDescriptorSetLayout(gpu->device(),
-                                                           &uniformLayoutCreateInfo,
-                                                           nullptr,
-                                                           &fDescLayout));
-        fDescCountPerSet = kUniformDescPerSet;
-    }
-
-    SkASSERT(fDescCountPerSet < kStartNumDescriptors);
-    fMaxDescriptors = kStartNumDescriptors;
-    SkASSERT(fMaxDescriptors > 0);
-    this->getNewPool(gpu);
 }
 
-void GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
+bool GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
     if (fPool) {
         fPool->unref(gpu);
         uint32_t newPoolSize = fMaxDescriptors + ((fMaxDescriptors + 1) >> 1);
@@ -287,17 +317,19 @@ void GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
     }
     fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
                                                                          fMaxDescriptors);
-    SkASSERT(fPool);
+    return SkToBool(fPool);
 }
 
-void GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
+bool GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
                                                                           VkDescriptorSet* ds) {
     if (!fMaxDescriptors) {
-        return;
+        return false;
    }
     fCurrentDescriptorCount += fDescCountPerSet;
-    if (fCurrentDescriptorCount > fMaxDescriptors) {
-        this->getNewPool(gpu);
+    if (!fPool || fCurrentDescriptorCount > fMaxDescriptors) {
+        if (!this->getNewPool(gpu)) {
+            return false;
+        }
         fCurrentDescriptorCount = fDescCountPerSet;
     }
@@ -308,7 +340,11 @@ void GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGp
     dsAllocateInfo.descriptorPool = fPool->descPool();
     dsAllocateInfo.descriptorSetCount = 1;
     dsAllocateInfo.pSetLayouts = &fDescLayout;
-    GR_VK_CALL_ERRCHECK(gpu, AllocateDescriptorSets(gpu->device(), &dsAllocateInfo, ds));
+    VkResult result;
+    GR_VK_CALL_RESULT(gpu, result, AllocateDescriptorSets(gpu->device(),
+                                                          &dsAllocateInfo,
+                                                          ds));
+    return result == VK_SUCCESS;
 }
 
 void GrVkDescriptorSetManager::DescriptorPoolManager::freeGPUResources(GrVkGpu* gpu) {
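The allocation path now reports failure up the call chain instead of asserting: getNewPool() returns whether a pool could be found or created, getNewDescriptorSet() returns whether a set was allocated, and getDescriptorSet() turns a false into a nullptr for its caller. Below is a simplified, runnable sketch of that bookkeeping; the names and the roughly 1.5x growth formula come from the diff, while the pool and the Vulkan calls are stubbed out:

#include <cstdint>
#include <cstdio>

// Stubbed-down model of DescriptorPoolManager's counting logic. The real code
// allocates VkDescriptorSets from fPool; here pool creation is a stub that
// can be forced to fail to show how `false` propagates to the caller.
struct PoolManagerSketch {
    uint32_t fDescCountPerSet;
    uint32_t fMaxDescriptors = 16;   // kStartNumDescriptors in the real code
    uint32_t fCurrentDescriptorCount = 0;
    bool fPool = false;              // stands in for the GrVkDescriptorPool*
    bool fPoolCreationWorks = true;  // toggle to simulate a failed creation

    bool getNewPool() {
        if (fPool) {
            // Same growth formula as the diff: roughly 1.5x per new pool.
            fMaxDescriptors += (fMaxDescriptors + 1) >> 1;
        }
        fPool = fPoolCreationWorks;
        return fPool;  // the real code returns SkToBool(fPool)
    }

    bool getNewDescriptorSet() {
        fCurrentDescriptorCount += fDescCountPerSet;
        if (!fPool || fCurrentDescriptorCount > fMaxDescriptors) {
            if (!this->getNewPool()) {
                return false;  // getDescriptorSet() maps this to nullptr
            }
            fCurrentDescriptorCount = fDescCountPerSet;
        }
        return true;  // the real code then calls AllocateDescriptorSets
    }
};

int main() {
    PoolManagerSketch mgr{/*fDescCountPerSet=*/4};
    for (int i = 0; i < 6; ++i) {
        std::printf("set %d: %s (max %u)\n", i,
                    mgr.getNewDescriptorSet() ? "ok" : "failed", mgr.fMaxDescriptors);
    }
    mgr.fPoolCreationWorks = false;                     // next pool growth fails
    mgr.fCurrentDescriptorCount = mgr.fMaxDescriptors;  // force a new pool
    std::printf("after forced failure: %s\n",
                mgr.getNewDescriptorSet() ? "ok" : "failed");
    return 0;
}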

src/gpu/vk/GrVkDescriptorSetManager.h

@@ -50,17 +50,15 @@ public:
 private:
     struct DescriptorPoolManager {
-        DescriptorPoolManager(VkDescriptorType type, GrVkGpu* gpu,
-                              const SkTArray<uint32_t>& visibilities,
-                              const SkTArray<const GrVkSampler*>& immutableSamplers);
+        DescriptorPoolManager(VkDescriptorSetLayout, VkDescriptorType type,
+                              uint32_t descCountPerSet);
 
         ~DescriptorPoolManager() {
             SkASSERT(!fDescLayout);
             SkASSERT(!fPool);
         }
 
-        void getNewDescriptorSet(GrVkGpu* gpu, VkDescriptorSet* ds);
+        bool getNewDescriptorSet(GrVkGpu* gpu, VkDescriptorSet* ds);
 
         void freeGPUResources(GrVkGpu* gpu);
         void abandonGPUResources();
@@ -74,16 +72,20 @@ private:
     private:
         enum {
-            kUniformDescPerSet = 1,
             kMaxDescriptors = 1024,
             kStartNumDescriptors = 16, // must be less than kMaxUniformDescriptors
         };
 
-        void getNewPool(GrVkGpu* gpu);
+        bool getNewPool(GrVkGpu* gpu);
     };
 
+    static GrVkDescriptorSetManager* Create(GrVkGpu* gpu,
+                                            VkDescriptorType,
+                                            const SkTArray<uint32_t>& visibilities,
+                                            const SkTArray<const GrVkSampler*>& immutableSamplers);
+
     GrVkDescriptorSetManager(GrVkGpu* gpu,
-                             VkDescriptorType,
+                             VkDescriptorType, VkDescriptorSetLayout, uint32_t descCountPerSet,
                              const SkTArray<uint32_t>& visibilities,
                              const SkTArray<const GrVkSampler*>& immutableSamplers);

src/gpu/vk/GrVkResourceProvider.cpp

@@ -190,7 +190,7 @@ GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
 
 GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
         VkDescriptorType type, uint32_t count) {
-    return new GrVkDescriptorPool(fGpu, type, count);
+    return GrVkDescriptorPool::Create(fGpu, type, count);
 }
 
 GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
@@ -311,6 +311,9 @@ GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
         fAvailableCommandPools.pop_back();
     } else {
         result = GrVkCommandPool::Create(fGpu);
+        if (!result) {
+            return nullptr;
+        }
     }
     SkASSERT(result->unique());
     SkDEBUGCODE(