Have Vulkan command and descriptor sets handle failed creation.

This requires switching to more factory functions that can fail,
creating the Vulkan objects in the factories (instead of the ctors), and
making sure calling code handles the failed creations.

Bug: skia:9603
Change-Id: I22f1852e4cbefe584857f3adccf486cb1326cb68
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/252928
Commit-Queue: Greg Daniel <egdaniel@google.com>
Reviewed-by: Chris Dalton <csmartdalton@google.com>
Authored by Greg Daniel on 2019-11-06 09:21:55 -05:00; committed by Skia Commit-Bot
parent da53dd5e17
commit 9b63dc852e
6 changed files with 184 additions and 129 deletions
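
The recurring pattern across all six files: creation moves out of the constructor into a static Create() factory that returns nullptr when a Vulkan call fails, leaving the constructor infallible. A minimal sketch of the shape using raw Vulkan calls; the wrapper class and its names are illustrative, not the actual Skia code:

    #include <vulkan/vulkan.h>

    // Hypothetical wrapper; the real classes are GrVkCommandPool,
    // GrVkDescriptorPool, and GrVkDescriptorSetManager.
    class PoolWrapper {
    public:
        // The factory is the only way to construct one, and it can fail.
        static PoolWrapper* Create(VkDevice device, uint32_t queueFamilyIndex) {
            VkCommandPoolCreateInfo info = {};
            info.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
            info.queueFamilyIndex = queueFamilyIndex;
            VkCommandPool pool;
            if (vkCreateCommandPool(device, &info, nullptr, &pool) != VK_SUCCESS) {
                return nullptr;  // surface the failure instead of asserting
            }
            return new PoolWrapper(pool);
        }

    private:
        // Private and infallible: it only adopts an already-created handle.
        explicit PoolWrapper(VkCommandPool pool) : fPool(pool) {}
        VkCommandPool fPool;
    };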

src/gpu/vk/GrVkCommandPool.cpp

@@ -25,8 +25,12 @@ GrVkCommandPool* GrVkCommandPool::Create(GrVkGpu* gpu) {
             cmdPoolCreateFlags,                      // CmdPoolCreateFlags
             gpu->queueIndex(),                       // queueFamilyIndex
     };
+    VkResult result;
     VkCommandPool pool;
-    GR_VK_CALL_ERRCHECK(gpu, CreateCommandPool(gpu->device(), &cmdPoolInfo, nullptr, &pool));
+    GR_VK_CALL_RESULT(gpu, result, CreateCommandPool(gpu->device(), &cmdPoolInfo, nullptr, &pool));
+    if (result != VK_SUCCESS) {
+        return nullptr;
+    }
     return new GrVkCommandPool(gpu, pool);
 }
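
GR_VK_CALL_RESULT hands the VkResult back to the call site instead of only asserting on it, so the factory can translate any failure into a nullptr return. Callers in turn must check the result; a sketch of the expected usage (GrVkResourceProvider::findOrCreateCommandPool in the last file of this change does exactly this):

    GrVkCommandPool* pool = GrVkCommandPool::Create(gpu);
    if (!pool) {
        // e.g. VK_ERROR_OUT_OF_DEVICE_MEMORY: propagate the failure
        // upward rather than crash on a half-constructed object.
        return nullptr;
    }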

src/gpu/vk/GrVkDescriptorPool.cpp

@@ -11,10 +11,8 @@
 
 #include "src/gpu/vk/GrVkGpu.h"
 
-GrVkDescriptorPool::GrVkDescriptorPool(GrVkGpu* gpu, VkDescriptorType type, uint32_t count)
-    : INHERITED()
-    , fType (type)
-    , fCount(count) {
+GrVkDescriptorPool* GrVkDescriptorPool::Create(GrVkGpu* gpu, VkDescriptorType type,
+                                               uint32_t count) {
     VkDescriptorPoolSize poolSize;
     memset(&poolSize, 0, sizeof(VkDescriptorPoolSize));
     poolSize.descriptorCount = count;
@@ -30,9 +28,19 @@ GrVkDescriptorPool::GrVkDescriptorPool(GrVkGpu* gpu, VkDescriptorType type, uint
     createInfo.poolSizeCount = 1;
     createInfo.pPoolSizes = &poolSize;
 
-    GR_VK_CALL_ERRCHECK(gpu, CreateDescriptorPool(gpu->device(), &createInfo, nullptr, &fDescPool));
+    VkDescriptorPool pool;
+    VkResult result;
+    GR_VK_CALL_RESULT(gpu, result, CreateDescriptorPool(gpu->device(), &createInfo, nullptr,
+                                                        &pool));
+    if (result != VK_SUCCESS) {
+        return nullptr;
+    }
+    return new GrVkDescriptorPool(pool, type, count);
 }
 
+GrVkDescriptorPool::GrVkDescriptorPool(VkDescriptorPool pool, VkDescriptorType type, uint32_t count)
+        : INHERITED(), fType(type), fCount(count), fDescPool(pool) {}
+
 bool GrVkDescriptorPool::isCompatible(VkDescriptorType type, uint32_t count) const {
     return fType == type && count <= fCount;
 }
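
The descriptor pool gets the same split: Create() owns the fallible CreateDescriptorPool call, while the now-private constructor just records the handle plus the type/count pair that isCompatible() keys on. A hypothetical call site:

    GrVkDescriptorPool* pool =
            GrVkDescriptorPool::Create(gpu, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 16);
    if (!pool) {
        return nullptr;  // no partially-initialized pool exists on failure
    }
    SkASSERT(pool->isCompatible(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 16));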

src/gpu/vk/GrVkDescriptorPool.h

@@ -20,7 +20,7 @@ class GrVkGpu;
  */
 class GrVkDescriptorPool : public GrVkResource {
 public:
-    GrVkDescriptorPool(GrVkGpu* gpu, VkDescriptorType type, uint32_t count);
+    static GrVkDescriptorPool* Create(GrVkGpu* gpu, VkDescriptorType type, uint32_t count);
 
     VkDescriptorPool descPool() const { return fDescPool; }
 
@@ -38,6 +38,8 @@ public:
 #endif
 
 private:
+    GrVkDescriptorPool(VkDescriptorPool pool, VkDescriptorType type, uint32_t count);
+
     void freeGPUData(GrVkGpu* gpu) const override;
 
     VkDescriptorType fType;

src/gpu/vk/GrVkDescriptorSetManager.cpp

@@ -23,10 +23,8 @@ GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateUniformManager(GrVkGpu
         stages |= kGeometry_GrShaderFlag;
     }
     visibilities.push_back(stages);
-
     SkTArray<const GrVkSampler*> samplers;
-    return new GrVkDescriptorSetManager(gpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, visibilities,
-                                        samplers);
+    return Create(gpu, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, visibilities, samplers);
 }
 
 GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
@@ -38,7 +36,7 @@ GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
         visibilities.push_back(uniformHandler.samplerVisibility(i));
         immutableSamplers.push_back(uniformHandler.immutableSampler(i));
     }
-    return new GrVkDescriptorSetManager(gpu, type, visibilities, immutableSamplers);
+    return Create(gpu, type, visibilities, immutableSamplers);
 }
 
 GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
@@ -48,14 +46,119 @@ GrVkDescriptorSetManager* GrVkDescriptorSetManager::CreateSamplerManager(
     for (int i = 0 ; i < visibilities.count(); ++i) {
         immutableSamplers.push_back(nullptr);
     }
-    return new GrVkDescriptorSetManager(gpu, type, visibilities, immutableSamplers);
+    return Create(gpu, type, visibilities, immutableSamplers);
+}
+
+VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
+    VkShaderStageFlags flags = 0;
+    if (visibility & kVertex_GrShaderFlag) {
+        flags |= VK_SHADER_STAGE_VERTEX_BIT;
+    }
+    if (visibility & kGeometry_GrShaderFlag) {
+        flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
+    }
+    if (visibility & kFragment_GrShaderFlag) {
+        flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
+    }
+    return flags;
+}
+
+static bool get_layout_and_desc_count(GrVkGpu* gpu,
+                                      VkDescriptorType type,
+                                      const SkTArray<uint32_t>& visibilities,
+                                      const SkTArray<const GrVkSampler*>& immutableSamplers,
+                                      VkDescriptorSetLayout* descSetLayout,
+                                      uint32_t* descCountPerSet) {
+    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
+        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
+        uint32_t numBindings = visibilities.count();
+        std::unique_ptr<VkDescriptorSetLayoutBinding[]> dsSamplerBindings(
+                new VkDescriptorSetLayoutBinding[numBindings]);
+        for (uint32_t i = 0; i < numBindings; ++i) {
+            uint32_t visibility = visibilities[i];
+            dsSamplerBindings[i].binding = i;
+            dsSamplerBindings[i].descriptorType = type;
+            dsSamplerBindings[i].descriptorCount = 1;
+            dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
+            if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type) {
+                if (immutableSamplers[i]) {
+                    dsSamplerBindings[i].pImmutableSamplers = immutableSamplers[i]->samplerPtr();
+                } else {
+                    dsSamplerBindings[i].pImmutableSamplers = nullptr;
+                }
+            }
+        }
+
+        VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
+        memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+        dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+        dsSamplerLayoutCreateInfo.pNext = nullptr;
+        dsSamplerLayoutCreateInfo.flags = 0;
+        dsSamplerLayoutCreateInfo.bindingCount = numBindings;
+        // Setting to nullptr fixes an error in the param checker validation layer. Even though
+        // bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is
+        // null.
+        dsSamplerLayoutCreateInfo.pBindings = numBindings ? dsSamplerBindings.get() : nullptr;
+
+#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
+        // skia:8713
+        __lsan::ScopedDisabler lsanDisabler;
+#endif
+        VkResult result;
+        GR_VK_CALL_RESULT(gpu, result,
+                          CreateDescriptorSetLayout(gpu->device(),
+                                                    &dsSamplerLayoutCreateInfo,
+                                                    nullptr,
+                                                    descSetLayout));
+        if (result != VK_SUCCESS) {
+            return false;
+        }
+
+        *descCountPerSet = visibilities.count();
+    } else {
+        SkASSERT(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type);
+        static constexpr int kUniformDescPerSet = 1;
+        SkASSERT(kUniformDescPerSet == visibilities.count());
+        // Create Uniform Buffer Descriptor
+        VkDescriptorSetLayoutBinding dsUniBinding;
+        memset(&dsUniBinding, 0, sizeof(dsUniBinding));
+        dsUniBinding.binding = GrVkUniformHandler::kUniformBinding;
+        dsUniBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
+        dsUniBinding.descriptorCount = 1;
+        dsUniBinding.stageFlags = visibility_to_vk_stage_flags(visibilities[0]);
+        dsUniBinding.pImmutableSamplers = nullptr;
+
+        VkDescriptorSetLayoutCreateInfo uniformLayoutCreateInfo;
+        memset(&uniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
+        uniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
+        uniformLayoutCreateInfo.pNext = nullptr;
+        uniformLayoutCreateInfo.flags = 0;
+        uniformLayoutCreateInfo.bindingCount = 1;
+        uniformLayoutCreateInfo.pBindings = &dsUniBinding;
+
+#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
+        // skia:8713
+        __lsan::ScopedDisabler lsanDisabler;
+#endif
+        VkResult result;
+        GR_VK_CALL_RESULT(gpu, result, CreateDescriptorSetLayout(gpu->device(),
+                                                                 &uniformLayoutCreateInfo,
+                                                                 nullptr,
+                                                                 descSetLayout));
+        if (result != VK_SUCCESS) {
+            return false;
+        }
+
+        *descCountPerSet = kUniformDescPerSet;
+    }
+    return true;
+}
+
-GrVkDescriptorSetManager::GrVkDescriptorSetManager(
+GrVkDescriptorSetManager* GrVkDescriptorSetManager::Create(
         GrVkGpu* gpu, VkDescriptorType type,
         const SkTArray<uint32_t>& visibilities,
-        const SkTArray<const GrVkSampler*>& immutableSamplers)
-    : fPoolManager(type, gpu, visibilities, immutableSamplers) {
+        const SkTArray<const GrVkSampler*>& immutableSamplers) {
 #ifdef SK_DEBUG
     if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) {
         SkASSERT(visibilities.count() == immutableSamplers.count());
@@ -63,6 +166,22 @@ GrVkDescriptorSetManager::GrVkDescriptorSetManager(
         SkASSERT(immutableSamplers.count() == 0);
     }
 #endif
+    VkDescriptorSetLayout descSetLayout;
+    uint32_t descCountPerSet;
+    if (!get_layout_and_desc_count(gpu, type, visibilities, immutableSamplers, &descSetLayout,
+                                   &descCountPerSet)) {
+        return nullptr;
+    }
+    return new GrVkDescriptorSetManager(gpu, type, descSetLayout, descCountPerSet, visibilities,
+                                        immutableSamplers);
+}
+
+GrVkDescriptorSetManager::GrVkDescriptorSetManager(
+        GrVkGpu* gpu, VkDescriptorType type, VkDescriptorSetLayout descSetLayout,
+        uint32_t descCountPerSet, const SkTArray<uint32_t>& visibilities,
+        const SkTArray<const GrVkSampler*>& immutableSamplers)
+        : fPoolManager(descSetLayout, type, descCountPerSet) {
     for (int i = 0; i < visibilities.count(); ++i) {
         fBindingVisibilities.push_back(visibilities[i]);
     }
@@ -84,7 +203,9 @@ const GrVkDescriptorSet* GrVkDescriptorSetManager::getDescriptorSet(GrVkGpu* gpu
         fFreeSets.removeShuffle(count - 1);
     } else {
         VkDescriptorSet vkDS;
-        fPoolManager.getNewDescriptorSet(gpu, &vkDS);
+        if (!fPoolManager.getNewDescriptorSet(gpu, &vkDS)) {
+            return nullptr;
+        }
 
         ds = new GrVkDescriptorSet(vkDS, fPoolManager.fPool, handle);
     }
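
Since layout creation now happens in the get_layout_and_desc_count() helper and set allocation in getNewDescriptorSet() can fail, getDescriptorSet() returns nullptr instead of handing back an invalid VkDescriptorSet. A sketch of what a consumer has to do; the concrete call sites live elsewhere in the Vulkan backend, so this fragment is illustrative:

    const GrVkDescriptorSet* ds = manager->getDescriptorSet(gpu, handle);
    if (!ds) {
        return false;  // allocation failed; abandon this draw's setup
    }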
@@ -171,110 +292,19 @@ bool GrVkDescriptorSetManager::isCompatible(VkDescriptorType type,
 
 ////////////////////////////////////////////////////////////////////////////////
 
-VkShaderStageFlags visibility_to_vk_stage_flags(uint32_t visibility) {
-    VkShaderStageFlags flags = 0;
-    if (visibility & kVertex_GrShaderFlag) {
-        flags |= VK_SHADER_STAGE_VERTEX_BIT;
-    }
-    if (visibility & kGeometry_GrShaderFlag) {
-        flags |= VK_SHADER_STAGE_GEOMETRY_BIT;
-    }
-    if (visibility & kFragment_GrShaderFlag) {
-        flags |= VK_SHADER_STAGE_FRAGMENT_BIT;
-    }
-    return flags;
-}
-
 GrVkDescriptorSetManager::DescriptorPoolManager::DescriptorPoolManager(
+        VkDescriptorSetLayout layout,
         VkDescriptorType type,
-        GrVkGpu* gpu,
-        const SkTArray<uint32_t>& visibilities,
-        const SkTArray<const GrVkSampler*>& immutableSamplers)
-    : fDescType(type)
+        uint32_t descCountPerSet)
+        : fDescLayout(layout)
+        , fDescType(type)
+        , fDescCountPerSet(descCountPerSet)
+        , fMaxDescriptors(kStartNumDescriptors)
         , fCurrentDescriptorCount(0)
         , fPool(nullptr) {
-    if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type ||
-        VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER == type) {
-        uint32_t numBindings = visibilities.count();
-        std::unique_ptr<VkDescriptorSetLayoutBinding[]> dsSamplerBindings(
-                new VkDescriptorSetLayoutBinding[numBindings]);
-        for (uint32_t i = 0; i < numBindings; ++i) {
-            uint32_t visibility = visibilities[i];
-            dsSamplerBindings[i].binding = i;
-            dsSamplerBindings[i].descriptorType = type;
-            dsSamplerBindings[i].descriptorCount = 1;
-            dsSamplerBindings[i].stageFlags = visibility_to_vk_stage_flags(visibility);
-            if (VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER == type) {
-                if (immutableSamplers[i]) {
-                    dsSamplerBindings[i].pImmutableSamplers = immutableSamplers[i]->samplerPtr();
-                } else {
-                    dsSamplerBindings[i].pImmutableSamplers = nullptr;
-                }
-            }
-        }
-
-        VkDescriptorSetLayoutCreateInfo dsSamplerLayoutCreateInfo;
-        memset(&dsSamplerLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
-        dsSamplerLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-        dsSamplerLayoutCreateInfo.pNext = nullptr;
-        dsSamplerLayoutCreateInfo.flags = 0;
-        dsSamplerLayoutCreateInfo.bindingCount = numBindings;
-        // Setting to nullptr fixes an error in the param checker validation layer. Even though
-        // bindingCount is 0 (which is valid), it still tries to validate pBindings unless it is
-        // null.
-        dsSamplerLayoutCreateInfo.pBindings = numBindings ? dsSamplerBindings.get() : nullptr;
-
-#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
-        // skia:8713
-        __lsan::ScopedDisabler lsanDisabler;
-#endif
-        GR_VK_CALL_ERRCHECK(gpu, CreateDescriptorSetLayout(gpu->device(),
-                                                           &dsSamplerLayoutCreateInfo,
-                                                           nullptr,
-                                                           &fDescLayout));
-        fDescCountPerSet = visibilities.count();
-    } else {
-        SkASSERT(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER == type);
-        GR_STATIC_ASSERT(1 == kUniformDescPerSet);
-        SkASSERT(kUniformDescPerSet == visibilities.count());
-        // Create Uniform Buffer Descriptor
-        VkDescriptorSetLayoutBinding dsUniBinding;
-        memset(&dsUniBinding, 0, sizeof(dsUniBinding));
-        dsUniBinding.binding = GrVkUniformHandler::kUniformBinding;
-        dsUniBinding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
-        dsUniBinding.descriptorCount = 1;
-        dsUniBinding.stageFlags = visibility_to_vk_stage_flags(visibilities[0]);
-        dsUniBinding.pImmutableSamplers = nullptr;
-
-        VkDescriptorSetLayoutCreateInfo uniformLayoutCreateInfo;
-        memset(&uniformLayoutCreateInfo, 0, sizeof(VkDescriptorSetLayoutCreateInfo));
-        uniformLayoutCreateInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
-        uniformLayoutCreateInfo.pNext = nullptr;
-        uniformLayoutCreateInfo.flags = 0;
-        uniformLayoutCreateInfo.bindingCount = 1;
-        uniformLayoutCreateInfo.pBindings = &dsUniBinding;
-
-#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
-        // skia:8713
-        __lsan::ScopedDisabler lsanDisabler;
-#endif
-        GR_VK_CALL_ERRCHECK(gpu, CreateDescriptorSetLayout(gpu->device(),
-                                                           &uniformLayoutCreateInfo,
-                                                           nullptr,
-                                                           &fDescLayout));
-        fDescCountPerSet = kUniformDescPerSet;
-    }
-
-    SkASSERT(fDescCountPerSet < kStartNumDescriptors);
-    fMaxDescriptors = kStartNumDescriptors;
-    SkASSERT(fMaxDescriptors > 0);
-    this->getNewPool(gpu);
 }
 
-void GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
+bool GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
     if (fPool) {
         fPool->unref(gpu);
         uint32_t newPoolSize = fMaxDescriptors + ((fMaxDescriptors + 1) >> 1);
@@ -287,17 +317,19 @@ void GrVkDescriptorSetManager::DescriptorPoolManager::getNewPool(GrVkGpu* gpu) {
     }
     fPool = gpu->resourceProvider().findOrCreateCompatibleDescriptorPool(fDescType,
                                                                          fMaxDescriptors);
-    SkASSERT(fPool);
+    return SkToBool(fPool);
 }
 
-void GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
+bool GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGpu* gpu,
                                                                           VkDescriptorSet* ds) {
     if (!fMaxDescriptors) {
-        return;
+        return false;
     }
     fCurrentDescriptorCount += fDescCountPerSet;
-    if (fCurrentDescriptorCount > fMaxDescriptors) {
-        this->getNewPool(gpu);
+    if (!fPool || fCurrentDescriptorCount > fMaxDescriptors) {
+        if (!this->getNewPool(gpu)) {
+            return false;
+        }
         fCurrentDescriptorCount = fDescCountPerSet;
     }
@@ -308,7 +340,11 @@ void GrVkDescriptorSetManager::DescriptorPoolManager::getNewDescriptorSet(GrVkGp
     dsAllocateInfo.descriptorPool = fPool->descPool();
     dsAllocateInfo.descriptorSetCount = 1;
     dsAllocateInfo.pSetLayouts = &fDescLayout;
-    GR_VK_CALL_ERRCHECK(gpu, AllocateDescriptorSets(gpu->device(), &dsAllocateInfo, ds));
+    VkResult result;
+    GR_VK_CALL_RESULT(gpu, result, AllocateDescriptorSets(gpu->device(),
+                                                          &dsAllocateInfo,
+                                                          ds));
+    return result == VK_SUCCESS;
 }
 
 void GrVkDescriptorSetManager::DescriptorPoolManager::freeGPUResources(GrVkGpu* gpu) {
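
Two consequences of slimming the constructor are visible above: fMaxDescriptors becomes a member initializer (kStartNumDescriptors) instead of a body assignment, and the old eager this->getNewPool(gpu) call is gone, so the added !fPool test makes getNewDescriptorSet() create the pool lazily on first use. Condensed control flow (a simplification, not the literal code):

    // First call: fPool is null, so getNewPool() runs and may fail.
    if (!fPool || fCurrentDescriptorCount > fMaxDescriptors) {
        if (!this->getNewPool(gpu)) {
            return false;  // pool creation failed; caller sees the failure
        }
        fCurrentDescriptorCount = fDescCountPerSet;
    }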

src/gpu/vk/GrVkDescriptorSetManager.h

@@ -50,17 +50,15 @@ public:
 private:
     struct DescriptorPoolManager {
-        DescriptorPoolManager(VkDescriptorType type, GrVkGpu* gpu,
-                              const SkTArray<uint32_t>& visibilities,
-                              const SkTArray<const GrVkSampler*>& immutableSamplers);
+        DescriptorPoolManager(VkDescriptorSetLayout, VkDescriptorType type,
+                              uint32_t descCountPerSet);
 
         ~DescriptorPoolManager() {
             SkASSERT(!fDescLayout);
             SkASSERT(!fPool);
         }
 
-        void getNewDescriptorSet(GrVkGpu* gpu, VkDescriptorSet* ds);
+        bool getNewDescriptorSet(GrVkGpu* gpu, VkDescriptorSet* ds);
 
         void freeGPUResources(GrVkGpu* gpu);
         void abandonGPUResources();
@@ -74,16 +72,20 @@ private:
     private:
         enum {
-            kUniformDescPerSet = 1,
             kMaxDescriptors = 1024,
             kStartNumDescriptors = 16, // must be less than kMaxUniformDescriptors
         };
 
-        void getNewPool(GrVkGpu* gpu);
+        bool getNewPool(GrVkGpu* gpu);
     };
 
+    static GrVkDescriptorSetManager* Create(GrVkGpu* gpu,
+                                            VkDescriptorType,
+                                            const SkTArray<uint32_t>& visibilities,
+                                            const SkTArray<const GrVkSampler*>& immutableSamplers);
+
     GrVkDescriptorSetManager(GrVkGpu* gpu,
-                             VkDescriptorType,
+                             VkDescriptorType, VkDescriptorSetLayout, uint32_t descCountPerSet,
                              const SkTArray<uint32_t>& visibilities,
                              const SkTArray<const GrVkSampler*>& immutableSamplers);
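
The public entry points (CreateUniformManager and the CreateSamplerManager overloads) keep their signatures; all the fallible work funnels through the new private static Create(). The resulting class shape, condensed from this header with members and unrelated methods omitted:

    class GrVkDescriptorSetManager {
    public:
        // May now return nullptr if descriptor set layout creation fails.
        static GrVkDescriptorSetManager* CreateUniformManager(GrVkGpu* gpu);
    private:
        static GrVkDescriptorSetManager* Create(
                GrVkGpu* gpu, VkDescriptorType,
                const SkTArray<uint32_t>& visibilities,
                const SkTArray<const GrVkSampler*>& immutableSamplers);
        GrVkDescriptorSetManager(GrVkGpu* gpu,
                                 VkDescriptorType, VkDescriptorSetLayout,
                                 uint32_t descCountPerSet,
                                 const SkTArray<uint32_t>& visibilities,
                                 const SkTArray<const GrVkSampler*>& immutableSamplers);
    };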

src/gpu/vk/GrVkResourceProvider.cpp

@@ -190,7 +190,7 @@ GrVkResourceProvider::findRenderPass(const CompatibleRPHandle& compatibleHandle,
 
 GrVkDescriptorPool* GrVkResourceProvider::findOrCreateCompatibleDescriptorPool(
                                                             VkDescriptorType type, uint32_t count) {
-    return new GrVkDescriptorPool(fGpu, type, count);
+    return GrVkDescriptorPool::Create(fGpu, type, count);
 }
 
 GrVkSampler* GrVkResourceProvider::findOrCreateCompatibleSampler(
@@ -311,6 +311,9 @@ GrVkCommandPool* GrVkResourceProvider::findOrCreateCommandPool() {
         fAvailableCommandPools.pop_back();
     } else {
         result = GrVkCommandPool::Create(fGpu);
+        if (!result) {
+            return nullptr;
+        }
     }
     SkASSERT(result->unique());
     SkDEBUGCODE(