Update failure calls in GrVkMemory and shader creation.
Bug: skia:9603
Change-Id: I8571150bb7efe2f127c00d1a06e77ec8fa905d8f
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/255526
Reviewed-by: Jim Van Verth <jvanverth@google.com>
Commit-Queue: Greg Daniel <egdaniel@google.com>
parent b4f8540803
commit b184e9654d
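Note on the pattern applied below: each hunk replaces a bare GR_VK_CALL, where the call site read the raw VkResult on its own, with GR_VK_CALL_RESULT, which also routes the result through the (now non-const) GrVkGpu so failures are handled in one place. The following is only a minimal illustrative sketch of that shape; the DemoVkGpu type, checkVkResult hook, and DEMO_VK_CALL_RESULT macro are hypothetical stand-ins, not Skia's actual definitions.

#include <vulkan/vulkan.h>  // assumption: Vulkan headers are available

// Hypothetical stand-in for GrVkGpu: one central place that inspects a
// failing VkResult (a real GrVkGpu could, e.g., record a device-lost state).
struct DemoVkGpu {
    void checkVkResult(VkResult result) {
        if (result != VK_SUCCESS) {
            // shared failure path instead of ad-hoc handling at every call site
        }
    }
};

// Hypothetical macro mirroring the GR_VK_CALL_RESULT call shape used in the
// hunks below: invoke the Vulkan entry point, store its VkResult, then let
// the gpu object inspect it.
#define DEMO_VK_CALL_RESULT(GPU, RESULT, CALL) \
    do {                                       \
        (RESULT) = vk##CALL;                   \
        (GPU)->checkVkResult(RESULT);          \
    } while (false)

// Usage shaped like the updated call sites (device/buffer/memory are assumed
// to be valid handles created elsewhere):
//     VkResult err;
//     DEMO_VK_CALL_RESULT(gpu, err, BindBufferMemory(device, buffer, memory, /*offset=*/0));
//     if (err) {
//         // unwind, e.g. free the allocation and return false
//     }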
@@ -19,7 +19,7 @@
 #define VALIDATE() do {} while(false)
 #endif
 
-const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) {
+const GrVkBuffer::Resource* GrVkBuffer::Create(GrVkGpu* gpu, const Desc& desc) {
     SkASSERT(!gpu->protectedContext() || (gpu->protectedContext() == desc.fDynamic));
     VkBuffer buffer;
     GrVkAlloc alloc;
@@ -77,7 +77,7 @@ protected:
     };
 
     // convenience routine for raw buffer creation
-    static const Resource* Create(const GrVkGpu* gpu,
+    static const Resource* Create(GrVkGpu* gpu,
                                   const Desc& descriptor);
 
     GrVkBuffer(const Desc& desc, const GrVkBuffer::Resource* resource)
@@ -30,7 +30,7 @@ static BufferUsage get_buffer_usage(GrVkBuffer::Type type, bool dynamic) {
     SK_ABORT("Invalid GrVkBuffer::Type");
 }
 
-bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
+bool GrVkMemory::AllocAndBindBufferMemory(GrVkGpu* gpu,
                                           VkBuffer buffer,
                                           GrVkBuffer::Type type,
                                           bool dynamic,
@@ -62,9 +62,9 @@ bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
     allocator->getAllocInfo(memory, alloc);
 
     // Bind buffer
-    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindBufferMemory(gpu->device(), buffer,
-                                                                   alloc->fMemory,
-                                                                   alloc->fOffset));
+    VkResult err;
+    GR_VK_CALL_RESULT(gpu, err, BindBufferMemory(gpu->device(), buffer, alloc->fMemory,
+                                                 alloc->fOffset));
     if (err) {
         FreeBufferMemory(gpu, type, *alloc);
         return false;
@@ -85,7 +85,7 @@ void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
 
 const VkDeviceSize kMaxSmallImageSize = 256 * 1024;
 
-bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
+bool GrVkMemory::AllocAndBindImageMemory(GrVkGpu* gpu,
                                          VkImage image,
                                          bool linearTiling,
                                          GrVkAlloc* alloc) {
@@ -114,8 +114,9 @@ bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
     allocator->getAllocInfo(memory, alloc);
 
     // Bind buffer
-    VkResult err = GR_VK_CALL(gpu->vkInterface(), BindImageMemory(gpu->device(), image,
-                                                                  alloc->fMemory, alloc->fOffset));
+    VkResult err;
+    GR_VK_CALL_RESULT(gpu, err, BindImageMemory(gpu->device(), image, alloc->fMemory,
+                                                alloc->fOffset));
     if (err) {
         FreeImageMemory(gpu, linearTiling, *alloc);
         return false;
@@ -134,7 +135,7 @@ void GrVkMemory::FreeImageMemory(const GrVkGpu* gpu, bool linearTiling,
     }
 }
 
-void* GrVkMemory::MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
+void* GrVkMemory::MapAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc) {
     SkASSERT(GrVkAlloc::kMappable_Flag & alloc.fFlags);
 #ifdef SK_DEBUG
     if (alloc.fFlags & GrVkAlloc::kNoncoherent_Flag) {
@@ -149,9 +150,9 @@ void* GrVkMemory::MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc) {
     }
 
     void* mapPtr;
-    VkResult err = GR_VK_CALL(gpu->vkInterface(), MapMemory(gpu->device(), alloc.fMemory,
-                                                            alloc.fOffset,
-                                                            alloc.fSize, 0, &mapPtr));
+    VkResult err;
+    GR_VK_CALL_RESULT(gpu, err, MapMemory(gpu->device(), alloc.fMemory, alloc.fOffset, alloc.fSize,
+                                          0, &mapPtr));
     if (err) {
         mapPtr = nullptr;
     }
@@ -19,14 +19,14 @@ namespace GrVkMemory {
     * Allocates vulkan device memory and binds it to the gpu's device for the given object.
     * Returns true if allocation succeeded.
     */
-    bool AllocAndBindBufferMemory(const GrVkGpu* gpu,
+    bool AllocAndBindBufferMemory(GrVkGpu* gpu,
                                   VkBuffer buffer,
                                   GrVkBuffer::Type type,
                                   bool dynamic,
                                   GrVkAlloc* alloc);
     void FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type, const GrVkAlloc& alloc);
 
-    bool AllocAndBindImageMemory(const GrVkGpu* gpu,
+    bool AllocAndBindImageMemory(GrVkGpu* gpu,
                                  VkImage image,
                                  bool linearTiling,
                                  GrVkAlloc* alloc);
@@ -36,7 +36,7 @@ namespace GrVkMemory {
     // the hood, we may map more than the range of the GrVkAlloc (e.g. the entire VkDeviceMemory),
     // but the pointer returned will always be to the start of the GrVkAlloc. The caller should also
     // never assume more than the GrVkAlloc block has been mapped.
-    void* MapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc);
+    void* MapAlloc(GrVkGpu* gpu, const GrVkAlloc& alloc);
     void UnmapAlloc(const GrVkGpu* gpu, const GrVkAlloc& alloc);
 
     // For the Flush and Invalidate calls, the offset should be relative to the GrVkAlloc. Thus this
@@ -112,7 +112,7 @@ SkSL::Program::Kind vk_shader_stage_to_skiasl_kind(VkShaderStageFlagBits stage)
     return SkSL::Program::kFragment_Kind;
 }
 
-bool GrCompileVkShaderModule(const GrVkGpu* gpu,
+bool GrCompileVkShaderModule(GrVkGpu* gpu,
                              const SkSL::String& shaderString,
                              VkShaderStageFlagBits stage,
                              VkShaderModule* shaderModule,
@@ -138,7 +138,7 @@ bool GrCompileVkShaderModule(const GrVkGpu* gpu,
     return GrInstallVkShaderModule(gpu, *outSPIRV, stage, shaderModule, stageInfo);
 }
 
-bool GrInstallVkShaderModule(const GrVkGpu* gpu,
+bool GrInstallVkShaderModule(GrVkGpu* gpu,
                              const SkSL::String& spirv,
                              VkShaderStageFlagBits stage,
                              VkShaderModule* shaderModule,
@@ -151,10 +151,9 @@ bool GrInstallVkShaderModule(const GrVkGpu* gpu,
     moduleCreateInfo.codeSize = spirv.size();
     moduleCreateInfo.pCode = (const uint32_t*)spirv.c_str();
 
-    VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateShaderModule(gpu->device(),
-                                                                     &moduleCreateInfo,
-                                                                     nullptr,
-                                                                     shaderModule));
+    VkResult err;
+    GR_VK_CALL_RESULT(gpu, err, CreateShaderModule(gpu->device(), &moduleCreateInfo, nullptr,
+                                                   shaderModule));
     if (err) {
         return false;
     }
@@ -50,7 +50,7 @@ bool GrVkFormatColorTypePairIsValid(VkFormat, GrColorType);
 
 bool GrSampleCountToVkSampleCount(uint32_t samples, VkSampleCountFlagBits* vkSamples);
 
-bool GrCompileVkShaderModule(const GrVkGpu* gpu,
+bool GrCompileVkShaderModule(GrVkGpu* gpu,
                              const SkSL::String& shaderString,
                              VkShaderStageFlagBits stage,
                              VkShaderModule* shaderModule,
@@ -59,7 +59,7 @@ bool GrCompileVkShaderModule(const GrVkGpu* gpu,
                             SkSL::String* outSPIRV,
                             SkSL::Program::Inputs* outInputs);
 
-bool GrInstallVkShaderModule(const GrVkGpu* gpu,
+bool GrInstallVkShaderModule(GrVkGpu* gpu,
                              const SkSL::String& spirv,
                              VkShaderStageFlagBits stage,
                              VkShaderModule* shaderModule,