2017-11-10 15:03:05 +00:00
|
|
|
/*
|
|
|
|
* Copyright 2017 Google Inc.
|
|
|
|
*
|
|
|
|
* Use of this source code is governed by a BSD-style license that can be
|
|
|
|
* found in the LICENSE file.
|
|
|
|
*/
|
|
|
|
|
2019-04-23 17:05:21 +00:00
|
|
|
#include "tools/gpu/vk/VkTestUtils.h"
|
2017-11-10 15:03:05 +00:00
|
|
|
|
|
|
|
#ifdef SK_VULKAN
|
|
|
|
|
2019-05-22 13:57:18 +00:00
|
|
|
#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
|
|
|
|
#if defined _WIN32
|
|
|
|
#define SK_GPU_TOOLS_VK_LIBRARY_NAME "vulkan-1.dll"
|
|
|
|
#else
|
|
|
|
#define SK_GPU_TOOLS_VK_LIBRARY_NAME "libvulkan.so"
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
2020-07-28 19:38:02 +00:00
|
|
|
#include <algorithm>
|
|
|
|
|
2020-01-02 16:22:39 +00:00
|
|
|
#if defined(SK_BUILD_FOR_UNIX)
|
|
|
|
#include <execinfo.h>
|
|
|
|
#endif
|
2019-04-23 17:05:21 +00:00
|
|
|
#include "include/gpu/vk/GrVkBackendContext.h"
|
|
|
|
#include "include/gpu/vk/GrVkExtensions.h"
|
|
|
|
#include "src/core/SkAutoMalloc.h"
|
|
|
|
#include "src/ports/SkOSLibrary.h"
|
2017-11-10 15:03:05 +00:00
|
|
|
|
2019-02-12 16:14:47 +00:00
|
|
|
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
|
|
|
|
#include <sanitizer/lsan_interface.h>
|
|
|
|
#endif
|
|
|
|
|
2017-11-10 15:03:05 +00:00
|
|
|
namespace sk_gpu_test {
|
|
|
|
|
|
|
|
bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
|
2018-08-01 13:19:45 +00:00
|
|
|
PFN_vkGetDeviceProcAddr* devProc) {
|
2017-11-10 15:03:05 +00:00
|
|
|
static void* vkLib = nullptr;
|
|
|
|
static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
|
|
|
|
static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
|
|
|
|
if (!vkLib) {
|
2020-07-10 15:43:59 +00:00
|
|
|
vkLib = SkLoadDynamicLibrary(SK_GPU_TOOLS_VK_LIBRARY_NAME);
|
2017-11-10 15:03:05 +00:00
|
|
|
if (!vkLib) {
|
|
|
|
return false;
|
|
|
|
}
|
2020-07-10 15:43:59 +00:00
|
|
|
localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib,
|
2017-11-10 15:03:05 +00:00
|
|
|
"vkGetInstanceProcAddr");
|
2020-07-10 15:43:59 +00:00
|
|
|
localDevProc = (PFN_vkGetDeviceProcAddr) SkGetProcedureAddress(vkLib,
|
2017-11-10 15:03:05 +00:00
|
|
|
"vkGetDeviceProcAddr");
|
|
|
|
}
|
|
|
|
if (!localInstProc || !localDevProc) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
*instProc = localInstProc;
|
|
|
|
*devProc = localDevProc;
|
|
|
|
return true;
|
|
|
|
}
|
2018-07-02 20:15:37 +00:00
|
|
|
|
|
|
|
////////////////////////////////////////////////////////////////////////////////
|
|
|
|
// Helper code to set up Vulkan context objects
|
|
|
|
|
|
|
|
#ifdef SK_ENABLE_VK_LAYERS
|
|
|
|
// Validation layers we try to enable when SK_ENABLE_VK_LAYERS is defined.
// Each name is matched against the layers the loader reports as available.
const char* kDebugLayerNames[] = {
    // single merged layer
    "VK_LAYER_KHRONOS_validation",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};
|
|
|
|
|
2018-08-29 19:56:26 +00:00
|
|
|
// Clears the low 12 "patch" bits of a packed Vulkan version number, leaving
// only the major/minor fields for comparison.
static uint32_t remove_patch_version(uint32_t specVersion) {
    return specVersion & ~(uint32_t)0xFFFu;
}
|
|
|
|
|
|
|
|
// Returns the index into layers array for the layer we want. Returns -1 if not supported.
|
|
|
|
static int should_include_debug_layer(const char* layerName,
|
|
|
|
uint32_t layerCount, VkLayerProperties* layers,
|
|
|
|
uint32_t version) {
|
|
|
|
for (uint32_t i = 0; i < layerCount; ++i) {
|
|
|
|
if (!strcmp(layerName, layers[i].layerName)) {
|
|
|
|
// Since the layers intercept the vulkan calls and forward them on, we need to make sure
|
|
|
|
// layer was written against a version that isn't older than the version of Vulkan we're
|
|
|
|
// using so that it has all the api entry points.
|
|
|
|
if (version <= remove_patch_version(layers[i].specVersion)) {
|
|
|
|
return i;
|
|
|
|
}
|
|
|
|
return -1;
|
2018-08-01 17:25:41 +00:00
|
|
|
}
|
2018-08-29 19:56:26 +00:00
|
|
|
|
2018-07-02 20:15:37 +00:00
|
|
|
}
|
2018-08-29 19:56:26 +00:00
|
|
|
return -1;
|
2018-08-01 17:25:41 +00:00
|
|
|
}
|
2018-08-02 17:55:49 +00:00
|
|
|
|
2020-01-02 16:22:39 +00:00
|
|
|
// Dumps the current call stack to stderr (fd 2) where a platform
// implementation exists; a no-op elsewhere.
static void print_backtrace() {
#if defined(SK_BUILD_FOR_UNIX)
    void* frames[64];
    int depth = backtrace(frames, SK_ARRAY_COUNT(frames));
    backtrace_symbols_fd(frames, depth, 2);
#else
    // Please add implementations for other platforms.
#endif
}
|
|
|
|
|
2018-07-02 20:16:44 +00:00
|
|
|
// VK_EXT_debug_report callback: logs validation output, suppresses a few
// known-bogus error VUIDs, and aborts (in debug builds) on real errors.
// Returning VK_TRUE from the error path tells the layer to skip the call.
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
        VkDebugReportFlagsEXT flags,
        VkDebugReportObjectTypeEXT objectType,
        uint64_t object,
        size_t location,
        int32_t messageCode,
        const char* pLayerPrefix,
        const char* pMessage,
        void* pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        // Known validation-layer false positives we deliberately ignore.
        static const char* const kSuppressedVUIDs[] = {
            // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
            "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521",
            "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522",
            // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2171
            "VUID-vkCmdDraw-None-02686",
            "VUID-vkCmdDrawIndexed-None-02686",
        };
        for (const char* vuid : kSuppressedVUIDs) {
            if (strstr(pMessage, vuid)) {
                return VK_FALSE;
            }
        }
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
        SkDEBUGFAIL("Vulkan debug layer error");
        return VK_TRUE; // skip further layers
    }
    if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
|
|
|
|
#endif
|
|
|
|
|
2018-08-01 17:25:41 +00:00
|
|
|
// Declares a local PFN_vk##F named F, resolved via the getProc in scope at
// the expansion site; may be null — callers must check before use.
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)
|
|
|
|
|
|
|
|
// Enumerates the instance-level layers (debug builds only) and extensions we
// will request at VkInstance creation, appending them to the out-params.
// |specVersion| is the packed instance Vulkan version used to filter layers.
// Returns false if getProc is null, the enumeration entry points are missing,
// or any enumeration call fails. NOTE(review): a count reported by the first
// enumeration call is assumed to still be valid for the second — presumably
// fine in practice, but counts can change between calls per the Vulkan spec.
static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
                                                uint32_t specVersion,
                                                SkTArray<VkExtensionProperties>* instanceExtensions,
                                                SkTArray<VkLayerProperties>* instanceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    // These two can be queried with a null instance handle.
    GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);

    if (!EnumerateInstanceExtensionProperties ||
        !EnumerateInstanceLayerProperties) {
        return false;
    }

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers: two-call pattern (query count, then fill).
    res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    // Only keep the debug layers we asked for, and only if they are new
    // enough for the (patch-stripped) Vulkan version we are running.
    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        instanceExtensions->push_back() = extensions[i];
    }
    delete [] extensions;

    // via explicitly enabled layers: each enabled layer may expose
    // additional instance extensions; duplicates are not filtered here.
    layerCount = instanceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}
|
|
|
|
|
|
|
|
// Enumerates the device-level layers (debug builds only) and extensions for
// |physDev|, appending them to the out-params. |specVersion| is the packed
// device Vulkan version used to filter layers. Mirrors
// init_instance_extensions_and_layers but uses the device enumeration entry
// points, which are resolved through |inst|. Returns false on any failure.
static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
                                              VkInstance inst, VkPhysicalDevice physDev,
                                              SkTArray<VkExtensionProperties>* deviceExtensions,
                                              SkTArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // Two-call pattern: query count, then fill.
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    // Keep only the requested debug layers that are new enough for the
    // (patch-stripped) device Vulkan version.
    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        deviceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers: each enabled layer may expose extra
    // device extensions; duplicates are not filtered here.
    layerCount = deviceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}
|
|
|
|
|
2018-11-30 20:33:19 +00:00
|
|
|
// Declares a local grVk##name resolved via the getProc in scope at the
// expansion site. No null check — callers must test the pointer themselves.
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

// As above, but on lookup failure logs, tears down the instance when a
// non-null device handle was passed (i.e. instance creation had already
// succeeded), and makes the ENCLOSING function return false. Relies on
// getProc, inst, debugCallback, and hasDebugExtension being in scope at the
// expansion site.
#define ACQUIRE_VK_PROC(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (device != VK_NULL_HANDLE) { \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

// As ACQUIRE_VK_PROC, but on failure only logs and returns false from the
// enclosing function — no instance cleanup.
#define ACQUIRE_VK_PROC_LOCAL(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return false; \
        } \
    } while (0)
|
2018-07-02 20:16:44 +00:00
|
|
|
|
2019-06-24 14:53:09 +00:00
|
|
|
static bool destroy_instance(GrVkGetProc getProc, VkInstance inst,
|
2018-07-02 20:16:44 +00:00
|
|
|
VkDebugReportCallbackEXT* debugCallback,
|
|
|
|
bool hasDebugExtension) {
|
|
|
|
if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
|
|
|
|
ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
|
|
|
|
grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
|
|
|
|
*debugCallback = VK_NULL_HANDLE;
|
|
|
|
}
|
|
|
|
ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
|
|
|
|
grVkDestroyInstance(inst, nullptr);
|
2019-06-24 14:53:09 +00:00
|
|
|
return true;
|
2018-07-02 20:16:44 +00:00
|
|
|
}
|
|
|
|
|
2019-06-24 14:53:09 +00:00
|
|
|
// Builds the pNext chain of extension-feature structs on |features| and then
// queries the physical device to populate them. Each chained struct is
// heap-allocated with sk_malloc_throw and linked into features->pNext;
// NOTE(review): nothing here frees them, including on the failure return —
// presumably the caller owns the chain and frees it (e.g. a
// FreeVulkanFeaturesStructs-style helper); confirm against callers.
// Returns false only when |isProtected| was requested but the device does not
// support protected memory.
static bool setup_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                           uint32_t physDeviceVersion, GrVkExtensions* extensions,
                           VkPhysicalDeviceFeatures2* features, bool isProtected) {
    // Querying features via VkPhysicalDeviceFeatures2 requires either Vulkan
    // 1.1+ or the get_physical_device_properties2 extension.
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        // Protected memory is a core 1.1 feature; caller has already checked.
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
            (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
                sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    // Advanced blend features, if the extension (rev >= 2) is present.
    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    // YCbCr sampler conversion: core in 1.1, otherwise via extension.
    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        // Pre-set to VK_TRUE; the query below overwrites it with the actual
        // device support value.
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    // Populate the whole chain from the device: core entry point on 1.1+,
    // otherwise the KHR extension variant.
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    // Protected memory was explicitly requested, so fail if unsupported.
    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    return true;
    // If we want to disable any extension features do so here.
}
|
|
|
|
|
2018-08-01 13:19:45 +00:00
|
|
|
bool CreateVkBackendContext(GrVkGetProc getProc,
|
2018-07-02 20:15:37 +00:00
|
|
|
GrVkBackendContext* ctx,
|
2018-08-01 17:25:41 +00:00
|
|
|
GrVkExtensions* extensions,
|
2018-08-08 13:23:18 +00:00
|
|
|
VkPhysicalDeviceFeatures2* features,
|
2018-07-02 20:16:44 +00:00
|
|
|
VkDebugReportCallbackEXT* debugCallback,
|
2018-07-02 20:15:37 +00:00
|
|
|
uint32_t* presentQueueIndexPtr,
|
2019-06-24 14:53:09 +00:00
|
|
|
CanPresentFn canPresent,
|
|
|
|
bool isProtected) {
|
2018-08-02 17:55:49 +00:00
|
|
|
VkResult err;
|
|
|
|
|
|
|
|
ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
|
|
|
|
uint32_t instanceVersion = 0;
|
|
|
|
if (!grVkEnumerateInstanceVersion) {
|
|
|
|
instanceVersion = VK_MAKE_VERSION(1, 0, 0);
|
|
|
|
} else {
|
|
|
|
err = grVkEnumerateInstanceVersion(&instanceVersion);
|
|
|
|
if (err) {
|
2019-06-24 14:53:09 +00:00
|
|
|
SkDebugf("failed to enumerate instance version. Err: %d\n", err);
|
2018-08-02 17:55:49 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
|
2019-06-24 14:53:09 +00:00
|
|
|
if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
|
|
|
|
SkDebugf("protected requires vk instance version 1.1\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-01-28 18:15:05 +00:00
|
|
|
uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
|
|
|
|
if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
|
|
|
|
// If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
|
|
|
|
// instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
|
|
|
|
// api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
|
|
|
|
// since that is the highest vulkan version.
|
|
|
|
apiVersion = VK_MAKE_VERSION(1, 1, 0);
|
|
|
|
}
|
|
|
|
|
2020-02-07 15:36:46 +00:00
|
|
|
instanceVersion = std::min(instanceVersion, apiVersion);
|
2018-08-02 17:55:49 +00:00
|
|
|
|
2018-07-02 20:15:37 +00:00
|
|
|
VkPhysicalDevice physDev;
|
|
|
|
VkDevice device;
|
|
|
|
VkInstance inst;
|
|
|
|
|
|
|
|
const VkApplicationInfo app_info = {
|
|
|
|
VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
|
|
|
|
nullptr, // pNext
|
|
|
|
"vktest", // pApplicationName
|
|
|
|
0, // applicationVersion
|
|
|
|
"vktest", // pEngineName
|
|
|
|
0, // engineVerison
|
2019-01-28 18:15:05 +00:00
|
|
|
apiVersion, // apiVersion
|
2018-07-02 20:15:37 +00:00
|
|
|
};
|
|
|
|
|
2018-08-01 17:25:41 +00:00
|
|
|
SkTArray<VkLayerProperties> instanceLayers;
|
|
|
|
SkTArray<VkExtensionProperties> instanceExtensions;
|
2018-07-02 20:15:37 +00:00
|
|
|
|
2018-08-02 17:55:49 +00:00
|
|
|
if (!init_instance_extensions_and_layers(getProc, instanceVersion,
|
2018-08-01 17:25:41 +00:00
|
|
|
&instanceExtensions,
|
|
|
|
&instanceLayers)) {
|
|
|
|
return false;
|
2018-07-02 20:15:37 +00:00
|
|
|
}
|
|
|
|
|
2018-08-01 17:25:41 +00:00
|
|
|
SkTArray<const char*> instanceLayerNames;
|
|
|
|
SkTArray<const char*> instanceExtensionNames;
|
|
|
|
for (int i = 0; i < instanceLayers.count(); ++i) {
|
|
|
|
instanceLayerNames.push_back(instanceLayers[i].layerName);
|
2018-07-02 20:15:37 +00:00
|
|
|
}
|
2018-08-01 17:25:41 +00:00
|
|
|
for (int i = 0; i < instanceExtensions.count(); ++i) {
|
2020-08-16 03:22:53 +00:00
|
|
|
if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6) != 0) {
|
2018-08-01 17:25:41 +00:00
|
|
|
instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
|
|
|
|
}
|
2018-07-02 20:15:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
const VkInstanceCreateInfo instance_create = {
|
|
|
|
VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
|
|
|
|
nullptr, // pNext
|
|
|
|
0, // flags
|
|
|
|
&app_info, // pApplicationInfo
|
|
|
|
(uint32_t) instanceLayerNames.count(), // enabledLayerNameCount
|
|
|
|
instanceLayerNames.begin(), // ppEnabledLayerNames
|
|
|
|
(uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
|
|
|
|
instanceExtensionNames.begin(), // ppEnabledExtensionNames
|
|
|
|
};
|
|
|
|
|
2018-08-01 17:25:41 +00:00
|
|
|
bool hasDebugExtension = false;
|
|
|
|
|
2018-07-02 20:15:37 +00:00
|
|
|
ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
|
|
|
|
err = grVkCreateInstance(&instance_create, nullptr, &inst);
|
|
|
|
if (err < 0) {
|
|
|
|
SkDebugf("vkCreateInstance failed: %d\n", err);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-07-02 20:16:44 +00:00
|
|
|
#ifdef SK_ENABLE_VK_LAYERS
|
|
|
|
*debugCallback = VK_NULL_HANDLE;
|
|
|
|
for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
|
|
|
|
if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
|
|
|
|
hasDebugExtension = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (hasDebugExtension) {
|
|
|
|
// Setup callback creation information
|
|
|
|
VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
|
|
|
|
callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
|
|
|
|
callbackCreateInfo.pNext = nullptr;
|
|
|
|
callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
|
|
|
|
VK_DEBUG_REPORT_WARNING_BIT_EXT |
|
|
|
|
// VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
|
|
|
|
// VK_DEBUG_REPORT_DEBUG_BIT_EXT |
|
|
|
|
VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
|
|
|
|
callbackCreateInfo.pfnCallback = &DebugReportCallback;
|
|
|
|
callbackCreateInfo.pUserData = nullptr;
|
|
|
|
|
|
|
|
ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
|
|
|
|
// Register the callback
|
|
|
|
grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2018-07-02 20:15:37 +00:00
|
|
|
ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
|
2018-08-02 17:55:49 +00:00
|
|
|
ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
|
2018-07-02 20:15:37 +00:00
|
|
|
ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
|
|
|
|
ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
|
|
|
|
ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
|
|
|
|
ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
|
|
|
|
ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
|
|
|
|
ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);
|
|
|
|
|
|
|
|
uint32_t gpuCount;
|
|
|
|
err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
|
|
|
|
if (err) {
|
|
|
|
SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
|
2018-07-02 20:16:44 +00:00
|
|
|
destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
|
2018-07-02 20:15:37 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (!gpuCount) {
|
|
|
|
SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
|
2018-07-02 20:16:44 +00:00
|
|
|
destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
|
2018-07-02 20:15:37 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
// Just returning the first physical device instead of getting the whole array.
|
|
|
|
// TODO: find best match for our needs
|
|
|
|
gpuCount = 1;
|
|
|
|
err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
|
|
|
|
// VK_INCOMPLETE is returned when the count we provide is less than the total device count.
|
|
|
|
if (err && VK_INCOMPLETE != err) {
|
|
|
|
SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
|
2018-07-02 20:16:44 +00:00
|
|
|
destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
|
2018-07-02 20:15:37 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-08-02 17:55:49 +00:00
|
|
|
VkPhysicalDeviceProperties physDeviceProperties;
|
|
|
|
grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
|
2020-02-07 15:36:46 +00:00
|
|
|
int physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);
|
2018-08-02 17:55:49 +00:00
|
|
|
|
2019-06-24 14:53:09 +00:00
|
|
|
if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
|
|
|
|
SkDebugf("protected requires vk physical device version 1.1\n");
|
|
|
|
destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-07-02 20:15:37 +00:00
|
|
|
// query to get the initial queue props size
|
|
|
|
uint32_t queueCount;
|
|
|
|
grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
|
|
|
|
if (!queueCount) {
|
|
|
|
SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
|
2018-07-02 20:16:44 +00:00
|
|
|
destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
|
2018-07-02 20:15:37 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
|
|
|
|
// now get the actual queue props
|
|
|
|
VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
|
|
|
|
|
|
|
|
grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
|
|
|
|
|
|
|
|
// iterate to find the graphics queue
|
|
|
|
uint32_t graphicsQueueIndex = queueCount;
|
|
|
|
for (uint32_t i = 0; i < queueCount; i++) {
|
|
|
|
if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
|
|
|
|
graphicsQueueIndex = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (graphicsQueueIndex == queueCount) {
|
|
|
|
SkDebugf("Could not find any supported graphics queues.\n");
|
2018-07-02 20:16:44 +00:00
|
|
|
destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
|
2018-07-02 20:15:37 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
// iterate to find the present queue, if needed
|
|
|
|
uint32_t presentQueueIndex = queueCount;
|
|
|
|
if (presentQueueIndexPtr && canPresent) {
|
|
|
|
for (uint32_t i = 0; i < queueCount; i++) {
|
|
|
|
if (canPresent(inst, physDev, i)) {
|
|
|
|
presentQueueIndex = i;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (presentQueueIndex == queueCount) {
|
|
|
|
SkDebugf("Could not find any supported present queues.\n");
|
2018-07-02 20:16:44 +00:00
|
|
|
destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
|
2018-07-02 20:15:37 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
*presentQueueIndexPtr = presentQueueIndex;
|
|
|
|
} else {
|
|
|
|
// Just setting this so we end up make a single queue for graphics since there was no
|
|
|
|
// request for a present queue.
|
|
|
|
presentQueueIndex = graphicsQueueIndex;
|
|
|
|
}
|
|
|
|
|
2018-08-01 17:25:41 +00:00
|
|
|
SkTArray<VkLayerProperties> deviceLayers;
|
|
|
|
SkTArray<VkExtensionProperties> deviceExtensions;
|
2018-08-02 17:55:49 +00:00
|
|
|
if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
|
2018-08-01 17:25:41 +00:00
|
|
|
inst, physDev,
|
|
|
|
&deviceExtensions,
|
|
|
|
&deviceLayers)) {
|
|
|
|
destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
|
|
|
|
return false;
|
|
|
|
}
|
2018-07-02 20:15:37 +00:00
|
|
|
|
|
|
|
SkTArray<const char*> deviceLayerNames;
|
|
|
|
SkTArray<const char*> deviceExtensionNames;
|
2018-08-01 17:25:41 +00:00
|
|
|
for (int i = 0; i < deviceLayers.count(); ++i) {
|
|
|
|
deviceLayerNames.push_back(deviceLayers[i].layerName);
|
2018-07-02 20:15:37 +00:00
|
|
|
}
|
2020-07-09 19:04:48 +00:00
|
|
|
|
|
|
|
// We can't have both VK_KHR_buffer_device_address and VK_EXT_buffer_device_address as
|
|
|
|
// extensions. So see if we have the KHR version and if so don't push back the EXT version in
|
|
|
|
// the next loop.
|
|
|
|
bool hasKHRBufferDeviceAddress = false;
|
|
|
|
for (int i = 0; i < deviceExtensions.count(); ++i) {
|
|
|
|
if (!strcmp(deviceExtensions[i].extensionName, "VK_KHR_buffer_device_address")) {
|
|
|
|
hasKHRBufferDeviceAddress = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-01 17:25:41 +00:00
|
|
|
for (int i = 0; i < deviceExtensions.count(); ++i) {
|
|
|
|
// Don't use experimental extensions since they typically don't work with debug layers and
|
|
|
|
// often are missing dependecy requirements for other extensions. Additionally, these are
|
|
|
|
// often left behind in the driver even after they've been promoted to real extensions.
|
2020-08-16 03:22:53 +00:00
|
|
|
if (0 != strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
|
|
|
|
0 != strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {
|
2020-07-09 19:04:48 +00:00
|
|
|
|
2020-10-19 17:11:57 +00:00
|
|
|
// This is an nvidia extension that isn't supported by the debug layers so we get lots
|
|
|
|
// of warnings. We don't actually use it, so it is easiest to just not enable it.
|
2021-06-03 18:32:25 +00:00
|
|
|
if (0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_low_latency") ||
|
2021-06-16 20:42:13 +00:00
|
|
|
0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_acquire_winrt_display") ||
|
2021-06-23 15:52:57 +00:00
|
|
|
0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_cuda_kernel_launch") ||
|
|
|
|
0 == strcmp(deviceExtensions[i].extensionName, "VK_EXT_provoking_vertex")) {
|
2020-10-19 17:11:57 +00:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
2020-07-09 19:04:48 +00:00
|
|
|
if (!hasKHRBufferDeviceAddress ||
|
2020-08-16 03:22:53 +00:00
|
|
|
0 != strcmp(deviceExtensions[i].extensionName, "VK_EXT_buffer_device_address")) {
|
2020-07-09 19:04:48 +00:00
|
|
|
deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
|
|
|
|
}
|
2018-08-01 17:25:41 +00:00
|
|
|
}
|
2018-07-02 20:15:37 +00:00
|
|
|
}
|
|
|
|
|
2018-08-08 13:23:18 +00:00
|
|
|
extensions->init(getProc, inst, physDev,
|
|
|
|
(uint32_t) instanceExtensionNames.count(),
|
|
|
|
instanceExtensionNames.begin(),
|
|
|
|
(uint32_t) deviceExtensionNames.count(),
|
|
|
|
deviceExtensionNames.begin());
|
|
|
|
|
|
|
|
memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
|
|
|
|
features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
|
|
|
|
features->pNext = nullptr;
|
|
|
|
|
|
|
|
VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
|
|
|
|
void* pointerToFeatures = nullptr;
|
|
|
|
if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
|
|
|
|
extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
|
2019-06-24 14:53:09 +00:00
|
|
|
if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
|
|
|
|
isProtected)) {
|
|
|
|
destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-08-08 13:23:18 +00:00
|
|
|
// If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
|
|
|
|
// the device creation will use that instead of the ppEnabledFeatures.
|
|
|
|
pointerToFeatures = features;
|
|
|
|
} else {
|
|
|
|
grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
|
|
|
|
}
|
|
|
|
|
2018-07-02 20:15:37 +00:00
|
|
|
// this looks like it would slow things down,
|
|
|
|
// and we can't depend on it on all platforms
|
2018-08-08 13:23:18 +00:00
|
|
|
deviceFeatures->robustBufferAccess = VK_FALSE;
|
2018-07-02 20:15:37 +00:00
|
|
|
|
2019-06-24 14:53:09 +00:00
|
|
|
VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
|
2018-07-02 20:15:37 +00:00
|
|
|
float queuePriorities[1] = { 0.0 };
|
|
|
|
// Here we assume no need for swapchain queue
|
|
|
|
// If one is needed, the client will need its own setup code
|
|
|
|
const VkDeviceQueueCreateInfo queueInfo[2] = {
|
|
|
|
{
|
|
|
|
VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
|
|
|
|
nullptr, // pNext
|
2019-06-24 14:53:09 +00:00
|
|
|
flags, // VkDeviceQueueCreateFlags
|
2018-07-02 20:15:37 +00:00
|
|
|
graphicsQueueIndex, // queueFamilyIndex
|
|
|
|
1, // queueCount
|
|
|
|
queuePriorities, // pQueuePriorities
|
2019-06-24 14:53:09 +00:00
|
|
|
|
2018-07-02 20:15:37 +00:00
|
|
|
},
|
|
|
|
{
|
|
|
|
VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
|
|
|
|
nullptr, // pNext
|
|
|
|
0, // VkDeviceQueueCreateFlags
|
|
|
|
presentQueueIndex, // queueFamilyIndex
|
|
|
|
1, // queueCount
|
|
|
|
queuePriorities, // pQueuePriorities
|
|
|
|
}
|
|
|
|
};
|
|
|
|
uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;
|
|
|
|
|
|
|
|
const VkDeviceCreateInfo deviceInfo = {
|
2018-08-08 13:23:18 +00:00
|
|
|
VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
|
|
|
|
pointerToFeatures, // pNext
|
|
|
|
0, // VkDeviceCreateFlags
|
|
|
|
queueInfoCount, // queueCreateInfoCount
|
|
|
|
queueInfo, // pQueueCreateInfos
|
|
|
|
(uint32_t) deviceLayerNames.count(), // layerCount
|
|
|
|
deviceLayerNames.begin(), // ppEnabledLayerNames
|
|
|
|
(uint32_t) deviceExtensionNames.count(), // extensionCount
|
|
|
|
deviceExtensionNames.begin(), // ppEnabledExtensionNames
|
|
|
|
pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
|
2018-07-02 20:15:37 +00:00
|
|
|
};
|
|
|
|
|
2019-02-12 16:14:47 +00:00
|
|
|
{
|
|
|
|
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
|
|
|
|
// skia:8712
|
|
|
|
__lsan::ScopedDisabler lsanDisabler;
|
|
|
|
#endif
|
|
|
|
err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
|
|
|
|
}
|
2018-07-02 20:15:37 +00:00
|
|
|
if (err) {
|
|
|
|
SkDebugf("CreateDevice failed: %d\n", err);
|
2018-07-02 20:16:44 +00:00
|
|
|
destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
|
2018-07-02 20:15:37 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
VkQueue queue;
|
2019-06-24 14:53:09 +00:00
|
|
|
if (isProtected) {
|
|
|
|
ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
|
|
|
|
SkASSERT(grVkGetDeviceQueue2 != nullptr);
|
|
|
|
VkDeviceQueueInfo2 queue_info2 = {
|
|
|
|
VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, // sType
|
|
|
|
nullptr, // pNext
|
|
|
|
VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT, // flags
|
|
|
|
graphicsQueueIndex, // queueFamilyIndex
|
|
|
|
0 // queueIndex
|
|
|
|
};
|
|
|
|
grVkGetDeviceQueue2(device, &queue_info2, &queue);
|
|
|
|
} else {
|
|
|
|
grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
|
|
|
|
}
|
2018-07-02 20:15:37 +00:00
|
|
|
|
|
|
|
ctx->fInstance = inst;
|
|
|
|
ctx->fPhysicalDevice = physDev;
|
|
|
|
ctx->fDevice = device;
|
|
|
|
ctx->fQueue = queue;
|
|
|
|
ctx->fGraphicsQueueIndex = graphicsQueueIndex;
|
2019-01-28 18:15:05 +00:00
|
|
|
ctx->fMaxAPIVersion = apiVersion;
|
2018-08-01 17:25:41 +00:00
|
|
|
ctx->fVkExtensions = extensions;
|
2018-08-08 13:23:18 +00:00
|
|
|
ctx->fDeviceFeatures2 = features;
|
2018-07-12 14:02:37 +00:00
|
|
|
ctx->fGetProc = getProc;
|
2018-07-02 20:15:37 +00:00
|
|
|
ctx->fOwnsInstanceAndDevice = false;
|
2019-06-24 14:53:09 +00:00
|
|
|
ctx->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;
|
2018-07-02 20:15:37 +00:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-08-08 13:23:18 +00:00
|
|
|
void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
|
|
|
|
// All Vulkan structs that could be part of the features chain will start with the
|
|
|
|
// structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
|
|
|
|
// so we can get access to the pNext for the next struct.
|
|
|
|
struct CommonVulkanHeader {
|
|
|
|
VkStructureType sType;
|
|
|
|
void* pNext;
|
|
|
|
};
|
|
|
|
|
|
|
|
void* pNext = features->pNext;
|
|
|
|
while (pNext) {
|
|
|
|
void* current = pNext;
|
|
|
|
pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
|
|
|
|
sk_free(current);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-08-06 18:11:56 +00:00
|
|
|
} // namespace sk_gpu_test
|
2017-11-10 15:03:05 +00:00
|
|
|
|
|
|
|
#endif
|