/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "tools/gpu/vk/VkTestUtils.h"

#ifdef SK_VULKAN

#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkAutoMalloc.h"
#include "src/ports/SkOSLibrary.h"

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
#include <sanitizer/lsan_interface.h>
#endif

namespace sk_gpu_test {
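
// Loads the Vulkan shared library (vulkan-1.dll or libvulkan.so), or uses the statically linked
// MoltenVK entry points, and hands back vkGetInstanceProcAddr/vkGetDeviceProcAddr through the
// out parameters. All other Vulkan functions are resolved through these two.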
bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
                                      PFN_vkGetDeviceProcAddr* devProc) {
#ifdef SK_MOLTENVK
    // MoltenVK is a statically linked framework, so there is no Vulkan library to load.
    *instProc = &vkGetInstanceProcAddr;
    *devProc = &vkGetDeviceProcAddr;
    return true;
#else
    static void* vkLib = nullptr;
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
    static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
    if (!vkLib) {
#if defined _WIN32
        vkLib = DynamicLoadLibrary("vulkan-1.dll");
#else
        vkLib = DynamicLoadLibrary("libvulkan.so");
#endif
        if (!vkLib) {
            return false;
        }
        localInstProc = (PFN_vkGetInstanceProcAddr) GetProcedureAddress(vkLib,
                                                                        "vkGetInstanceProcAddr");
        localDevProc = (PFN_vkGetDeviceProcAddr) GetProcedureAddress(vkLib,
                                                                     "vkGetDeviceProcAddr");
    }
    if (!localInstProc || !localDevProc) {
        return false;
    }
    *instProc = localInstProc;
    *devProc = localDevProc;
    return true;
#endif
}

////////////////////////////////////////////////////////////////////////////////
// Helper code to set up Vulkan context objects

#ifdef SK_ENABLE_VK_LAYERS
const char* kDebugLayerNames[] = {
    // elements of VK_LAYER_LUNARG_standard_validation
    "VK_LAYER_GOOGLE_threading",
    "VK_LAYER_LUNARG_parameter_validation",
    "VK_LAYER_LUNARG_object_tracker",
    "VK_LAYER_LUNARG_core_validation",
    "VK_LAYER_GOOGLE_unique_objects",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};
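
// VK_MAKE_VERSION packs the patch number into the low 12 bits, so clearing those bits leaves
// just the major.minor portion for comparison.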
static uint32_t remove_patch_version(uint32_t specVersion) {
    return (specVersion >> 12) << 12;
}

// Returns the index into layers array for the layer we want. Returns -1 if not supported.
static int should_include_debug_layer(const char* layerName,
                                      uint32_t layerCount, VkLayerProperties* layers,
                                      uint32_t version) {
    for (uint32_t i = 0; i < layerCount; ++i) {
        if (!strcmp(layerName, layers[i].layerName)) {
            // Since the layers intercept the vulkan calls and forward them on, we need to make
            // sure the layer was written against a version that isn't older than the version of
            // Vulkan we're using so that it has all the api entry points.
            if (version <= remove_patch_version(layers[i].specVersion)) {
                return i;
            }
            return -1;
        }
    }
    return -1;
}
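
// VK_EXT_debug_report callback: routes validation-layer messages to SkDebugf and filters out one
// known-spurious warning about the advanced-blend features struct.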
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
        VkDebugReportFlagsEXT       flags,
        VkDebugReportObjectTypeEXT  objectType,
        uint64_t                    object,
        size_t                      location,
        int32_t                     messageCode,
        const char*                 pLayerPrefix,
        const char*                 pMessage,
        void*                       pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        // There is currently a bug in the spec which doesn't list
        // VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT as an allowable
        // pNext struct in VkDeviceCreateInfo. So we ignore that warning since it is wrong.
        if (!strstr(pMessage,
                "pCreateInfo->pNext chain includes a structure with unexpected VkStructureType "
                "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT")) {
            SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        }
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif
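
// Declares a local function pointer named F and resolves it through the caller-supplied getProc.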
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)
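
// Gathers the instance layers (debug layers only, when SK_ENABLE_VK_LAYERS is set) and the
// instance extensions exposed by the implementation and by those layers.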
static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
                                                uint32_t specVersion,
                                                SkTArray<VkExtensionProperties>* instanceExtensions,
                                                SkTArray<VkLayerProperties>* instanceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);

    if (!EnumerateInstanceExtensionProperties ||
        !EnumerateInstanceLayerProperties) {
        return false;
    }

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers
    res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        instanceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = instanceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}
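
// Device-level counterpart of the helper above: collects the device layers and extensions
// available on the chosen physical device.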
static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
                                              VkInstance inst, VkPhysicalDevice physDev,
                                              SkTArray<VkExtensionProperties>* deviceExtensions,
                                              SkTArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        deviceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = deviceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}
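
// Helpers for resolving Vulkan entry points through getProc. The NOCHECK variant tolerates a null
// result; ACQUIRE_VK_PROC reports the failure, destroys the instance if a device handle was
// passed, and returns false; ACQUIRE_VK_PROC_LOCAL reports the failure and returns from a void
// function.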
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

#define ACQUIRE_VK_PROC(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (device != VK_NULL_HANDLE) { \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

#define ACQUIRE_VK_PROC_LOCAL(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return; \
        } \
    } while (0)
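
// Destroys the debug callback (if one was installed) and then the VkInstance itself.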
static void destroy_instance(GrVkGetProc getProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
    grVkDestroyInstance(inst, nullptr);
}
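
// Allocates the extension feature structs we care about, chains them onto features->pNext, and
// queries the physical device to fill them in. The allocations are released by
// FreeVulkanFeaturesStructs.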
static void setup_extension_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                                     uint32_t physDeviceVersion, GrVkExtensions* extensions,
                                     VkPhysicalDeviceFeatures2* features) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.

    void** tailPNext = &features->pNext;

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    // If we want to disable any extension features do so here.
}
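
// Builds a Vulkan instance and device for testing: determines the API version to target, enables
// the available layers and extensions, selects a physical device and its graphics (and, if
// requested, present) queue, and fills in the GrVkBackendContext. Cleans up and returns false on
// any failure.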
bool CreateVkBackendContext(GrVkGetProc getProc,
                            GrVkBackendContext* ctx,
                            GrVkExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent) {
    VkResult err;

    ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
        // api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
        // since that is the highest vulkan version.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = SkTMin(instanceVersion, apiVersion);

    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    SkTArray<VkLayerProperties> instanceLayers;
    SkTArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    SkTArray<const char*> instanceLayerNames;
    SkTArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.count(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.count(); ++i) {
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6)) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.count(),     // enabledLayerCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.count(), // enabledExtensionCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Just returning the first physical device instead of getting the whole array.
    // TODO: find best match for our needs
    gpuCount = 1;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    int physDeviceVersion = SkTMin(physDeviceProperties.apiVersion, apiVersion);

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up making a single queue for graphics since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    SkTArray<VkLayerProperties> deviceLayers;
    SkTArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkTArray<const char*> deviceLayerNames;
    SkTArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.count(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }
    for (int i = 0; i < deviceExtensions.count(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers and
        // are often missing dependency requirements for other extensions. Additionally, these are
        // often left behind in the driver even after they've been promoted to real extensions.
        if (strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {
            deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.count(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.count(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        setup_extension_features(getProc, inst, physDev, physDeviceVersion, extensions, features);
        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the pEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // robustBufferAccess looks like it would slow things down, and we can't depend on it on all
    // platforms.
    deviceFeatures->robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for swapchain queue
    // If one is needed, the client will need its own setup code
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.count(),         // enabledLayerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.count(),     // enabledExtensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // pEnabledFeatures
    };

    {
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8712
        __lsan::ScopedDisabler lsanDisabler;
#endif
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    }
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fOwnsInstanceAndDevice = false;

    return true;
}
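
// Frees the extension feature structs that setup_extension_features chained onto features->pNext.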
void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features->pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        sk_free(current);
    }
}

}  // namespace sk_gpu_test

#endif