/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "tools/gpu/vk/VkTestUtils.h"

#ifdef SK_VULKAN

#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
    #if defined _WIN32
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME "vulkan-1.dll"
    #else
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME "libvulkan.so"
    #endif
#endif

#include <algorithm>

#if defined(SK_BUILD_FOR_UNIX)
#include <execinfo.h>
#endif
#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkAutoMalloc.h"
#include "src/ports/SkOSLibrary.h"

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
#include <sanitizer/lsan_interface.h>
#endif

namespace sk_gpu_test {

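// Loads the Vulkan shared library named by SK_GPU_TOOLS_VK_LIBRARY_NAME and looks up the two
// loader entry points. The library handle and both pointers are cached in function-local
// statics, so only the first call touches the dynamic loader; later calls just hand back the
// cached pointers (or fail again if the first lookup failed).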
bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
                                      PFN_vkGetDeviceProcAddr* devProc) {
    static void* vkLib = nullptr;
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
    static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
    if (!vkLib) {
        vkLib = SkLoadDynamicLibrary(SK_GPU_TOOLS_VK_LIBRARY_NAME);
        if (!vkLib) {
            return false;
        }
        localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib,
                                                                          "vkGetInstanceProcAddr");
        localDevProc = (PFN_vkGetDeviceProcAddr) SkGetProcedureAddress(vkLib,
                                                                       "vkGetDeviceProcAddr");
    }
    if (!localInstProc || !localDevProc) {
        return false;
    }
    *instProc = localInstProc;
    *devProc = localDevProc;
    return true;
}

////////////////////////////////////////////////////////////////////////////////
// Helper code to set up Vulkan context objects

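// A minimal sketch of how a test harness might drive these helpers (assumes the caller has
// wrapped the two loader entry points in a GrVkGetProc-compatible `getProc`; error handling,
// the optional CanPresentFn, and instance/device teardown are omitted):
//
//     PFN_vkGetInstanceProcAddr instProc;
//     PFN_vkGetDeviceProcAddr devProc;
//     if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) { /* bail */ }
//
//     GrVkBackendContext ctx;
//     GrVkExtensions extensions;
//     VkPhysicalDeviceFeatures2 features;
//     VkDebugReportCallbackEXT debugCallback;
//     if (sk_gpu_test::CreateVkBackendContext(getProc, &ctx, &extensions, &features,
//                                             &debugCallback, nullptr, nullptr, false)) {
//         // ... hand ctx to Skia (e.g. GrDirectContext::MakeVulkan) ...
//     }
//     sk_gpu_test::FreeVulkanFeaturesStructs(&features);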
#ifdef SK_ENABLE_VK_LAYERS
const char* kDebugLayerNames[] = {
    // single merged layer
    "VK_LAYER_KHRONOS_validation",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};

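// VK_MAKE_VERSION packs the patch number into the low 12 bits, so masking those bits off leaves
// just the major/minor portion for comparison.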
static uint32_t remove_patch_version(uint32_t specVersion) {
    return (specVersion >> 12) << 12;
}

// Returns the index into the layers array for the layer we want. Returns -1 if not supported.
static int should_include_debug_layer(const char* layerName,
                                      uint32_t layerCount, VkLayerProperties* layers,
                                      uint32_t version) {
    for (uint32_t i = 0; i < layerCount; ++i) {
        if (!strcmp(layerName, layers[i].layerName)) {
            // Since the layers intercept the vulkan calls and forward them on, we need to make
            // sure the layer was written against a version that isn't older than the version of
            // Vulkan we're using so that it has all the api entry points.
            if (version <= remove_patch_version(layers[i].specVersion)) {
                return i;
            }
            return -1;
        }
    }
    return -1;
}

static void print_backtrace() {
#if defined(SK_BUILD_FOR_UNIX)
    void* stack[64];
    int count = backtrace(stack, SK_ARRAY_COUNT(stack));
    backtrace_symbols_fd(stack, count, 2);
#else
    // Please add implementations for other platforms.
#endif
}

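// Callback registered via VK_EXT_debug_report when the validation layers are enabled. Known
// false-positive VUID messages are filtered out below; real errors are logged with a backtrace
// and trip SkDEBUGFAIL, while warnings and perf warnings are just logged.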
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT       flags,
    VkDebugReportObjectTypeEXT  objectType,
    uint64_t                    object,
    size_t                      location,
    int32_t                     messageCode,
    const char*                 pLayerPrefix,
    const char*                 pMessage,
    void*                       pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
        if (strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521") ||
            strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522")) {
            return VK_FALSE;
        }
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2171
        if (strstr(pMessage, "VUID-vkCmdDraw-None-02686") ||
            strstr(pMessage, "VUID-vkCmdDrawIndexed-None-02686")) {
            return VK_FALSE;
        }
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
        SkDEBUGFAIL("Vulkan debug layer error");
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)

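// Gathers the instance-level layers (the debug layers in kDebugLayerNames when
// SK_ENABLE_VK_LAYERS is defined) plus every instance extension advertised by the
// implementation, by implicitly enabled layers, and by each layer we explicitly enable.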
static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
                                                uint32_t specVersion,
                                                SkTArray<VkExtensionProperties>* instanceExtensions,
                                                SkTArray<VkLayerProperties>* instanceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);

    if (!EnumerateInstanceExtensionProperties ||
        !EnumerateInstanceLayerProperties) {
        return false;
    }

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers
    res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
    {
        uint32_t extensionCount = 0;
        res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    // via explicitly enabled layers
    layerCount = instanceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

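// Same as above, but for device-level layers and extensions on the chosen physical device.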
static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
                                              VkInstance inst, VkPhysicalDevice physDev,
                                              SkTArray<VkExtensionProperties>* deviceExtensions,
                                              SkTArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    // via explicitly enabled layers
    layerCount = deviceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

#define ACQUIRE_VK_PROC(name, instance, device)                                    \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            if (device != VK_NULL_HANDLE) {                                        \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
            }                                                                      \
            return false;                                                          \
        }                                                                          \
    } while (0)

#define ACQUIRE_VK_PROC_LOCAL(name, instance, device)                              \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            return false;                                                          \
        }                                                                          \
    } while (0)

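// Tears down the debug report callback (when one was installed) and then the VkInstance itself.
// The failure paths in CreateVkBackendContext that run after instance creation funnel through
// this so a partially built instance doesn't leak.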
static bool destroy_instance(GrVkGetProc getProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
    grVkDestroyInstance(inst, nullptr);
    return true;
}

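// Builds the pNext chain of extension feature structs (protected memory, advanced blend,
// YCbCr conversion) that we may want to enable, then fills them in via
// vkGetPhysicalDeviceFeatures2 (or the KHR variant on 1.0 devices). The chained structs are
// heap-allocated with sk_malloc_throw and are released later by FreeVulkanFeaturesStructs.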
static bool setup_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                           uint32_t physDeviceVersion, GrVkExtensions* extensions,
                           VkPhysicalDeviceFeatures2* features, bool isProtected) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Set up all extension feature structs we may want to use.
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first.
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
                (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
                        sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    return true;
    // If we want to disable any extension features do so here.
}

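// Builds a complete GrVkBackendContext for testing: creates a VkInstance (with debug layers
// when enabled), picks the first physical device, finds a graphics queue (and optionally a
// present queue via canPresent), queries extensions and features, and creates the VkDevice.
// On success the test harness owns the instance and device, and must also release the feature
// structs with FreeVulkanFeaturesStructs when it is done with them.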
bool CreateVkBackendContext(GrVkGetProc getProc,
                            GrVkBackendContext* ctx,
                            GrVkExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent,
                            bool isProtected) {
    VkResult err;

    ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk instance version 1.1\n");
        return false;
    }

    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
        // api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
        // since that is the highest vulkan version.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = std::min(instanceVersion, apiVersion);

    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    SkTArray<VkLayerProperties> instanceLayers;
    SkTArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    SkTArray<const char*> instanceLayerNames;
    SkTArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.count(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.count(); ++i) {
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6) != 0) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.count(),     // enabledLayerNameCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Set up callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Just returning the first physical device instead of getting the whole array.
    // TODO: find best match for our needs
    gpuCount = 1;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    uint32_t physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);

    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk physical device version 1.1\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up making a single queue for graphics since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    SkTArray<VkLayerProperties> deviceLayers;
    SkTArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkTArray<const char*> deviceLayerNames;
    SkTArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.count(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }

    // We can't have both VK_KHR_buffer_device_address and VK_EXT_buffer_device_address as
    // extensions. So check whether we have the KHR version and, if we do, don't push back the
    // EXT version in the next loop.
    bool hasKHRBufferDeviceAddress = false;
    for (int i = 0; i < deviceExtensions.count(); ++i) {
        if (!strcmp(deviceExtensions[i].extensionName, "VK_KHR_buffer_device_address")) {
            hasKHRBufferDeviceAddress = true;
            break;
        }
    }

    for (int i = 0; i < deviceExtensions.count(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers and
        // often are missing dependency requirements for other extensions. Additionally, these are
        // often left behind in the driver even after they've been promoted to real extensions.
        if (0 != strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            0 != strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {

            // These extensions aren't supported by the debug layers, so we get lots of warnings.
            // We don't actually use them, so it is easiest to just not enable them.
            if (0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_low_latency") ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_acquire_winrt_display") ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_NV_cuda_kernel_launch") ||
                0 == strcmp(deviceExtensions[i].extensionName, "VK_EXT_provoking_vertex")) {
                continue;
            }

            if (!hasKHRBufferDeviceAddress ||
                0 != strcmp(deviceExtensions[i].extensionName, "VK_EXT_buffer_device_address")) {
                deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
            }
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.count(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.count(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
                            isProtected)) {
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }

        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the ppEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    deviceFeatures->robustBufferAccess = VK_FALSE;

    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for swapchain queue
    // If one is needed, the client will need its own setup code
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            flags,                                      // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.count(),         // layerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.count(),     // extensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
    };

    {
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8712
        __lsan::ScopedDisabler lsanDisabler;
#endif
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    }
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    if (isProtected) {
        ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
        SkASSERT(grVkGetDeviceQueue2 != nullptr);
        VkDeviceQueueInfo2 queue_info2 = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, // sType
            nullptr,                               // pNext
            VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,  // flags
            graphicsQueueIndex,                    // queueFamilyIndex
            0                                      // queueIndex
        };
        grVkGetDeviceQueue2(device, &queue_info2, &queue);
    } else {
        grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    }

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fOwnsInstanceAndDevice = false;
    ctx->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;

    return true;
}

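// Frees every extension feature struct that setup_features chained onto features->pNext. The
// VkPhysicalDeviceFeatures2 struct itself is owned by the caller and is not freed here.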
void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void*           pNext;
    };

    void* pNext = features->pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        sk_free(current);
    }
}

}  // namespace sk_gpu_test

#endif