/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "tools/gpu/vk/VkTestUtils.h"

#ifdef SK_VULKAN

#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
    #if defined _WIN32
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME "vulkan-1.dll"
    #else
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME "libvulkan.so"
    #endif
#endif

#if defined(SK_BUILD_FOR_UNIX)
#include <execinfo.h>
#endif
#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkAutoMalloc.h"
#include "src/ports/SkOSLibrary.h"

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
#include <sanitizer/lsan_interface.h>
#endif

namespace sk_gpu_test {

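// Loads the Vulkan shared library named by SK_GPU_TOOLS_VK_LIBRARY_NAME (once; the handle and
// entry points are cached in function-local statics) and returns the two bootstrap functions,
// vkGetInstanceProcAddr and vkGetDeviceProcAddr, from which every other Vulkan entry point is
// resolved. Returns false if the library or either symbol cannot be found.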
bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
                                      PFN_vkGetDeviceProcAddr* devProc) {
    static void* vkLib = nullptr;
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
    static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
    if (!vkLib) {
        vkLib = SkLoadDynamicLibrary(SK_GPU_TOOLS_VK_LIBRARY_NAME);
        if (!vkLib) {
            return false;
        }
        localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib,
                                                                          "vkGetInstanceProcAddr");
        localDevProc = (PFN_vkGetDeviceProcAddr) SkGetProcedureAddress(vkLib,
                                                                       "vkGetDeviceProcAddr");
    }
    if (!localInstProc || !localDevProc) {
        return false;
    }
    *instProc = localInstProc;
    *devProc = localDevProc;
    return true;
}

////////////////////////////////////////////////////////////////////////////////
// Helper code to set up Vulkan context objects

#ifdef SK_ENABLE_VK_LAYERS
const char* kDebugLayerNames[] = {
    // single merged layer
    "VK_LAYER_KHRONOS_validation",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};

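// Vulkan packs versions as major/minor/patch with the patch number in the low 12 bits; masking
// those bits off lets layer versions be compared on major.minor alone.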
static uint32_t remove_patch_version(uint32_t specVersion) {
    return (specVersion >> 12) << 12;
}

// Returns the index into layers array for the layer we want. Returns -1 if not supported.
static int should_include_debug_layer(const char* layerName,
                                      uint32_t layerCount, VkLayerProperties* layers,
                                      uint32_t version) {
    for (uint32_t i = 0; i < layerCount; ++i) {
        if (!strcmp(layerName, layers[i].layerName)) {
            // Since the layers intercept the vulkan calls and forward them on, we need to make
            // sure the layer was written against a version that isn't older than the version of
            // Vulkan we're using, so that it has all the API entry points.
            if (version <= remove_patch_version(layers[i].specVersion)) {
                return i;
            }
            return -1;
        }

    }
    return -1;
}

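// Prints the current call stack to stderr (fd 2) so a validation-layer report can be traced back
// to the offending call site. Only implemented for Unix-like builds.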
static void print_backtrace() {
#if defined(SK_BUILD_FOR_UNIX)
    void* stack[64];
    int count = backtrace(stack, SK_ARRAY_COUNT(stack));
    backtrace_symbols_fd(stack, count, 2);
#else
    // Please add implementations for other platforms.
#endif
}

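// VK_EXT_debug_report callback. Errors are logged with a backtrace and trip a debug assert
// (returning VK_TRUE to skip further layers); warnings and performance warnings are logged with
// a backtrace; everything else is simply logged.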
VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
        VkDebugReportFlagsEXT flags,
        VkDebugReportObjectTypeEXT objectType,
        uint64_t object,
        size_t location,
        int32_t messageCode,
        const char* pLayerPrefix,
        const char* pMessage,
        void* pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
        if (strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521") ||
            strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522")) {
            return VK_FALSE;
        }
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
        SkDEBUGFAIL("Vulkan debug layer error");
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        print_backtrace();
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

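// Resolves "vk<F>" through the caller's getProc and binds it to a local variable named <F>.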
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)

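// Gathers the instance-level layers and extensions to enable: any of kDebugLayerNames that the
// loader supports (only when SK_ENABLE_VK_LAYERS is defined), plus all extensions advertised by
// the implementation, by implicitly enabled layers, and by the explicitly enabled layers above.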
static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
                                                uint32_t specVersion,
                                                SkTArray<VkExtensionProperties>* instanceExtensions,
                                                SkTArray<VkLayerProperties>* instanceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);

    if (!EnumerateInstanceExtensionProperties ||
        !EnumerateInstanceLayerProperties) {
        return false;
    }

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers
    res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        instanceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = instanceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

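// Device-level counterpart of the function above: gathers the debug layers and every extension
// advertised by the physical device and by the explicitly enabled device layers.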
static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
                                              VkInstance inst, VkPhysicalDevice physDev,
                                              SkTArray<VkExtensionProperties>* deviceExtensions,
                                              SkTArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        deviceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = deviceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

#define ACQUIRE_VK_PROC(name, instance, device)                                      \
    PFN_vk##name grVk##name =                                                        \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device));   \
    do {                                                                             \
        if (grVk##name == nullptr) {                                                 \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);        \
            if (device != VK_NULL_HANDLE) {                                          \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension);   \
            }                                                                        \
            return false;                                                            \
        }                                                                            \
    } while (0)

#define ACQUIRE_VK_PROC_LOCAL(name, instance, device)                                \
    PFN_vk##name grVk##name =                                                        \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device));   \
    do {                                                                             \
        if (grVk##name == nullptr) {                                                 \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);        \
            return false;                                                            \
        }                                                                            \
    } while (0)

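// Tears down the debug report callback (if one was installed) and then the VkInstance itself.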
static bool destroy_instance(GrVkGetProc getProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
    grVkDestroyInstance(inst, nullptr);
    return true;
}

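// Builds the pNext chain on |features| with the extension feature structs Skia may use
// (protected memory, advanced blend ops, YCbCr conversion) and queries the physical device to
// populate them. Each struct is heap-allocated; FreeVulkanFeaturesStructs() releases the chain.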
static bool setup_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                           uint32_t physDeviceVersion, GrVkExtensions* extensions,
                           VkPhysicalDeviceFeatures2* features, bool isProtected) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
                (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
                        sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    return true;
    // If we want to disable any extension features do so here.
}

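// Creates a VkInstance, picks the first physical device, finds a graphics queue (and, when
// requested, a present queue), enables the layers and extensions gathered by the helpers above,
// and creates a VkDevice, filling in |ctx|, |extensions|, and |features| for GrContext creation.
// Any failure after the instance exists tears it back down via destroy_instance().
//
// Rough caller-side sketch (local names here are illustrative only; see VkTestUtils.h for the
// declared signature and its default arguments):
//
//     PFN_vkGetInstanceProcAddr instProc;
//     PFN_vkGetDeviceProcAddr devProc;
//     if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) { /* bail */ }
//     auto getProc = [instProc, devProc](const char* name, VkInstance inst, VkDevice dev) {
//         return dev != VK_NULL_HANDLE ? devProc(dev, name) : instProc(inst, name);
//     };
//     GrVkBackendContext ctx;
//     GrVkExtensions exts;
//     VkPhysicalDeviceFeatures2 feats;
//     VkDebugReportCallbackEXT dbg;
//     if (sk_gpu_test::CreateVkBackendContext(getProc, &ctx, &exts, &feats, &dbg,
//                                             nullptr, nullptr, false)) {
//         // ... use ctx ...
//         sk_gpu_test::FreeVulkanFeaturesStructs(&feats);
//     }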
bool CreateVkBackendContext(GrVkGetProc getProc,
                            GrVkBackendContext* ctx,
                            GrVkExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent,
                            bool isProtected) {
    VkResult err;

    ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk instance version 1.1\n");
        return false;
    }

    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
        // api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
        // since that is the highest vulkan version.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = std::min(instanceVersion, apiVersion);

    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    SkTArray<VkLayerProperties> instanceLayers;
    SkTArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    SkTArray<const char*> instanceLayerNames;
    SkTArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.count(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.count(); ++i) {
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6)) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.count(),     // enabledLayerNameCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Just returning the first physical device instead of getting the whole array.
    // TODO: find best match for our needs
    gpuCount = 1;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    int physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);

    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk physical device version 1.1\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up making a single queue for graphics since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    SkTArray<VkLayerProperties> deviceLayers;
    SkTArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkTArray<const char*> deviceLayerNames;
    SkTArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.count(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }

    // We can't have both VK_KHR_buffer_device_address and VK_EXT_buffer_device_address as
    // extensions. So see if we have the KHR version and if so don't push back the EXT version in
    // the next loop.
    bool hasKHRBufferDeviceAddress = false;
    for (int i = 0; i < deviceExtensions.count(); ++i) {
        if (!strcmp(deviceExtensions[i].extensionName, "VK_KHR_buffer_device_address")) {
            hasKHRBufferDeviceAddress = true;
            break;
        }
    }

    for (int i = 0; i < deviceExtensions.count(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers and
        // are often missing dependency requirements for other extensions. Additionally, these are
        // often left behind in the driver even after they've been promoted to real extensions.
        if (strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {

            if (!hasKHRBufferDeviceAddress ||
                strcmp(deviceExtensions[i].extensionName, "VK_EXT_buffer_device_address")) {
                deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
            }
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.count(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.count(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
                            isProtected)) {
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }

        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the ppEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    deviceFeatures->robustBufferAccess = VK_FALSE;

    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for swapchain queue
    // If one is needed, the client will need its own setup code
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            flags,                                      // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.count(),         // layerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.count(),     // extensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
    };

    {
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8712
        __lsan::ScopedDisabler lsanDisabler;
#endif
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    }
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    if (isProtected) {
        ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
        SkASSERT(grVkGetDeviceQueue2 != nullptr);
        VkDeviceQueueInfo2 queue_info2 = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, // sType
            nullptr,                               // pNext
            VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,  // flags
            graphicsQueueIndex,                    // queueFamilyIndex
            0                                      // queueIndex
        };
        grVkGetDeviceQueue2(device, &queue_info2, &queue);
    } else {
        grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    }

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fOwnsInstanceAndDevice = false;
    ctx->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;

    return true;
}

void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features->pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        sk_free(current);
    }
}

}  // namespace sk_gpu_test

#endif