blob: b2213121a3012dbdd3419c1c93d153cccfc82060 [file] [log] [blame]
/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */
7
Mike Kleinc0bd9f92019-04-23 12:05:21 -05008#include "tools/gpu/vk/VkTestUtils.h"
Greg Daniel35970ec2017-11-10 10:03:05 -05009
10#ifdef SK_VULKAN
11
Hal Canary48cd11f2019-05-22 09:57:18 -040012#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
13 #if defined _WIN32
14 #define SK_GPU_TOOLS_VK_LIBRARY_NAME "vulkan-1.dll"
15 #else
16 #define SK_GPU_TOOLS_VK_LIBRARY_NAME "libvulkan.so"
17 #endif
18#endif
19
#include <algorithm>
#include <memory>

#if defined(SK_BUILD_FOR_UNIX)
#include <execinfo.h>
#endif
#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkAutoMalloc.h"
#include "src/ports/SkOSLibrary.h"
Greg Daniel35970ec2017-11-10 10:03:05 -050029
Ben Wagner7ad9b962019-02-12 11:14:47 -050030#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
31#include <sanitizer/lsan_interface.h>
32#endif
33
Greg Daniel35970ec2017-11-10 10:03:05 -050034namespace sk_gpu_test {
35
36bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
Greg Danield3e65aa2018-08-01 09:19:45 -040037 PFN_vkGetDeviceProcAddr* devProc) {
Greg Daniel35970ec2017-11-10 10:03:05 -050038 static void* vkLib = nullptr;
39 static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
40 static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
41 if (!vkLib) {
Mike Klein77482cb2020-07-10 10:43:59 -050042 vkLib = SkLoadDynamicLibrary(SK_GPU_TOOLS_VK_LIBRARY_NAME);
Greg Daniel35970ec2017-11-10 10:03:05 -050043 if (!vkLib) {
44 return false;
45 }
Mike Klein77482cb2020-07-10 10:43:59 -050046 localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib,
Greg Daniel35970ec2017-11-10 10:03:05 -050047 "vkGetInstanceProcAddr");
Mike Klein77482cb2020-07-10 10:43:59 -050048 localDevProc = (PFN_vkGetDeviceProcAddr) SkGetProcedureAddress(vkLib,
Greg Daniel35970ec2017-11-10 10:03:05 -050049 "vkGetDeviceProcAddr");
50 }
51 if (!localInstProc || !localDevProc) {
52 return false;
53 }
54 *instProc = localInstProc;
55 *devProc = localDevProc;
56 return true;
Greg Daniel35970ec2017-11-10 10:03:05 -050057}
Greg Danielf730c182018-07-02 20:15:37 +000058
59////////////////////////////////////////////////////////////////////////////////
60// Helper code to set up Vulkan context objects
61
62#ifdef SK_ENABLE_VK_LAYERS
// Validation layers we attempt to enable when SK_ENABLE_VK_LAYERS is defined.
// Only layers actually present on the system (and new enough for the Vulkan
// version in use) end up enabled; see should_include_debug_layer().
const char* kDebugLayerNames[] = {
    // single merged layer
    "VK_LAYER_KHRONOS_validation",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};
Greg Danielf730c182018-07-02 20:15:37 +000071
// Clears the patch component of a packed Vulkan version number, leaving only
// the major/minor fields so versions can be compared patch-insensitively.
static uint32_t remove_patch_version(uint32_t specVersion) {
    // In VK_MAKE_VERSION packing, the patch number occupies the low 12 bits.
    constexpr uint32_t kPatchMask = (1u << 12) - 1;
    return specVersion & ~kPatchMask;
}
75
76// Returns the index into layers array for the layer we want. Returns -1 if not supported.
77static int should_include_debug_layer(const char* layerName,
78 uint32_t layerCount, VkLayerProperties* layers,
79 uint32_t version) {
80 for (uint32_t i = 0; i < layerCount; ++i) {
81 if (!strcmp(layerName, layers[i].layerName)) {
82 // Since the layers intercept the vulkan calls and forward them on, we need to make sure
83 // layer was written against a version that isn't older than the version of Vulkan we're
84 // using so that it has all the api entry points.
85 if (version <= remove_patch_version(layers[i].specVersion)) {
86 return i;
87 }
88 return -1;
Greg Daniel98bffae2018-08-01 13:25:41 -040089 }
Greg Danielac616c82018-08-29 15:56:26 -040090
Greg Danielf730c182018-07-02 20:15:37 +000091 }
Greg Danielac616c82018-08-29 15:56:26 -040092 return -1;
Greg Daniel98bffae2018-08-01 13:25:41 -040093}
Greg Daniel92aef4b2018-08-02 13:55:49 -040094
// Writes the current call stack to stderr. Called from the debug-report
// callback so validation errors/warnings come with a backtrace. No-op on
// platforms without an implementation below.
static void print_backtrace() {
#if defined(SK_BUILD_FOR_UNIX)
    void* stack[64];
    int count = backtrace(stack, SK_ARRAY_COUNT(stack));
    backtrace_symbols_fd(stack, count, 2);  // fd 2 == stderr
#else
    // Please add implementations for other platforms.
#endif
}
104
Greg Daniel37329b32018-07-02 20:16:44 +0000105VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
106 VkDebugReportFlagsEXT flags,
107 VkDebugReportObjectTypeEXT objectType,
108 uint64_t object,
109 size_t location,
110 int32_t messageCode,
111 const char* pLayerPrefix,
112 const char* pMessage,
113 void* pUserData) {
114 if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
Greg Daniel8a6e53a2020-06-09 09:05:09 -0400115 // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
116 if (strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521") ||
117 strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522")) {
118 return VK_FALSE;
119 }
Greg Daniele70a30d2020-10-09 10:43:54 -0400120 // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/2171
121 if (strstr(pMessage, "VUID-vkCmdDraw-None-02686")) {
122 return VK_FALSE;
123 }
Greg Daniel37329b32018-07-02 20:16:44 +0000124 SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
Ben Wagnerc2b6d7f2020-01-02 11:22:39 -0500125 print_backtrace();
126 SkDEBUGFAIL("Vulkan debug layer error");
Greg Daniel37329b32018-07-02 20:16:44 +0000127 return VK_TRUE; // skip further layers
128 } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
Ben Wagnerc2b6d7f2020-01-02 11:22:39 -0500129 SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
130 print_backtrace();
Greg Daniel37329b32018-07-02 20:16:44 +0000131 } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
132 SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
Ben Wagnerc2b6d7f2020-01-02 11:22:39 -0500133 print_backtrace();
Greg Daniel37329b32018-07-02 20:16:44 +0000134 } else {
135 SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
136 }
137 return VK_FALSE;
138}
139#endif
140
Greg Daniel98bffae2018-08-01 13:25:41 -0400141#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)
142
Greg Daniel98bffae2018-08-01 13:25:41 -0400143static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
144 uint32_t specVersion,
145 SkTArray<VkExtensionProperties>* instanceExtensions,
146 SkTArray<VkLayerProperties>* instanceLayers) {
147 if (getProc == nullptr) {
148 return false;
149 }
150
151 GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
152 GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
153
154 if (!EnumerateInstanceExtensionProperties ||
155 !EnumerateInstanceLayerProperties) {
156 return false;
157 }
158
159 VkResult res;
160 uint32_t layerCount = 0;
161#ifdef SK_ENABLE_VK_LAYERS
162 // instance layers
163 res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
164 if (VK_SUCCESS != res) {
165 return false;
166 }
167 VkLayerProperties* layers = new VkLayerProperties[layerCount];
168 res = EnumerateInstanceLayerProperties(&layerCount, layers);
169 if (VK_SUCCESS != res) {
170 delete[] layers;
171 return false;
172 }
173
174 uint32_t nonPatchVersion = remove_patch_version(specVersion);
Greg Danielac616c82018-08-29 15:56:26 -0400175 for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
176 int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
177 nonPatchVersion);
178 if (idx != -1) {
179 instanceLayers->push_back() = layers[idx];
Greg Daniel98bffae2018-08-01 13:25:41 -0400180 }
181 }
182 delete[] layers;
183#endif
184
185 // instance extensions
186 // via Vulkan implementation and implicitly enabled layers
187 uint32_t extensionCount = 0;
188 res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
189 if (VK_SUCCESS != res) {
190 return false;
191 }
192 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
193 res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
194 if (VK_SUCCESS != res) {
195 delete[] extensions;
196 return false;
197 }
198 for (uint32_t i = 0; i < extensionCount; ++i) {
199 instanceExtensions->push_back() = extensions[i];
200 }
201 delete [] extensions;
202
203 // via explicitly enabled layers
204 layerCount = instanceLayers->count();
205 for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
206 uint32_t extensionCount = 0;
207 res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
208 &extensionCount, nullptr);
209 if (VK_SUCCESS != res) {
210 return false;
211 }
212 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
213 res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
214 &extensionCount, extensions);
215 if (VK_SUCCESS != res) {
216 delete[] extensions;
217 return false;
218 }
219 for (uint32_t i = 0; i < extensionCount; ++i) {
220 instanceExtensions->push_back() = extensions[i];
221 }
222 delete[] extensions;
223 }
224
225 return true;
226}
227
228static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
229 VkInstance inst, VkPhysicalDevice physDev,
230 SkTArray<VkExtensionProperties>* deviceExtensions,
231 SkTArray<VkLayerProperties>* deviceLayers) {
232 if (getProc == nullptr) {
233 return false;
234 }
235
236 GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
237 GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);
238
239 if (!EnumerateDeviceExtensionProperties ||
240 !EnumerateDeviceLayerProperties) {
241 return false;
242 }
243
244 VkResult res;
245 // device layers
246 uint32_t layerCount = 0;
247#ifdef SK_ENABLE_VK_LAYERS
248 res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
249 if (VK_SUCCESS != res) {
250 return false;
251 }
252 VkLayerProperties* layers = new VkLayerProperties[layerCount];
253 res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
254 if (VK_SUCCESS != res) {
255 delete[] layers;
256 return false;
257 }
258
259 uint32_t nonPatchVersion = remove_patch_version(specVersion);
Greg Danielac616c82018-08-29 15:56:26 -0400260 for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
261 int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
262 nonPatchVersion);
263 if (idx != -1) {
264 deviceLayers->push_back() = layers[idx];
Greg Daniel98bffae2018-08-01 13:25:41 -0400265 }
266 }
267 delete[] layers;
268#endif
269
270 // device extensions
271 // via Vulkan implementation and implicitly enabled layers
272 uint32_t extensionCount = 0;
273 res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
274 if (VK_SUCCESS != res) {
275 return false;
276 }
277 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
278 res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
279 if (VK_SUCCESS != res) {
280 delete[] extensions;
281 return false;
282 }
283 for (uint32_t i = 0; i < extensionCount; ++i) {
284 deviceExtensions->push_back() = extensions[i];
285 }
286 delete[] extensions;
287
288 // via explicitly enabled layers
289 layerCount = deviceLayers->count();
290 for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
291 uint32_t extensionCount = 0;
292 res = EnumerateDeviceExtensionProperties(physDev,
293 (*deviceLayers)[layerIndex].layerName,
294 &extensionCount, nullptr);
295 if (VK_SUCCESS != res) {
296 return false;
297 }
298 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
299 res = EnumerateDeviceExtensionProperties(physDev,
300 (*deviceLayers)[layerIndex].layerName,
301 &extensionCount, extensions);
302 if (VK_SUCCESS != res) {
303 delete[] extensions;
304 return false;
305 }
306 for (uint32_t i = 0; i < extensionCount; ++i) {
307 deviceExtensions->push_back() = extensions[i];
308 }
309 delete[] extensions;
310 }
311
312 return true;
313}
314
// Resolves vk##name via getProc into a local `grVk##name`. No error checking:
// the result may be null and must be tested by the caller.
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

// Resolves vk##name into `grVk##name`; on failure logs and returns false from
// the enclosing function. Relies on `getProc`, `inst`, `debugCallback`, and
// `hasDebugExtension` being in scope at the expansion site.
// NOTE(review): the instance is only torn down when the |device| argument is
// non-null; call sites passing VK_NULL_HANDLE after instance creation appear
// to leak the instance on failure — confirm intent.
#define ACQUIRE_VK_PROC(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (device != VK_NULL_HANDLE) { \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

// Like ACQUIRE_VK_PROC but without any instance teardown: on failure it only
// logs and returns false from the enclosing function.
#define ACQUIRE_VK_PROC_LOCAL(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return false; \
        } \
    } while (0)
Greg Daniel37329b32018-07-02 20:16:44 +0000340
// Destroys |inst|, first unregistering |*debugCallback| when the debug-report
// extension was enabled. Resets *debugCallback to VK_NULL_HANDLE. Returns
// false (via ACQUIRE_VK_PROC_LOCAL) if either destroy proc cannot be resolved.
static bool destroy_instance(GrVkGetProc getProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
    grVkDestroyInstance(inst, nullptr);
    return true;
}
353
// Builds the pNext chain of extension-feature structs hanging off |features|
// and queries the device to populate them. Each struct is sk_malloc_throw'd
// and linked into features->pNext; ownership transfers to the chain (the
// caller is responsible for eventually freeing it — presumably via the
// matching free routine; not visible here). Returns false if |isProtected|
// was requested but the device lacks protected-memory support.
static bool setup_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                           uint32_t physDeviceVersion, GrVkExtensions* extensions,
                           VkPhysicalDeviceFeatures2* features, bool isProtected) {
    // Features2 queries require Vulkan 1.1+ or the get_physical_device_properties2 extension.
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use. |tailPNext|
    // always points at the pNext slot where the next struct gets linked.
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first.
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        // Protected memory is a core 1.1 feature; no extension fallback exists.
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
                (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
                        sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    // Advanced blend equations (optional EXT extension).
    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    // YCbCr sampler conversion (core in 1.1, otherwise via KHR extension).
    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        // Request the feature explicitly; the query below overwrites the rest.
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    // Populate the whole chain with what the device actually supports.
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    // Protected memory was requested but the device does not support it.
    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    // If we want to disable any extension features, do so here before returning.
    return true;
}
417
Greg Danield3e65aa2018-08-01 09:19:45 -0400418bool CreateVkBackendContext(GrVkGetProc getProc,
Greg Danielf730c182018-07-02 20:15:37 +0000419 GrVkBackendContext* ctx,
Greg Daniel98bffae2018-08-01 13:25:41 -0400420 GrVkExtensions* extensions,
Greg Daniela0651ac2018-08-08 09:23:18 -0400421 VkPhysicalDeviceFeatures2* features,
Greg Daniel37329b32018-07-02 20:16:44 +0000422 VkDebugReportCallbackEXT* debugCallback,
Greg Danielf730c182018-07-02 20:15:37 +0000423 uint32_t* presentQueueIndexPtr,
Emircan Uysaler23ca4e72019-06-24 10:53:09 -0400424 CanPresentFn canPresent,
425 bool isProtected) {
Greg Daniel92aef4b2018-08-02 13:55:49 -0400426 VkResult err;
427
428 ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
429 uint32_t instanceVersion = 0;
430 if (!grVkEnumerateInstanceVersion) {
431 instanceVersion = VK_MAKE_VERSION(1, 0, 0);
432 } else {
433 err = grVkEnumerateInstanceVersion(&instanceVersion);
434 if (err) {
Emircan Uysaler23ca4e72019-06-24 10:53:09 -0400435 SkDebugf("failed to enumerate instance version. Err: %d\n", err);
Greg Daniel92aef4b2018-08-02 13:55:49 -0400436 return false;
437 }
438 }
439 SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
Emircan Uysaler23ca4e72019-06-24 10:53:09 -0400440 if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
441 SkDebugf("protected requires vk instance version 1.1\n");
442 return false;
443 }
444
Greg Daniel41f0e282019-01-28 13:15:05 -0500445 uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
446 if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
447 // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
448 // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
449 // api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
450 // since that is the highest vulkan version.
451 apiVersion = VK_MAKE_VERSION(1, 1, 0);
452 }
453
Brian Osman788b9162020-02-07 10:36:46 -0500454 instanceVersion = std::min(instanceVersion, apiVersion);
Greg Daniel92aef4b2018-08-02 13:55:49 -0400455
Greg Danielf730c182018-07-02 20:15:37 +0000456 VkPhysicalDevice physDev;
457 VkDevice device;
458 VkInstance inst;
Greg Danielf730c182018-07-02 20:15:37 +0000459
460 const VkApplicationInfo app_info = {
461 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
462 nullptr, // pNext
463 "vktest", // pApplicationName
464 0, // applicationVersion
465 "vktest", // pEngineName
466 0, // engineVerison
Greg Daniel41f0e282019-01-28 13:15:05 -0500467 apiVersion, // apiVersion
Greg Danielf730c182018-07-02 20:15:37 +0000468 };
469
Greg Daniel98bffae2018-08-01 13:25:41 -0400470 SkTArray<VkLayerProperties> instanceLayers;
471 SkTArray<VkExtensionProperties> instanceExtensions;
472
Greg Daniel92aef4b2018-08-02 13:55:49 -0400473 if (!init_instance_extensions_and_layers(getProc, instanceVersion,
Greg Daniel98bffae2018-08-01 13:25:41 -0400474 &instanceExtensions,
475 &instanceLayers)) {
476 return false;
477 }
Greg Danielf730c182018-07-02 20:15:37 +0000478
479 SkTArray<const char*> instanceLayerNames;
480 SkTArray<const char*> instanceExtensionNames;
Greg Daniel98bffae2018-08-01 13:25:41 -0400481 for (int i = 0; i < instanceLayers.count(); ++i) {
482 instanceLayerNames.push_back(instanceLayers[i].layerName);
483 }
484 for (int i = 0; i < instanceExtensions.count(); ++i) {
John Stilesc1c3c6d2020-08-15 23:22:53 -0400485 if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6) != 0) {
Greg Daniel98bffae2018-08-01 13:25:41 -0400486 instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
Greg Danielf730c182018-07-02 20:15:37 +0000487 }
488 }
Greg Danielf730c182018-07-02 20:15:37 +0000489
490 const VkInstanceCreateInfo instance_create = {
491 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
492 nullptr, // pNext
493 0, // flags
494 &app_info, // pApplicationInfo
495 (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount
496 instanceLayerNames.begin(), // ppEnabledLayerNames
497 (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
498 instanceExtensionNames.begin(), // ppEnabledExtensionNames
499 };
500
Greg Daniel98bffae2018-08-01 13:25:41 -0400501 bool hasDebugExtension = false;
502
Greg Danielf730c182018-07-02 20:15:37 +0000503 ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
504 err = grVkCreateInstance(&instance_create, nullptr, &inst);
505 if (err < 0) {
506 SkDebugf("vkCreateInstance failed: %d\n", err);
507 return false;
508 }
509
Greg Daniel37329b32018-07-02 20:16:44 +0000510#ifdef SK_ENABLE_VK_LAYERS
511 *debugCallback = VK_NULL_HANDLE;
512 for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
513 if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
514 hasDebugExtension = true;
515 }
516 }
517 if (hasDebugExtension) {
518 // Setup callback creation information
519 VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
520 callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
521 callbackCreateInfo.pNext = nullptr;
522 callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
523 VK_DEBUG_REPORT_WARNING_BIT_EXT |
524 // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
525 // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
526 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
527 callbackCreateInfo.pfnCallback = &DebugReportCallback;
528 callbackCreateInfo.pUserData = nullptr;
529
530 ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
531 // Register the callback
532 grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
533 }
534#endif
535
Greg Danielf730c182018-07-02 20:15:37 +0000536 ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
Greg Daniel92aef4b2018-08-02 13:55:49 -0400537 ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
Greg Danielf730c182018-07-02 20:15:37 +0000538 ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
539 ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
540 ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
541 ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
542 ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
543 ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);
544
545 uint32_t gpuCount;
546 err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
547 if (err) {
548 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
Greg Daniel37329b32018-07-02 20:16:44 +0000549 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000550 return false;
551 }
552 if (!gpuCount) {
553 SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000554 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000555 return false;
556 }
557 // Just returning the first physical device instead of getting the whole array.
558 // TODO: find best match for our needs
559 gpuCount = 1;
560 err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
561 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
562 if (err && VK_INCOMPLETE != err) {
563 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
Greg Daniel37329b32018-07-02 20:16:44 +0000564 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000565 return false;
566 }
567
Greg Daniel92aef4b2018-08-02 13:55:49 -0400568 VkPhysicalDeviceProperties physDeviceProperties;
569 grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
Brian Osman788b9162020-02-07 10:36:46 -0500570 int physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);
Greg Daniel92aef4b2018-08-02 13:55:49 -0400571
Emircan Uysaler23ca4e72019-06-24 10:53:09 -0400572 if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
573 SkDebugf("protected requires vk physical device version 1.1\n");
574 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
575 return false;
576 }
577
Greg Danielf730c182018-07-02 20:15:37 +0000578 // query to get the initial queue props size
579 uint32_t queueCount;
580 grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
581 if (!queueCount) {
582 SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000583 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000584 return false;
585 }
586
587 SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
588 // now get the actual queue props
589 VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
590
591 grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
592
593 // iterate to find the graphics queue
594 uint32_t graphicsQueueIndex = queueCount;
595 for (uint32_t i = 0; i < queueCount; i++) {
596 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
597 graphicsQueueIndex = i;
598 break;
599 }
600 }
601 if (graphicsQueueIndex == queueCount) {
602 SkDebugf("Could not find any supported graphics queues.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000603 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000604 return false;
605 }
606
607 // iterate to find the present queue, if needed
608 uint32_t presentQueueIndex = queueCount;
609 if (presentQueueIndexPtr && canPresent) {
610 for (uint32_t i = 0; i < queueCount; i++) {
611 if (canPresent(inst, physDev, i)) {
612 presentQueueIndex = i;
613 break;
614 }
615 }
616 if (presentQueueIndex == queueCount) {
617 SkDebugf("Could not find any supported present queues.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000618 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000619 return false;
620 }
621 *presentQueueIndexPtr = presentQueueIndex;
622 } else {
623 // Just setting this so we end up make a single queue for graphics since there was no
624 // request for a present queue.
625 presentQueueIndex = graphicsQueueIndex;
626 }
627
Greg Daniel98bffae2018-08-01 13:25:41 -0400628 SkTArray<VkLayerProperties> deviceLayers;
629 SkTArray<VkExtensionProperties> deviceExtensions;
Greg Daniel92aef4b2018-08-02 13:55:49 -0400630 if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
Greg Daniel98bffae2018-08-01 13:25:41 -0400631 inst, physDev,
632 &deviceExtensions,
633 &deviceLayers)) {
634 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
635 return false;
636 }
Greg Danielf730c182018-07-02 20:15:37 +0000637
638 SkTArray<const char*> deviceLayerNames;
639 SkTArray<const char*> deviceExtensionNames;
Greg Daniel98bffae2018-08-01 13:25:41 -0400640 for (int i = 0; i < deviceLayers.count(); ++i) {
641 deviceLayerNames.push_back(deviceLayers[i].layerName);
Greg Danielf730c182018-07-02 20:15:37 +0000642 }
Greg Danielbc486b82020-07-09 15:04:48 -0400643
644 // We can't have both VK_KHR_buffer_device_address and VK_EXT_buffer_device_address as
645 // extensions. So see if we have the KHR version and if so don't push back the EXT version in
646 // the next loop.
647 bool hasKHRBufferDeviceAddress = false;
648 for (int i = 0; i < deviceExtensions.count(); ++i) {
649 if (!strcmp(deviceExtensions[i].extensionName, "VK_KHR_buffer_device_address")) {
650 hasKHRBufferDeviceAddress = true;
651 break;
652 }
653 }
654
Greg Daniel98bffae2018-08-01 13:25:41 -0400655 for (int i = 0; i < deviceExtensions.count(); ++i) {
656 // Don't use experimental extensions since they typically don't work with debug layers and
657 // often are missing dependecy requirements for other extensions. Additionally, these are
658 // often left behind in the driver even after they've been promoted to real extensions.
John Stilesc1c3c6d2020-08-15 23:22:53 -0400659 if (0 != strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
660 0 != strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {
Greg Danielbc486b82020-07-09 15:04:48 -0400661
662 if (!hasKHRBufferDeviceAddress ||
John Stilesc1c3c6d2020-08-15 23:22:53 -0400663 0 != strcmp(deviceExtensions[i].extensionName, "VK_EXT_buffer_device_address")) {
Greg Danielbc486b82020-07-09 15:04:48 -0400664 deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
665 }
Greg Daniel98bffae2018-08-01 13:25:41 -0400666 }
Greg Danielf730c182018-07-02 20:15:37 +0000667 }
668
Greg Daniela0651ac2018-08-08 09:23:18 -0400669 extensions->init(getProc, inst, physDev,
670 (uint32_t) instanceExtensionNames.count(),
671 instanceExtensionNames.begin(),
672 (uint32_t) deviceExtensionNames.count(),
673 deviceExtensionNames.begin());
674
675 memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
676 features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
677 features->pNext = nullptr;
678
679 VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
680 void* pointerToFeatures = nullptr;
681 if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
682 extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
Emircan Uysaler23ca4e72019-06-24 10:53:09 -0400683 if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
684 isProtected)) {
685 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
686 return false;
687 }
688
Greg Daniela0651ac2018-08-08 09:23:18 -0400689 // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
690 // the device creation will use that instead of the ppEnabledFeatures.
691 pointerToFeatures = features;
692 } else {
693 grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
694 }
695
Greg Danielf730c182018-07-02 20:15:37 +0000696 // this looks like it would slow things down,
697 // and we can't depend on it on all platforms
Greg Daniela0651ac2018-08-08 09:23:18 -0400698 deviceFeatures->robustBufferAccess = VK_FALSE;
Greg Danielf730c182018-07-02 20:15:37 +0000699
Emircan Uysaler23ca4e72019-06-24 10:53:09 -0400700 VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
Greg Danielf730c182018-07-02 20:15:37 +0000701 float queuePriorities[1] = { 0.0 };
702 // Here we assume no need for swapchain queue
703 // If one is needed, the client will need its own setup code
704 const VkDeviceQueueCreateInfo queueInfo[2] = {
705 {
706 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
707 nullptr, // pNext
Emircan Uysaler23ca4e72019-06-24 10:53:09 -0400708 flags, // VkDeviceQueueCreateFlags
Greg Danielf730c182018-07-02 20:15:37 +0000709 graphicsQueueIndex, // queueFamilyIndex
710 1, // queueCount
711 queuePriorities, // pQueuePriorities
Emircan Uysaler23ca4e72019-06-24 10:53:09 -0400712
Greg Danielf730c182018-07-02 20:15:37 +0000713 },
714 {
715 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
716 nullptr, // pNext
717 0, // VkDeviceQueueCreateFlags
718 presentQueueIndex, // queueFamilyIndex
719 1, // queueCount
720 queuePriorities, // pQueuePriorities
721 }
722 };
723 uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;
724
725 const VkDeviceCreateInfo deviceInfo = {
Greg Daniela0651ac2018-08-08 09:23:18 -0400726 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
727 pointerToFeatures, // pNext
728 0, // VkDeviceCreateFlags
729 queueInfoCount, // queueCreateInfoCount
730 queueInfo, // pQueueCreateInfos
731 (uint32_t) deviceLayerNames.count(), // layerCount
732 deviceLayerNames.begin(), // ppEnabledLayerNames
733 (uint32_t) deviceExtensionNames.count(), // extensionCount
734 deviceExtensionNames.begin(), // ppEnabledExtensionNames
735 pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
Greg Danielf730c182018-07-02 20:15:37 +0000736 };
737
Ben Wagner7ad9b962019-02-12 11:14:47 -0500738 {
739#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
740 // skia:8712
741 __lsan::ScopedDisabler lsanDisabler;
742#endif
743 err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
744 }
Greg Danielf730c182018-07-02 20:15:37 +0000745 if (err) {
746 SkDebugf("CreateDevice failed: %d\n", err);
Greg Daniel37329b32018-07-02 20:16:44 +0000747 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000748 return false;
749 }
750
Greg Danielf730c182018-07-02 20:15:37 +0000751 VkQueue queue;
Emircan Uysaler23ca4e72019-06-24 10:53:09 -0400752 if (isProtected) {
753 ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
754 SkASSERT(grVkGetDeviceQueue2 != nullptr);
755 VkDeviceQueueInfo2 queue_info2 = {
756 VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, // sType
757 nullptr, // pNext
758 VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT, // flags
759 graphicsQueueIndex, // queueFamilyIndex
760 0 // queueIndex
761 };
762 grVkGetDeviceQueue2(device, &queue_info2, &queue);
763 } else {
764 grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
765 }
Greg Danielf730c182018-07-02 20:15:37 +0000766
767 ctx->fInstance = inst;
768 ctx->fPhysicalDevice = physDev;
769 ctx->fDevice = device;
770 ctx->fQueue = queue;
771 ctx->fGraphicsQueueIndex = graphicsQueueIndex;
Greg Daniel41f0e282019-01-28 13:15:05 -0500772 ctx->fMaxAPIVersion = apiVersion;
Greg Daniel98bffae2018-08-01 13:25:41 -0400773 ctx->fVkExtensions = extensions;
Greg Daniela0651ac2018-08-08 09:23:18 -0400774 ctx->fDeviceFeatures2 = features;
Greg Danielc8cd45a2018-07-12 10:02:37 -0400775 ctx->fGetProc = getProc;
Greg Danielf730c182018-07-02 20:15:37 +0000776 ctx->fOwnsInstanceAndDevice = false;
Emircan Uysaler23ca4e72019-06-24 10:53:09 -0400777 ctx->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;
Greg Danielf730c182018-07-02 20:15:37 +0000778
779 return true;
Greg Danielf730c182018-07-02 20:15:37 +0000780}
781
Greg Daniela0651ac2018-08-08 09:23:18 -0400782void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
783 // All Vulkan structs that could be part of the features chain will start with the
784 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
785 // so we can get access to the pNext for the next struct.
786 struct CommonVulkanHeader {
787 VkStructureType sType;
788 void* pNext;
789 };
790
791 void* pNext = features->pNext;
792 while (pNext) {
793 void* current = pNext;
794 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
795 sk_free(current);
796 }
797}
798
John Stilesa6841be2020-08-06 14:11:56 -0400799} // namespace sk_gpu_test
Greg Daniel35970ec2017-11-10 10:03:05 -0500800
801#endif