blob: 41c0f2eec2ea18d7960b804f94378d08cbeb6102 [file] [log] [blame]
Greg Daniel35970ec2017-11-10 10:03:05 -05001/*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
Mike Kleinc0bd9f92019-04-23 12:05:21 -05008#include "tools/gpu/vk/VkTestUtils.h"
Greg Daniel35970ec2017-11-10 10:03:05 -05009
10#ifdef SK_VULKAN
11
Hal Canary48cd11f2019-05-22 09:57:18 -040012#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
13 #if defined _WIN32
14 #define SK_GPU_TOOLS_VK_LIBRARY_NAME "vulkan-1.dll"
15 #else
16 #define SK_GPU_TOOLS_VK_LIBRARY_NAME "libvulkan.so"
17 #endif
18#endif
19
#include <algorithm>
#include <memory>
21
Ben Wagnerc2b6d7f2020-01-02 11:22:39 -050022#if defined(SK_BUILD_FOR_UNIX)
23#include <execinfo.h>
24#endif
Mike Kleinc0bd9f92019-04-23 12:05:21 -050025#include "include/gpu/vk/GrVkBackendContext.h"
26#include "include/gpu/vk/GrVkExtensions.h"
27#include "src/core/SkAutoMalloc.h"
28#include "src/ports/SkOSLibrary.h"
Greg Daniel35970ec2017-11-10 10:03:05 -050029
Ben Wagner7ad9b962019-02-12 11:14:47 -050030#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
31#include <sanitizer/lsan_interface.h>
32#endif
33
Greg Daniel35970ec2017-11-10 10:03:05 -050034namespace sk_gpu_test {
35
36bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
Greg Danield3e65aa2018-08-01 09:19:45 -040037 PFN_vkGetDeviceProcAddr* devProc) {
Greg Daniel35970ec2017-11-10 10:03:05 -050038 static void* vkLib = nullptr;
39 static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
40 static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
41 if (!vkLib) {
Mike Klein77482cb2020-07-10 10:43:59 -050042 vkLib = SkLoadDynamicLibrary(SK_GPU_TOOLS_VK_LIBRARY_NAME);
Greg Daniel35970ec2017-11-10 10:03:05 -050043 if (!vkLib) {
44 return false;
45 }
Mike Klein77482cb2020-07-10 10:43:59 -050046 localInstProc = (PFN_vkGetInstanceProcAddr) SkGetProcedureAddress(vkLib,
Greg Daniel35970ec2017-11-10 10:03:05 -050047 "vkGetInstanceProcAddr");
Mike Klein77482cb2020-07-10 10:43:59 -050048 localDevProc = (PFN_vkGetDeviceProcAddr) SkGetProcedureAddress(vkLib,
Greg Daniel35970ec2017-11-10 10:03:05 -050049 "vkGetDeviceProcAddr");
50 }
51 if (!localInstProc || !localDevProc) {
52 return false;
53 }
54 *instProc = localInstProc;
55 *devProc = localDevProc;
56 return true;
Greg Daniel35970ec2017-11-10 10:03:05 -050057}
Greg Danielf730c182018-07-02 20:15:37 +000058
59////////////////////////////////////////////////////////////////////////////////
60// Helper code to set up Vulkan context objects
61
62#ifdef SK_ENABLE_VK_LAYERS
// Validation layers we try to enable when SK_ENABLE_VK_LAYERS is defined.
// Only layers actually present on the system (and new enough for the Vulkan
// version in use) are enabled — see should_include_debug_layer().
const char* kDebugLayerNames[] = {
    // single merged layer
    "VK_LAYER_KHRONOS_validation",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};
Greg Danielf730c182018-07-02 20:15:37 +000071
// Clears the 12-bit patch field of a VK_MAKE_VERSION-style packed version,
// leaving only the major and minor fields for comparison purposes.
static uint32_t remove_patch_version(uint32_t specVersion) {
    constexpr uint32_t kPatchMask = (1u << 12) - 1;  // low 12 bits hold the patch number
    return specVersion & ~kPatchMask;
}
75
76// Returns the index into layers array for the layer we want. Returns -1 if not supported.
77static int should_include_debug_layer(const char* layerName,
78 uint32_t layerCount, VkLayerProperties* layers,
79 uint32_t version) {
80 for (uint32_t i = 0; i < layerCount; ++i) {
81 if (!strcmp(layerName, layers[i].layerName)) {
82 // Since the layers intercept the vulkan calls and forward them on, we need to make sure
83 // layer was written against a version that isn't older than the version of Vulkan we're
84 // using so that it has all the api entry points.
85 if (version <= remove_patch_version(layers[i].specVersion)) {
86 return i;
87 }
88 return -1;
Greg Daniel98bffae2018-08-01 13:25:41 -040089 }
Greg Danielac616c82018-08-29 15:56:26 -040090
Greg Danielf730c182018-07-02 20:15:37 +000091 }
Greg Danielac616c82018-08-29 15:56:26 -040092 return -1;
Greg Daniel98bffae2018-08-01 13:25:41 -040093}
Greg Daniel92aef4b2018-08-02 13:55:49 -040094
// Dumps the current call stack to stderr (fd 2). Implemented only for Unix,
// where <execinfo.h> provides backtrace(); elsewhere this is a no-op.
static void print_backtrace() {
#if defined(SK_BUILD_FOR_UNIX)
    void* stack[64];
    int count = backtrace(stack, SK_ARRAY_COUNT(stack));
    backtrace_symbols_fd(stack, count, 2);
#else
    // Please add implementations for other platforms.
#endif
}
104
Greg Daniel37329b32018-07-02 20:16:44 +0000105VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
106 VkDebugReportFlagsEXT flags,
107 VkDebugReportObjectTypeEXT objectType,
108 uint64_t object,
109 size_t location,
110 int32_t messageCode,
111 const char* pLayerPrefix,
112 const char* pMessage,
113 void* pUserData) {
114 if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
Greg Daniel8a6e53a2020-06-09 09:05:09 -0400115 // See https://github.com/KhronosGroup/Vulkan-ValidationLayers/issues/1887
116 if (strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01521") ||
117 strstr(pMessage, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-01522")) {
118 return VK_FALSE;
119 }
Greg Daniel37329b32018-07-02 20:16:44 +0000120 SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
Ben Wagnerc2b6d7f2020-01-02 11:22:39 -0500121 print_backtrace();
122 SkDEBUGFAIL("Vulkan debug layer error");
Greg Daniel37329b32018-07-02 20:16:44 +0000123 return VK_TRUE; // skip further layers
124 } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
Ben Wagnerc2b6d7f2020-01-02 11:22:39 -0500125 SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
126 print_backtrace();
Greg Daniel37329b32018-07-02 20:16:44 +0000127 } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
128 SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
Ben Wagnerc2b6d7f2020-01-02 11:22:39 -0500129 print_backtrace();
Greg Daniel37329b32018-07-02 20:16:44 +0000130 } else {
131 SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
132 }
133 return VK_FALSE;
134}
135#endif
136
// Looks up "vkF" through the local |getProc| and binds it to a local variable
// named F of type PFN_vkF. The caller must null-check the result.
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)
138
Greg Daniel98bffae2018-08-01 13:25:41 -0400139static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
140 uint32_t specVersion,
141 SkTArray<VkExtensionProperties>* instanceExtensions,
142 SkTArray<VkLayerProperties>* instanceLayers) {
143 if (getProc == nullptr) {
144 return false;
145 }
146
147 GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
148 GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
149
150 if (!EnumerateInstanceExtensionProperties ||
151 !EnumerateInstanceLayerProperties) {
152 return false;
153 }
154
155 VkResult res;
156 uint32_t layerCount = 0;
157#ifdef SK_ENABLE_VK_LAYERS
158 // instance layers
159 res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
160 if (VK_SUCCESS != res) {
161 return false;
162 }
163 VkLayerProperties* layers = new VkLayerProperties[layerCount];
164 res = EnumerateInstanceLayerProperties(&layerCount, layers);
165 if (VK_SUCCESS != res) {
166 delete[] layers;
167 return false;
168 }
169
170 uint32_t nonPatchVersion = remove_patch_version(specVersion);
Greg Danielac616c82018-08-29 15:56:26 -0400171 for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
172 int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
173 nonPatchVersion);
174 if (idx != -1) {
175 instanceLayers->push_back() = layers[idx];
Greg Daniel98bffae2018-08-01 13:25:41 -0400176 }
177 }
178 delete[] layers;
179#endif
180
181 // instance extensions
182 // via Vulkan implementation and implicitly enabled layers
183 uint32_t extensionCount = 0;
184 res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
185 if (VK_SUCCESS != res) {
186 return false;
187 }
188 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
189 res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
190 if (VK_SUCCESS != res) {
191 delete[] extensions;
192 return false;
193 }
194 for (uint32_t i = 0; i < extensionCount; ++i) {
195 instanceExtensions->push_back() = extensions[i];
196 }
197 delete [] extensions;
198
199 // via explicitly enabled layers
200 layerCount = instanceLayers->count();
201 for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
202 uint32_t extensionCount = 0;
203 res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
204 &extensionCount, nullptr);
205 if (VK_SUCCESS != res) {
206 return false;
207 }
208 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
209 res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
210 &extensionCount, extensions);
211 if (VK_SUCCESS != res) {
212 delete[] extensions;
213 return false;
214 }
215 for (uint32_t i = 0; i < extensionCount; ++i) {
216 instanceExtensions->push_back() = extensions[i];
217 }
218 delete[] extensions;
219 }
220
221 return true;
222}
223
224static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
225 VkInstance inst, VkPhysicalDevice physDev,
226 SkTArray<VkExtensionProperties>* deviceExtensions,
227 SkTArray<VkLayerProperties>* deviceLayers) {
228 if (getProc == nullptr) {
229 return false;
230 }
231
232 GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
233 GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);
234
235 if (!EnumerateDeviceExtensionProperties ||
236 !EnumerateDeviceLayerProperties) {
237 return false;
238 }
239
240 VkResult res;
241 // device layers
242 uint32_t layerCount = 0;
243#ifdef SK_ENABLE_VK_LAYERS
244 res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
245 if (VK_SUCCESS != res) {
246 return false;
247 }
248 VkLayerProperties* layers = new VkLayerProperties[layerCount];
249 res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
250 if (VK_SUCCESS != res) {
251 delete[] layers;
252 return false;
253 }
254
255 uint32_t nonPatchVersion = remove_patch_version(specVersion);
Greg Danielac616c82018-08-29 15:56:26 -0400256 for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
257 int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
258 nonPatchVersion);
259 if (idx != -1) {
260 deviceLayers->push_back() = layers[idx];
Greg Daniel98bffae2018-08-01 13:25:41 -0400261 }
262 }
263 delete[] layers;
264#endif
265
266 // device extensions
267 // via Vulkan implementation and implicitly enabled layers
268 uint32_t extensionCount = 0;
269 res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
270 if (VK_SUCCESS != res) {
271 return false;
272 }
273 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
274 res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
275 if (VK_SUCCESS != res) {
276 delete[] extensions;
277 return false;
278 }
279 for (uint32_t i = 0; i < extensionCount; ++i) {
280 deviceExtensions->push_back() = extensions[i];
281 }
282 delete[] extensions;
283
284 // via explicitly enabled layers
285 layerCount = deviceLayers->count();
286 for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
287 uint32_t extensionCount = 0;
288 res = EnumerateDeviceExtensionProperties(physDev,
289 (*deviceLayers)[layerIndex].layerName,
290 &extensionCount, nullptr);
291 if (VK_SUCCESS != res) {
292 return false;
293 }
294 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
295 res = EnumerateDeviceExtensionProperties(physDev,
296 (*deviceLayers)[layerIndex].layerName,
297 &extensionCount, extensions);
298 if (VK_SUCCESS != res) {
299 delete[] extensions;
300 return false;
301 }
302 for (uint32_t i = 0; i < extensionCount; ++i) {
303 deviceExtensions->push_back() = extensions[i];
304 }
305 delete[] extensions;
306 }
307
308 return true;
309}
310
// Binds "vk<name>" to a local grVk<name> without any error checking; the
// caller must test the pointer (used for entry points that may legitimately
// be absent, e.g. vkEnumerateInstanceVersion on 1.0 loaders).
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

// Binds "vk<name>" to a local grVk<name>; on failure logs, tears down the
// instance when acquiring a device-level proc, and returns false from the
// ENCLOSING function. Relies on locals named getProc, inst, debugCallback and
// hasDebugExtension being in scope at the expansion site.
#define ACQUIRE_VK_PROC(name, instance, device)                                    \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            if (device != VK_NULL_HANDLE) {                                        \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
            }                                                                      \
        return false;                                                          \
        }                                                                          \
    } while (0)

// Like ACQUIRE_VK_PROC but performs no cleanup: on failure it just logs and
// returns false from the enclosing function. Only needs |getProc| in scope.
#define ACQUIRE_VK_PROC_LOCAL(name, instance, device)                              \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            return false;                                                          \
        }                                                                          \
    } while (0)
Greg Daniel37329b32018-07-02 20:16:44 +0000336
// Destroys the debug-report callback (if one was installed) and then the
// VkInstance itself. Returns false only when a destroy entry point cannot be
// acquired (via ACQUIRE_VK_PROC_LOCAL's early return), in which case the
// instance is leaked; callers generally ignore the return value.
static bool destroy_instance(GrVkGetProc getProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        // Clear the handle so a later call cannot double-destroy it.
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
    grVkDestroyInstance(inst, nullptr);
    return true;
}
349
// Builds the pNext chain of extension-feature structs hanging off |features|
// and then queries the physical device to populate the whole chain. Each
// chained struct is heap-allocated with sk_malloc_throw and must later be
// released with FreeVulkanFeaturesStructs(). Returns false only when
// |isProtected| was requested but the device lacks protected-memory support.
static bool setup_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                           uint32_t physDeviceVersion, GrVkExtensions* extensions,
                           VkPhysicalDeviceFeatures2* features, bool isProtected) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.
    // tailPNext always points at the pNext field of the last struct in the
    // chain, so appending is O(1).
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        // Protected memory is a core 1.1 feature, so 1.1 must be available.
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
            (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
                sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
            VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        // Pre-set to VK_TRUE; the query below overwrites it with actual support.
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    // Query actual feature support: core entry point on 1.1+, the KHR alias
    // from VK_KHR_get_physical_device_properties2 otherwise.
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    return true;
    // If we want to disable any extension features do so here.
}
413
// Creates a Vulkan instance and logical device for Skia testing and fills in
// |ctx| with the resulting handles. On success the caller effectively owns the
// instance/device (ctx->fOwnsInstanceAndDevice is false, so Skia will not
// destroy them). Outputs:
//  - |extensions| / |features|: initialized here and referenced by |ctx|, so
//    they must outlive the context; |features|' chain is freed with
//    FreeVulkanFeaturesStructs().
//  - |debugCallback|: set when validation layers and VK_EXT_debug_report are
//    available (SK_ENABLE_VK_LAYERS builds only).
//  - |presentQueueIndexPtr|: set when |canPresent| identifies a
//    present-capable queue family.
//  - |isProtected|: requests a protected-memory queue/device (requires 1.1).
// Returns false after tearing down the instance on any failure.
bool CreateVkBackendContext(GrVkGetProc getProc,
                            GrVkBackendContext* ctx,
                            GrVkExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent,
                            bool isProtected) {
    VkResult err;

    // vkEnumerateInstanceVersion only exists on 1.1+ loaders, hence NOCHECK.
    ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk instance version 1.1\n");
        return false;
    }

    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
        // api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
        // since that is the highest vulkan version.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = std::min(instanceVersion, apiVersion);

    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    SkTArray<VkLayerProperties> instanceLayers;
    SkTArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    SkTArray<const char*> instanceLayerNames;
    SkTArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.count(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.count(); ++i) {
        // Skip experimental VK_KHX extensions.
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6)) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.count(),     // enabledLayerNameCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Just returning the first physical device instead of getting the whole array.
    // TODO: find best match for our needs
    gpuCount = 1;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    // Clamp to the API version we requested; the device may report newer.
    int physDeviceVersion = std::min(physDeviceProperties.apiVersion, apiVersion);

    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk physical device version 1.1\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up make a single queue for graphics since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    SkTArray<VkLayerProperties> deviceLayers;
    SkTArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkTArray<const char*> deviceLayerNames;
    SkTArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.count(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }

    // We can't have both VK_KHR_buffer_device_address and VK_EXT_buffer_device_address as
    // extensions. So see if we have the KHR version and if so don't push back the EXT version in
    // the next loop.
    bool hasKHRBufferDeviceAddress = false;
    for (int i = 0; i < deviceExtensions.count(); ++i) {
        if (!strcmp(deviceExtensions[i].extensionName, "VK_KHR_buffer_device_address")) {
            hasKHRBufferDeviceAddress = true;
            break;
        }
    }

    for (int i = 0; i < deviceExtensions.count(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers and
        // often are missing dependency requirements for other extensions. Additionally, these are
        // often left behind in the driver even after they've been promoted to real extensions.
        if (strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {

            if (!hasKHRBufferDeviceAddress ||
                strcmp(deviceExtensions[i].extensionName, "VK_EXT_buffer_device_address")) {
                deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
            }
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.count(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.count(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
                            isProtected)) {
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }

        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the ppEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    deviceFeatures->robustBufferAccess = VK_FALSE;

    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for swapchain queue
    // If one is needed, the client will need its own setup code
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            flags,                                      // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities

        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    // Only need the second queue-create entry when present != graphics family.
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.count(),         // layerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.count(),     // extensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
    };

    {
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8712
        __lsan::ScopedDisabler lsanDisabler;
#endif
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    }
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    if (isProtected) {
        // Protected queues must be retrieved with vkGetDeviceQueue2 (core 1.1).
        ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
        SkASSERT(grVkGetDeviceQueue2 != nullptr);
        VkDeviceQueueInfo2 queue_info2 = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,  // sType
            nullptr,                                // pNext
            VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,   // flags
            graphicsQueueIndex,                     // queueFamilyIndex
            0                                       // queueIndex
        };
        grVkGetDeviceQueue2(device, &queue_info2, &queue);
    } else {
        grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    }

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fOwnsInstanceAndDevice = false;
    ctx->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;

    return true;
}
777
Greg Daniela0651ac2018-08-08 09:23:18 -0400778void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
779 // All Vulkan structs that could be part of the features chain will start with the
780 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
781 // so we can get access to the pNext for the next struct.
782 struct CommonVulkanHeader {
783 VkStructureType sType;
784 void* pNext;
785 };
786
787 void* pNext = features->pNext;
788 while (pNext) {
789 void* current = pNext;
790 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
791 sk_free(current);
792 }
793}
794
John Stilesa6841be2020-08-06 14:11:56 -0400795} // namespace sk_gpu_test
Greg Daniel35970ec2017-11-10 10:03:05 -0500796
797#endif