blob: cd6aa5b115f2daf53e089bb7bff4efa57b24ab55 [file] [log] [blame]
Greg Daniel35970ec2017-11-10 10:03:05 -05001/*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
Mike Kleinc0bd9f92019-04-23 12:05:21 -05008#include "tools/gpu/vk/VkTestUtils.h"
Greg Daniel35970ec2017-11-10 10:03:05 -05009
10#ifdef SK_VULKAN
11
Mike Kleinc0bd9f92019-04-23 12:05:21 -050012#include "include/gpu/vk/GrVkBackendContext.h"
13#include "include/gpu/vk/GrVkExtensions.h"
14#include "src/core/SkAutoMalloc.h"
15#include "src/ports/SkOSLibrary.h"
Greg Daniel35970ec2017-11-10 10:03:05 -050016
Ben Wagner7ad9b962019-02-12 11:14:47 -050017#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
18#include <sanitizer/lsan_interface.h>
19#endif
20
Greg Daniel35970ec2017-11-10 10:03:05 -050021namespace sk_gpu_test {
22
23bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
Greg Danield3e65aa2018-08-01 09:19:45 -040024 PFN_vkGetDeviceProcAddr* devProc) {
Chris Dalton3a67b8e2018-05-03 09:30:29 -060025#ifdef SK_MOLTENVK
26 // MoltenVK is a statically linked framework, so there is no Vulkan library to load.
27 *instProc = &vkGetInstanceProcAddr;
28 *devProc = &vkGetDeviceProcAddr;
29 return true;
30#else
Greg Daniel35970ec2017-11-10 10:03:05 -050031 static void* vkLib = nullptr;
32 static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
33 static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
34 if (!vkLib) {
35#if defined _WIN32
36 vkLib = DynamicLoadLibrary("vulkan-1.dll");
37#else
38 vkLib = DynamicLoadLibrary("libvulkan.so");
39#endif
40 if (!vkLib) {
41 return false;
42 }
43 localInstProc = (PFN_vkGetInstanceProcAddr) GetProcedureAddress(vkLib,
44 "vkGetInstanceProcAddr");
45 localDevProc = (PFN_vkGetDeviceProcAddr) GetProcedureAddress(vkLib,
46 "vkGetDeviceProcAddr");
47 }
48 if (!localInstProc || !localDevProc) {
49 return false;
50 }
51 *instProc = localInstProc;
52 *devProc = localDevProc;
53 return true;
Chris Dalton3a67b8e2018-05-03 09:30:29 -060054#endif
Greg Daniel35970ec2017-11-10 10:03:05 -050055}
Greg Danielf730c182018-07-02 20:15:37 +000056
57////////////////////////////////////////////////////////////////////////////////
58// Helper code to set up Vulkan context objects
59
60#ifdef SK_ENABLE_VK_LAYERS
// Candidate validation layers used when SK_ENABLE_VK_LAYERS is defined. Each
// name is only actually enabled if should_include_debug_layer() finds it
// present (and new enough) at runtime.
const char* kDebugLayerNames[] = {
    // elements of VK_LAYER_LUNARG_standard_validation
    "VK_LAYER_GOOGLE_threading",
    "VK_LAYER_LUNARG_parameter_validation",
    "VK_LAYER_LUNARG_object_tracker",
    "VK_LAYER_LUNARG_core_validation",
    "VK_LAYER_GOOGLE_unique_objects",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};
Greg Danielf730c182018-07-02 20:15:37 +000073
Greg Danielac616c82018-08-29 15:56:26 -040074static uint32_t remove_patch_version(uint32_t specVersion) {
75 return (specVersion >> 12) << 12;
76}
77
78// Returns the index into layers array for the layer we want. Returns -1 if not supported.
79static int should_include_debug_layer(const char* layerName,
80 uint32_t layerCount, VkLayerProperties* layers,
81 uint32_t version) {
82 for (uint32_t i = 0; i < layerCount; ++i) {
83 if (!strcmp(layerName, layers[i].layerName)) {
84 // Since the layers intercept the vulkan calls and forward them on, we need to make sure
85 // layer was written against a version that isn't older than the version of Vulkan we're
86 // using so that it has all the api entry points.
87 if (version <= remove_patch_version(layers[i].specVersion)) {
88 return i;
89 }
90 return -1;
Greg Daniel98bffae2018-08-01 13:25:41 -040091 }
Greg Danielac616c82018-08-29 15:56:26 -040092
Greg Danielf730c182018-07-02 20:15:37 +000093 }
Greg Danielac616c82018-08-29 15:56:26 -040094 return -1;
Greg Daniel98bffae2018-08-01 13:25:41 -040095}
Greg Daniel92aef4b2018-08-02 13:55:49 -040096
Greg Daniel37329b32018-07-02 20:16:44 +000097VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
98 VkDebugReportFlagsEXT flags,
99 VkDebugReportObjectTypeEXT objectType,
100 uint64_t object,
101 size_t location,
102 int32_t messageCode,
103 const char* pLayerPrefix,
104 const char* pMessage,
105 void* pUserData) {
106 if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
107 SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
108 return VK_TRUE; // skip further layers
109 } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
Greg Danielac616c82018-08-29 15:56:26 -0400110 // There is currently a bug in the spec which doesn't have
111 // VK_STRUCTURE_TYPE_BLEND_OPERATION_ADVANCED_FEATURES_EXT as an allowable pNext struct in
112 // VkDeviceCreateInfo. So we ignore that warning since it is wrong.
113 if (!strstr(pMessage,
114 "pCreateInfo->pNext chain includes a structure with unexpected VkStructureType "
115 "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT")) {
116 SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
117 }
Greg Daniel37329b32018-07-02 20:16:44 +0000118 } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
119 SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
120 } else {
121 SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
122 }
123 return VK_FALSE;
124}
125#endif
126
Greg Daniel98bffae2018-08-01 13:25:41 -0400127#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)
128
Greg Daniel98bffae2018-08-01 13:25:41 -0400129static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
130 uint32_t specVersion,
131 SkTArray<VkExtensionProperties>* instanceExtensions,
132 SkTArray<VkLayerProperties>* instanceLayers) {
133 if (getProc == nullptr) {
134 return false;
135 }
136
137 GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
138 GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
139
140 if (!EnumerateInstanceExtensionProperties ||
141 !EnumerateInstanceLayerProperties) {
142 return false;
143 }
144
145 VkResult res;
146 uint32_t layerCount = 0;
147#ifdef SK_ENABLE_VK_LAYERS
148 // instance layers
149 res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
150 if (VK_SUCCESS != res) {
151 return false;
152 }
153 VkLayerProperties* layers = new VkLayerProperties[layerCount];
154 res = EnumerateInstanceLayerProperties(&layerCount, layers);
155 if (VK_SUCCESS != res) {
156 delete[] layers;
157 return false;
158 }
159
160 uint32_t nonPatchVersion = remove_patch_version(specVersion);
Greg Danielac616c82018-08-29 15:56:26 -0400161 for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
162 int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
163 nonPatchVersion);
164 if (idx != -1) {
165 instanceLayers->push_back() = layers[idx];
Greg Daniel98bffae2018-08-01 13:25:41 -0400166 }
167 }
168 delete[] layers;
169#endif
170
171 // instance extensions
172 // via Vulkan implementation and implicitly enabled layers
173 uint32_t extensionCount = 0;
174 res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
175 if (VK_SUCCESS != res) {
176 return false;
177 }
178 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
179 res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
180 if (VK_SUCCESS != res) {
181 delete[] extensions;
182 return false;
183 }
184 for (uint32_t i = 0; i < extensionCount; ++i) {
185 instanceExtensions->push_back() = extensions[i];
186 }
187 delete [] extensions;
188
189 // via explicitly enabled layers
190 layerCount = instanceLayers->count();
191 for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
192 uint32_t extensionCount = 0;
193 res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
194 &extensionCount, nullptr);
195 if (VK_SUCCESS != res) {
196 return false;
197 }
198 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
199 res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
200 &extensionCount, extensions);
201 if (VK_SUCCESS != res) {
202 delete[] extensions;
203 return false;
204 }
205 for (uint32_t i = 0; i < extensionCount; ++i) {
206 instanceExtensions->push_back() = extensions[i];
207 }
208 delete[] extensions;
209 }
210
211 return true;
212}
213
// Device-level analog of init_instance_extensions_and_layers(): gathers the
// layers (debug builds only) and extensions exposed by `physDev`, appending
// them to the output arrays. Extensions are collected from the implementation
// (plus implicitly enabled layers) and from each explicitly enabled layer.
// Returns false on null getProc, unresolved entry points, or any Vulkan error.
static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
                                              VkInstance inst, VkPhysicalDevice physDev,
                                              SkTArray<VkExtensionProperties>* deviceExtensions,
                                              SkTArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    // Device enumeration entry points are resolved at instance scope.
    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // Standard two-call enumerate (count, then fill).
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    // Keep only the known debug layers that are present and new enough.
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        deviceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers: each enabled layer may expose extensions.
    layerCount = deviceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}
300
Brian Salomon23356442018-11-30 15:33:19 -0500301#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
302 PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))
Greg Daniel92aef4b2018-08-02 13:55:49 -0400303
Brian Salomon23356442018-11-30 15:33:19 -0500304#define ACQUIRE_VK_PROC(name, instance, device) \
305 PFN_vk##name grVk##name = \
306 reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
307 do { \
308 if (grVk##name == nullptr) { \
309 SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
310 if (device != VK_NULL_HANDLE) { \
311 destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
312 } \
313 return false; \
314 } \
315 } while (0)
Greg Daniel98bffae2018-08-01 13:25:41 -0400316
Brian Salomon23356442018-11-30 15:33:19 -0500317#define ACQUIRE_VK_PROC_LOCAL(name, instance, device) \
318 PFN_vk##name grVk##name = \
319 reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
320 do { \
321 if (grVk##name == nullptr) { \
322 SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
323 return; \
324 } \
325 } while (0)
Greg Daniel37329b32018-07-02 20:16:44 +0000326
Greg Danield3e65aa2018-08-01 09:19:45 -0400327static void destroy_instance(GrVkGetProc getProc, VkInstance inst,
Greg Daniel37329b32018-07-02 20:16:44 +0000328 VkDebugReportCallbackEXT* debugCallback,
329 bool hasDebugExtension) {
330 if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
331 ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
332 grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
333 *debugCallback = VK_NULL_HANDLE;
334 }
335 ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
336 grVkDestroyInstance(inst, nullptr);
337}
338
Greg Daniela0651ac2018-08-08 09:23:18 -0400339static void setup_extension_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
340 uint32_t physDeviceVersion, GrVkExtensions* extensions,
341 VkPhysicalDeviceFeatures2* features) {
342 SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
343 extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));
344
345 // Setup all extension feature structs we may want to use.
346
347 void** tailPNext = &features->pNext;
348
349 VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
350 if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
351 blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
352 sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
353 blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
354 blend->pNext = nullptr;
355 *tailPNext = blend;
356 tailPNext = &blend->pNext;
357 }
358
Greg Daniel7e000222018-12-03 10:08:21 -0500359 VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
360 if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
361 extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
362 ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
363 sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
364 ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
365 ycbcrFeature->pNext = nullptr;
366 *tailPNext = ycbcrFeature;
367 tailPNext = &ycbcrFeature->pNext;
368 }
369
Greg Daniela0651ac2018-08-08 09:23:18 -0400370 if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
371 ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
372 grVkGetPhysicalDeviceFeatures2(physDev, features);
373 } else {
374 SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
375 1));
376 ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
377 grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
378 }
379
380 // If we want to disable any extension features do so here.
381}
382
Greg Danield3e65aa2018-08-01 09:19:45 -0400383bool CreateVkBackendContext(GrVkGetProc getProc,
Greg Danielf730c182018-07-02 20:15:37 +0000384 GrVkBackendContext* ctx,
Greg Daniel98bffae2018-08-01 13:25:41 -0400385 GrVkExtensions* extensions,
Greg Daniela0651ac2018-08-08 09:23:18 -0400386 VkPhysicalDeviceFeatures2* features,
Greg Daniel37329b32018-07-02 20:16:44 +0000387 VkDebugReportCallbackEXT* debugCallback,
Greg Danielf730c182018-07-02 20:15:37 +0000388 uint32_t* presentQueueIndexPtr,
389 CanPresentFn canPresent) {
Greg Daniel92aef4b2018-08-02 13:55:49 -0400390 VkResult err;
391
392 ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
393 uint32_t instanceVersion = 0;
394 if (!grVkEnumerateInstanceVersion) {
395 instanceVersion = VK_MAKE_VERSION(1, 0, 0);
396 } else {
397 err = grVkEnumerateInstanceVersion(&instanceVersion);
398 if (err) {
399 SkDebugf("failed ot enumerate instance version. Err: %d\n", err);
400 return false;
401 }
402 }
403 SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
Greg Daniel41f0e282019-01-28 13:15:05 -0500404 uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
405 if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
406 // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
407 // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
408 // api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
409 // since that is the highest vulkan version.
410 apiVersion = VK_MAKE_VERSION(1, 1, 0);
411 }
412
413 instanceVersion = SkTMin(instanceVersion, apiVersion);
Greg Daniel92aef4b2018-08-02 13:55:49 -0400414
Greg Danielf730c182018-07-02 20:15:37 +0000415 VkPhysicalDevice physDev;
416 VkDevice device;
417 VkInstance inst;
Greg Danielf730c182018-07-02 20:15:37 +0000418
419 const VkApplicationInfo app_info = {
420 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
421 nullptr, // pNext
422 "vktest", // pApplicationName
423 0, // applicationVersion
424 "vktest", // pEngineName
425 0, // engineVerison
Greg Daniel41f0e282019-01-28 13:15:05 -0500426 apiVersion, // apiVersion
Greg Danielf730c182018-07-02 20:15:37 +0000427 };
428
Greg Daniel98bffae2018-08-01 13:25:41 -0400429 SkTArray<VkLayerProperties> instanceLayers;
430 SkTArray<VkExtensionProperties> instanceExtensions;
431
Greg Daniel92aef4b2018-08-02 13:55:49 -0400432 if (!init_instance_extensions_and_layers(getProc, instanceVersion,
Greg Daniel98bffae2018-08-01 13:25:41 -0400433 &instanceExtensions,
434 &instanceLayers)) {
435 return false;
436 }
Greg Danielf730c182018-07-02 20:15:37 +0000437
438 SkTArray<const char*> instanceLayerNames;
439 SkTArray<const char*> instanceExtensionNames;
Greg Daniel98bffae2018-08-01 13:25:41 -0400440 for (int i = 0; i < instanceLayers.count(); ++i) {
441 instanceLayerNames.push_back(instanceLayers[i].layerName);
442 }
443 for (int i = 0; i < instanceExtensions.count(); ++i) {
444 if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6)) {
445 instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
Greg Danielf730c182018-07-02 20:15:37 +0000446 }
447 }
Greg Danielf730c182018-07-02 20:15:37 +0000448
449 const VkInstanceCreateInfo instance_create = {
450 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
451 nullptr, // pNext
452 0, // flags
453 &app_info, // pApplicationInfo
454 (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount
455 instanceLayerNames.begin(), // ppEnabledLayerNames
456 (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
457 instanceExtensionNames.begin(), // ppEnabledExtensionNames
458 };
459
Greg Daniel98bffae2018-08-01 13:25:41 -0400460 bool hasDebugExtension = false;
461
Greg Danielf730c182018-07-02 20:15:37 +0000462 ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
463 err = grVkCreateInstance(&instance_create, nullptr, &inst);
464 if (err < 0) {
465 SkDebugf("vkCreateInstance failed: %d\n", err);
466 return false;
467 }
468
Greg Daniel37329b32018-07-02 20:16:44 +0000469#ifdef SK_ENABLE_VK_LAYERS
470 *debugCallback = VK_NULL_HANDLE;
471 for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
472 if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
473 hasDebugExtension = true;
474 }
475 }
476 if (hasDebugExtension) {
477 // Setup callback creation information
478 VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
479 callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
480 callbackCreateInfo.pNext = nullptr;
481 callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
482 VK_DEBUG_REPORT_WARNING_BIT_EXT |
483 // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
484 // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
485 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
486 callbackCreateInfo.pfnCallback = &DebugReportCallback;
487 callbackCreateInfo.pUserData = nullptr;
488
489 ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
490 // Register the callback
491 grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
492 }
493#endif
494
Greg Danielf730c182018-07-02 20:15:37 +0000495 ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
Greg Daniel92aef4b2018-08-02 13:55:49 -0400496 ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
Greg Danielf730c182018-07-02 20:15:37 +0000497 ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
498 ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
499 ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
500 ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
501 ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
502 ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);
503
504 uint32_t gpuCount;
505 err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
506 if (err) {
507 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
Greg Daniel37329b32018-07-02 20:16:44 +0000508 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000509 return false;
510 }
511 if (!gpuCount) {
512 SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000513 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000514 return false;
515 }
516 // Just returning the first physical device instead of getting the whole array.
517 // TODO: find best match for our needs
518 gpuCount = 1;
519 err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
520 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
521 if (err && VK_INCOMPLETE != err) {
522 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
Greg Daniel37329b32018-07-02 20:16:44 +0000523 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000524 return false;
525 }
526
Greg Daniel92aef4b2018-08-02 13:55:49 -0400527 VkPhysicalDeviceProperties physDeviceProperties;
528 grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
Greg Daniel41f0e282019-01-28 13:15:05 -0500529 int physDeviceVersion = SkTMin(physDeviceProperties.apiVersion, apiVersion);
Greg Daniel92aef4b2018-08-02 13:55:49 -0400530
Greg Danielf730c182018-07-02 20:15:37 +0000531 // query to get the initial queue props size
532 uint32_t queueCount;
533 grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
534 if (!queueCount) {
535 SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000536 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000537 return false;
538 }
539
540 SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
541 // now get the actual queue props
542 VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
543
544 grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
545
546 // iterate to find the graphics queue
547 uint32_t graphicsQueueIndex = queueCount;
548 for (uint32_t i = 0; i < queueCount; i++) {
549 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
550 graphicsQueueIndex = i;
551 break;
552 }
553 }
554 if (graphicsQueueIndex == queueCount) {
555 SkDebugf("Could not find any supported graphics queues.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000556 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000557 return false;
558 }
559
560 // iterate to find the present queue, if needed
561 uint32_t presentQueueIndex = queueCount;
562 if (presentQueueIndexPtr && canPresent) {
563 for (uint32_t i = 0; i < queueCount; i++) {
564 if (canPresent(inst, physDev, i)) {
565 presentQueueIndex = i;
566 break;
567 }
568 }
569 if (presentQueueIndex == queueCount) {
570 SkDebugf("Could not find any supported present queues.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000571 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000572 return false;
573 }
574 *presentQueueIndexPtr = presentQueueIndex;
575 } else {
576 // Just setting this so we end up make a single queue for graphics since there was no
577 // request for a present queue.
578 presentQueueIndex = graphicsQueueIndex;
579 }
580
Greg Daniel98bffae2018-08-01 13:25:41 -0400581 SkTArray<VkLayerProperties> deviceLayers;
582 SkTArray<VkExtensionProperties> deviceExtensions;
Greg Daniel92aef4b2018-08-02 13:55:49 -0400583 if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
Greg Daniel98bffae2018-08-01 13:25:41 -0400584 inst, physDev,
585 &deviceExtensions,
586 &deviceLayers)) {
587 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
588 return false;
589 }
Greg Danielf730c182018-07-02 20:15:37 +0000590
591 SkTArray<const char*> deviceLayerNames;
592 SkTArray<const char*> deviceExtensionNames;
Greg Daniel98bffae2018-08-01 13:25:41 -0400593 for (int i = 0; i < deviceLayers.count(); ++i) {
594 deviceLayerNames.push_back(deviceLayers[i].layerName);
Greg Danielf730c182018-07-02 20:15:37 +0000595 }
Greg Daniel98bffae2018-08-01 13:25:41 -0400596 for (int i = 0; i < deviceExtensions.count(); ++i) {
597 // Don't use experimental extensions since they typically don't work with debug layers and
598 // often are missing dependecy requirements for other extensions. Additionally, these are
599 // often left behind in the driver even after they've been promoted to real extensions.
600 if (strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
601 strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {
602 deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
603 }
Greg Danielf730c182018-07-02 20:15:37 +0000604 }
605
Greg Daniela0651ac2018-08-08 09:23:18 -0400606 extensions->init(getProc, inst, physDev,
607 (uint32_t) instanceExtensionNames.count(),
608 instanceExtensionNames.begin(),
609 (uint32_t) deviceExtensionNames.count(),
610 deviceExtensionNames.begin());
611
612 memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
613 features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
614 features->pNext = nullptr;
615
616 VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
617 void* pointerToFeatures = nullptr;
618 if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
619 extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
620 setup_extension_features(getProc, inst, physDev, physDeviceVersion, extensions, features);
621 // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
622 // the device creation will use that instead of the ppEnabledFeatures.
623 pointerToFeatures = features;
624 } else {
625 grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
626 }
627
Greg Danielf730c182018-07-02 20:15:37 +0000628 // this looks like it would slow things down,
629 // and we can't depend on it on all platforms
Greg Daniela0651ac2018-08-08 09:23:18 -0400630 deviceFeatures->robustBufferAccess = VK_FALSE;
Greg Danielf730c182018-07-02 20:15:37 +0000631
Greg Danielf730c182018-07-02 20:15:37 +0000632 float queuePriorities[1] = { 0.0 };
633 // Here we assume no need for swapchain queue
634 // If one is needed, the client will need its own setup code
635 const VkDeviceQueueCreateInfo queueInfo[2] = {
636 {
637 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
638 nullptr, // pNext
639 0, // VkDeviceQueueCreateFlags
640 graphicsQueueIndex, // queueFamilyIndex
641 1, // queueCount
642 queuePriorities, // pQueuePriorities
643 },
644 {
645 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
646 nullptr, // pNext
647 0, // VkDeviceQueueCreateFlags
648 presentQueueIndex, // queueFamilyIndex
649 1, // queueCount
650 queuePriorities, // pQueuePriorities
651 }
652 };
653 uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;
654
655 const VkDeviceCreateInfo deviceInfo = {
Greg Daniela0651ac2018-08-08 09:23:18 -0400656 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
657 pointerToFeatures, // pNext
658 0, // VkDeviceCreateFlags
659 queueInfoCount, // queueCreateInfoCount
660 queueInfo, // pQueueCreateInfos
661 (uint32_t) deviceLayerNames.count(), // layerCount
662 deviceLayerNames.begin(), // ppEnabledLayerNames
663 (uint32_t) deviceExtensionNames.count(), // extensionCount
664 deviceExtensionNames.begin(), // ppEnabledExtensionNames
665 pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
Greg Danielf730c182018-07-02 20:15:37 +0000666 };
667
Ben Wagner7ad9b962019-02-12 11:14:47 -0500668 {
669#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
670 // skia:8712
671 __lsan::ScopedDisabler lsanDisabler;
672#endif
673 err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
674 }
Greg Danielf730c182018-07-02 20:15:37 +0000675 if (err) {
676 SkDebugf("CreateDevice failed: %d\n", err);
Greg Daniel37329b32018-07-02 20:16:44 +0000677 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000678 return false;
679 }
680
Greg Danielf730c182018-07-02 20:15:37 +0000681 VkQueue queue;
682 grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
683
684 ctx->fInstance = inst;
685 ctx->fPhysicalDevice = physDev;
686 ctx->fDevice = device;
687 ctx->fQueue = queue;
688 ctx->fGraphicsQueueIndex = graphicsQueueIndex;
Greg Daniel41f0e282019-01-28 13:15:05 -0500689 ctx->fMaxAPIVersion = apiVersion;
Greg Daniel98bffae2018-08-01 13:25:41 -0400690 ctx->fVkExtensions = extensions;
Greg Daniela0651ac2018-08-08 09:23:18 -0400691 ctx->fDeviceFeatures2 = features;
Greg Danielc8cd45a2018-07-12 10:02:37 -0400692 ctx->fGetProc = getProc;
Greg Danielf730c182018-07-02 20:15:37 +0000693 ctx->fOwnsInstanceAndDevice = false;
694
695 return true;
Greg Danielf730c182018-07-02 20:15:37 +0000696}
697
Greg Daniela0651ac2018-08-08 09:23:18 -0400698void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
699 // All Vulkan structs that could be part of the features chain will start with the
700 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
701 // so we can get access to the pNext for the next struct.
702 struct CommonVulkanHeader {
703 VkStructureType sType;
704 void* pNext;
705 };
706
707 void* pNext = features->pNext;
708 while (pNext) {
709 void* current = pNext;
710 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
711 sk_free(current);
712 }
713}
714
Greg Daniel35970ec2017-11-10 10:03:05 -0500715}
716
717#endif