blob: c1ea41b3eb6455a477a792c8af6943b2cc46b050 [file] [log] [blame]
Greg Daniel35970ec2017-11-10 10:03:05 -05001/*
2 * Copyright 2017 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
Mike Kleinc0bd9f92019-04-23 12:05:21 -05008#include "tools/gpu/vk/VkTestUtils.h"
Greg Daniel35970ec2017-11-10 10:03:05 -05009
10#ifdef SK_VULKAN
11
Hal Canary48cd11f2019-05-22 09:57:18 -040012#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
13 #if defined _WIN32
14 #define SK_GPU_TOOLS_VK_LIBRARY_NAME "vulkan-1.dll"
15 #else
16 #define SK_GPU_TOOLS_VK_LIBRARY_NAME "libvulkan.so"
17 #endif
18#endif
19
#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkAutoMalloc.h"
#include "src/ports/SkOSLibrary.h"

#include <memory>
Greg Daniel35970ec2017-11-10 10:03:05 -050024
Ben Wagner7ad9b962019-02-12 11:14:47 -050025#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
26#include <sanitizer/lsan_interface.h>
27#endif
28
Greg Daniel35970ec2017-11-10 10:03:05 -050029namespace sk_gpu_test {
30
// Loads the Vulkan shared library (once per process) and returns the two bootstrap
// entry points, vkGetInstanceProcAddr and vkGetDeviceProcAddr, through the out-params.
// Returns false if the library or either symbol cannot be resolved.
bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
                                      PFN_vkGetDeviceProcAddr* devProc) {
#ifdef SK_MOLTENVK
    // MoltenVK is a statically linked framework, so there is no Vulkan library to load.
    *instProc = &vkGetInstanceProcAddr;
    *devProc = &vkGetDeviceProcAddr;
    return true;
#else
    // Library handle and proc addresses are cached in function-local statics so repeated
    // calls do not reload the library.
    // NOTE(review): the lazy-init writes below are not synchronized — presumably the first
    // call happens before any concurrent use; confirm against callers.
    static void* vkLib = nullptr;
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
    static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
    if (!vkLib) {
        vkLib = DynamicLoadLibrary(SK_GPU_TOOLS_VK_LIBRARY_NAME);
        if (!vkLib) {
            return false;
        }
        localInstProc = (PFN_vkGetInstanceProcAddr) GetProcedureAddress(vkLib,
                                                                       "vkGetInstanceProcAddr");
        localDevProc = (PFN_vkGetDeviceProcAddr) GetProcedureAddress(vkLib,
                                                                    "vkGetDeviceProcAddr");
    }
    // If a previous (or this) load failed to resolve either symbol, report failure.
    if (!localInstProc || !localDevProc) {
        return false;
    }
    *instProc = localInstProc;
    *devProc = localDevProc;
    return true;
#endif
}
Greg Danielf730c182018-07-02 20:15:37 +000060
61////////////////////////////////////////////////////////////////////////////////
62// Helper code to set up Vulkan context objects
63
64#ifdef SK_ENABLE_VK_LAYERS
// Validation layers requested when SK_ENABLE_VK_LAYERS is defined. The commented-out
// entries are extra utility layers that can be enabled by hand while debugging.
const char* kDebugLayerNames[] = {
    // elements of VK_LAYER_LUNARG_standard_validation
    "VK_LAYER_GOOGLE_threading",
    "VK_LAYER_LUNARG_parameter_validation",
    "VK_LAYER_LUNARG_object_tracker",
    "VK_LAYER_LUNARG_core_validation",
    "VK_LAYER_GOOGLE_unique_objects",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};
Greg Danielf730c182018-07-02 20:15:37 +000077
// Clears the low 12 bits (the patch field) of a packed Vulkan version number,
// leaving only the major and minor components for comparison purposes.
static uint32_t remove_patch_version(uint32_t specVersion) {
    return specVersion & ~uint32_t{0xFFF};
}
81
82// Returns the index into layers array for the layer we want. Returns -1 if not supported.
83static int should_include_debug_layer(const char* layerName,
84 uint32_t layerCount, VkLayerProperties* layers,
85 uint32_t version) {
86 for (uint32_t i = 0; i < layerCount; ++i) {
87 if (!strcmp(layerName, layers[i].layerName)) {
88 // Since the layers intercept the vulkan calls and forward them on, we need to make sure
89 // layer was written against a version that isn't older than the version of Vulkan we're
90 // using so that it has all the api entry points.
91 if (version <= remove_patch_version(layers[i].specVersion)) {
92 return i;
93 }
94 return -1;
Greg Daniel98bffae2018-08-01 13:25:41 -040095 }
Greg Danielac616c82018-08-29 15:56:26 -040096
Greg Danielf730c182018-07-02 20:15:37 +000097 }
Greg Danielac616c82018-08-29 15:56:26 -040098 return -1;
Greg Daniel98bffae2018-08-01 13:25:41 -040099}
Greg Daniel92aef4b2018-08-02 13:55:49 -0400100
Greg Daniel37329b32018-07-02 20:16:44 +0000101VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
102 VkDebugReportFlagsEXT flags,
103 VkDebugReportObjectTypeEXT objectType,
104 uint64_t object,
105 size_t location,
106 int32_t messageCode,
107 const char* pLayerPrefix,
108 const char* pMessage,
109 void* pUserData) {
110 if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
111 SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
112 return VK_TRUE; // skip further layers
113 } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
Greg Danielac616c82018-08-29 15:56:26 -0400114 // There is currently a bug in the spec which doesn't have
115 // VK_STRUCTURE_TYPE_BLEND_OPERATION_ADVANCED_FEATURES_EXT as an allowable pNext struct in
116 // VkDeviceCreateInfo. So we ignore that warning since it is wrong.
117 if (!strstr(pMessage,
118 "pCreateInfo->pNext chain includes a structure with unexpected VkStructureType "
119 "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT")) {
120 SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
121 }
Greg Daniel37329b32018-07-02 20:16:44 +0000122 } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
123 SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
124 } else {
125 SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
126 }
127 return VK_FALSE;
128}
129#endif
130
// Declares a local PFN_vkF named F, looked up through a `getProc` visible at the use site.
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)
132
Greg Daniel98bffae2018-08-01 13:25:41 -0400133static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
134 uint32_t specVersion,
135 SkTArray<VkExtensionProperties>* instanceExtensions,
136 SkTArray<VkLayerProperties>* instanceLayers) {
137 if (getProc == nullptr) {
138 return false;
139 }
140
141 GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
142 GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
143
144 if (!EnumerateInstanceExtensionProperties ||
145 !EnumerateInstanceLayerProperties) {
146 return false;
147 }
148
149 VkResult res;
150 uint32_t layerCount = 0;
151#ifdef SK_ENABLE_VK_LAYERS
152 // instance layers
153 res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
154 if (VK_SUCCESS != res) {
155 return false;
156 }
157 VkLayerProperties* layers = new VkLayerProperties[layerCount];
158 res = EnumerateInstanceLayerProperties(&layerCount, layers);
159 if (VK_SUCCESS != res) {
160 delete[] layers;
161 return false;
162 }
163
164 uint32_t nonPatchVersion = remove_patch_version(specVersion);
Greg Danielac616c82018-08-29 15:56:26 -0400165 for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
166 int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
167 nonPatchVersion);
168 if (idx != -1) {
169 instanceLayers->push_back() = layers[idx];
Greg Daniel98bffae2018-08-01 13:25:41 -0400170 }
171 }
172 delete[] layers;
173#endif
174
175 // instance extensions
176 // via Vulkan implementation and implicitly enabled layers
177 uint32_t extensionCount = 0;
178 res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
179 if (VK_SUCCESS != res) {
180 return false;
181 }
182 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
183 res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
184 if (VK_SUCCESS != res) {
185 delete[] extensions;
186 return false;
187 }
188 for (uint32_t i = 0; i < extensionCount; ++i) {
189 instanceExtensions->push_back() = extensions[i];
190 }
191 delete [] extensions;
192
193 // via explicitly enabled layers
194 layerCount = instanceLayers->count();
195 for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
196 uint32_t extensionCount = 0;
197 res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
198 &extensionCount, nullptr);
199 if (VK_SUCCESS != res) {
200 return false;
201 }
202 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
203 res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
204 &extensionCount, extensions);
205 if (VK_SUCCESS != res) {
206 delete[] extensions;
207 return false;
208 }
209 for (uint32_t i = 0; i < extensionCount; ++i) {
210 instanceExtensions->push_back() = extensions[i];
211 }
212 delete[] extensions;
213 }
214
215 return true;
216}
217
218static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
219 VkInstance inst, VkPhysicalDevice physDev,
220 SkTArray<VkExtensionProperties>* deviceExtensions,
221 SkTArray<VkLayerProperties>* deviceLayers) {
222 if (getProc == nullptr) {
223 return false;
224 }
225
226 GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
227 GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);
228
229 if (!EnumerateDeviceExtensionProperties ||
230 !EnumerateDeviceLayerProperties) {
231 return false;
232 }
233
234 VkResult res;
235 // device layers
236 uint32_t layerCount = 0;
237#ifdef SK_ENABLE_VK_LAYERS
238 res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
239 if (VK_SUCCESS != res) {
240 return false;
241 }
242 VkLayerProperties* layers = new VkLayerProperties[layerCount];
243 res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
244 if (VK_SUCCESS != res) {
245 delete[] layers;
246 return false;
247 }
248
249 uint32_t nonPatchVersion = remove_patch_version(specVersion);
Greg Danielac616c82018-08-29 15:56:26 -0400250 for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
251 int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
252 nonPatchVersion);
253 if (idx != -1) {
254 deviceLayers->push_back() = layers[idx];
Greg Daniel98bffae2018-08-01 13:25:41 -0400255 }
256 }
257 delete[] layers;
258#endif
259
260 // device extensions
261 // via Vulkan implementation and implicitly enabled layers
262 uint32_t extensionCount = 0;
263 res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
264 if (VK_SUCCESS != res) {
265 return false;
266 }
267 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
268 res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
269 if (VK_SUCCESS != res) {
270 delete[] extensions;
271 return false;
272 }
273 for (uint32_t i = 0; i < extensionCount; ++i) {
274 deviceExtensions->push_back() = extensions[i];
275 }
276 delete[] extensions;
277
278 // via explicitly enabled layers
279 layerCount = deviceLayers->count();
280 for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
281 uint32_t extensionCount = 0;
282 res = EnumerateDeviceExtensionProperties(physDev,
283 (*deviceLayers)[layerIndex].layerName,
284 &extensionCount, nullptr);
285 if (VK_SUCCESS != res) {
286 return false;
287 }
288 VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
289 res = EnumerateDeviceExtensionProperties(physDev,
290 (*deviceLayers)[layerIndex].layerName,
291 &extensionCount, extensions);
292 if (VK_SUCCESS != res) {
293 delete[] extensions;
294 return false;
295 }
296 for (uint32_t i = 0; i < extensionCount; ++i) {
297 deviceExtensions->push_back() = extensions[i];
298 }
299 delete[] extensions;
300 }
301
302 return true;
303}
304
// Fetches vkName via getProc and binds it to a local named grVkName. This NOCHECK
// variant performs no null check; the caller must verify the pointer itself.
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

// As above, but on a null result logs the failure, tears down the instance when a
// device-level proc was requested, and returns false from the enclosing function.
// Relies on getProc, inst, debugCallback, and hasDebugExtension being in scope.
#define ACQUIRE_VK_PROC(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (device != VK_NULL_HANDLE) { \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

// As above, for use in void-returning functions: logs and returns on failure with
// no teardown.
#define ACQUIRE_VK_PROC_LOCAL(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return; \
        } \
    } while (0)
Greg Daniel37329b32018-07-02 20:16:44 +0000330
// Destroys the VkInstance, first unregistering the debug report callback if one was
// installed (and resetting *debugCallback so it cannot be destroyed twice).
static void destroy_instance(GrVkGetProc getProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
    grVkDestroyInstance(inst, nullptr);
}
342
// Builds the pNext chain of extension feature structs hanging off |features| and then
// queries the physical device to populate them. Each struct is allocated with
// sk_malloc_throw; the chain is released later by FreeVulkanFeaturesStructs.
// Requires Vulkan 1.1 or VK_KHR_get_physical_device_properties2 (asserted below).
static void setup_extension_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                                     uint32_t physDeviceVersion, GrVkExtensions* extensions,
                                     VkPhysicalDeviceFeatures2* features) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.

    // tailPNext always points at the pNext slot of the last struct in the chain, so each
    // newly allocated struct is appended there and then becomes the new tail.
    void** tailPNext = &features->pNext;

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    // Populate the whole chain: core entry point on 1.1+, KHR extension entry point otherwise.
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    // If we want to disable any extension features do so here.
}
386
Greg Danield3e65aa2018-08-01 09:19:45 -0400387bool CreateVkBackendContext(GrVkGetProc getProc,
Greg Danielf730c182018-07-02 20:15:37 +0000388 GrVkBackendContext* ctx,
Greg Daniel98bffae2018-08-01 13:25:41 -0400389 GrVkExtensions* extensions,
Greg Daniela0651ac2018-08-08 09:23:18 -0400390 VkPhysicalDeviceFeatures2* features,
Greg Daniel37329b32018-07-02 20:16:44 +0000391 VkDebugReportCallbackEXT* debugCallback,
Greg Danielf730c182018-07-02 20:15:37 +0000392 uint32_t* presentQueueIndexPtr,
393 CanPresentFn canPresent) {
Greg Daniel92aef4b2018-08-02 13:55:49 -0400394 VkResult err;
395
396 ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
397 uint32_t instanceVersion = 0;
398 if (!grVkEnumerateInstanceVersion) {
399 instanceVersion = VK_MAKE_VERSION(1, 0, 0);
400 } else {
401 err = grVkEnumerateInstanceVersion(&instanceVersion);
402 if (err) {
403 SkDebugf("failed ot enumerate instance version. Err: %d\n", err);
404 return false;
405 }
406 }
407 SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
Greg Daniel41f0e282019-01-28 13:15:05 -0500408 uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
409 if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
410 // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
411 // instance version is 1.1 or higher, we can set the apiVersion to be whatever the highest
412 // api we may use in skia (technically it can be arbitrary). So for now we set it to 1.1
413 // since that is the highest vulkan version.
414 apiVersion = VK_MAKE_VERSION(1, 1, 0);
415 }
416
417 instanceVersion = SkTMin(instanceVersion, apiVersion);
Greg Daniel92aef4b2018-08-02 13:55:49 -0400418
Greg Danielf730c182018-07-02 20:15:37 +0000419 VkPhysicalDevice physDev;
420 VkDevice device;
421 VkInstance inst;
Greg Danielf730c182018-07-02 20:15:37 +0000422
423 const VkApplicationInfo app_info = {
424 VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
425 nullptr, // pNext
426 "vktest", // pApplicationName
427 0, // applicationVersion
428 "vktest", // pEngineName
429 0, // engineVerison
Greg Daniel41f0e282019-01-28 13:15:05 -0500430 apiVersion, // apiVersion
Greg Danielf730c182018-07-02 20:15:37 +0000431 };
432
Greg Daniel98bffae2018-08-01 13:25:41 -0400433 SkTArray<VkLayerProperties> instanceLayers;
434 SkTArray<VkExtensionProperties> instanceExtensions;
435
Greg Daniel92aef4b2018-08-02 13:55:49 -0400436 if (!init_instance_extensions_and_layers(getProc, instanceVersion,
Greg Daniel98bffae2018-08-01 13:25:41 -0400437 &instanceExtensions,
438 &instanceLayers)) {
439 return false;
440 }
Greg Danielf730c182018-07-02 20:15:37 +0000441
442 SkTArray<const char*> instanceLayerNames;
443 SkTArray<const char*> instanceExtensionNames;
Greg Daniel98bffae2018-08-01 13:25:41 -0400444 for (int i = 0; i < instanceLayers.count(); ++i) {
445 instanceLayerNames.push_back(instanceLayers[i].layerName);
446 }
447 for (int i = 0; i < instanceExtensions.count(); ++i) {
448 if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6)) {
449 instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
Greg Danielf730c182018-07-02 20:15:37 +0000450 }
451 }
Greg Danielf730c182018-07-02 20:15:37 +0000452
453 const VkInstanceCreateInfo instance_create = {
454 VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO, // sType
455 nullptr, // pNext
456 0, // flags
457 &app_info, // pApplicationInfo
458 (uint32_t) instanceLayerNames.count(), // enabledLayerNameCount
459 instanceLayerNames.begin(), // ppEnabledLayerNames
460 (uint32_t) instanceExtensionNames.count(), // enabledExtensionNameCount
461 instanceExtensionNames.begin(), // ppEnabledExtensionNames
462 };
463
Greg Daniel98bffae2018-08-01 13:25:41 -0400464 bool hasDebugExtension = false;
465
Greg Danielf730c182018-07-02 20:15:37 +0000466 ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
467 err = grVkCreateInstance(&instance_create, nullptr, &inst);
468 if (err < 0) {
469 SkDebugf("vkCreateInstance failed: %d\n", err);
470 return false;
471 }
472
Greg Daniel37329b32018-07-02 20:16:44 +0000473#ifdef SK_ENABLE_VK_LAYERS
474 *debugCallback = VK_NULL_HANDLE;
475 for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
476 if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
477 hasDebugExtension = true;
478 }
479 }
480 if (hasDebugExtension) {
481 // Setup callback creation information
482 VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
483 callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
484 callbackCreateInfo.pNext = nullptr;
485 callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
486 VK_DEBUG_REPORT_WARNING_BIT_EXT |
487 // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
488 // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
489 VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
490 callbackCreateInfo.pfnCallback = &DebugReportCallback;
491 callbackCreateInfo.pUserData = nullptr;
492
493 ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
494 // Register the callback
495 grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
496 }
497#endif
498
Greg Danielf730c182018-07-02 20:15:37 +0000499 ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
Greg Daniel92aef4b2018-08-02 13:55:49 -0400500 ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
Greg Danielf730c182018-07-02 20:15:37 +0000501 ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
502 ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
503 ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
504 ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
505 ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
506 ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);
507
508 uint32_t gpuCount;
509 err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
510 if (err) {
511 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
Greg Daniel37329b32018-07-02 20:16:44 +0000512 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000513 return false;
514 }
515 if (!gpuCount) {
516 SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000517 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000518 return false;
519 }
520 // Just returning the first physical device instead of getting the whole array.
521 // TODO: find best match for our needs
522 gpuCount = 1;
523 err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
524 // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
525 if (err && VK_INCOMPLETE != err) {
526 SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
Greg Daniel37329b32018-07-02 20:16:44 +0000527 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000528 return false;
529 }
530
Greg Daniel92aef4b2018-08-02 13:55:49 -0400531 VkPhysicalDeviceProperties physDeviceProperties;
532 grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
Greg Daniel41f0e282019-01-28 13:15:05 -0500533 int physDeviceVersion = SkTMin(physDeviceProperties.apiVersion, apiVersion);
Greg Daniel92aef4b2018-08-02 13:55:49 -0400534
Greg Danielf730c182018-07-02 20:15:37 +0000535 // query to get the initial queue props size
536 uint32_t queueCount;
537 grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
538 if (!queueCount) {
539 SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000540 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000541 return false;
542 }
543
544 SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
545 // now get the actual queue props
546 VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();
547
548 grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);
549
550 // iterate to find the graphics queue
551 uint32_t graphicsQueueIndex = queueCount;
552 for (uint32_t i = 0; i < queueCount; i++) {
553 if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
554 graphicsQueueIndex = i;
555 break;
556 }
557 }
558 if (graphicsQueueIndex == queueCount) {
559 SkDebugf("Could not find any supported graphics queues.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000560 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000561 return false;
562 }
563
564 // iterate to find the present queue, if needed
565 uint32_t presentQueueIndex = queueCount;
566 if (presentQueueIndexPtr && canPresent) {
567 for (uint32_t i = 0; i < queueCount; i++) {
568 if (canPresent(inst, physDev, i)) {
569 presentQueueIndex = i;
570 break;
571 }
572 }
573 if (presentQueueIndex == queueCount) {
574 SkDebugf("Could not find any supported present queues.\n");
Greg Daniel37329b32018-07-02 20:16:44 +0000575 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000576 return false;
577 }
578 *presentQueueIndexPtr = presentQueueIndex;
579 } else {
580 // Just setting this so we end up make a single queue for graphics since there was no
581 // request for a present queue.
582 presentQueueIndex = graphicsQueueIndex;
583 }
584
Greg Daniel98bffae2018-08-01 13:25:41 -0400585 SkTArray<VkLayerProperties> deviceLayers;
586 SkTArray<VkExtensionProperties> deviceExtensions;
Greg Daniel92aef4b2018-08-02 13:55:49 -0400587 if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
Greg Daniel98bffae2018-08-01 13:25:41 -0400588 inst, physDev,
589 &deviceExtensions,
590 &deviceLayers)) {
591 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
592 return false;
593 }
Greg Danielf730c182018-07-02 20:15:37 +0000594
595 SkTArray<const char*> deviceLayerNames;
596 SkTArray<const char*> deviceExtensionNames;
Greg Daniel98bffae2018-08-01 13:25:41 -0400597 for (int i = 0; i < deviceLayers.count(); ++i) {
598 deviceLayerNames.push_back(deviceLayers[i].layerName);
Greg Danielf730c182018-07-02 20:15:37 +0000599 }
Greg Daniel98bffae2018-08-01 13:25:41 -0400600 for (int i = 0; i < deviceExtensions.count(); ++i) {
601 // Don't use experimental extensions since they typically don't work with debug layers and
602 // often are missing dependecy requirements for other extensions. Additionally, these are
603 // often left behind in the driver even after they've been promoted to real extensions.
604 if (strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
605 strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {
606 deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
607 }
Greg Danielf730c182018-07-02 20:15:37 +0000608 }
609
Greg Daniela0651ac2018-08-08 09:23:18 -0400610 extensions->init(getProc, inst, physDev,
611 (uint32_t) instanceExtensionNames.count(),
612 instanceExtensionNames.begin(),
613 (uint32_t) deviceExtensionNames.count(),
614 deviceExtensionNames.begin());
615
616 memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
617 features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
618 features->pNext = nullptr;
619
620 VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
621 void* pointerToFeatures = nullptr;
622 if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
623 extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
624 setup_extension_features(getProc, inst, physDev, physDeviceVersion, extensions, features);
625 // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
626 // the device creation will use that instead of the ppEnabledFeatures.
627 pointerToFeatures = features;
628 } else {
629 grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
630 }
631
Greg Danielf730c182018-07-02 20:15:37 +0000632 // this looks like it would slow things down,
633 // and we can't depend on it on all platforms
Greg Daniela0651ac2018-08-08 09:23:18 -0400634 deviceFeatures->robustBufferAccess = VK_FALSE;
Greg Danielf730c182018-07-02 20:15:37 +0000635
Greg Danielf730c182018-07-02 20:15:37 +0000636 float queuePriorities[1] = { 0.0 };
637 // Here we assume no need for swapchain queue
638 // If one is needed, the client will need its own setup code
639 const VkDeviceQueueCreateInfo queueInfo[2] = {
640 {
641 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
642 nullptr, // pNext
643 0, // VkDeviceQueueCreateFlags
644 graphicsQueueIndex, // queueFamilyIndex
645 1, // queueCount
646 queuePriorities, // pQueuePriorities
647 },
648 {
649 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
650 nullptr, // pNext
651 0, // VkDeviceQueueCreateFlags
652 presentQueueIndex, // queueFamilyIndex
653 1, // queueCount
654 queuePriorities, // pQueuePriorities
655 }
656 };
657 uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;
658
659 const VkDeviceCreateInfo deviceInfo = {
Greg Daniela0651ac2018-08-08 09:23:18 -0400660 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // sType
661 pointerToFeatures, // pNext
662 0, // VkDeviceCreateFlags
663 queueInfoCount, // queueCreateInfoCount
664 queueInfo, // pQueueCreateInfos
665 (uint32_t) deviceLayerNames.count(), // layerCount
666 deviceLayerNames.begin(), // ppEnabledLayerNames
667 (uint32_t) deviceExtensionNames.count(), // extensionCount
668 deviceExtensionNames.begin(), // ppEnabledExtensionNames
669 pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
Greg Danielf730c182018-07-02 20:15:37 +0000670 };
671
Ben Wagner7ad9b962019-02-12 11:14:47 -0500672 {
673#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
674 // skia:8712
675 __lsan::ScopedDisabler lsanDisabler;
676#endif
677 err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
678 }
Greg Danielf730c182018-07-02 20:15:37 +0000679 if (err) {
680 SkDebugf("CreateDevice failed: %d\n", err);
Greg Daniel37329b32018-07-02 20:16:44 +0000681 destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
Greg Danielf730c182018-07-02 20:15:37 +0000682 return false;
683 }
684
Greg Danielf730c182018-07-02 20:15:37 +0000685 VkQueue queue;
686 grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
687
688 ctx->fInstance = inst;
689 ctx->fPhysicalDevice = physDev;
690 ctx->fDevice = device;
691 ctx->fQueue = queue;
692 ctx->fGraphicsQueueIndex = graphicsQueueIndex;
Greg Daniel41f0e282019-01-28 13:15:05 -0500693 ctx->fMaxAPIVersion = apiVersion;
Greg Daniel98bffae2018-08-01 13:25:41 -0400694 ctx->fVkExtensions = extensions;
Greg Daniela0651ac2018-08-08 09:23:18 -0400695 ctx->fDeviceFeatures2 = features;
Greg Danielc8cd45a2018-07-12 10:02:37 -0400696 ctx->fGetProc = getProc;
Greg Danielf730c182018-07-02 20:15:37 +0000697 ctx->fOwnsInstanceAndDevice = false;
698
699 return true;
Greg Danielf730c182018-07-02 20:15:37 +0000700}
701
Greg Daniela0651ac2018-08-08 09:23:18 -0400702void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
703 // All Vulkan structs that could be part of the features chain will start with the
704 // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
705 // so we can get access to the pNext for the next struct.
706 struct CommonVulkanHeader {
707 VkStructureType sType;
708 void* pNext;
709 };
710
711 void* pNext = features->pNext;
712 while (pNext) {
713 void* current = pNext;
714 pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
715 sk_free(current);
716 }
717}
718
Greg Daniel35970ec2017-11-10 10:03:05 -0500719}
720
721#endif