/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "VkTestUtils.h"

#ifdef SK_VULKAN

#include "SkAutoMalloc.h"
#include "vk/GrVkBackendContext.h"
#include "vk/GrVkExtensions.h"
#include "../ports/SkOSLibrary.h"

namespace sk_gpu_test {

bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
                                      PFN_vkGetDeviceProcAddr* devProc) {
#ifdef SK_MOLTENVK
    // MoltenVK is a statically linked framework, so there is no Vulkan library to load.
    *instProc = &vkGetInstanceProcAddr;
    *devProc = &vkGetDeviceProcAddr;
    return true;
#else
    static void* vkLib = nullptr;
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
    static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
    if (!vkLib) {
#if defined _WIN32
        vkLib = DynamicLoadLibrary("vulkan-1.dll");
#else
        vkLib = DynamicLoadLibrary("libvulkan.so");
#endif
        if (!vkLib) {
            return false;
        }
        localInstProc = (PFN_vkGetInstanceProcAddr) GetProcedureAddress(vkLib,
                                                                        "vkGetInstanceProcAddr");
        localDevProc = (PFN_vkGetDeviceProcAddr) GetProcedureAddress(vkLib,
                                                                     "vkGetDeviceProcAddr");
    }
    if (!localInstProc || !localDevProc) {
        return false;
    }
    *instProc = localInstProc;
    *devProc = localDevProc;
    return true;
#endif
}
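
// A minimal caller sketch (illustrative only, not part of this file): load the library, then
// wrap the two entry points in a GrVkGetProc for the helpers below. The lambda signature
// assumes the GrVkGetProc alias declared in GrVkBackendContext.h.
//
//     PFN_vkGetInstanceProcAddr instProc;
//     PFN_vkGetDeviceProcAddr devProc;
//     if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) {
//         return false;
//     }
//     auto getProc = [instProc, devProc](const char* name, VkInstance instance, VkDevice device) {
//         return device != VK_NULL_HANDLE ? devProc(device, name) : instProc(instance, name);
//     };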

////////////////////////////////////////////////////////////////////////////////
// Helper code to set up Vulkan context objects

#ifdef SK_ENABLE_VK_LAYERS
const char* kDebugLayerNames[] = {
    // elements of VK_LAYER_LUNARG_standard_validation
    "VK_LAYER_GOOGLE_threading",
    "VK_LAYER_LUNARG_parameter_validation",
    "VK_LAYER_LUNARG_object_tracker",
    "VK_LAYER_LUNARG_core_validation",
    "VK_LAYER_GOOGLE_unique_objects",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};

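// Vulkan packs an API version as (major << 22) | (minor << 12) | patch, so shifting right and
// then left by 12 bits below simply clears the patch field, leaving major and minor to compare.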
static uint32_t remove_patch_version(uint32_t specVersion) {
    return (specVersion >> 12) << 12;
}

// Returns the index into layers array for the layer we want. Returns -1 if not supported.
static int should_include_debug_layer(const char* layerName,
                                      uint32_t layerCount, VkLayerProperties* layers,
                                      uint32_t version) {
    for (uint32_t i = 0; i < layerCount; ++i) {
        if (!strcmp(layerName, layers[i].layerName)) {
            // Since the layers intercept the Vulkan calls and forward them on, we need to make
            // sure the layer was written against a version that isn't older than the version of
            // Vulkan we're using so that it has all the API entry points.
            if (version <= remove_patch_version(layers[i].specVersion)) {
                return i;
            }
            return -1;
        }
    }
    return -1;
}

VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
    VkDebugReportFlagsEXT       flags,
    VkDebugReportObjectTypeEXT  objectType,
    uint64_t                    object,
    size_t                      location,
    int32_t                     messageCode,
    const char*                 pLayerPrefix,
    const char*                 pMessage,
    void*                       pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        // There is currently a bug in the spec which doesn't allow
        // VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT as a pNext
        // struct in VkDeviceCreateInfo. So we ignore that warning since it is wrong.
        if (!strstr(pMessage,
                    "pCreateInfo->pNext chain includes a structure with unexpected VkStructureType "
                    "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT")) {
            SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        }
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

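// GET_PROC_LOCAL(F, inst, device) declares a local function pointer named F; for example,
// GET_PROC_LOCAL(EnumerateInstanceLayerProperties, inst, dev) expands roughly to
//   PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties =
//       (PFN_vkEnumerateInstanceLayerProperties) getProc("vkEnumerateInstanceLayerProperties", inst, dev);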
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)

static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
                                                uint32_t specVersion,
                                                SkTArray<VkExtensionProperties>* instanceExtensions,
                                                SkTArray<VkLayerProperties>* instanceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);

    if (!EnumerateInstanceExtensionProperties ||
        !EnumerateInstanceLayerProperties) {
        return false;
    }

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers
    res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        instanceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = instanceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
                                              VkInstance inst, VkPhysicalDevice physDev,
                                              SkTArray<VkExtensionProperties>* deviceExtensions,
                                              SkTArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        deviceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = deviceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

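// The ACQUIRE_VK_PROC* macros below expect certain names to exist in the calling scope: all of
// them use a local getProc, and ACQUIRE_VK_PROC additionally uses the inst, debugCallback, and
// hasDebugExtension locals from CreateVkBackendContext when it bails out on failure.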
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

#define ACQUIRE_VK_PROC(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            if (device != VK_NULL_HANDLE) { \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
            } \
            return false; \
        } \
    } while (0)

#define ACQUIRE_VK_PROC_LOCAL(name, instance, device) \
    PFN_vk##name grVk##name = \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do { \
        if (grVk##name == nullptr) { \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name); \
            return; \
        } \
    } while (0)

static void destroy_instance(GrVkGetProc getProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
    grVkDestroyInstance(inst, nullptr);
}

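// setup_extension_features builds the VkPhysicalDeviceFeatures2::pNext chain incrementally:
// tailPNext always points at the pNext field of the most recently appended struct, so each
// newly allocated feature struct is linked onto the end of the chain before features are queried.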
static void setup_extension_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                                     uint32_t physDeviceVersion, GrVkExtensions* extensions,
                                     VkPhysicalDeviceFeatures2* features) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Setup all extension feature structs we may want to use.

    void** tailPNext = &features->pNext;

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    // If we want to disable any extension features do so here.
}

bool CreateVkBackendContext(GrVkGetProc getProc,
                            GrVkBackendContext* ctx,
                            GrVkExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent) {
    VkResult err;

    ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));

    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        instanceVersion,                    // apiVersion
    };

    SkTArray<VkLayerProperties> instanceLayers;
    SkTArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    SkTArray<const char*> instanceLayerNames;
    SkTArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.count(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.count(); ++i) {
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6)) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.count(),     // enabledLayerCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.count(), // enabledExtensionCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Just returning the first physical device instead of getting the whole array.
    // TODO: find best match for our needs
    gpuCount = 1;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    int physDeviceVersion = physDeviceProperties.apiVersion;

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up making a single queue for graphics since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    SkTArray<VkLayerProperties> deviceLayers;
    SkTArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkTArray<const char*> deviceLayerNames;
    SkTArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.count(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }
    for (int i = 0; i < deviceExtensions.count(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers and
        // are often missing dependency requirements for other extensions. Additionally, these are
        // often left behind in the driver even after they've been promoted to real extensions.
        if (strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {
            deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.count(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.count(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        setup_extension_features(getProc, inst, physDev, physDeviceVersion, extensions, features);
        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of the ppEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // this looks like it would slow things down,
    // and we can't depend on it on all platforms
    deviceFeatures->robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for a swapchain queue.
    // If one is needed, the client will need its own setup code.
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.count(),         // enabledLayerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.count(),     // enabledExtensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // ppEnabledFeatures
    };

    err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fInstanceVersion = instanceVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fOwnsInstanceAndDevice = false;

    return true;
}
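
// A rough end-to-end usage sketch (illustrative only; real callers live in the Skia test
// harnesses). presentQueueIndexPtr and canPresent may be nullptr when no present queue is
// needed, and the features chain allocated here must be released with FreeVulkanFeaturesStructs.
//
//     GrVkBackendContext ctx;
//     GrVkExtensions extensions;
//     VkPhysicalDeviceFeatures2 features;
//     VkDebugReportCallbackEXT debugCallback = VK_NULL_HANDLE;
//     if (sk_gpu_test::CreateVkBackendContext(getProc, &ctx, &extensions, &features,
//                                             &debugCallback, nullptr, nullptr)) {
//         // ... use ctx, e.g. to make a GrContext ...
//         sk_gpu_test::FreeVulkanFeaturesStructs(&features);
//     }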

void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void*           pNext;
    };

    void* pNext = features->pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        sk_free(current);
    }
}

}

#endif