/*
 * Copyright 2017 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "tools/gpu/vk/VkTestUtils.h"

#ifdef SK_VULKAN

#ifndef SK_GPU_TOOLS_VK_LIBRARY_NAME
    #if defined _WIN32
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME "vulkan-1.dll"
    #else
        #define SK_GPU_TOOLS_VK_LIBRARY_NAME "libvulkan.so"
    #endif
#endif

#include "include/gpu/vk/GrVkBackendContext.h"
#include "include/gpu/vk/GrVkExtensions.h"
#include "src/core/SkAutoMalloc.h"
#include "src/ports/SkOSLibrary.h"

#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
#include <sanitizer/lsan_interface.h>
#endif

namespace sk_gpu_test {

bool LoadVkLibraryAndGetProcAddrFuncs(PFN_vkGetInstanceProcAddr* instProc,
                                      PFN_vkGetDeviceProcAddr* devProc) {
#ifdef SK_MOLTENVK
    // MoltenVK is a statically linked framework, so there is no Vulkan library to load.
    *instProc = &vkGetInstanceProcAddr;
    *devProc = &vkGetDeviceProcAddr;
    return true;
#else
    static void* vkLib = nullptr;
    static PFN_vkGetInstanceProcAddr localInstProc = nullptr;
    static PFN_vkGetDeviceProcAddr localDevProc = nullptr;
    if (!vkLib) {
        vkLib = DynamicLoadLibrary(SK_GPU_TOOLS_VK_LIBRARY_NAME);
        if (!vkLib) {
            return false;
        }
        localInstProc = (PFN_vkGetInstanceProcAddr) GetProcedureAddress(vkLib,
                                                                        "vkGetInstanceProcAddr");
        localDevProc = (PFN_vkGetDeviceProcAddr) GetProcedureAddress(vkLib,
                                                                     "vkGetDeviceProcAddr");
    }
    if (!localInstProc || !localDevProc) {
        return false;
    }
    *instProc = localInstProc;
    *devProc = localDevProc;
    return true;
#endif
}
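
// A usage sketch (comment only, not compiled): a typical test harness resolves the two loader
// entry points once and then builds the GrVkGetProc that the helpers below expect. The wrapper
// lambda and surrounding names are illustrative assumptions, not part of this file's API.
//
//     PFN_vkGetInstanceProcAddr instProc;
//     PFN_vkGetDeviceProcAddr devProc;
//     if (!sk_gpu_test::LoadVkLibraryAndGetProcAddrFuncs(&instProc, &devProc)) {
//         return;  // no usable Vulkan loader on this system
//     }
//     auto getProc = [instProc, devProc](const char* name, VkInstance instance, VkDevice device) {
//         return (device != VK_NULL_HANDLE) ? devProc(device, name) : instProc(instance, name);
//     };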

////////////////////////////////////////////////////////////////////////////////
// Helper code to set up Vulkan context objects

#ifdef SK_ENABLE_VK_LAYERS
const char* kDebugLayerNames[] = {
    // elements of VK_LAYER_LUNARG_standard_validation
    "VK_LAYER_GOOGLE_threading",
    "VK_LAYER_LUNARG_parameter_validation",
    "VK_LAYER_LUNARG_object_tracker",
    "VK_LAYER_LUNARG_core_validation",
    "VK_LAYER_GOOGLE_unique_objects",
    // not included in standard_validation
    //"VK_LAYER_LUNARG_api_dump",
    //"VK_LAYER_LUNARG_vktrace",
    //"VK_LAYER_LUNARG_screenshot",
};

static uint32_t remove_patch_version(uint32_t specVersion) {
    return (specVersion >> 12) << 12;
}

// Returns the index into layers array for the layer we want. Returns -1 if not supported.
static int should_include_debug_layer(const char* layerName,
                                      uint32_t layerCount, VkLayerProperties* layers,
                                      uint32_t version) {
    for (uint32_t i = 0; i < layerCount; ++i) {
        if (!strcmp(layerName, layers[i].layerName)) {
            // Since the layers intercept the Vulkan calls and forward them on, we need to make
            // sure the layer was written against a version that isn't older than the version of
            // Vulkan we're using, so that it has all the API entry points.
            if (version <= remove_patch_version(layers[i].specVersion)) {
                return i;
            }
            return -1;
        }
    }
    return -1;
}

VKAPI_ATTR VkBool32 VKAPI_CALL DebugReportCallback(
        VkDebugReportFlagsEXT flags,
        VkDebugReportObjectTypeEXT objectType,
        uint64_t object,
        size_t location,
        int32_t messageCode,
        const char* pLayerPrefix,
        const char* pMessage,
        void* pUserData) {
    if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
        SkDebugf("Vulkan error [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        return VK_TRUE; // skip further layers
    } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
        // There is currently a bug in the spec which does not list
        // VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT as an allowable
        // pNext struct in VkDeviceCreateInfo, so we ignore that warning since it is wrong.
        if (!strstr(pMessage,
                    "pCreateInfo->pNext chain includes a structure with unexpected VkStructureType "
                    "VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT")) {
            SkDebugf("Vulkan warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
        }
    } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
        SkDebugf("Vulkan perf warning [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    } else {
        SkDebugf("Vulkan info/debug [%s]: code: %d: %s\n", pLayerPrefix, messageCode, pMessage);
    }
    return VK_FALSE;
}
#endif

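// GET_PROC_LOCAL declares a local function pointer named F (of type PFN_vkF) and initializes it by
// looking up "vkF" through the caller-supplied getProc, scoped to the given instance/device
// (VK_NULL_HANDLE for global-level lookups).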
#define GET_PROC_LOCAL(F, inst, device) PFN_vk ## F F = (PFN_vk ## F) getProc("vk" #F, inst, device)

static bool init_instance_extensions_and_layers(GrVkGetProc getProc,
                                                uint32_t specVersion,
                                                SkTArray<VkExtensionProperties>* instanceExtensions,
                                                SkTArray<VkLayerProperties>* instanceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateInstanceExtensionProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateInstanceLayerProperties, VK_NULL_HANDLE, VK_NULL_HANDLE);

    if (!EnumerateInstanceExtensionProperties ||
        !EnumerateInstanceLayerProperties) {
        return false;
    }

    VkResult res;
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    // instance layers
    res = EnumerateInstanceLayerProperties(&layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateInstanceLayerProperties(&layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            instanceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // instance extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        instanceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = instanceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateInstanceExtensionProperties((*instanceLayers)[layerIndex].layerName,
                                                   &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            instanceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

static bool init_device_extensions_and_layers(GrVkGetProc getProc, uint32_t specVersion,
                                              VkInstance inst, VkPhysicalDevice physDev,
                                              SkTArray<VkExtensionProperties>* deviceExtensions,
                                              SkTArray<VkLayerProperties>* deviceLayers) {
    if (getProc == nullptr) {
        return false;
    }

    GET_PROC_LOCAL(EnumerateDeviceExtensionProperties, inst, VK_NULL_HANDLE);
    GET_PROC_LOCAL(EnumerateDeviceLayerProperties, inst, VK_NULL_HANDLE);

    if (!EnumerateDeviceExtensionProperties ||
        !EnumerateDeviceLayerProperties) {
        return false;
    }

    VkResult res;
    // device layers
    uint32_t layerCount = 0;
#ifdef SK_ENABLE_VK_LAYERS
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkLayerProperties* layers = new VkLayerProperties[layerCount];
    res = EnumerateDeviceLayerProperties(physDev, &layerCount, layers);
    if (VK_SUCCESS != res) {
        delete[] layers;
        return false;
    }

    uint32_t nonPatchVersion = remove_patch_version(specVersion);
    for (size_t i = 0; i < SK_ARRAY_COUNT(kDebugLayerNames); ++i) {
        int idx = should_include_debug_layer(kDebugLayerNames[i], layerCount, layers,
                                             nonPatchVersion);
        if (idx != -1) {
            deviceLayers->push_back() = layers[idx];
        }
    }
    delete[] layers;
#endif

    // device extensions
    // via Vulkan implementation and implicitly enabled layers
    uint32_t extensionCount = 0;
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, nullptr);
    if (VK_SUCCESS != res) {
        return false;
    }
    VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
    res = EnumerateDeviceExtensionProperties(physDev, nullptr, &extensionCount, extensions);
    if (VK_SUCCESS != res) {
        delete[] extensions;
        return false;
    }
    for (uint32_t i = 0; i < extensionCount; ++i) {
        deviceExtensions->push_back() = extensions[i];
    }
    delete[] extensions;

    // via explicitly enabled layers
    layerCount = deviceLayers->count();
    for (uint32_t layerIndex = 0; layerIndex < layerCount; ++layerIndex) {
        uint32_t extensionCount = 0;
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, nullptr);
        if (VK_SUCCESS != res) {
            return false;
        }
        VkExtensionProperties* extensions = new VkExtensionProperties[extensionCount];
        res = EnumerateDeviceExtensionProperties(physDev,
                                                 (*deviceLayers)[layerIndex].layerName,
                                                 &extensionCount, extensions);
        if (VK_SUCCESS != res) {
            delete[] extensions;
            return false;
        }
        for (uint32_t i = 0; i < extensionCount; ++i) {
            deviceExtensions->push_back() = extensions[i];
        }
        delete[] extensions;
    }

    return true;
}

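// The ACQUIRE_VK_PROC* macros below each resolve a function pointer named grVk<name> through
// getProc. The NOCHECK variant does no validation; ACQUIRE_VK_PROC logs and returns false from the
// enclosing function on failure (and, when invoked with a non-null device, also tears down the
// instance, so it assumes locals named inst, debugCallback, and hasDebugExtension are in scope);
// ACQUIRE_VK_PROC_LOCAL only logs and returns false.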
#define ACQUIRE_VK_PROC_NOCHECK(name, instance, device) \
    PFN_vk##name grVk##name = reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device))

#define ACQUIRE_VK_PROC(name, instance, device)                                    \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            if (device != VK_NULL_HANDLE) {                                        \
                destroy_instance(getProc, inst, debugCallback, hasDebugExtension); \
            }                                                                      \
            return false;                                                          \
        }                                                                          \
    } while (0)

#define ACQUIRE_VK_PROC_LOCAL(name, instance, device)                              \
    PFN_vk##name grVk##name =                                                      \
            reinterpret_cast<PFN_vk##name>(getProc("vk" #name, instance, device)); \
    do {                                                                           \
        if (grVk##name == nullptr) {                                               \
            SkDebugf("Function ptr for vk%s could not be acquired\n", #name);      \
            return false;                                                          \
        }                                                                          \
    } while (0)

static bool destroy_instance(GrVkGetProc getProc, VkInstance inst,
                             VkDebugReportCallbackEXT* debugCallback,
                             bool hasDebugExtension) {
    if (hasDebugExtension && *debugCallback != VK_NULL_HANDLE) {
        ACQUIRE_VK_PROC_LOCAL(DestroyDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        grVkDestroyDebugReportCallbackEXT(inst, *debugCallback, nullptr);
        *debugCallback = VK_NULL_HANDLE;
    }
    ACQUIRE_VK_PROC_LOCAL(DestroyInstance, inst, VK_NULL_HANDLE);
    grVkDestroyInstance(inst, nullptr);
    return true;
}

static bool setup_features(GrVkGetProc getProc, VkInstance inst, VkPhysicalDevice physDev,
                           uint32_t physDeviceVersion, GrVkExtensions* extensions,
                           VkPhysicalDeviceFeatures2* features, bool isProtected) {
    SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
             extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1));

    // Set up all the extension feature structs we may want to use.
    void** tailPNext = &features->pNext;

    // If |isProtected| is given, attach that first.
    VkPhysicalDeviceProtectedMemoryFeatures* protectedMemoryFeatures = nullptr;
    if (isProtected) {
        SkASSERT(physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0));
        protectedMemoryFeatures =
                (VkPhysicalDeviceProtectedMemoryFeatures*)sk_malloc_throw(
                        sizeof(VkPhysicalDeviceProtectedMemoryFeatures));
        protectedMemoryFeatures->sType =
                VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES;
        protectedMemoryFeatures->pNext = nullptr;
        *tailPNext = protectedMemoryFeatures;
        tailPNext = &protectedMemoryFeatures->pNext;
    }

    VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend = nullptr;
    if (extensions->hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME, 1)) {
        ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*) sk_malloc_throw(
                sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
        ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
        ycbcrFeature->pNext = nullptr;
        ycbcrFeature->samplerYcbcrConversion = VK_TRUE;
        *tailPNext = ycbcrFeature;
        tailPNext = &ycbcrFeature->pNext;
    }

    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2(physDev, features);
    } else {
        SkASSERT(extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
                                          1));
        ACQUIRE_VK_PROC_LOCAL(GetPhysicalDeviceFeatures2KHR, inst, VK_NULL_HANDLE);
        grVkGetPhysicalDeviceFeatures2KHR(physDev, features);
    }

    if (isProtected) {
        if (!protectedMemoryFeatures->protectedMemory) {
            return false;
        }
    }
    return true;
    // If we want to disable any extension features do so here.
}

bool CreateVkBackendContext(GrVkGetProc getProc,
                            GrVkBackendContext* ctx,
                            GrVkExtensions* extensions,
                            VkPhysicalDeviceFeatures2* features,
                            VkDebugReportCallbackEXT* debugCallback,
                            uint32_t* presentQueueIndexPtr,
                            CanPresentFn canPresent,
                            bool isProtected) {
    VkResult err;

    ACQUIRE_VK_PROC_NOCHECK(EnumerateInstanceVersion, VK_NULL_HANDLE, VK_NULL_HANDLE);
    uint32_t instanceVersion = 0;
    if (!grVkEnumerateInstanceVersion) {
        instanceVersion = VK_MAKE_VERSION(1, 0, 0);
    } else {
        err = grVkEnumerateInstanceVersion(&instanceVersion);
        if (err) {
            SkDebugf("failed to enumerate instance version. Err: %d\n", err);
            return false;
        }
    }
    SkASSERT(instanceVersion >= VK_MAKE_VERSION(1, 0, 0));
    if (isProtected && instanceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk instance version 1.1\n");
        return false;
    }

    uint32_t apiVersion = VK_MAKE_VERSION(1, 0, 0);
    if (instanceVersion >= VK_MAKE_VERSION(1, 1, 0)) {
        // If the instance version is 1.0 we must have the apiVersion also be 1.0. However, if the
        // instance version is 1.1 or higher, we can set the apiVersion to the highest API version
        // we may use in Skia (technically it can be arbitrary). So for now we set it to 1.1 since
        // that is the highest Vulkan version.
        apiVersion = VK_MAKE_VERSION(1, 1, 0);
    }

    instanceVersion = SkTMin(instanceVersion, apiVersion);

    VkPhysicalDevice physDev;
    VkDevice device;
    VkInstance inst;

    const VkApplicationInfo app_info = {
        VK_STRUCTURE_TYPE_APPLICATION_INFO, // sType
        nullptr,                            // pNext
        "vktest",                           // pApplicationName
        0,                                  // applicationVersion
        "vktest",                           // pEngineName
        0,                                  // engineVersion
        apiVersion,                         // apiVersion
    };

    SkTArray<VkLayerProperties> instanceLayers;
    SkTArray<VkExtensionProperties> instanceExtensions;

    if (!init_instance_extensions_and_layers(getProc, instanceVersion,
                                             &instanceExtensions,
                                             &instanceLayers)) {
        return false;
    }

    SkTArray<const char*> instanceLayerNames;
    SkTArray<const char*> instanceExtensionNames;
    for (int i = 0; i < instanceLayers.count(); ++i) {
        instanceLayerNames.push_back(instanceLayers[i].layerName);
    }
    for (int i = 0; i < instanceExtensions.count(); ++i) {
        if (strncmp(instanceExtensions[i].extensionName, "VK_KHX", 6)) {
            instanceExtensionNames.push_back(instanceExtensions[i].extensionName);
        }
    }

    const VkInstanceCreateInfo instance_create = {
        VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,    // sType
        nullptr,                                   // pNext
        0,                                         // flags
        &app_info,                                 // pApplicationInfo
        (uint32_t) instanceLayerNames.count(),     // enabledLayerCount
        instanceLayerNames.begin(),                // ppEnabledLayerNames
        (uint32_t) instanceExtensionNames.count(), // enabledExtensionCount
        instanceExtensionNames.begin(),            // ppEnabledExtensionNames
    };

    bool hasDebugExtension = false;

    ACQUIRE_VK_PROC(CreateInstance, VK_NULL_HANDLE, VK_NULL_HANDLE);
    err = grVkCreateInstance(&instance_create, nullptr, &inst);
    if (err < 0) {
        SkDebugf("vkCreateInstance failed: %d\n", err);
        return false;
    }

#ifdef SK_ENABLE_VK_LAYERS
    *debugCallback = VK_NULL_HANDLE;
    for (int i = 0; i < instanceExtensionNames.count() && !hasDebugExtension; ++i) {
        if (!strcmp(instanceExtensionNames[i], VK_EXT_DEBUG_REPORT_EXTENSION_NAME)) {
            hasDebugExtension = true;
        }
    }
    if (hasDebugExtension) {
        // Setup callback creation information
        VkDebugReportCallbackCreateInfoEXT callbackCreateInfo;
        callbackCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        callbackCreateInfo.pNext = nullptr;
        callbackCreateInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT |
                                   VK_DEBUG_REPORT_WARNING_BIT_EXT |
                                   // VK_DEBUG_REPORT_INFORMATION_BIT_EXT |
                                   // VK_DEBUG_REPORT_DEBUG_BIT_EXT |
                                   VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT;
        callbackCreateInfo.pfnCallback = &DebugReportCallback;
        callbackCreateInfo.pUserData = nullptr;

        ACQUIRE_VK_PROC(CreateDebugReportCallbackEXT, inst, VK_NULL_HANDLE);
        // Register the callback
        grVkCreateDebugReportCallbackEXT(inst, &callbackCreateInfo, nullptr, debugCallback);
    }
#endif

    ACQUIRE_VK_PROC(EnumeratePhysicalDevices, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceQueueFamilyProperties, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetPhysicalDeviceFeatures, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(CreateDevice, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(GetDeviceQueue, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DeviceWaitIdle, inst, VK_NULL_HANDLE);
    ACQUIRE_VK_PROC(DestroyDevice, inst, VK_NULL_HANDLE);

    uint32_t gpuCount;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, nullptr);
    if (err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    if (!gpuCount) {
        SkDebugf("vkEnumeratePhysicalDevices returned no supported devices.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }
    // Just returning the first physical device instead of getting the whole array.
    // TODO: find best match for our needs
    gpuCount = 1;
    err = grVkEnumeratePhysicalDevices(inst, &gpuCount, &physDev);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    if (err && VK_INCOMPLETE != err) {
        SkDebugf("vkEnumeratePhysicalDevices failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkPhysicalDeviceProperties physDeviceProperties;
    grVkGetPhysicalDeviceProperties(physDev, &physDeviceProperties);
    int physDeviceVersion = SkTMin(physDeviceProperties.apiVersion, apiVersion);

    if (isProtected && physDeviceVersion < VK_MAKE_VERSION(1, 1, 0)) {
        SkDebugf("protected requires vk physical device version 1.1\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // query to get the initial queue props size
    uint32_t queueCount;
    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, nullptr);
    if (!queueCount) {
        SkDebugf("vkGetPhysicalDeviceQueueFamilyProperties returned no queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkAutoMalloc queuePropsAlloc(queueCount * sizeof(VkQueueFamilyProperties));
    // now get the actual queue props
    VkQueueFamilyProperties* queueProps = (VkQueueFamilyProperties*)queuePropsAlloc.get();

    grVkGetPhysicalDeviceQueueFamilyProperties(physDev, &queueCount, queueProps);

    // iterate to find the graphics queue
    uint32_t graphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphicsQueueIndex = i;
            break;
        }
    }
    if (graphicsQueueIndex == queueCount) {
        SkDebugf("Could not find any supported graphics queues.\n");
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    // iterate to find the present queue, if needed
    uint32_t presentQueueIndex = queueCount;
    if (presentQueueIndexPtr && canPresent) {
        for (uint32_t i = 0; i < queueCount; i++) {
            if (canPresent(inst, physDev, i)) {
                presentQueueIndex = i;
                break;
            }
        }
        if (presentQueueIndex == queueCount) {
            SkDebugf("Could not find any supported present queues.\n");
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }
        *presentQueueIndexPtr = presentQueueIndex;
    } else {
        // Just setting this so we end up making a single queue for graphics since there was no
        // request for a present queue.
        presentQueueIndex = graphicsQueueIndex;
    }

    SkTArray<VkLayerProperties> deviceLayers;
    SkTArray<VkExtensionProperties> deviceExtensions;
    if (!init_device_extensions_and_layers(getProc, physDeviceVersion,
                                           inst, physDev,
                                           &deviceExtensions,
                                           &deviceLayers)) {
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    SkTArray<const char*> deviceLayerNames;
    SkTArray<const char*> deviceExtensionNames;
    for (int i = 0; i < deviceLayers.count(); ++i) {
        deviceLayerNames.push_back(deviceLayers[i].layerName);
    }
    for (int i = 0; i < deviceExtensions.count(); ++i) {
        // Don't use experimental extensions since they typically don't work with debug layers and
        // often are missing dependency requirements for other extensions. Additionally, these are
        // often left behind in the driver even after they've been promoted to real extensions.
        if (strncmp(deviceExtensions[i].extensionName, "VK_KHX", 6) &&
            strncmp(deviceExtensions[i].extensionName, "VK_NVX", 6)) {
            deviceExtensionNames.push_back(deviceExtensions[i].extensionName);
        }
    }

    extensions->init(getProc, inst, physDev,
                     (uint32_t) instanceExtensionNames.count(),
                     instanceExtensionNames.begin(),
                     (uint32_t) deviceExtensionNames.count(),
                     deviceExtensionNames.begin());

    memset(features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features->pNext = nullptr;

    VkPhysicalDeviceFeatures* deviceFeatures = &features->features;
    void* pointerToFeatures = nullptr;
    if (physDeviceVersion >= VK_MAKE_VERSION(1, 1, 0) ||
        extensions->hasExtension(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME, 1)) {
        if (!setup_features(getProc, inst, physDev, physDeviceVersion, extensions, features,
                            isProtected)) {
            destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
            return false;
        }

        // If we set the pNext of the VkDeviceCreateInfo to our VkPhysicalDeviceFeatures2 struct,
        // the device creation will use that instead of pEnabledFeatures.
        pointerToFeatures = features;
    } else {
        grVkGetPhysicalDeviceFeatures(physDev, deviceFeatures);
    }

    // This looks like it would slow things down, and we can't depend on it on all platforms.
    deviceFeatures->robustBufferAccess = VK_FALSE;

    VkDeviceQueueCreateFlags flags = isProtected ? VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT : 0;
    float queuePriorities[1] = { 0.0 };
    // Here we assume no need for swapchain queue
    // If one is needed, the client will need its own setup code
    const VkDeviceQueueCreateInfo queueInfo[2] = {
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            flags,                                      // VkDeviceQueueCreateFlags
            graphicsQueueIndex,                         // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        },
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // sType
            nullptr,                                    // pNext
            0,                                          // VkDeviceQueueCreateFlags
            presentQueueIndex,                          // queueFamilyIndex
            1,                                          // queueCount
            queuePriorities,                            // pQueuePriorities
        }
    };
    uint32_t queueInfoCount = (presentQueueIndex != graphicsQueueIndex) ? 2 : 1;

    const VkDeviceCreateInfo deviceInfo = {
        VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,        // sType
        pointerToFeatures,                           // pNext
        0,                                           // VkDeviceCreateFlags
        queueInfoCount,                              // queueCreateInfoCount
        queueInfo,                                   // pQueueCreateInfos
        (uint32_t) deviceLayerNames.count(),         // enabledLayerCount
        deviceLayerNames.begin(),                    // ppEnabledLayerNames
        (uint32_t) deviceExtensionNames.count(),     // enabledExtensionCount
        deviceExtensionNames.begin(),                // ppEnabledExtensionNames
        pointerToFeatures ? nullptr : deviceFeatures // pEnabledFeatures
    };

    {
#if defined(SK_ENABLE_SCOPED_LSAN_SUPPRESSIONS)
        // skia:8712
        __lsan::ScopedDisabler lsanDisabler;
#endif
        err = grVkCreateDevice(physDev, &deviceInfo, nullptr, &device);
    }
    if (err) {
        SkDebugf("CreateDevice failed: %d\n", err);
        destroy_instance(getProc, inst, debugCallback, hasDebugExtension);
        return false;
    }

    VkQueue queue;
    if (isProtected) {
        ACQUIRE_VK_PROC(GetDeviceQueue2, inst, device);
        SkASSERT(grVkGetDeviceQueue2 != nullptr);
        VkDeviceQueueInfo2 queue_info2 = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2, // sType
            nullptr,                               // pNext
            VK_DEVICE_QUEUE_CREATE_PROTECTED_BIT,  // flags
            graphicsQueueIndex,                    // queueFamilyIndex
            0                                      // queueIndex
        };
        grVkGetDeviceQueue2(device, &queue_info2, &queue);
    } else {
        grVkGetDeviceQueue(device, graphicsQueueIndex, 0, &queue);
    }

    ctx->fInstance = inst;
    ctx->fPhysicalDevice = physDev;
    ctx->fDevice = device;
    ctx->fQueue = queue;
    ctx->fGraphicsQueueIndex = graphicsQueueIndex;
    ctx->fMaxAPIVersion = apiVersion;
    ctx->fVkExtensions = extensions;
    ctx->fDeviceFeatures2 = features;
    ctx->fGetProc = getProc;
    ctx->fOwnsInstanceAndDevice = false;
    ctx->fProtectedContext = isProtected ? GrProtected::kYes : GrProtected::kNo;

    return true;
}
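
// Illustrative call sequence (comment-only sketch). The getProc below is assumed to have been
// built from the pointers returned by LoadVkLibraryAndGetProcAddrFuncs, as sketched above; the
// rest uses only declarations from this file.
//
//     GrVkBackendContext backendContext;
//     GrVkExtensions extensions;
//     VkPhysicalDeviceFeatures2 features;
//     VkDebugReportCallbackEXT debugCallback = VK_NULL_HANDLE;
//     if (sk_gpu_test::CreateVkBackendContext(getProc, &backendContext, &extensions, &features,
//                                             &debugCallback, nullptr, nullptr, false)) {
//         // ... create a context from backendContext; once nothing references the feature chain
//         // any longer, release it with FreeVulkanFeaturesStructs(&features).
//     }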

void FreeVulkanFeaturesStructs(const VkPhysicalDeviceFeatures2* features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features->pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        sk_free(current);
    }
}

}  // namespace sk_gpu_test

#endif