blob: dfd35f27e9372907260ae74dad71e61d917beffa [file] [log] [blame]
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -06001/*
2 * Copyright (c) 2015-2016 The Khronos Group Inc.
3 * Copyright (c) 2015-2016 Valve Corporation
4 * Copyright (c) 2015-2016 LunarG, Inc.
5 * Copyright (c) 2015-2016 Google, Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 * Author: Tobin Ehlis <tobine@google.com>
20 * Author: Mark Lobodzinski <mark@lunarg.com>
21 */
22
23#include <stdio.h>
24#include <stdlib.h>
25#include <string.h>
26#include <unordered_map>
27#include <vector>
28#include <list>
29#include <memory>
30
Mike Weiblen6a27de52016-12-09 17:36:28 -070031// For Windows, this #include must come before other Vk headers.
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -060032#include "vk_loader_platform.h"
Mike Weiblen6a27de52016-12-09 17:36:28 -070033
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -060034#include "unique_objects.h"
35#include "vk_dispatch_table_helper.h"
Mike Weiblen6a27de52016-12-09 17:36:28 -070036#include "vk_layer_config.h"
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -060037#include "vk_layer_data.h"
Mike Weiblen6a27de52016-12-09 17:36:28 -070038#include "vk_layer_extension_utils.h"
39#include "vk_layer_logging.h"
40#include "vk_layer_table.h"
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -060041#include "vk_layer_utils.h"
Mike Weiblen6a27de52016-12-09 17:36:28 -070042#include "vk_layer_utils.h"
Mark Lobodzinski9acd2e32016-12-21 15:22:39 -070043#include "vk_enum_string_helper.h"
Mike Weiblen6a27de52016-12-09 17:36:28 -070044#include "vk_validation_error_messages.h"
45#include "vulkan/vk_layer.h"
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -060046
Mike Stroyanb985fca2016-11-01 11:50:16 -060047// This intentionally includes a cpp file
48#include "vk_safe_struct.cpp"
49
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -060050#include "unique_objects_wrappers.h"
51
52namespace unique_objects {
53
// Loader/layer interface version this layer reports; presumably negotiated down
// elsewhere (negotiation entry point not visible in this chunk) — TODO confirm.
static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
55
// One-time layer setup: registers this layer's debug-report output actions
// (destinations/severities) under the settings key "google_unique_objects".
static void initUniqueObjects(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "google_unique_objects");
}
59
60// Handle CreateInstance Extensions
61static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
62 uint32_t i;
63 layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
64 VkLayerInstanceDispatchTable *disp_table = instance_data->instance_dispatch_table;
65 instance_ext_map[disp_table] = {};
66
67 for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
68 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0) {
69 instance_ext_map[disp_table].wsi_enabled = true;
70 }
71 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME) == 0) {
72 instance_ext_map[disp_table].display_enabled = true;
73 }
74#ifdef VK_USE_PLATFORM_XLIB_KHR
75 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME) == 0) {
76 instance_ext_map[disp_table].xlib_enabled = true;
77 }
78#endif
79#ifdef VK_USE_PLATFORM_XCB_KHR
80 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME) == 0) {
81 instance_ext_map[disp_table].xcb_enabled = true;
82 }
83#endif
84#ifdef VK_USE_PLATFORM_WAYLAND_KHR
85 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME) == 0) {
86 instance_ext_map[disp_table].wayland_enabled = true;
87 }
88#endif
89#ifdef VK_USE_PLATFORM_MIR_KHR
90 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME) == 0) {
91 instance_ext_map[disp_table].mir_enabled = true;
92 }
93#endif
94#ifdef VK_USE_PLATFORM_ANDROID_KHR
95 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME) == 0) {
96 instance_ext_map[disp_table].android_enabled = true;
97 }
98#endif
99#ifdef VK_USE_PLATFORM_WIN32_KHR
100 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME) == 0) {
101 instance_ext_map[disp_table].win32_enabled = true;
102 }
103#endif
104
105 // Check for recognized instance extensions
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -0600106 if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kUniqueObjectsSupportedInstanceExtensions)) {
107 log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
Mike Weiblen6a27de52016-12-09 17:36:28 -0700108 VALIDATION_ERROR_UNDEFINED, "UniqueObjects",
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -0600109 "Instance Extension %s is not supported by this layer. Using this extension may adversely affect "
110 "validation results and/or produce undefined behavior.",
111 pCreateInfo->ppEnabledExtensionNames[i]);
112 }
113 }
114}
115
116// Handle CreateDevice Extensions
117static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
118 layer_data *device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
119 VkLayerDispatchTable *disp_table = device_data->device_dispatch_table;
120 PFN_vkGetDeviceProcAddr gpa = disp_table->GetDeviceProcAddr;
121
122 device_data->device_dispatch_table->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
123 disp_table->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
124 disp_table->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
125 disp_table->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
126 disp_table->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
127 device_data->wsi_enabled = false;
128
129 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
130 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) {
131 device_data->wsi_enabled = true;
132 }
133 // Check for recognized device extensions
134 if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kUniqueObjectsSupportedDeviceExtensions)) {
135 log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
Mike Weiblen6a27de52016-12-09 17:36:28 -0700136 VALIDATION_ERROR_UNDEFINED, "UniqueObjects",
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -0600137 "Device Extension %s is not supported by this layer. Using this extension may adversely affect "
138 "validation results and/or produce undefined behavior.",
139 pCreateInfo->ppEnabledExtensionNames[i]);
140 }
141 }
142}
143
144VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
145 VkInstance *pInstance) {
146 VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
147
148 assert(chain_info->u.pLayerInfo);
149 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
150 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
151 if (fpCreateInstance == NULL) {
152 return VK_ERROR_INITIALIZATION_FAILED;
153 }
154
155 // Advance the link info for the next element on the chain
156 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
157
158 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
159 if (result != VK_SUCCESS) {
160 return result;
161 }
162
163 layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
164 instance_data->instance = *pInstance;
165 instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
166 layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);
167
168 instance_data->instance = *pInstance;
169 instance_data->report_data =
170 debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
171 pCreateInfo->ppEnabledExtensionNames);
172
173 // Set up temporary debug callbacks to output messages at CreateInstance-time
174 if (!layer_copy_tmp_callbacks(pCreateInfo->pNext, &instance_data->num_tmp_callbacks, &instance_data->tmp_dbg_create_infos,
175 &instance_data->tmp_callbacks)) {
176 if (instance_data->num_tmp_callbacks > 0) {
177 if (layer_enable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks,
178 instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks)) {
179 layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks);
180 instance_data->num_tmp_callbacks = 0;
181 }
182 }
183 }
184
185 initUniqueObjects(instance_data, pAllocator);
186 checkInstanceRegisterExtensions(pCreateInfo, *pInstance);
187
188 // Disable and free tmp callbacks, no longer necessary
189 if (instance_data->num_tmp_callbacks > 0) {
190 layer_disable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks, instance_data->tmp_callbacks);
191 layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks);
192 instance_data->num_tmp_callbacks = 0;
193 }
194
195 return result;
196}
197
198VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
199 dispatch_key key = get_dispatch_key(instance);
200 layer_data *instance_data = get_my_data_ptr(key, layer_data_map);
201 VkLayerInstanceDispatchTable *disp_table = instance_data->instance_dispatch_table;
202 instance_ext_map.erase(disp_table);
203 disp_table->DestroyInstance(instance, pAllocator);
204
205 // Clean up logging callback, if any
206 while (instance_data->logging_callback.size() > 0) {
207 VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
208 layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
209 instance_data->logging_callback.pop_back();
210 }
211
212 layer_debug_report_destroy_instance(instance_data->report_data);
213 layer_data_map.erase(key);
214}
215
216VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
217 const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
218 layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
219 VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
220
221 assert(chain_info->u.pLayerInfo);
222 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
223 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
224 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
225 if (fpCreateDevice == NULL) {
226 return VK_ERROR_INITIALIZATION_FAILED;
227 }
228
229 // Advance the link info for the next element on the chain
230 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
231
232 VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
233 if (result != VK_SUCCESS) {
234 return result;
235 }
236
237 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
238 my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
239
240 // Setup layer's device dispatch table
241 my_device_data->device_dispatch_table = new VkLayerDispatchTable;
242 layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);
243
244 createDeviceRegisterExtensions(pCreateInfo, *pDevice);
245 // Set gpu for this device in order to get at any objects mapped at instance level
246
247 my_device_data->gpu = gpu;
248
249 return result;
250}
251
252VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
253 dispatch_key key = get_dispatch_key(device);
254 layer_data *dev_data = get_my_data_ptr(key, layer_data_map);
255
256 layer_debug_report_destroy_device(device);
257 dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
258 layer_data_map.erase(key);
259}
260
// Properties this layer reports through the Enumerate*LayerProperties calls below.
static const VkLayerProperties globalLayerProps = {"VK_LAYER_GOOGLE_unique_objects",
                                                   VK_LAYER_API_VERSION,  // specVersion
                                                   1,                     // implementationVersion
                                                   "Google Validation Layer"};
265
// Forward declaration: layer_intercept_proc() below takes this function's
// address before its definition appears.
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
268
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -0600269static inline PFN_vkVoidFunction layer_intercept_proc(const char *name) {
Jamie Madill6069c822016-12-15 09:35:36 -0500270 for (unsigned int i = 0; i < sizeof(procmap) / sizeof(procmap[0]); i++) {
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700271 if (!strcmp(name, procmap[i].name)) return procmap[i].pFunc;
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -0600272 }
Mark Young39389872017-01-19 21:10:49 -0700273 if (0 == strcmp(name, "vk_layerGetPhysicalDeviceProcAddr")) {
274 return (PFN_vkVoidFunction)GetPhysicalDeviceProcAddr;
275 }
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -0600276 return NULL;
277}
278
// Report this layer's single VkLayerProperties entry.
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}
282
// Report the same single-layer properties at device scope (physicalDevice is
// intentionally unused — the layer's properties do not depend on the device).
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}
287
288VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
289 VkExtensionProperties *pProperties) {
290 if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
291 return util_GetExtensionProperties(0, NULL, pCount, pProperties);
292
293 return VK_ERROR_LAYER_NOT_PRESENT;
294}
295
296VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
297 uint32_t *pCount, VkExtensionProperties *pProperties) {
298 if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
299 return util_GetExtensionProperties(0, nullptr, pCount, pProperties);
300
301 assert(physicalDevice);
302
303 dispatch_key key = get_dispatch_key(physicalDevice);
304 layer_data *instance_data = get_my_data_ptr(key, layer_data_map);
305 return instance_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
306}
307
308VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
309 PFN_vkVoidFunction addr;
310 assert(device);
311 addr = layer_intercept_proc(funcName);
312 if (addr) {
313 return addr;
314 }
315
316 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
317 VkLayerDispatchTable *disp_table = dev_data->device_dispatch_table;
318 if (disp_table->GetDeviceProcAddr == NULL) {
319 return NULL;
320 }
321 return disp_table->GetDeviceProcAddr(device, funcName);
322}
323
324VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
325 PFN_vkVoidFunction addr;
326
327 addr = layer_intercept_proc(funcName);
328 if (addr) {
329 return addr;
330 }
331 assert(instance);
332
333 layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
334 addr = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
335 if (addr) {
336 return addr;
337 }
338
339 VkLayerInstanceDispatchTable *disp_table = instance_data->instance_dispatch_table;
340 if (disp_table->GetInstanceProcAddr == NULL) {
341 return NULL;
342 }
343 return disp_table->GetInstanceProcAddr(instance, funcName);
344}
345
Mark Young39389872017-01-19 21:10:49 -0700346VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
347 assert(instance);
348
349 layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
350 VkLayerInstanceDispatchTable *disp_table = instance_data->instance_dispatch_table;
351 if (disp_table->GetPhysicalDeviceProcAddr == NULL) {
352 return NULL;
353 }
354 return disp_table->GetPhysicalDeviceProcAddr(instance, funcName);
355}
356
// Intercept vkAllocateMemory. If the pNext chain carries a
// VkDedicatedAllocationMemoryAllocateInfoNV, build a deep copy of the allocate
// info and rewrite that struct's buffer/image from this layer's unique handles
// back to the driver's handles before calling down; on success, wrap the
// returned VkDeviceMemory with a fresh unique id.
VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    // Defaults to passing the caller's struct through untouched
    const VkMemoryAllocateInfo *input_allocate_info = pAllocateInfo;
    std::unique_ptr<safe_VkMemoryAllocateInfo> safe_allocate_info;
    std::unique_ptr<safe_VkDedicatedAllocationMemoryAllocateInfoNV> safe_dedicated_allocate_info;
    layer_data *device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    if ((pAllocateInfo != nullptr) &&
        ContainsExtStruct(pAllocateInfo, VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV)) {
        // Assuming there is only one extension struct of this type in the list for now
        safe_dedicated_allocate_info =
            std::unique_ptr<safe_VkDedicatedAllocationMemoryAllocateInfoNV>(new safe_VkDedicatedAllocationMemoryAllocateInfoNV);
        safe_allocate_info = std::unique_ptr<safe_VkMemoryAllocateInfo>(new safe_VkMemoryAllocateInfo(pAllocateInfo));
        input_allocate_info = reinterpret_cast<const VkMemoryAllocateInfo *>(safe_allocate_info.get());

        // Walk the original pNext chain, splicing the rewritten dedicated-alloc
        // struct into the copied chain in place of the original
        const GenericHeader *orig_pnext = reinterpret_cast<const GenericHeader *>(pAllocateInfo->pNext);
        GenericHeader *input_pnext = reinterpret_cast<GenericHeader *>(safe_allocate_info.get());
        while (orig_pnext != nullptr) {
            if (orig_pnext->sType == VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV) {
                safe_dedicated_allocate_info->initialize(
                    reinterpret_cast<const VkDedicatedAllocationMemoryAllocateInfoNV *>(orig_pnext));

                // unique_id_mapping is shared — guard the handle translations
                std::unique_lock<std::mutex> lock(global_lock);

                if (safe_dedicated_allocate_info->buffer != VK_NULL_HANDLE) {
                    uint64_t local_buffer = reinterpret_cast<uint64_t &>(safe_dedicated_allocate_info->buffer);
                    safe_dedicated_allocate_info->buffer =
                        reinterpret_cast<VkBuffer &>(device_data->unique_id_mapping[local_buffer]);
                }

                if (safe_dedicated_allocate_info->image != VK_NULL_HANDLE) {
                    uint64_t local_image = reinterpret_cast<uint64_t &>(safe_dedicated_allocate_info->image);
                    safe_dedicated_allocate_info->image = reinterpret_cast<VkImage &>(device_data->unique_id_mapping[local_image]);
                }

                lock.unlock();

                // Link the rewritten struct into the copied chain and advance
                input_pnext->pNext = reinterpret_cast<GenericHeader *>(safe_dedicated_allocate_info.get());
                input_pnext = reinterpret_cast<GenericHeader *>(input_pnext->pNext);
            } else {
                // TODO: generic handling of pNext copies
            }

            orig_pnext = reinterpret_cast<const GenericHeader *>(orig_pnext->pNext);
        }
    }

    VkResult result = device_data->device_dispatch_table->AllocateMemory(device, input_allocate_info, pAllocator, pMemory);

    if (VK_SUCCESS == result) {
        // Hand the caller a unique id instead of the driver's memory handle
        std::lock_guard<std::mutex> lock(global_lock);
        uint64_t unique_id = global_unique_id++;
        device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(*pMemory);
        *pMemory = reinterpret_cast<VkDeviceMemory &>(unique_id);
    }

    return result;
}
415
// Intercept vkCreateComputePipelines: deep-copy each create info and translate
// the unique handles it carries (basePipelineHandle, layout, stage.module) back
// to driver handles, call down the chain, then wrap each successfully created
// pipeline with a fresh unique id.
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    safe_VkComputePipelineCreateInfo *local_pCreateInfos = NULL;
    if (pCreateInfos) {
        std::lock_guard<std::mutex> lock(global_lock);
        local_pCreateInfos = new safe_VkComputePipelineCreateInfo[createInfoCount];
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle =
                    (VkPipeline)my_device_data
                        ->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].basePipelineHandle)];
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout =
                    (VkPipelineLayout)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].layout)];
            }
            if (pCreateInfos[idx0].stage.module) {
                local_pCreateInfos[idx0].stage.module =
                    (VkShaderModule)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].stage.module)];
            }
        }
    }
    if (pipelineCache) {
        // pipelineCache is a by-value parameter, so it can be rewritten in place
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = (VkPipelineCache)my_device_data->unique_id_mapping[reinterpret_cast<uint64_t &>(pipelineCache)];
    }

    VkResult result = my_device_data->device_dispatch_table->CreateComputePipelines(
        device, pipelineCache, createInfoCount, (const VkComputePipelineCreateInfo *)local_pCreateInfos, pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    {
        uint64_t unique_id = 0;
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            // Skip slots the driver left null (e.g. individual creation failures)
            if (pPipelines[i] != VK_NULL_HANDLE) {
                unique_id = global_unique_id++;
                my_device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(pPipelines[i]);
                pPipelines[i] = reinterpret_cast<VkPipeline &>(unique_id);
            }
        }
    }
    return result;
}
464
// Intercept vkCreateGraphicsPipelines: deep-copy each create info and translate
// the unique handles it carries (basePipelineHandle, layout, every shader-stage
// module, renderPass) back to driver handles, call down the chain, then wrap
// each successfully created pipeline with a fresh unique id.
VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = NULL;
    if (pCreateInfos) {
        local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount];
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle =
                    (VkPipeline)my_device_data
                        ->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].basePipelineHandle)];
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout =
                    (VkPipelineLayout)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].layout)];
            }
            if (pCreateInfos[idx0].pStages) {
                // Unlike compute pipelines, graphics pipelines carry an array of stages
                for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
                    if (pCreateInfos[idx0].pStages[idx1].module) {
                        local_pCreateInfos[idx0].pStages[idx1].module =
                            (VkShaderModule)my_device_data
                                ->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].pStages[idx1].module)];
                    }
                }
            }
            if (pCreateInfos[idx0].renderPass) {
                local_pCreateInfos[idx0].renderPass =
                    (VkRenderPass)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].renderPass)];
            }
        }
    }
    if (pipelineCache) {
        // pipelineCache is a by-value parameter, so it can be rewritten in place
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = (VkPipelineCache)my_device_data->unique_id_mapping[reinterpret_cast<uint64_t &>(pipelineCache)];
    }

    VkResult result = my_device_data->device_dispatch_table->CreateGraphicsPipelines(
        device, pipelineCache, createInfoCount, (const VkGraphicsPipelineCreateInfo *)local_pCreateInfos, pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    {
        uint64_t unique_id = 0;
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            // Skip slots the driver left null (e.g. individual creation failures)
            if (pPipelines[i] != VK_NULL_HANDLE) {
                unique_id = global_unique_id++;
                my_device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(pPipelines[i]);
                pPipelines[i] = reinterpret_cast<VkPipeline &>(unique_id);
            }
        }
    }
    return result;
}
522
523VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
524 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
525 const VkAllocationCallbacks *pAllocator,
526 VkDebugReportCallbackEXT *pMsgCallback) {
527 layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
528 VkResult result =
529 instance_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
530
531 if (VK_SUCCESS == result) {
532 result = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
533 }
534 return result;
535}
536
537VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT callback,
538 const VkAllocationCallbacks *pAllocator) {
539 layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
540 instance_data->instance_dispatch_table->DestroyDebugReportCallbackEXT(instance, callback, pAllocator);
541 layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
542}
543
544VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
545 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
546 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
547 layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map);
548 instance_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix,
549 pMsg);
550}
551
552VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
553 const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
554 layer_data *my_map_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
555 safe_VkSwapchainCreateInfoKHR *local_pCreateInfo = NULL;
556 if (pCreateInfo) {
557 std::lock_guard<std::mutex> lock(global_lock);
558 local_pCreateInfo = new safe_VkSwapchainCreateInfoKHR(pCreateInfo);
559 local_pCreateInfo->oldSwapchain =
560 (VkSwapchainKHR)my_map_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfo->oldSwapchain)];
561 // Need to pull surface mapping from the instance-level map
562 layer_data *instance_data = get_my_data_ptr(get_dispatch_key(my_map_data->gpu), layer_data_map);
563 local_pCreateInfo->surface =
564 (VkSurfaceKHR)instance_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfo->surface)];
565 }
566
567 VkResult result = my_map_data->device_dispatch_table->CreateSwapchainKHR(
568 device, (const VkSwapchainCreateInfoKHR *)local_pCreateInfo, pAllocator, pSwapchain);
569 if (local_pCreateInfo) {
570 delete local_pCreateInfo;
571 }
572 if (VK_SUCCESS == result) {
573 std::lock_guard<std::mutex> lock(global_lock);
574 uint64_t unique_id = global_unique_id++;
575 my_map_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(*pSwapchain);
576 *pSwapchain = reinterpret_cast<VkSwapchainKHR &>(unique_id);
577 }
578 return result;
579}
580
// Intercept vkGetSwapchainImagesKHR: unwrap the swapchain's unique handle,
// call down the chain, then wrap each returned image with a fresh unique id.
VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages) {
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    if (VK_NULL_HANDLE != swapchain) {
        // Translate this layer's unique handle back to the driver's handle
        std::lock_guard<std::mutex> lock(global_lock);
        swapchain = (VkSwapchainKHR)my_device_data->unique_id_mapping[reinterpret_cast<uint64_t &>(swapchain)];
    }
    VkResult result =
        my_device_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    // TODO : Need to add corresponding code to delete these images
    if (VK_SUCCESS == result) {
        // Only the second call of the count/fill idiom returns images to wrap
        if ((*pSwapchainImageCount > 0) && pSwapchainImages) {
            uint64_t unique_id = 0;
            std::lock_guard<std::mutex> lock(global_lock);
            for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
                unique_id = global_unique_id++;
                my_device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(pSwapchainImages[i]);
                pSwapchainImages[i] = reinterpret_cast<VkImage &>(unique_id);
            }
        }
    }
    return result;
}
604
605#ifndef __ANDROID__
// Intercept vkGetPhysicalDeviceDisplayPropertiesKHR: copy any caller-provided
// property structs (translating their display handles back to driver handles),
// call down the chain, then register each returned display under a fresh
// unique id and copy the remaining fields back to the caller's array.
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                     VkDisplayPropertiesKHR *pProperties) {
    layer_data *my_map_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
    safe_VkDisplayPropertiesKHR *local_pProperties = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pProperties) {
            local_pProperties = new safe_VkDisplayPropertiesKHR[*pPropertyCount];
            for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
                local_pProperties[idx0].initialize(&pProperties[idx0]);
                if (pProperties[idx0].display) {
                    local_pProperties[idx0].display =
                        (VkDisplayKHR)my_map_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pProperties[idx0].display)];
                }
            }
        }
    }

    VkResult result = my_map_data->instance_dispatch_table->GetPhysicalDeviceDisplayPropertiesKHR(
        physicalDevice, pPropertyCount, (VkDisplayPropertiesKHR *)local_pProperties);
    if (result == VK_SUCCESS && pProperties) {
        for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
            std::lock_guard<std::mutex> lock(global_lock);

            // Wrap the display, then copy the remaining fields back verbatim
            uint64_t unique_id = global_unique_id++;
            my_map_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(local_pProperties[idx0].display);
            pProperties[idx0].display = reinterpret_cast<VkDisplayKHR &>(unique_id);
            pProperties[idx0].displayName = local_pProperties[idx0].displayName;
            pProperties[idx0].physicalDimensions = local_pProperties[idx0].physicalDimensions;
            pProperties[idx0].physicalResolution = local_pProperties[idx0].physicalResolution;
            pProperties[idx0].supportedTransforms = local_pProperties[idx0].supportedTransforms;
            pProperties[idx0].planeReorderPossible = local_pProperties[idx0].planeReorderPossible;
            pProperties[idx0].persistentContent = local_pProperties[idx0].persistentContent;
        }
    }
    if (local_pProperties) {
        delete[] local_pProperties;
    }
    return result;
}
646
647VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
648 uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
649 layer_data *my_map_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
650 VkResult result = my_map_data->instance_dispatch_table->GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex,
651 pDisplayCount, pDisplays);
652 if (VK_SUCCESS == result) {
653 if ((*pDisplayCount > 0) && pDisplays) {
654 std::lock_guard<std::mutex> lock(global_lock);
655 for (uint32_t i = 0; i < *pDisplayCount; i++) {
656 auto it = my_map_data->unique_id_mapping.find(reinterpret_cast<const uint64_t &>(pDisplays[i]));
657 assert(it != my_map_data->unique_id_mapping.end());
658 pDisplays[i] = reinterpret_cast<VkDisplayKHR &>(it->second);
659 }
660 }
661 }
662 return result;
663}
664
665VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
666 uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) {
667 layer_data *my_map_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
Mark Lobodzinskif0650ff2017-01-03 08:52:14 -0700668 VkDisplayModePropertiesKHR *local_pProperties = NULL;
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -0600669 {
670 std::lock_guard<std::mutex> lock(global_lock);
671 display = (VkDisplayKHR)my_map_data->unique_id_mapping[reinterpret_cast<uint64_t &>(display)];
672 if (pProperties) {
Mark Lobodzinskif0650ff2017-01-03 08:52:14 -0700673 local_pProperties = new VkDisplayModePropertiesKHR[*pPropertyCount];
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -0600674 }
675 }
676
677 VkResult result = my_map_data->instance_dispatch_table->GetDisplayModePropertiesKHR(
678 physicalDevice, display, pPropertyCount, (VkDisplayModePropertiesKHR *)local_pProperties);
679 if (result == VK_SUCCESS && pProperties) {
680 for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
681 std::lock_guard<std::mutex> lock(global_lock);
682
683 uint64_t unique_id = global_unique_id++;
684 my_map_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(local_pProperties[idx0].displayMode);
685 pProperties[idx0].displayMode = reinterpret_cast<VkDisplayModeKHR &>(unique_id);
686 pProperties[idx0].parameters.visibleRegion.width = local_pProperties[idx0].parameters.visibleRegion.width;
687 pProperties[idx0].parameters.visibleRegion.height = local_pProperties[idx0].parameters.visibleRegion.height;
688 pProperties[idx0].parameters.refreshRate = local_pProperties[idx0].parameters.refreshRate;
689 }
690 }
691 if (local_pProperties) {
692 delete[] local_pProperties;
693 }
694 return result;
695}
Norbert Nopper1dec9a52016-11-25 07:55:13 +0100696
697VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
698 uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
699 layer_data *dev_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map);
700 {
701 std::lock_guard<std::mutex> lock(global_lock);
702 auto it = dev_data->unique_id_mapping.find(reinterpret_cast<uint64_t &>(mode));
703 if (it == dev_data->unique_id_mapping.end()) {
704 uint64_t unique_id = global_unique_id++;
705 dev_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(mode);
706
707 mode = reinterpret_cast<VkDisplayModeKHR &>(unique_id);
708 } else {
709 mode = reinterpret_cast<VkDisplayModeKHR &>(it->second);
710 }
711 }
712 VkResult result =
713 dev_data->instance_dispatch_table->GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
714 return result;
715}
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -0600716#endif
717
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700718} // namespace unique_objects
Mark Lobodzinskidc3bd852016-09-06 16:12:23 -0600719
720// vk_layer_logging.h expects these to be defined
721VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(VkInstance instance,
722 const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
723 const VkAllocationCallbacks *pAllocator,
724 VkDebugReportCallbackEXT *pMsgCallback) {
725 return unique_objects::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
726}
727
// Exported entry point required by vk_layer_logging.h; forwards directly to the
// layer's implementation inside the unique_objects namespace.
VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                                           const VkAllocationCallbacks *pAllocator) {
    unique_objects::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}
732
// Exported entry point required by vk_layer_logging.h; forwards the debug-report
// message to the layer's implementation inside the unique_objects namespace.
VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
                                                   VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
                                                   int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    unique_objects::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}
738
739VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
740 VkExtensionProperties *pProperties) {
741 return unique_objects::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
742}
743
744VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
745 VkLayerProperties *pProperties) {
746 return unique_objects::EnumerateInstanceLayerProperties(pCount, pProperties);
747}
748
749VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
750 VkLayerProperties *pProperties) {
751 assert(physicalDevice == VK_NULL_HANDLE);
752 return unique_objects::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
753}
754
755VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
756 return unique_objects::GetDeviceProcAddr(dev, funcName);
757}
758
759VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
760 return unique_objects::GetInstanceProcAddr(instance, funcName);
761}
762
763VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
764 const char *pLayerName, uint32_t *pCount,
765 VkExtensionProperties *pProperties) {
766 assert(physicalDevice == VK_NULL_HANDLE);
767 return unique_objects::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
768}
Mark Young39389872017-01-19 21:10:49 -0700769
Mark Lobodzinski729a8d32017-01-26 12:16:30 -0700770VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
771 const char *funcName) {
Mark Young39389872017-01-19 21:10:49 -0700772 return unique_objects::GetPhysicalDeviceProcAddr(instance, funcName);
773}
774
775VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
776 assert(pVersionStruct != NULL);
777 assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);
778
779 // Fill in the function pointers if our version is at least capable of having the structure contain them.
780 if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
781 pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
782 pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
783 pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
784 }
785
786 if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
787 unique_objects::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
788 } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
789 pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
790 }
791
792 return VK_SUCCESS;
793}