Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2015-2016 The Khronos Group Inc. |
| 3 | * Copyright (c) 2015-2016 Valve Corporation |
| 4 | * Copyright (c) 2015-2016 LunarG, Inc. |
| 5 | * Copyright (c) 2015-2016 Google, Inc. |
| 6 | * |
| 7 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 8 | * you may not use this file except in compliance with the License. |
| 9 | * You may obtain a copy of the License at |
| 10 | * |
| 11 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 12 | * |
| 13 | * Unless required by applicable law or agreed to in writing, software |
| 14 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 16 | * See the License for the specific language governing permissions and |
| 17 | * limitations under the License. |
| 18 | * |
| 19 | * Author: Tobin Ehlis <tobine@google.com> |
| 20 | * Author: Mark Lobodzinski <mark@lunarg.com> |
| 21 | */ |
| 22 | |
| 23 | #include <stdio.h> |
| 24 | #include <stdlib.h> |
| 25 | #include <string.h> |
| 26 | #include <unordered_map> |
| 27 | #include <vector> |
| 28 | #include <list> |
| 29 | #include <memory> |
| 30 | |
| 31 | #include "vk_loader_platform.h" |
| 32 | #include "vulkan/vk_layer.h" |
| 33 | #include "vk_layer_config.h" |
| 34 | #include "vk_layer_extension_utils.h" |
| 35 | #include "vk_layer_utils.h" |
| 36 | #include "vk_layer_table.h" |
| 37 | #include "vk_layer_logging.h" |
| 38 | #include "unique_objects.h" |
| 39 | #include "vk_dispatch_table_helper.h" |
| 40 | #include "vk_struct_string_helper_cpp.h" |
| 41 | #include "vk_layer_data.h" |
| 42 | #include "vk_layer_utils.h" |
| 43 | |
| 44 | #include "unique_objects_wrappers.h" |
| 45 | |
| 46 | namespace unique_objects { |
| 47 | |
// One-time layer setup for a new instance: registers this layer's debug-report
// actions (log destinations, severity filters) under the "google_unique_objects"
// settings key so report output honors the user's layer-settings configuration.
static void initUniqueObjects(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "google_unique_objects");
}
| 51 | |
| 52 | // Handle CreateInstance Extensions |
| 53 | static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) { |
| 54 | uint32_t i; |
| 55 | layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); |
| 56 | VkLayerInstanceDispatchTable *disp_table = instance_data->instance_dispatch_table; |
| 57 | instance_ext_map[disp_table] = {}; |
| 58 | |
| 59 | for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) { |
| 60 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0) { |
| 61 | instance_ext_map[disp_table].wsi_enabled = true; |
| 62 | } |
| 63 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME) == 0) { |
| 64 | instance_ext_map[disp_table].display_enabled = true; |
| 65 | } |
| 66 | #ifdef VK_USE_PLATFORM_XLIB_KHR |
| 67 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME) == 0) { |
| 68 | instance_ext_map[disp_table].xlib_enabled = true; |
| 69 | } |
| 70 | #endif |
| 71 | #ifdef VK_USE_PLATFORM_XCB_KHR |
| 72 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME) == 0) { |
| 73 | instance_ext_map[disp_table].xcb_enabled = true; |
| 74 | } |
| 75 | #endif |
| 76 | #ifdef VK_USE_PLATFORM_WAYLAND_KHR |
| 77 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME) == 0) { |
| 78 | instance_ext_map[disp_table].wayland_enabled = true; |
| 79 | } |
| 80 | #endif |
| 81 | #ifdef VK_USE_PLATFORM_MIR_KHR |
| 82 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME) == 0) { |
| 83 | instance_ext_map[disp_table].mir_enabled = true; |
| 84 | } |
| 85 | #endif |
| 86 | #ifdef VK_USE_PLATFORM_ANDROID_KHR |
| 87 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME) == 0) { |
| 88 | instance_ext_map[disp_table].android_enabled = true; |
| 89 | } |
| 90 | #endif |
| 91 | #ifdef VK_USE_PLATFORM_WIN32_KHR |
| 92 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME) == 0) { |
| 93 | instance_ext_map[disp_table].win32_enabled = true; |
| 94 | } |
| 95 | #endif |
| 96 | |
| 97 | // Check for recognized instance extensions |
| 98 | layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); |
| 99 | if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kUniqueObjectsSupportedInstanceExtensions)) { |
| 100 | log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, |
| 101 | 0, "UniqueObjects", |
| 102 | "Instance Extension %s is not supported by this layer. Using this extension may adversely affect " |
| 103 | "validation results and/or produce undefined behavior.", |
| 104 | pCreateInfo->ppEnabledExtensionNames[i]); |
| 105 | } |
| 106 | } |
| 107 | } |
| 108 | |
| 109 | // Handle CreateDevice Extensions |
| 110 | static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) { |
| 111 | layer_data *device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); |
| 112 | VkLayerDispatchTable *disp_table = device_data->device_dispatch_table; |
| 113 | PFN_vkGetDeviceProcAddr gpa = disp_table->GetDeviceProcAddr; |
| 114 | |
| 115 | device_data->device_dispatch_table->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR"); |
| 116 | disp_table->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR"); |
| 117 | disp_table->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR"); |
| 118 | disp_table->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR"); |
| 119 | disp_table->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR"); |
| 120 | device_data->wsi_enabled = false; |
| 121 | |
| 122 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { |
| 123 | if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) { |
| 124 | device_data->wsi_enabled = true; |
| 125 | } |
| 126 | // Check for recognized device extensions |
| 127 | if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kUniqueObjectsSupportedDeviceExtensions)) { |
| 128 | log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, |
| 129 | 0, "UniqueObjects", |
| 130 | "Device Extension %s is not supported by this layer. Using this extension may adversely affect " |
| 131 | "validation results and/or produce undefined behavior.", |
| 132 | pCreateInfo->ppEnabledExtensionNames[i]); |
| 133 | } |
| 134 | } |
| 135 | } |
| 136 | |
| 137 | VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, |
| 138 | VkInstance *pInstance) { |
| 139 | VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); |
| 140 | |
| 141 | assert(chain_info->u.pLayerInfo); |
| 142 | PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; |
| 143 | PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance"); |
| 144 | if (fpCreateInstance == NULL) { |
| 145 | return VK_ERROR_INITIALIZATION_FAILED; |
| 146 | } |
| 147 | |
| 148 | // Advance the link info for the next element on the chain |
| 149 | chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; |
| 150 | |
| 151 | VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance); |
| 152 | if (result != VK_SUCCESS) { |
| 153 | return result; |
| 154 | } |
| 155 | |
| 156 | layer_data *instance_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map); |
| 157 | instance_data->instance = *pInstance; |
| 158 | instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable; |
| 159 | layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr); |
| 160 | |
| 161 | instance_data->instance = *pInstance; |
| 162 | instance_data->report_data = |
| 163 | debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, |
| 164 | pCreateInfo->ppEnabledExtensionNames); |
| 165 | |
| 166 | // Set up temporary debug callbacks to output messages at CreateInstance-time |
| 167 | if (!layer_copy_tmp_callbacks(pCreateInfo->pNext, &instance_data->num_tmp_callbacks, &instance_data->tmp_dbg_create_infos, |
| 168 | &instance_data->tmp_callbacks)) { |
| 169 | if (instance_data->num_tmp_callbacks > 0) { |
| 170 | if (layer_enable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks, |
| 171 | instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks)) { |
| 172 | layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks); |
| 173 | instance_data->num_tmp_callbacks = 0; |
| 174 | } |
| 175 | } |
| 176 | } |
| 177 | |
| 178 | initUniqueObjects(instance_data, pAllocator); |
| 179 | checkInstanceRegisterExtensions(pCreateInfo, *pInstance); |
| 180 | |
| 181 | // Disable and free tmp callbacks, no longer necessary |
| 182 | if (instance_data->num_tmp_callbacks > 0) { |
| 183 | layer_disable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks, instance_data->tmp_callbacks); |
| 184 | layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks); |
| 185 | instance_data->num_tmp_callbacks = 0; |
| 186 | } |
| 187 | |
| 188 | return result; |
| 189 | } |
| 190 | |
| 191 | VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) { |
| 192 | dispatch_key key = get_dispatch_key(instance); |
| 193 | layer_data *instance_data = get_my_data_ptr(key, layer_data_map); |
| 194 | VkLayerInstanceDispatchTable *disp_table = instance_data->instance_dispatch_table; |
| 195 | instance_ext_map.erase(disp_table); |
| 196 | disp_table->DestroyInstance(instance, pAllocator); |
| 197 | |
| 198 | // Clean up logging callback, if any |
| 199 | while (instance_data->logging_callback.size() > 0) { |
| 200 | VkDebugReportCallbackEXT callback = instance_data->logging_callback.back(); |
| 201 | layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator); |
| 202 | instance_data->logging_callback.pop_back(); |
| 203 | } |
| 204 | |
| 205 | layer_debug_report_destroy_instance(instance_data->report_data); |
| 206 | layer_data_map.erase(key); |
| 207 | } |
| 208 | |
| 209 | VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, |
| 210 | const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) { |
| 211 | layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map); |
| 212 | VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); |
| 213 | |
| 214 | assert(chain_info->u.pLayerInfo); |
| 215 | PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; |
| 216 | PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr; |
| 217 | PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice"); |
| 218 | if (fpCreateDevice == NULL) { |
| 219 | return VK_ERROR_INITIALIZATION_FAILED; |
| 220 | } |
| 221 | |
| 222 | // Advance the link info for the next element on the chain |
| 223 | chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; |
| 224 | |
| 225 | VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice); |
| 226 | if (result != VK_SUCCESS) { |
| 227 | return result; |
| 228 | } |
| 229 | |
| 230 | layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map); |
| 231 | my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice); |
| 232 | |
| 233 | // Setup layer's device dispatch table |
| 234 | my_device_data->device_dispatch_table = new VkLayerDispatchTable; |
| 235 | layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr); |
| 236 | |
| 237 | createDeviceRegisterExtensions(pCreateInfo, *pDevice); |
| 238 | // Set gpu for this device in order to get at any objects mapped at instance level |
| 239 | |
| 240 | my_device_data->gpu = gpu; |
| 241 | |
| 242 | return result; |
| 243 | } |
| 244 | |
| 245 | VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { |
| 246 | dispatch_key key = get_dispatch_key(device); |
| 247 | layer_data *dev_data = get_my_data_ptr(key, layer_data_map); |
| 248 | |
| 249 | layer_debug_report_destroy_device(device); |
| 250 | dev_data->device_dispatch_table->DestroyDevice(device, pAllocator); |
| 251 | layer_data_map.erase(key); |
| 252 | } |
| 253 | |
// Properties reported for this layer by the Enumerate*LayerProperties intercepts.
static const VkLayerProperties globalLayerProps = {"VK_LAYER_GOOGLE_unique_objects",
                                                   VK_LAYER_API_VERSION, // specVersion
                                                   1,                    // implementationVersion
                                                   "Google Validation Layer"};
| 258 | |
| 259 | static inline PFN_vkVoidFunction layer_intercept_proc(const char *name) { |
| 260 | for (int i = 0; i < sizeof(procmap) / sizeof(procmap[0]); i++) { |
| 261 | if (!strcmp(name, procmap[i].name)) |
| 262 | return procmap[i].pFunc; |
| 263 | } |
| 264 | return NULL; |
| 265 | } |
| 266 | |
// Report this layer's single VkLayerProperties entry to the loader.
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}
| 270 | |
// Report this layer's single VkLayerProperties entry for device-layer queries
// (physicalDevice is intentionally unused; the layer exposes the same properties).
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}
| 275 | |
| 276 | VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, |
| 277 | VkExtensionProperties *pProperties) { |
| 278 | if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName)) |
| 279 | return util_GetExtensionProperties(0, NULL, pCount, pProperties); |
| 280 | |
| 281 | return VK_ERROR_LAYER_NOT_PRESENT; |
| 282 | } |
| 283 | |
| 284 | VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName, |
| 285 | uint32_t *pCount, VkExtensionProperties *pProperties) { |
| 286 | if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName)) |
| 287 | return util_GetExtensionProperties(0, nullptr, pCount, pProperties); |
| 288 | |
| 289 | assert(physicalDevice); |
| 290 | |
| 291 | dispatch_key key = get_dispatch_key(physicalDevice); |
| 292 | layer_data *instance_data = get_my_data_ptr(key, layer_data_map); |
| 293 | return instance_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties); |
| 294 | } |
| 295 | |
| 296 | VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) { |
| 297 | PFN_vkVoidFunction addr; |
| 298 | assert(device); |
| 299 | addr = layer_intercept_proc(funcName); |
| 300 | if (addr) { |
| 301 | return addr; |
| 302 | } |
| 303 | |
| 304 | layer_data *dev_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); |
| 305 | VkLayerDispatchTable *disp_table = dev_data->device_dispatch_table; |
| 306 | if (disp_table->GetDeviceProcAddr == NULL) { |
| 307 | return NULL; |
| 308 | } |
| 309 | return disp_table->GetDeviceProcAddr(device, funcName); |
| 310 | } |
| 311 | |
| 312 | VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) { |
| 313 | PFN_vkVoidFunction addr; |
| 314 | |
| 315 | addr = layer_intercept_proc(funcName); |
| 316 | if (addr) { |
| 317 | return addr; |
| 318 | } |
| 319 | assert(instance); |
| 320 | |
| 321 | layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); |
| 322 | addr = debug_report_get_instance_proc_addr(instance_data->report_data, funcName); |
| 323 | if (addr) { |
| 324 | return addr; |
| 325 | } |
| 326 | |
| 327 | VkLayerInstanceDispatchTable *disp_table = instance_data->instance_dispatch_table; |
| 328 | if (disp_table->GetInstanceProcAddr == NULL) { |
| 329 | return NULL; |
| 330 | } |
| 331 | return disp_table->GetInstanceProcAddr(instance, funcName); |
| 332 | } |
| 333 | |
// Layer intercept for vkAllocateMemory.
//
// If the pNext chain carries a VkDedicatedAllocationMemoryAllocateInfoNV, its
// buffer/image handles are this layer's unique ids and must be unwrapped to the
// real driver handles before the call goes down the chain. A deep copy of the
// allocate info is built so the caller's struct is never modified. On success the
// returned VkDeviceMemory handle is wrapped with a fresh unique id.
VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    const VkMemoryAllocateInfo *input_allocate_info = pAllocateInfo;
    std::unique_ptr<safe_VkMemoryAllocateInfo> safe_allocate_info;
    std::unique_ptr<safe_VkDedicatedAllocationMemoryAllocateInfoNV> safe_dedicated_allocate_info;
    layer_data *device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);

    if ((pAllocateInfo != nullptr) &&
        ContainsExtStruct(pAllocateInfo, VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV)) {
        // Assuming there is only one extension struct of this type in the list for now
        safe_dedicated_allocate_info =
            std::unique_ptr<safe_VkDedicatedAllocationMemoryAllocateInfoNV>(new safe_VkDedicatedAllocationMemoryAllocateInfoNV);
        // Deep-copy the top-level allocate info and pass the copy down the chain.
        safe_allocate_info = std::unique_ptr<safe_VkMemoryAllocateInfo>(new safe_VkMemoryAllocateInfo(pAllocateInfo));
        input_allocate_info = reinterpret_cast<const VkMemoryAllocateInfo *>(safe_allocate_info.get());

        // Walk the caller's pNext chain, rebuilding it on the copy with the
        // dedicated-allocation struct's handles unwrapped.
        const GenericHeader *orig_pnext = reinterpret_cast<const GenericHeader *>(pAllocateInfo->pNext);
        GenericHeader *input_pnext = reinterpret_cast<GenericHeader *>(safe_allocate_info.get());
        while (orig_pnext != nullptr) {
            if (orig_pnext->sType == VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV) {
                safe_dedicated_allocate_info->initialize(
                    reinterpret_cast<const VkDedicatedAllocationMemoryAllocateInfoNV *>(orig_pnext));

                // unique_id_mapping is shared device state — guard the lookups.
                std::unique_lock<std::mutex> lock(global_lock);

                if (safe_dedicated_allocate_info->buffer != VK_NULL_HANDLE) {
                    uint64_t local_buffer = reinterpret_cast<uint64_t &>(safe_dedicated_allocate_info->buffer);
                    safe_dedicated_allocate_info->buffer =
                        reinterpret_cast<VkBuffer &>(device_data->unique_id_mapping[local_buffer]);
                }

                if (safe_dedicated_allocate_info->image != VK_NULL_HANDLE) {
                    uint64_t local_image = reinterpret_cast<uint64_t &>(safe_dedicated_allocate_info->image);
                    safe_dedicated_allocate_info->image = reinterpret_cast<VkImage &>(device_data->unique_id_mapping[local_image]);
                }

                lock.unlock();

                // Splice the unwrapped copy into the rebuilt chain.
                input_pnext->pNext = reinterpret_cast<GenericHeader *>(safe_dedicated_allocate_info.get());
                input_pnext = reinterpret_cast<GenericHeader *>(input_pnext->pNext);
            } else {
                // TODO: generic handling of pNext copies
            }

            orig_pnext = reinterpret_cast<const GenericHeader *>(orig_pnext->pNext);
        }
    }

    VkResult result = device_data->device_dispatch_table->AllocateMemory(device, input_allocate_info, pAllocator, pMemory);

    if (VK_SUCCESS == result) {
        // Wrap the new memory handle with a layer-unique id before returning it.
        std::lock_guard<std::mutex> lock(global_lock);
        uint64_t unique_id = global_unique_id++;
        device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(*pMemory);
        *pMemory = reinterpret_cast<VkDeviceMemory &>(unique_id);
    }

    return result;
}
| 392 | |
// Layer intercept for vkCreateComputePipelines.
//
// Builds deep copies of the create infos with every wrapped handle
// (basePipelineHandle, layout, stage.module) and the pipeline cache unwrapped to
// driver handles, forwards down the chain, then wraps each returned pipeline
// handle with a fresh unique id.
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    safe_VkComputePipelineCreateInfo *local_pCreateInfos = NULL;
    if (pCreateInfos) {
        // Lock while reading unique_id_mapping during the unwrap pass.
        std::lock_guard<std::mutex> lock(global_lock);
        local_pCreateInfos = new safe_VkComputePipelineCreateInfo[createInfoCount];
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle =
                    (VkPipeline)my_device_data
                        ->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].basePipelineHandle)];
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout =
                    (VkPipelineLayout)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].layout)];
            }
            if (pCreateInfos[idx0].stage.module) {
                local_pCreateInfos[idx0].stage.module =
                    (VkShaderModule)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].stage.module)];
            }
        }
    }
    if (pipelineCache) {
        // pipelineCache is passed by value, so it can be unwrapped in place.
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = (VkPipelineCache)my_device_data->unique_id_mapping[reinterpret_cast<uint64_t &>(pipelineCache)];
    }

    VkResult result = my_device_data->device_dispatch_table->CreateComputePipelines(
        device, pipelineCache, createInfoCount, (const VkComputePipelineCreateInfo *)local_pCreateInfos, pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    if (VK_SUCCESS == result) {
        // Wrap each new pipeline handle with a layer-unique id.
        uint64_t unique_id = 0;
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            unique_id = global_unique_id++;
            my_device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(pPipelines[i]);
            pPipelines[i] = reinterpret_cast<VkPipeline &>(unique_id);
        }
    }
    return result;
}
| 439 | |
// Layer intercept for vkCreateGraphicsPipelines.
//
// Builds deep copies of the create infos with every wrapped handle
// (basePipelineHandle, layout, each pStages[].module, renderPass) and the
// pipeline cache unwrapped to driver handles, forwards down the chain, then
// wraps each returned pipeline handle with a fresh unique id.
VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = NULL;
    if (pCreateInfos) {
        local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount];
        // Lock while reading unique_id_mapping during the unwrap pass.
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle =
                    (VkPipeline)my_device_data
                        ->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].basePipelineHandle)];
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout =
                    (VkPipelineLayout)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].layout)];
            }
            if (pCreateInfos[idx0].pStages) {
                // Unwrap the shader module of every shader stage.
                for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
                    if (pCreateInfos[idx0].pStages[idx1].module) {
                        local_pCreateInfos[idx0].pStages[idx1].module =
                            (VkShaderModule)my_device_data
                                ->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].pStages[idx1].module)];
                    }
                }
            }
            if (pCreateInfos[idx0].renderPass) {
                local_pCreateInfos[idx0].renderPass =
                    (VkRenderPass)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].renderPass)];
            }
        }
    }
    if (pipelineCache) {
        // pipelineCache is passed by value, so it can be unwrapped in place.
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = (VkPipelineCache)my_device_data->unique_id_mapping[reinterpret_cast<uint64_t &>(pipelineCache)];
    }

    VkResult result = my_device_data->device_dispatch_table->CreateGraphicsPipelines(
        device, pipelineCache, createInfoCount, (const VkGraphicsPipelineCreateInfo *)local_pCreateInfos, pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    if (VK_SUCCESS == result) {
        // Wrap each new pipeline handle with a layer-unique id.
        uint64_t unique_id = 0;
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            unique_id = global_unique_id++;
            my_device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(pPipelines[i]);
            pPipelines[i] = reinterpret_cast<VkPipeline &>(unique_id);
        }
    }
    return result;
}
| 495 | |
| 496 | VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance, |
| 497 | const VkDebugReportCallbackCreateInfoEXT *pCreateInfo, |
| 498 | const VkAllocationCallbacks *pAllocator, |
| 499 | VkDebugReportCallbackEXT *pMsgCallback) { |
| 500 | layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); |
| 501 | VkResult result = |
| 502 | instance_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback); |
| 503 | |
| 504 | if (VK_SUCCESS == result) { |
| 505 | result = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback); |
| 506 | } |
| 507 | return result; |
| 508 | } |
| 509 | |
| 510 | VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT callback, |
| 511 | const VkAllocationCallbacks *pAllocator) { |
| 512 | layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); |
| 513 | instance_data->instance_dispatch_table->DestroyDebugReportCallbackEXT(instance, callback, pAllocator); |
| 514 | layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator); |
| 515 | } |
| 516 | |
| 517 | VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, |
| 518 | VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location, |
| 519 | int32_t msgCode, const char *pLayerPrefix, const char *pMsg) { |
| 520 | layer_data *instance_data = get_my_data_ptr(get_dispatch_key(instance), layer_data_map); |
| 521 | instance_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, |
| 522 | pMsg); |
| 523 | } |
| 524 | |
| 525 | VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, |
| 526 | const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) { |
| 527 | layer_data *my_map_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map); |
| 528 | safe_VkSwapchainCreateInfoKHR *local_pCreateInfo = NULL; |
| 529 | if (pCreateInfo) { |
| 530 | std::lock_guard<std::mutex> lock(global_lock); |
| 531 | local_pCreateInfo = new safe_VkSwapchainCreateInfoKHR(pCreateInfo); |
| 532 | local_pCreateInfo->oldSwapchain = |
| 533 | (VkSwapchainKHR)my_map_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfo->oldSwapchain)]; |
| 534 | // Need to pull surface mapping from the instance-level map |
| 535 | layer_data *instance_data = get_my_data_ptr(get_dispatch_key(my_map_data->gpu), layer_data_map); |
| 536 | local_pCreateInfo->surface = |
| 537 | (VkSurfaceKHR)instance_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfo->surface)]; |
| 538 | } |
| 539 | |
| 540 | VkResult result = my_map_data->device_dispatch_table->CreateSwapchainKHR( |
| 541 | device, (const VkSwapchainCreateInfoKHR *)local_pCreateInfo, pAllocator, pSwapchain); |
| 542 | if (local_pCreateInfo) { |
| 543 | delete local_pCreateInfo; |
| 544 | } |
| 545 | if (VK_SUCCESS == result) { |
| 546 | std::lock_guard<std::mutex> lock(global_lock); |
| 547 | uint64_t unique_id = global_unique_id++; |
| 548 | my_map_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(*pSwapchain); |
| 549 | *pSwapchain = reinterpret_cast<VkSwapchainKHR &>(unique_id); |
| 550 | } |
| 551 | return result; |
| 552 | } |
| 553 | |
// Layer intercept for vkGetSwapchainImagesKHR.
//
// Unwraps the swapchain handle before forwarding, then wraps each returned
// swapchain image with a fresh unique id.
VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages) {
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    if (VK_NULL_HANDLE != swapchain) {
        // swapchain is passed by value, so it can be unwrapped in place.
        std::lock_guard<std::mutex> lock(global_lock);
        swapchain = (VkSwapchainKHR)my_device_data->unique_id_mapping[reinterpret_cast<uint64_t &>(swapchain)];
    }
    VkResult result =
        my_device_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    // TODO : Need to add corresponding code to delete these images
    if (VK_SUCCESS == result) {
        // Only wrap on the "fill" call (pSwapchainImages non-null); the count-query
        // call returns no handles to wrap.
        if ((*pSwapchainImageCount > 0) && pSwapchainImages) {
            uint64_t unique_id = 0;
            std::lock_guard<std::mutex> lock(global_lock);
            for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
                unique_id = global_unique_id++;
                my_device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(pSwapchainImages[i]);
                pSwapchainImages[i] = reinterpret_cast<VkImage &>(unique_id);
            }
        }
    }
    return result;
}
| 577 | |
| 578 | #ifndef __ANDROID__ |
| 579 | VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, |
| 580 | VkDisplayPropertiesKHR *pProperties) { |
| 581 | layer_data *my_map_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map); |
| 582 | safe_VkDisplayPropertiesKHR *local_pProperties = NULL; |
| 583 | { |
| 584 | std::lock_guard<std::mutex> lock(global_lock); |
| 585 | if (pProperties) { |
| 586 | local_pProperties = new safe_VkDisplayPropertiesKHR[*pPropertyCount]; |
| 587 | for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
| 588 | local_pProperties[idx0].initialize(&pProperties[idx0]); |
| 589 | if (pProperties[idx0].display) { |
| 590 | local_pProperties[idx0].display = |
| 591 | (VkDisplayKHR)my_map_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pProperties[idx0].display)]; |
| 592 | } |
| 593 | } |
| 594 | } |
| 595 | } |
| 596 | |
| 597 | VkResult result = my_map_data->instance_dispatch_table->GetPhysicalDeviceDisplayPropertiesKHR( |
| 598 | physicalDevice, pPropertyCount, (VkDisplayPropertiesKHR *)local_pProperties); |
| 599 | if (result == VK_SUCCESS && pProperties) { |
| 600 | for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
| 601 | std::lock_guard<std::mutex> lock(global_lock); |
| 602 | |
| 603 | uint64_t unique_id = global_unique_id++; |
| 604 | my_map_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(local_pProperties[idx0].display); |
| 605 | pProperties[idx0].display = reinterpret_cast<VkDisplayKHR &>(unique_id); |
| 606 | pProperties[idx0].displayName = local_pProperties[idx0].displayName; |
| 607 | pProperties[idx0].physicalDimensions = local_pProperties[idx0].physicalDimensions; |
| 608 | pProperties[idx0].physicalResolution = local_pProperties[idx0].physicalResolution; |
| 609 | pProperties[idx0].supportedTransforms = local_pProperties[idx0].supportedTransforms; |
| 610 | pProperties[idx0].planeReorderPossible = local_pProperties[idx0].planeReorderPossible; |
| 611 | pProperties[idx0].persistentContent = local_pProperties[idx0].persistentContent; |
| 612 | } |
| 613 | } |
| 614 | if (local_pProperties) { |
| 615 | delete[] local_pProperties; |
| 616 | } |
| 617 | return result; |
| 618 | } |
| 619 | |
| 620 | VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, |
| 621 | uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) { |
| 622 | layer_data *my_map_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map); |
| 623 | VkResult result = my_map_data->instance_dispatch_table->GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, |
| 624 | pDisplayCount, pDisplays); |
| 625 | if (VK_SUCCESS == result) { |
| 626 | if ((*pDisplayCount > 0) && pDisplays) { |
| 627 | std::lock_guard<std::mutex> lock(global_lock); |
| 628 | for (uint32_t i = 0; i < *pDisplayCount; i++) { |
| 629 | auto it = my_map_data->unique_id_mapping.find(reinterpret_cast<const uint64_t &>(pDisplays[i])); |
| 630 | assert(it != my_map_data->unique_id_mapping.end()); |
| 631 | pDisplays[i] = reinterpret_cast<VkDisplayKHR &>(it->second); |
| 632 | } |
| 633 | } |
| 634 | } |
| 635 | return result; |
| 636 | } |
| 637 | |
| 638 | VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, |
| 639 | uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) { |
| 640 | layer_data *my_map_data = get_my_data_ptr(get_dispatch_key(physicalDevice), layer_data_map); |
| 641 | safe_VkDisplayModePropertiesKHR *local_pProperties = NULL; |
| 642 | { |
| 643 | std::lock_guard<std::mutex> lock(global_lock); |
| 644 | display = (VkDisplayKHR)my_map_data->unique_id_mapping[reinterpret_cast<uint64_t &>(display)]; |
| 645 | if (pProperties) { |
| 646 | local_pProperties = new safe_VkDisplayModePropertiesKHR[*pPropertyCount]; |
| 647 | for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
| 648 | local_pProperties[idx0].initialize(&pProperties[idx0]); |
| 649 | } |
| 650 | } |
| 651 | } |
| 652 | |
| 653 | VkResult result = my_map_data->instance_dispatch_table->GetDisplayModePropertiesKHR( |
| 654 | physicalDevice, display, pPropertyCount, (VkDisplayModePropertiesKHR *)local_pProperties); |
| 655 | if (result == VK_SUCCESS && pProperties) { |
| 656 | for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
| 657 | std::lock_guard<std::mutex> lock(global_lock); |
| 658 | |
| 659 | uint64_t unique_id = global_unique_id++; |
| 660 | my_map_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(local_pProperties[idx0].displayMode); |
| 661 | pProperties[idx0].displayMode = reinterpret_cast<VkDisplayModeKHR &>(unique_id); |
| 662 | pProperties[idx0].parameters.visibleRegion.width = local_pProperties[idx0].parameters.visibleRegion.width; |
| 663 | pProperties[idx0].parameters.visibleRegion.height = local_pProperties[idx0].parameters.visibleRegion.height; |
| 664 | pProperties[idx0].parameters.refreshRate = local_pProperties[idx0].parameters.refreshRate; |
| 665 | } |
| 666 | } |
| 667 | if (local_pProperties) { |
| 668 | delete[] local_pProperties; |
| 669 | } |
| 670 | return result; |
| 671 | } |
| 672 | #endif |
| 673 | |
| 674 | } // namespace unique_objects |
| 675 | |
| 676 | // vk_layer_logging.h expects these to be defined |
| 677 | VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(VkInstance instance, |
| 678 | const VkDebugReportCallbackCreateInfoEXT *pCreateInfo, |
| 679 | const VkAllocationCallbacks *pAllocator, |
| 680 | VkDebugReportCallbackEXT *pMsgCallback) { |
| 681 | return unique_objects::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback); |
| 682 | } |
| 683 | |
| 684 | VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback, |
| 685 | const VkAllocationCallbacks *pAllocator) { |
| 686 | unique_objects::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator); |
| 687 | } |
| 688 | |
| 689 | VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags, |
| 690 | VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location, |
| 691 | int32_t msgCode, const char *pLayerPrefix, const char *pMsg) { |
| 692 | unique_objects::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg); |
| 693 | } |
| 694 | |
| 695 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, |
| 696 | VkExtensionProperties *pProperties) { |
| 697 | return unique_objects::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties); |
| 698 | } |
| 699 | |
| 700 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount, |
| 701 | VkLayerProperties *pProperties) { |
| 702 | return unique_objects::EnumerateInstanceLayerProperties(pCount, pProperties); |
| 703 | } |
| 704 | |
| 705 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, |
| 706 | VkLayerProperties *pProperties) { |
| 707 | assert(physicalDevice == VK_NULL_HANDLE); |
| 708 | return unique_objects::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties); |
| 709 | } |
| 710 | |
| 711 | VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) { |
| 712 | return unique_objects::GetDeviceProcAddr(dev, funcName); |
| 713 | } |
| 714 | |
| 715 | VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) { |
| 716 | return unique_objects::GetInstanceProcAddr(instance, funcName); |
| 717 | } |
| 718 | |
| 719 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, |
| 720 | const char *pLayerName, uint32_t *pCount, |
| 721 | VkExtensionProperties *pProperties) { |
| 722 | assert(physicalDevice == VK_NULL_HANDLE); |
| 723 | return unique_objects::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties); |
| 724 | } |