/*
 * Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (c) 2015-2016 Google, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 */

#define NOMINMAX

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unordered_map>
#include <vector>
#include <list>
#include <memory>
#include <algorithm>

// For Windows, this #include must come before other Vk headers.
#include "vk_loader_platform.h"

#include "unique_objects.h"
#include "vk_dispatch_table_helper.h"
#include "vk_layer_config.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_logging.h"
#include "vk_layer_table.h"
#include "vk_layer_utils.h"
#include "vk_enum_string_helper.h"
#include "vk_validation_error_messages.h"
#include "vk_object_types.h"
#include "vulkan/vk_layer.h"

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

#include "unique_objects_wrappers.h"

namespace unique_objects {

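// The unique_objects layer aliases non-dispatchable Vulkan handles: each handle is wrapped with a
// unique 64-bit ID when it is created and unwrapped (mapped back to the driver's handle) before
// being passed down the dispatch chain. Most of that wrapping code is generated into
// unique_objects_wrappers.h; the functions below are the hand-written cases that need extra handling.
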
static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static void initUniqueObjects(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "google_unique_objects");
}

// TODO: Codegen the debug helper functions into the proc map.

// Handle CreateInstance Extensions
static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        // Check for recognized instance extensions
        if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kUniqueObjectsSupportedInstanceExtensions)) {
            log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                    VALIDATION_ERROR_UNDEFINED, "UniqueObjects",
                    "Instance Extension %s is not supported by this layer. Using this extension may adversely affect "
                    "validation results and/or produce undefined behavior.",
                    pCreateInfo->ppEnabledExtensionNames[i]);
        }
    }
}

// Handle CreateDevice Extensions
static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        // Check for recognized device extensions
        if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kUniqueObjectsSupportedDeviceExtensions)) {
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                    VALIDATION_ERROR_UNDEFINED, "UniqueObjects",
                    "Device Extension %s is not supported by this layer. Using this extension may adversely affect "
                    "validation results and/or produce undefined behavior.",
                    pCreateInfo->ppEnabledExtensionNames[i]);
        }
    }
}

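// CreateInstance: call down the chain to create the instance, then build this layer's instance
// dispatch table, debug report state, and layer settings. Temporary debug callbacks supplied via
// pCreateInfo->pNext are enabled only for the duration of instance creation.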
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data =
        debug_report_create_instance(&instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
                                     pCreateInfo->ppEnabledExtensionNames);

    // Set up temporary debug callbacks to output messages at CreateInstance-time
    if (!layer_copy_tmp_callbacks(pCreateInfo->pNext, &instance_data->num_tmp_callbacks, &instance_data->tmp_dbg_create_infos,
                                  &instance_data->tmp_callbacks)) {
        if (instance_data->num_tmp_callbacks > 0) {
            if (layer_enable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks,
                                           instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks)) {
                layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks);
                instance_data->num_tmp_callbacks = 0;
            }
        }
    }

    initUniqueObjects(instance_data, pAllocator);

    // Disable and free tmp callbacks, no longer necessary
    if (instance_data->num_tmp_callbacks > 0) {
        layer_disable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks, instance_data->tmp_callbacks);
        layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks);
        instance_data->num_tmp_callbacks = 0;
    }

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(instance);
    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
    VkLayerInstanceDispatchTable *disp_table = &instance_data->dispatch_table;
    disp_table->DestroyInstance(instance, pAllocator);

    // Clean up logging callback, if any
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(instance_data->report_data);
    FreeLayerDataPtr(key, instance_layer_data_map);
}

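// CreateDevice: call down the chain to create the device, build the device dispatch table, and link
// the new device's layer_data back to its owning instance so instance-level objects (e.g. surfaces)
// can be unwrapped from device-level calls.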
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    instance_layer_data *my_instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);

    // Setup layer's device dispatch table
    layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);

    createDeviceRegisterExtensions(pCreateInfo, *pDevice);

    // Set instance_data for this device in order to get at any objects mapped at instance level
    my_device_data->instance_data = my_instance_data;

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);

    layer_debug_report_destroy_device(device);
    dev_data->dispatch_table.DestroyDevice(device, pAllocator);

    FreeLayerDataPtr(key, layer_data_map);
}

static const VkLayerProperties globalLayerProps = {"VK_LAYER_GOOGLE_unique_objects",
                                                   VK_LAYER_API_VERSION,  // specVersion
                                                   1,                     // implementationVersion
                                                   "Google Validation Layer"};

// Declare prototype for this function
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(0, nullptr, pCount, pProperties);

    assert(physicalDevice);

    dispatch_key key = get_dispatch_key(physicalDevice);
    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const auto &table = device_data->dispatch_table;
    if (!table.GetDeviceProcAddr) return nullptr;
    return table.GetDeviceProcAddr(device, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    const auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr) return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    VkLayerInstanceDispatchTable *disp_table = &instance_data->dispatch_table;
    if (disp_table->GetPhysicalDeviceProcAddr == NULL) {
        return NULL;
    }
    return disp_table->GetPhysicalDeviceProcAddr(instance, funcName);
}

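// Pipeline creation: deep-copy the create infos into safe_* structs, unwrap every handle they
// reference (base pipeline, pipeline layout, shader modules), call down the chain, then wrap the
// returned pipeline handles.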
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkComputePipelineCreateInfo *local_pCreateInfos = NULL;
    if (pCreateInfos) {
        std::lock_guard<std::mutex> lock(global_lock);
        local_pCreateInfos = new safe_VkComputePipelineCreateInfo[createInfoCount];
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle = Unwrap(device_data, pCreateInfos[idx0].basePipelineHandle);
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout = Unwrap(device_data, pCreateInfos[idx0].layout);
            }
            if (pCreateInfos[idx0].stage.module) {
                local_pCreateInfos[idx0].stage.module = Unwrap(device_data, pCreateInfos[idx0].stage.module);
            }
        }
    }
    if (pipelineCache) {
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = Unwrap(device_data, pipelineCache);
    }

    VkResult result = device_data->dispatch_table.CreateComputePipelines(device, pipelineCache, createInfoCount,
                                                                         local_pCreateInfos->ptr(), pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            if (pPipelines[i] != VK_NULL_HANDLE) {
                pPipelines[i] = WrapNew(device_data, pPipelines[i]);
            }
        }
    }
    return result;
}

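// Graphics pipelines follow the same pattern as compute pipelines, additionally unwrapping each
// shader stage's module and the render pass referenced by the create info.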
VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = nullptr;
    if (pCreateInfos) {
        local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount];
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle = Unwrap(device_data, pCreateInfos[idx0].basePipelineHandle);
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout = Unwrap(device_data, pCreateInfos[idx0].layout);
            }
            if (pCreateInfos[idx0].pStages) {
                for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
                    if (pCreateInfos[idx0].pStages[idx1].module) {
                        local_pCreateInfos[idx0].pStages[idx1].module = Unwrap(device_data, pCreateInfos[idx0].pStages[idx1].module);
                    }
                }
            }
            if (pCreateInfos[idx0].renderPass) {
                local_pCreateInfos[idx0].renderPass = Unwrap(device_data, pCreateInfos[idx0].renderPass);
            }
        }
    }
    if (pipelineCache) {
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = Unwrap(device_data, pipelineCache);
    }

    VkResult result = device_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, createInfoCount,
                                                                          local_pCreateInfos->ptr(), pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            if (pPipelines[i] != VK_NULL_HANDLE) {
                pPipelines[i] = WrapNew(device_data, pPipelines[i]);
            }
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
    layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkSwapchainCreateInfoKHR *local_pCreateInfo = NULL;
    if (pCreateInfo) {
        std::lock_guard<std::mutex> lock(global_lock);
        local_pCreateInfo = new safe_VkSwapchainCreateInfoKHR(pCreateInfo);
        local_pCreateInfo->oldSwapchain = Unwrap(my_map_data, pCreateInfo->oldSwapchain);
        // Surface is instance-level object
        local_pCreateInfo->surface = Unwrap(my_map_data->instance_data, pCreateInfo->surface);
    }

    VkResult result = my_map_data->dispatch_table.CreateSwapchainKHR(device, local_pCreateInfo->ptr(), pAllocator, pSwapchain);
    if (local_pCreateInfo) {
        delete local_pCreateInfo;
    }
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        *pSwapchain = WrapNew(my_map_data, *pSwapchain);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkSwapchainCreateInfoKHR *local_pCreateInfos = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pCreateInfos) {
            local_pCreateInfos = new safe_VkSwapchainCreateInfoKHR[swapchainCount];
            for (uint32_t i = 0; i < swapchainCount; ++i) {
                local_pCreateInfos[i].initialize(&pCreateInfos[i]);
                if (pCreateInfos[i].surface) {
                    // Surface is instance-level object
                    local_pCreateInfos[i].surface = Unwrap(dev_data->instance_data, pCreateInfos[i].surface);
                }
                if (pCreateInfos[i].oldSwapchain) {
                    local_pCreateInfos[i].oldSwapchain = Unwrap(dev_data, pCreateInfos[i].oldSwapchain);
                }
            }
        }
    }
    VkResult result = dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, local_pCreateInfos->ptr(),
                                                                         pAllocator, pSwapchains);
    if (local_pCreateInfos) delete[] local_pCreateInfos;
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < swapchainCount; i++) {
            pSwapchains[i] = WrapNew(dev_data, pSwapchains[i]);
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages) {
    layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_NULL_HANDLE != swapchain) {
        std::lock_guard<std::mutex> lock(global_lock);
        swapchain = Unwrap(my_device_data, swapchain);
    }
    VkResult result =
        my_device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    // TODO : Need to add corresponding code to delete these images
    if (VK_SUCCESS == result) {
        if ((*pSwapchainImageCount > 0) && pSwapchainImages) {
            std::lock_guard<std::mutex> lock(global_lock);
            for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
                pSwapchainImages[i] = WrapNew(my_device_data, pSwapchainImages[i]);
            }
        }
    }
    return result;
}

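// QueuePresentKHR: unwrap the wait semaphores and swapchains embedded in VkPresentInfoKHR before
// calling down the chain, and copy the per-swapchain pResults back out afterwards.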
VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    safe_VkPresentInfoKHR *local_pPresentInfo = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pPresentInfo) {
            local_pPresentInfo = new safe_VkPresentInfoKHR(pPresentInfo);
            if (local_pPresentInfo->pWaitSemaphores) {
                for (uint32_t index1 = 0; index1 < local_pPresentInfo->waitSemaphoreCount; ++index1) {
                    local_pPresentInfo->pWaitSemaphores[index1] = Unwrap(dev_data, pPresentInfo->pWaitSemaphores[index1]);
                }
            }
            if (local_pPresentInfo->pSwapchains) {
                for (uint32_t index1 = 0; index1 < local_pPresentInfo->swapchainCount; ++index1) {
                    local_pPresentInfo->pSwapchains[index1] = Unwrap(dev_data, pPresentInfo->pSwapchains[index1]);
                }
            }
        }
    }
    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, local_pPresentInfo->ptr());

    // pResults is an output array embedded in a structure. The code generator neglects to copy back from the safe_* version,
    // so handle it as a special case here:
    if (pPresentInfo && pPresentInfo->pResults) {
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
            pPresentInfo->pResults[i] = local_pPresentInfo->pResults[i];
        }
    }

    if (local_pPresentInfo) delete local_pPresentInfo;
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator,
                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pCreateInfo) {
            local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo);
            if (pCreateInfo->descriptorSetLayout) {
                local_create_info->descriptorSetLayout = Unwrap(dev_data, pCreateInfo->descriptorSetLayout);
            }
            if (pCreateInfo->pipelineLayout) {
                local_create_info->pipelineLayout = Unwrap(dev_data, pCreateInfo->pipelineLayout);
            }
        }
    }
    VkResult result = dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, local_create_info->ptr(), pAllocator,
                                                                                 pDescriptorUpdateTemplate);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        *pDescriptorUpdateTemplate = WrapNew(dev_data, *pDescriptorUpdateTemplate);

        // Shadow template createInfo for later updates
        std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
        dev_data->desc_template_map[(uint64_t)*pDescriptorUpdateTemplate] = std::move(template_state);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    uint64_t descriptor_update_template_id = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    dev_data->desc_template_map.erase(descriptor_update_template_id);
    descriptorUpdateTemplate = (VkDescriptorUpdateTemplateKHR)dev_data->unique_id_mapping[descriptor_update_template_id];
    dev_data->unique_id_mapping.erase(descriptor_update_template_id);
    lock.unlock();
    dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
}

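// Build a copy of a descriptor-update-template data blob in which every embedded handle
// (VkDescriptorImageInfo, VkDescriptorBufferInfo, VkBufferView) has been unwrapped, using the
// create info shadowed at CreateDescriptorUpdateTemplateKHR time. The caller must free() the
// returned buffer after passing it down the chain.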
void *BuildUnwrappedUpdateTemplateBuffer(layer_data *dev_data, uint64_t descriptorUpdateTemplate, const void *pData) {
    auto const template_map_entry = dev_data->desc_template_map.find(descriptorUpdateTemplate);
    if (template_map_entry == dev_data->desc_template_map.end()) {
        assert(0);
    }
    auto const &create_info = template_map_entry->second->create_info;
    size_t allocation_size = 0;
    std::vector<std::tuple<size_t, VulkanObjectType, void *>> template_entries;

    for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) {
        for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) {
            size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride;
            char *update_entry = (char *)(pData) + offset;

            switch (create_info.pDescriptorUpdateEntries[i].descriptorType) {
                case VK_DESCRIPTOR_TYPE_SAMPLER:
                case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
                    auto image_entry = reinterpret_cast<VkDescriptorImageInfo *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorImageInfo));

                    VkDescriptorImageInfo *wrapped_entry = new VkDescriptorImageInfo(*image_entry);
                    wrapped_entry->sampler = Unwrap(dev_data, image_entry->sampler);
                    wrapped_entry->imageView = Unwrap(dev_data, image_entry->imageView);
                    template_entries.emplace_back(offset, kVulkanObjectTypeImage, reinterpret_cast<void *>(wrapped_entry));
                } break;

                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                    auto buffer_entry = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorBufferInfo));

                    VkDescriptorBufferInfo *wrapped_entry = new VkDescriptorBufferInfo(*buffer_entry);
                    wrapped_entry->buffer = Unwrap(dev_data, buffer_entry->buffer);
                    template_entries.emplace_back(offset, kVulkanObjectTypeBuffer, reinterpret_cast<void *>(wrapped_entry));
                } break;

                case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
                    auto buffer_view_handle = reinterpret_cast<VkBufferView *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkBufferView));

                    VkBufferView wrapped_entry = Unwrap(dev_data, *buffer_view_handle);
                    template_entries.emplace_back(offset, kVulkanObjectTypeBufferView, reinterpret_cast<void *>(wrapped_entry));
                } break;
                default:
                    assert(0);
                    break;
            }
        }
    }
    // Allocate required buffer size and populate with source/unwrapped data
    void *unwrapped_data = malloc(allocation_size);
    for (auto &this_entry : template_entries) {
        VulkanObjectType type = std::get<1>(this_entry);
        void *destination = (char *)unwrapped_data + std::get<0>(this_entry);
        void *source = (char *)std::get<2>(this_entry);

        switch (type) {
            case kVulkanObjectTypeImage:
                *(reinterpret_cast<VkDescriptorImageInfo *>(destination)) = *(reinterpret_cast<VkDescriptorImageInfo *>(source));
                delete reinterpret_cast<VkDescriptorImageInfo *>(source);
                break;
            case kVulkanObjectTypeBuffer:
                *(reinterpret_cast<VkDescriptorBufferInfo *>(destination)) = *(reinterpret_cast<VkDescriptorBufferInfo *>(source));
                delete reinterpret_cast<VkDescriptorBufferInfo *>(source);
                break;
            case kVulkanObjectTypeBufferView:
                *(reinterpret_cast<VkBufferView *>(destination)) = reinterpret_cast<VkBufferView>(source);
                break;
            default:
                assert(0);
                break;
        }
    }
    return (void *)unwrapped_data;
}

VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        descriptorSet = Unwrap(dev_data, descriptorSet);
        descriptorUpdateTemplate = (VkDescriptorUpdateTemplateKHR)dev_data->unique_id_mapping[template_handle];
    }
    void *unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(dev_data, template_handle, pData);
    dev_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate,
                                                                unwrapped_buffer);
    free(unwrapped_buffer);
}

VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        descriptorUpdateTemplate = Unwrap(dev_data, descriptorUpdateTemplate);
        layout = Unwrap(dev_data, layout);
    }
    void *unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(dev_data, template_handle, pData);
    dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set,
                                                                 unwrapped_buffer);
    free(unwrapped_buffer);
}

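// VK_KHR_display handles (VkDisplayKHR, VkDisplayModeKHR) are instance-level objects and are
// wrapped/unwrapped through the instance_layer_data maps; these entry points are not built for Android.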
#ifndef __ANDROID__
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                     VkDisplayPropertiesKHR *pProperties) {
    instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);

    VkResult result =
        my_map_data->dispatch_table.GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties);
    if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
            pProperties[idx0].display = WrapNew(my_map_data, pProperties[idx0].display);
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
    instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    VkResult result = my_map_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount,
                                                                                      pDisplays);
    if (VK_SUCCESS == result) {
        if ((*pDisplayCount > 0) && pDisplays) {
            std::lock_guard<std::mutex> lock(global_lock);
            for (uint32_t i = 0; i < *pDisplayCount; i++) {
                // TODO: this looks like it really wants a /reverse/ mapping. What's going on here?
                auto it = my_map_data->unique_id_mapping.find(reinterpret_cast<const uint64_t &>(pDisplays[i]));
                assert(it != my_map_data->unique_id_mapping.end());
                pDisplays[i] = reinterpret_cast<VkDisplayKHR &>(it->second);
            }
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
                                                           uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) {
    instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        display = Unwrap(my_map_data, display);
    }

    VkResult result =
        my_map_data->dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties);
    if (result == VK_SUCCESS && pProperties) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
            pProperties[idx0].displayMode = WrapNew(my_map_data, pProperties[idx0].displayMode);
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
                                                              uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
    instance_layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        mode = Unwrap(dev_data, mode);
    }
    VkResult result =
        dev_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
    return result;
}
#endif

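// The debug marker object tag/name structures carry a raw 64-bit object handle, which may be one of
// this layer's unique IDs; map it back to the driver's handle before calling down the chain.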
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, VkDebugMarkerObjectTagInfoEXT *pTagInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto local_tag_info = new safe_VkDebugMarkerObjectTagInfoEXT(pTagInfo);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        auto it = device_data->unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_tag_info->object));
        if (it != device_data->unique_id_mapping.end()) {
            local_tag_info->object = it->second;
        }
    }
    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT(
        device, reinterpret_cast<VkDebugMarkerObjectTagInfoEXT *>(local_tag_info));
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto local_name_info = new safe_VkDebugMarkerObjectNameInfoEXT(pNameInfo);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        auto it = device_data->unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_name_info->object));
        if (it != device_data->unique_id_mapping.end()) {
            local_name_info->object = it->second;
        }
    }
    VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT(
        device, reinterpret_cast<VkDebugMarkerObjectNameInfoEXT *>(local_name_info));
    return result;
}

}  // namespace unique_objects

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return unique_objects::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return unique_objects::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    assert(physicalDevice == VK_NULL_HANDLE);
    return unique_objects::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return unique_objects::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return unique_objects::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    assert(physicalDevice == VK_NULL_HANDLE);
    return unique_objects::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return unique_objects::GetPhysicalDeviceProcAddr(instance, funcName);
}

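// Loader/layer interface negotiation: report this layer's entry points and settle on the lowest
// loader/layer interface version supported by both sides.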
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if our version is at least capable of having the structure contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        unique_objects::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}