Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (c) 2015-2016 The Khronos Group Inc. |
| 3 | * Copyright (c) 2015-2016 Valve Corporation |
| 4 | * Copyright (c) 2015-2016 LunarG, Inc. |
| 5 | * Copyright (c) 2015-2016 Google, Inc. |
| 6 | * |
| 7 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 8 | * you may not use this file except in compliance with the License. |
| 9 | * You may obtain a copy of the License at |
| 10 | * |
| 11 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 12 | * |
| 13 | * Unless required by applicable law or agreed to in writing, software |
| 14 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 15 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 16 | * See the License for the specific language governing permissions and |
| 17 | * limitations under the License. |
| 18 | * |
| 19 | * Author: Tobin Ehlis <tobine@google.com> |
| 20 | * Author: Mark Lobodzinski <mark@lunarg.com> |
| 21 | */ |
| 22 | |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 23 | #define NOMINMAX |
| 24 | |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 25 | #include <stdio.h> |
| 26 | #include <stdlib.h> |
| 27 | #include <string.h> |
| 28 | #include <unordered_map> |
| 29 | #include <vector> |
| 30 | #include <list> |
| 31 | #include <memory> |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 32 | #include <algorithm> |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 33 | |
Mike Weiblen | 6a27de5 | 2016-12-09 17:36:28 -0700 | [diff] [blame] | 34 | // For Windows, this #include must come before other Vk headers. |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 35 | #include "vk_loader_platform.h" |
Mike Weiblen | 6a27de5 | 2016-12-09 17:36:28 -0700 | [diff] [blame] | 36 | |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 37 | #include "unique_objects.h" |
| 38 | #include "vk_dispatch_table_helper.h" |
Mike Weiblen | 6a27de5 | 2016-12-09 17:36:28 -0700 | [diff] [blame] | 39 | #include "vk_layer_config.h" |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 40 | #include "vk_layer_data.h" |
Mike Weiblen | 6a27de5 | 2016-12-09 17:36:28 -0700 | [diff] [blame] | 41 | #include "vk_layer_extension_utils.h" |
| 42 | #include "vk_layer_logging.h" |
| 43 | #include "vk_layer_table.h" |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 44 | #include "vk_layer_utils.h" |
Mike Weiblen | 6a27de5 | 2016-12-09 17:36:28 -0700 | [diff] [blame] | 45 | #include "vk_layer_utils.h" |
Mark Lobodzinski | 9acd2e3 | 2016-12-21 15:22:39 -0700 | [diff] [blame] | 46 | #include "vk_enum_string_helper.h" |
Mike Weiblen | 6a27de5 | 2016-12-09 17:36:28 -0700 | [diff] [blame] | 47 | #include "vk_validation_error_messages.h" |
Mark Lobodzinski | 2d9de65 | 2017-04-24 08:58:52 -0600 | [diff] [blame] | 48 | #include "vk_object_types.h" |
Mark Lobodzinski | 75a4631 | 2018-01-03 11:23:55 -0700 | [diff] [blame] | 49 | #include "vk_extension_helper.h" |
Mike Weiblen | 6a27de5 | 2016-12-09 17:36:28 -0700 | [diff] [blame] | 50 | #include "vulkan/vk_layer.h" |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 51 | |
Mike Stroyan | b985fca | 2016-11-01 11:50:16 -0600 | [diff] [blame] | 52 | // This intentionally includes a cpp file |
| 53 | #include "vk_safe_struct.cpp" |
| 54 | |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 55 | #include "unique_objects_wrappers.h" |
| 56 | |
| 57 | namespace unique_objects { |
| 58 | |
// Loader/layer negotiation: start at the newest interface version this layer supports;
// the loader may negotiate this value down in vkNegotiateLoaderLayerInterfaceVersion.
static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
| 60 | |
// One-time layer setup: wire up debug-report actions (log destinations/callbacks) for this
// instance, keyed by the "google_unique_objects" settings prefix in the layer settings file.
static void initUniqueObjects(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "google_unique_objects");
}
| 64 | |
Mark Lobodzinski | c014147 | 2017-06-09 09:51:34 -0600 | [diff] [blame] | 65 | // Check enabled instance extensions against supported instance extension whitelist |
| 66 | static void InstanceExtensionWhitelist(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) { |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 67 | instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 68 | |
Mark Lobodzinski | 38686e9 | 2017-06-07 16:04:50 -0600 | [diff] [blame] | 69 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 70 | // Check for recognized instance extensions |
Mark Lobodzinski | 75a4631 | 2018-01-03 11:23:55 -0700 | [diff] [blame] | 71 | if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kInstanceExtensionNames)) { |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 72 | log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, |
Mike Weiblen | 6a27de5 | 2016-12-09 17:36:28 -0700 | [diff] [blame] | 73 | VALIDATION_ERROR_UNDEFINED, "UniqueObjects", |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 74 | "Instance Extension %s is not supported by this layer. Using this extension may adversely affect validation " |
| 75 | "results and/or produce undefined behavior.", |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 76 | pCreateInfo->ppEnabledExtensionNames[i]); |
| 77 | } |
| 78 | } |
| 79 | } |
| 80 | |
Mark Lobodzinski | c014147 | 2017-06-09 09:51:34 -0600 | [diff] [blame] | 81 | // Check enabled device extensions against supported device extension whitelist |
| 82 | static void DeviceExtensionWhitelist(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) { |
Tobin Ehlis | 8d6acde | 2017-02-08 07:40:40 -0700 | [diff] [blame] | 83 | layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 84 | |
| 85 | for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) { |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 86 | // Check for recognized device extensions |
Mark Lobodzinski | 75a4631 | 2018-01-03 11:23:55 -0700 | [diff] [blame] | 87 | if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kDeviceExtensionNames)) { |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 88 | log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__, |
Mike Weiblen | 6a27de5 | 2016-12-09 17:36:28 -0700 | [diff] [blame] | 89 | VALIDATION_ERROR_UNDEFINED, "UniqueObjects", |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 90 | "Device Extension %s is not supported by this layer. Using this extension may adversely affect validation " |
| 91 | "results and/or produce undefined behavior.", |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 92 | pCreateInfo->ppEnabledExtensionNames[i]); |
| 93 | } |
| 94 | } |
| 95 | } |
| 96 | |
| 97 | VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator, |
| 98 | VkInstance *pInstance) { |
| 99 | VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); |
| 100 | |
| 101 | assert(chain_info->u.pLayerInfo); |
| 102 | PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; |
| 103 | PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance"); |
| 104 | if (fpCreateInstance == NULL) { |
| 105 | return VK_ERROR_INITIALIZATION_FAILED; |
| 106 | } |
| 107 | |
| 108 | // Advance the link info for the next element on the chain |
| 109 | chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; |
| 110 | |
| 111 | VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance); |
| 112 | if (result != VK_SUCCESS) { |
| 113 | return result; |
| 114 | } |
| 115 | |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 116 | instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 117 | instance_data->instance = *pInstance; |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 118 | layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 119 | |
| 120 | instance_data->instance = *pInstance; |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 121 | instance_data->report_data = debug_report_create_instance( |
| 122 | &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 123 | |
| 124 | // Set up temporary debug callbacks to output messages at CreateInstance-time |
| 125 | if (!layer_copy_tmp_callbacks(pCreateInfo->pNext, &instance_data->num_tmp_callbacks, &instance_data->tmp_dbg_create_infos, |
| 126 | &instance_data->tmp_callbacks)) { |
| 127 | if (instance_data->num_tmp_callbacks > 0) { |
| 128 | if (layer_enable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks, |
| 129 | instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks)) { |
| 130 | layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks); |
| 131 | instance_data->num_tmp_callbacks = 0; |
| 132 | } |
| 133 | } |
| 134 | } |
| 135 | |
| 136 | initUniqueObjects(instance_data, pAllocator); |
Mark Lobodzinski | c014147 | 2017-06-09 09:51:34 -0600 | [diff] [blame] | 137 | InstanceExtensionWhitelist(pCreateInfo, *pInstance); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 138 | |
| 139 | // Disable and free tmp callbacks, no longer necessary |
| 140 | if (instance_data->num_tmp_callbacks > 0) { |
| 141 | layer_disable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks, instance_data->tmp_callbacks); |
| 142 | layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks); |
| 143 | instance_data->num_tmp_callbacks = 0; |
| 144 | } |
| 145 | |
| 146 | return result; |
| 147 | } |
| 148 | |
| 149 | VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) { |
| 150 | dispatch_key key = get_dispatch_key(instance); |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 151 | instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map); |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 152 | VkLayerInstanceDispatchTable *disp_table = &instance_data->dispatch_table; |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 153 | disp_table->DestroyInstance(instance, pAllocator); |
| 154 | |
| 155 | // Clean up logging callback, if any |
| 156 | while (instance_data->logging_callback.size() > 0) { |
| 157 | VkDebugReportCallbackEXT callback = instance_data->logging_callback.back(); |
| 158 | layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator); |
| 159 | instance_data->logging_callback.pop_back(); |
| 160 | } |
| 161 | |
| 162 | layer_debug_report_destroy_instance(instance_data->report_data); |
GabrÃel Arthúr Pétursson | 2c5e750 | 2017-06-03 23:27:59 +0000 | [diff] [blame] | 163 | FreeLayerDataPtr(key, instance_layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 164 | } |
| 165 | |
| 166 | VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, |
| 167 | const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) { |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 168 | instance_layer_data *my_instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 169 | VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO); |
| 170 | |
| 171 | assert(chain_info->u.pLayerInfo); |
| 172 | PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr; |
| 173 | PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr; |
| 174 | PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice"); |
| 175 | if (fpCreateDevice == NULL) { |
| 176 | return VK_ERROR_INITIALIZATION_FAILED; |
| 177 | } |
| 178 | |
| 179 | // Advance the link info for the next element on the chain |
| 180 | chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext; |
| 181 | |
| 182 | VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice); |
| 183 | if (result != VK_SUCCESS) { |
| 184 | return result; |
| 185 | } |
| 186 | |
Tobin Ehlis | 8d6acde | 2017-02-08 07:40:40 -0700 | [diff] [blame] | 187 | layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 188 | my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice); |
| 189 | |
| 190 | // Setup layer's device dispatch table |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 191 | layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 192 | |
Mark Lobodzinski | c014147 | 2017-06-09 09:51:34 -0600 | [diff] [blame] | 193 | DeviceExtensionWhitelist(pCreateInfo, *pDevice); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 194 | |
Mark Lobodzinski | c014147 | 2017-06-09 09:51:34 -0600 | [diff] [blame] | 195 | // Set gpu for this device in order to get at any objects mapped at instance level |
Chris Forbes | 7fcfde1 | 2017-05-02 16:54:24 -0700 | [diff] [blame] | 196 | my_device_data->instance_data = my_instance_data; |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 197 | |
| 198 | return result; |
| 199 | } |
| 200 | |
| 201 | VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) { |
| 202 | dispatch_key key = get_dispatch_key(device); |
Tobin Ehlis | 8d6acde | 2017-02-08 07:40:40 -0700 | [diff] [blame] | 203 | layer_data *dev_data = GetLayerDataPtr(key, layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 204 | |
| 205 | layer_debug_report_destroy_device(device); |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 206 | dev_data->dispatch_table.DestroyDevice(device, pAllocator); |
GabrÃel Arthúr Pétursson | 2c5e750 | 2017-06-03 23:27:59 +0000 | [diff] [blame] | 207 | |
| 208 | FreeLayerDataPtr(key, layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 209 | } |
| 210 | |
// Identity this layer reports to the loader via the Enumerate*LayerProperties entry points
static const VkLayerProperties globalLayerProps = {"VK_LAYER_GOOGLE_unique_objects",
                                                   VK_LAYER_API_VERSION,  // specVersion
                                                   1,                     // implementationVersion
                                                   "Google Validation Layer"};

// Forward declaration: defined below, but needed by GetInstanceProcAddr-style dispatch
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);
| 218 | |
// Report exactly one instance layer: this one (globalLayerProps).
VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}
| 222 | |
// Report exactly one device layer: this one (globalLayerProps). physicalDevice is unused.
VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}
| 227 | |
| 228 | VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, |
| 229 | VkExtensionProperties *pProperties) { |
| 230 | if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName)) |
| 231 | return util_GetExtensionProperties(0, NULL, pCount, pProperties); |
| 232 | |
| 233 | return VK_ERROR_LAYER_NOT_PRESENT; |
| 234 | } |
| 235 | |
| 236 | VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName, |
| 237 | uint32_t *pCount, VkExtensionProperties *pProperties) { |
| 238 | if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName)) |
| 239 | return util_GetExtensionProperties(0, nullptr, pCount, pProperties); |
| 240 | |
| 241 | assert(physicalDevice); |
| 242 | |
| 243 | dispatch_key key = get_dispatch_key(physicalDevice); |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 244 | instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map); |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 245 | return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 246 | } |
| 247 | |
| 248 | VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) { |
Mark Lobodzinski | 38686e9 | 2017-06-07 16:04:50 -0600 | [diff] [blame] | 249 | const auto item = name_to_funcptr_map.find(funcName); |
| 250 | if (item != name_to_funcptr_map.end()) { |
| 251 | return reinterpret_cast<PFN_vkVoidFunction>(item->second); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 252 | } |
| 253 | |
Mark Lobodzinski | 38686e9 | 2017-06-07 16:04:50 -0600 | [diff] [blame] | 254 | layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| 255 | const auto &table = device_data->dispatch_table; |
| 256 | if (!table.GetDeviceProcAddr) return nullptr; |
| 257 | return table.GetDeviceProcAddr(device, funcName); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 258 | } |
| 259 | |
| 260 | VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) { |
Mark Lobodzinski | 38686e9 | 2017-06-07 16:04:50 -0600 | [diff] [blame] | 261 | const auto item = name_to_funcptr_map.find(funcName); |
| 262 | if (item != name_to_funcptr_map.end()) { |
| 263 | return reinterpret_cast<PFN_vkVoidFunction>(item->second); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 264 | } |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 265 | |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 266 | instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map); |
Mark Lobodzinski | 38686e9 | 2017-06-07 16:04:50 -0600 | [diff] [blame] | 267 | const auto &table = instance_data->dispatch_table; |
| 268 | if (!table.GetInstanceProcAddr) return nullptr; |
| 269 | return table.GetInstanceProcAddr(instance, funcName); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 270 | } |
| 271 | |
Mark Lobodzinski | 38686e9 | 2017-06-07 16:04:50 -0600 | [diff] [blame] | 272 | VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) { |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 273 | instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map); |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 274 | VkLayerInstanceDispatchTable *disp_table = &instance_data->dispatch_table; |
Mark Young | 3938987 | 2017-01-19 21:10:49 -0700 | [diff] [blame] | 275 | if (disp_table->GetPhysicalDeviceProcAddr == NULL) { |
| 276 | return NULL; |
| 277 | } |
| 278 | return disp_table->GetPhysicalDeviceProcAddr(instance, funcName); |
| 279 | } |
| 280 | |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 281 | VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, |
| 282 | const VkComputePipelineCreateInfo *pCreateInfos, |
| 283 | const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 284 | layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 285 | safe_VkComputePipelineCreateInfo *local_pCreateInfos = NULL; |
| 286 | if (pCreateInfos) { |
| 287 | std::lock_guard<std::mutex> lock(global_lock); |
| 288 | local_pCreateInfos = new safe_VkComputePipelineCreateInfo[createInfoCount]; |
| 289 | for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) { |
| 290 | local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]); |
| 291 | if (pCreateInfos[idx0].basePipelineHandle) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 292 | local_pCreateInfos[idx0].basePipelineHandle = Unwrap(device_data, pCreateInfos[idx0].basePipelineHandle); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 293 | } |
| 294 | if (pCreateInfos[idx0].layout) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 295 | local_pCreateInfos[idx0].layout = Unwrap(device_data, pCreateInfos[idx0].layout); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 296 | } |
| 297 | if (pCreateInfos[idx0].stage.module) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 298 | local_pCreateInfos[idx0].stage.module = Unwrap(device_data, pCreateInfos[idx0].stage.module); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 299 | } |
| 300 | } |
| 301 | } |
| 302 | if (pipelineCache) { |
| 303 | std::lock_guard<std::mutex> lock(global_lock); |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 304 | pipelineCache = Unwrap(device_data, pipelineCache); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 305 | } |
| 306 | |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 307 | VkResult result = device_data->dispatch_table.CreateComputePipelines(device, pipelineCache, createInfoCount, |
| 308 | local_pCreateInfos->ptr(), pAllocator, pPipelines); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 309 | delete[] local_pCreateInfos; |
Maciej Jesionowski | 4220070 | 2016-11-23 10:44:34 +0100 | [diff] [blame] | 310 | { |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 311 | std::lock_guard<std::mutex> lock(global_lock); |
| 312 | for (uint32_t i = 0; i < createInfoCount; ++i) { |
Maciej Jesionowski | 4220070 | 2016-11-23 10:44:34 +0100 | [diff] [blame] | 313 | if (pPipelines[i] != VK_NULL_HANDLE) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 314 | pPipelines[i] = WrapNew(device_data, pPipelines[i]); |
Maciej Jesionowski | 4220070 | 2016-11-23 10:44:34 +0100 | [diff] [blame] | 315 | } |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 316 | } |
| 317 | } |
| 318 | return result; |
| 319 | } |
| 320 | |
| 321 | VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, |
| 322 | const VkGraphicsPipelineCreateInfo *pCreateInfos, |
| 323 | const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 324 | layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| 325 | safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = nullptr; |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 326 | if (pCreateInfos) { |
| 327 | local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount]; |
| 328 | std::lock_guard<std::mutex> lock(global_lock); |
| 329 | for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) { |
Petr Kraus | e91f7a1 | 2017-12-14 20:57:36 +0100 | [diff] [blame] | 330 | bool uses_color_attachment = false; |
| 331 | bool uses_depthstencil_attachment = false; |
| 332 | { |
| 333 | const auto subpasses_uses_it = |
| 334 | device_data->renderpasses_states.find(Unwrap(device_data, pCreateInfos[idx0].renderPass)); |
| 335 | if (subpasses_uses_it != device_data->renderpasses_states.end()) { |
| 336 | const auto &subpasses_uses = subpasses_uses_it->second; |
| 337 | if (subpasses_uses.subpasses_using_color_attachment.count(pCreateInfos[idx0].subpass)) |
| 338 | uses_color_attachment = true; |
| 339 | if (subpasses_uses.subpasses_using_depthstencil_attachment.count(pCreateInfos[idx0].subpass)) |
| 340 | uses_depthstencil_attachment = true; |
| 341 | } |
| 342 | } |
| 343 | |
| 344 | local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0], uses_color_attachment, uses_depthstencil_attachment); |
| 345 | |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 346 | if (pCreateInfos[idx0].basePipelineHandle) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 347 | local_pCreateInfos[idx0].basePipelineHandle = Unwrap(device_data, pCreateInfos[idx0].basePipelineHandle); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 348 | } |
| 349 | if (pCreateInfos[idx0].layout) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 350 | local_pCreateInfos[idx0].layout = Unwrap(device_data, pCreateInfos[idx0].layout); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 351 | } |
| 352 | if (pCreateInfos[idx0].pStages) { |
| 353 | for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) { |
| 354 | if (pCreateInfos[idx0].pStages[idx1].module) { |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 355 | local_pCreateInfos[idx0].pStages[idx1].module = |
| 356 | Unwrap(device_data, pCreateInfos[idx0].pStages[idx1].module); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 357 | } |
| 358 | } |
| 359 | } |
| 360 | if (pCreateInfos[idx0].renderPass) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 361 | local_pCreateInfos[idx0].renderPass = Unwrap(device_data, pCreateInfos[idx0].renderPass); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 362 | } |
| 363 | } |
| 364 | } |
| 365 | if (pipelineCache) { |
| 366 | std::lock_guard<std::mutex> lock(global_lock); |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 367 | pipelineCache = Unwrap(device_data, pipelineCache); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 368 | } |
| 369 | |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 370 | VkResult result = device_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, createInfoCount, |
| 371 | local_pCreateInfos->ptr(), pAllocator, pPipelines); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 372 | delete[] local_pCreateInfos; |
Maciej Jesionowski | 4220070 | 2016-11-23 10:44:34 +0100 | [diff] [blame] | 373 | { |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 374 | std::lock_guard<std::mutex> lock(global_lock); |
| 375 | for (uint32_t i = 0; i < createInfoCount; ++i) { |
Maciej Jesionowski | 4220070 | 2016-11-23 10:44:34 +0100 | [diff] [blame] | 376 | if (pPipelines[i] != VK_NULL_HANDLE) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 377 | pPipelines[i] = WrapNew(device_data, pPipelines[i]); |
Maciej Jesionowski | 4220070 | 2016-11-23 10:44:34 +0100 | [diff] [blame] | 378 | } |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 379 | } |
| 380 | } |
| 381 | return result; |
| 382 | } |
| 383 | |
Petr Kraus | e91f7a1 | 2017-12-14 20:57:36 +0100 | [diff] [blame] | 384 | static void PostCallCreateRenderPass(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, VkRenderPass renderPass) { |
| 385 | auto &renderpass_state = dev_data->renderpasses_states[renderPass]; |
| 386 | |
| 387 | for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) { |
| 388 | bool uses_color = false; |
| 389 | for (uint32_t i = 0; i < pCreateInfo->pSubpasses[subpass].colorAttachmentCount && !uses_color; ++i) |
| 390 | if (pCreateInfo->pSubpasses[subpass].pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) uses_color = true; |
| 391 | |
| 392 | bool uses_depthstencil = false; |
| 393 | if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment) |
| 394 | if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED) |
| 395 | uses_depthstencil = true; |
| 396 | |
| 397 | if (uses_color) renderpass_state.subpasses_using_color_attachment.insert(subpass); |
| 398 | if (uses_depthstencil) renderpass_state.subpasses_using_depthstencil_attachment.insert(subpass); |
| 399 | } |
| 400 | } |
| 401 | |
| 402 | VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo, |
| 403 | const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) { |
| 404 | layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| 405 | VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass); |
| 406 | if (VK_SUCCESS == result) { |
| 407 | std::lock_guard<std::mutex> lock(global_lock); |
| 408 | |
| 409 | PostCallCreateRenderPass(dev_data, pCreateInfo, *pRenderPass); |
| 410 | |
| 411 | *pRenderPass = WrapNew(dev_data, *pRenderPass); |
| 412 | } |
| 413 | return result; |
| 414 | } |
| 415 | |
// Drop the cached subpass attachment-usage state for a destroyed render pass.
// Keyed by the unwrapped (driver) handle — see CreateRenderPass/DestroyRenderPass.
static void PostCallDestroyRenderPass(layer_data *dev_data, VkRenderPass renderPass) {
    dev_data->renderpasses_states.erase(renderPass);
}
| 419 | |
// Layer intercept for vkDestroyRenderPass. Order here is deliberate: under the lock, the
// unique handle is translated back to the driver handle and removed from the id map; the
// lock is released before dispatching down the chain (the driver call must not run under
// this layer's lock); then the lock is retaken to purge the cached render-pass state.
VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    uint64_t renderPass_id = reinterpret_cast<uint64_t &>(renderPass);
    // Replace the wrapped handle with the driver's real handle before forwarding.
    // NOTE(review): operator[] default-inserts 0 if the id is unknown — presumably callers
    // always pass a handle this layer wrapped; confirm before hardening.
    renderPass = (VkRenderPass)dev_data->unique_id_mapping[renderPass_id];
    dev_data->unique_id_mapping.erase(renderPass_id);
    lock.unlock();
    dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);

    lock.lock();
    // renderPass now holds the unwrapped handle, matching the cache key used at creation
    PostCallDestroyRenderPass(dev_data, renderPass);
}
| 432 | |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 433 | VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo, |
| 434 | const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) { |
Tobin Ehlis | 8d6acde | 2017-02-08 07:40:40 -0700 | [diff] [blame] | 435 | layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 436 | safe_VkSwapchainCreateInfoKHR *local_pCreateInfo = NULL; |
| 437 | if (pCreateInfo) { |
| 438 | std::lock_guard<std::mutex> lock(global_lock); |
| 439 | local_pCreateInfo = new safe_VkSwapchainCreateInfoKHR(pCreateInfo); |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 440 | local_pCreateInfo->oldSwapchain = Unwrap(my_map_data, pCreateInfo->oldSwapchain); |
| 441 | // Surface is instance-level object |
| 442 | local_pCreateInfo->surface = Unwrap(my_map_data->instance_data, pCreateInfo->surface); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 443 | } |
| 444 | |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 445 | VkResult result = my_map_data->dispatch_table.CreateSwapchainKHR(device, local_pCreateInfo->ptr(), pAllocator, pSwapchain); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 446 | if (local_pCreateInfo) { |
| 447 | delete local_pCreateInfo; |
| 448 | } |
| 449 | if (VK_SUCCESS == result) { |
| 450 | std::lock_guard<std::mutex> lock(global_lock); |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 451 | *pSwapchain = WrapNew(my_map_data, *pSwapchain); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 452 | } |
| 453 | return result; |
| 454 | } |
| 455 | |
Dustin Graves | 9a6eb05 | 2017-03-28 14:18:54 -0600 | [diff] [blame] | 456 | VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount, |
| 457 | const VkSwapchainCreateInfoKHR *pCreateInfos, |
| 458 | const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) { |
| 459 | layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| 460 | safe_VkSwapchainCreateInfoKHR *local_pCreateInfos = NULL; |
| 461 | { |
| 462 | std::lock_guard<std::mutex> lock(global_lock); |
| 463 | if (pCreateInfos) { |
Dustin Graves | 9a6eb05 | 2017-03-28 14:18:54 -0600 | [diff] [blame] | 464 | local_pCreateInfos = new safe_VkSwapchainCreateInfoKHR[swapchainCount]; |
| 465 | for (uint32_t i = 0; i < swapchainCount; ++i) { |
| 466 | local_pCreateInfos[i].initialize(&pCreateInfos[i]); |
| 467 | if (pCreateInfos[i].surface) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 468 | // Surface is instance-level object |
| 469 | local_pCreateInfos[i].surface = Unwrap(dev_data->instance_data, pCreateInfos[i].surface); |
Dustin Graves | 9a6eb05 | 2017-03-28 14:18:54 -0600 | [diff] [blame] | 470 | } |
| 471 | if (pCreateInfos[i].oldSwapchain) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 472 | local_pCreateInfos[i].oldSwapchain = Unwrap(dev_data, pCreateInfos[i].oldSwapchain); |
Dustin Graves | 9a6eb05 | 2017-03-28 14:18:54 -0600 | [diff] [blame] | 473 | } |
| 474 | } |
| 475 | } |
| 476 | } |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 477 | VkResult result = dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, local_pCreateInfos->ptr(), |
| 478 | pAllocator, pSwapchains); |
Dustin Graves | 9a6eb05 | 2017-03-28 14:18:54 -0600 | [diff] [blame] | 479 | if (local_pCreateInfos) delete[] local_pCreateInfos; |
| 480 | if (VK_SUCCESS == result) { |
| 481 | std::lock_guard<std::mutex> lock(global_lock); |
| 482 | for (uint32_t i = 0; i < swapchainCount; i++) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 483 | pSwapchains[i] = WrapNew(dev_data, pSwapchains[i]); |
Dustin Graves | 9a6eb05 | 2017-03-28 14:18:54 -0600 | [diff] [blame] | 484 | } |
| 485 | } |
| 486 | return result; |
| 487 | } |
| 488 | |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 489 | VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount, |
| 490 | VkImage *pSwapchainImages) { |
Tobin Ehlis | 8d6acde | 2017-02-08 07:40:40 -0700 | [diff] [blame] | 491 | layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
Mark Lobodzinski | 2eb39bc | 2018-02-16 11:24:21 -0700 | [diff] [blame] | 492 | VkSwapchainKHR wrapped_swapchain_handle = swapchain; |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 493 | if (VK_NULL_HANDLE != swapchain) { |
| 494 | std::lock_guard<std::mutex> lock(global_lock); |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 495 | swapchain = Unwrap(my_device_data, swapchain); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 496 | } |
| 497 | VkResult result = |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 498 | my_device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages); |
Mark Lobodzinski | 2eb39bc | 2018-02-16 11:24:21 -0700 | [diff] [blame] | 499 | if ((VK_SUCCESS == result) || (VK_INCOMPLETE == result)) { |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 500 | if ((*pSwapchainImageCount > 0) && pSwapchainImages) { |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 501 | std::lock_guard<std::mutex> lock(global_lock); |
Mark Lobodzinski | 2eb39bc | 2018-02-16 11:24:21 -0700 | [diff] [blame] | 502 | auto &wrapped_swapchain_image_handles = my_device_data->swapchain_wrapped_image_handle_map[wrapped_swapchain_handle]; |
| 503 | for (uint32_t i = static_cast<uint32_t>(wrapped_swapchain_image_handles.size()); i < *pSwapchainImageCount; i++) { |
| 504 | wrapped_swapchain_image_handles.emplace_back(WrapNew(my_device_data, pSwapchainImages[i])); |
| 505 | } |
| 506 | for (uint32_t i = 0; i < *pSwapchainImageCount; i++) { |
| 507 | pSwapchainImages[i] = wrapped_swapchain_image_handles[i]; |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 508 | } |
| 509 | } |
| 510 | } |
| 511 | return result; |
| 512 | } |
| 513 | |
Mark Lobodzinski | 1ce83f4 | 2018-02-16 09:58:07 -0700 | [diff] [blame] | 514 | VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) { |
| 515 | layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| 516 | std::unique_lock<std::mutex> lock(global_lock); |
Mark Lobodzinski | 2eb39bc | 2018-02-16 11:24:21 -0700 | [diff] [blame] | 517 | |
| 518 | auto &image_array = dev_data->swapchain_wrapped_image_handle_map[swapchain]; |
| 519 | for (auto &image_handle : image_array) { |
| 520 | dev_data->unique_id_mapping.erase(HandleToUint64(image_handle)); |
| 521 | } |
| 522 | dev_data->swapchain_wrapped_image_handle_map.erase(swapchain); |
| 523 | |
| 524 | uint64_t swapchain_id = HandleToUint64(swapchain); |
Mark Lobodzinski | 1ce83f4 | 2018-02-16 09:58:07 -0700 | [diff] [blame] | 525 | swapchain = (VkSwapchainKHR)dev_data->unique_id_mapping[swapchain_id]; |
| 526 | dev_data->unique_id_mapping.erase(swapchain_id); |
| 527 | lock.unlock(); |
| 528 | dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator); |
| 529 | } |
| 530 | |
Chris Forbes | 0f507f2 | 2017-04-16 13:13:17 +1200 | [diff] [blame] | 531 | VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) { |
| 532 | layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map); |
| 533 | safe_VkPresentInfoKHR *local_pPresentInfo = NULL; |
| 534 | { |
| 535 | std::lock_guard<std::mutex> lock(global_lock); |
| 536 | if (pPresentInfo) { |
| 537 | local_pPresentInfo = new safe_VkPresentInfoKHR(pPresentInfo); |
| 538 | if (local_pPresentInfo->pWaitSemaphores) { |
| 539 | for (uint32_t index1 = 0; index1 < local_pPresentInfo->waitSemaphoreCount; ++index1) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 540 | local_pPresentInfo->pWaitSemaphores[index1] = Unwrap(dev_data, pPresentInfo->pWaitSemaphores[index1]); |
Chris Forbes | 0f507f2 | 2017-04-16 13:13:17 +1200 | [diff] [blame] | 541 | } |
| 542 | } |
| 543 | if (local_pPresentInfo->pSwapchains) { |
| 544 | for (uint32_t index1 = 0; index1 < local_pPresentInfo->swapchainCount; ++index1) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 545 | local_pPresentInfo->pSwapchains[index1] = Unwrap(dev_data, pPresentInfo->pSwapchains[index1]); |
Chris Forbes | 0f507f2 | 2017-04-16 13:13:17 +1200 | [diff] [blame] | 546 | } |
| 547 | } |
| 548 | } |
| 549 | } |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 550 | VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, local_pPresentInfo->ptr()); |
Chris Forbes | 0f507f2 | 2017-04-16 13:13:17 +1200 | [diff] [blame] | 551 | |
| 552 | // pResults is an output array embedded in a structure. The code generator neglects to copy back from the safe_* version, |
| 553 | // so handle it as a special case here: |
| 554 | if (pPresentInfo && pPresentInfo->pResults) { |
| 555 | for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) { |
| 556 | pPresentInfo->pResults[i] = local_pPresentInfo->pResults[i]; |
| 557 | } |
| 558 | } |
| 559 | |
| 560 | if (local_pPresentInfo) delete local_pPresentInfo; |
| 561 | return result; |
| 562 | } |
| 563 | |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 564 | VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device, |
| 565 | const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo, |
| 566 | const VkAllocationCallbacks *pAllocator, |
| 567 | VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) { |
| 568 | layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
Mark Lobodzinski | 94d9e8c | 2017-03-06 16:18:19 -0700 | [diff] [blame] | 569 | safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info = NULL; |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 570 | { |
| 571 | std::lock_guard<std::mutex> lock(global_lock); |
| 572 | if (pCreateInfo) { |
Mark Lobodzinski | 94d9e8c | 2017-03-06 16:18:19 -0700 | [diff] [blame] | 573 | local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo); |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 574 | if (pCreateInfo->descriptorSetLayout) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 575 | local_create_info->descriptorSetLayout = Unwrap(dev_data, pCreateInfo->descriptorSetLayout); |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 576 | } |
| 577 | if (pCreateInfo->pipelineLayout) { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 578 | local_create_info->pipelineLayout = Unwrap(dev_data, pCreateInfo->pipelineLayout); |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 579 | } |
| 580 | } |
| 581 | } |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 582 | VkResult result = dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, local_create_info->ptr(), pAllocator, |
| 583 | pDescriptorUpdateTemplate); |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 584 | if (VK_SUCCESS == result) { |
| 585 | std::lock_guard<std::mutex> lock(global_lock); |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 586 | *pDescriptorUpdateTemplate = WrapNew(dev_data, *pDescriptorUpdateTemplate); |
Mark Lobodzinski | 4f3ce67 | 2017-03-03 10:28:21 -0700 | [diff] [blame] | 587 | |
| 588 | // Shadow template createInfo for later updates |
Mark Lobodzinski | 94d9e8c | 2017-03-06 16:18:19 -0700 | [diff] [blame] | 589 | std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info)); |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 590 | dev_data->desc_template_map[(uint64_t)*pDescriptorUpdateTemplate] = std::move(template_state); |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 591 | } |
| 592 | return result; |
| 593 | } |
| 594 | |
| 595 | VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device, |
| 596 | VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, |
| 597 | const VkAllocationCallbacks *pAllocator) { |
| 598 | layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| 599 | std::unique_lock<std::mutex> lock(global_lock); |
Mark Lobodzinski | 94d9e8c | 2017-03-06 16:18:19 -0700 | [diff] [blame] | 600 | uint64_t descriptor_update_template_id = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate); |
| 601 | dev_data->desc_template_map.erase(descriptor_update_template_id); |
| 602 | descriptorUpdateTemplate = (VkDescriptorUpdateTemplateKHR)dev_data->unique_id_mapping[descriptor_update_template_id]; |
| 603 | dev_data->unique_id_mapping.erase(descriptor_update_template_id); |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 604 | lock.unlock(); |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 605 | dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator); |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 606 | } |
| 607 | |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 608 | void *BuildUnwrappedUpdateTemplateBuffer(layer_data *dev_data, uint64_t descriptorUpdateTemplate, const void *pData) { |
| 609 | auto const template_map_entry = dev_data->desc_template_map.find(descriptorUpdateTemplate); |
| 610 | if (template_map_entry == dev_data->desc_template_map.end()) { |
| 611 | assert(0); |
| 612 | } |
| 613 | auto const &create_info = template_map_entry->second->create_info; |
| 614 | size_t allocation_size = 0; |
Mark Lobodzinski | 2d9de65 | 2017-04-24 08:58:52 -0600 | [diff] [blame] | 615 | std::vector<std::tuple<size_t, VulkanObjectType, void *>> template_entries; |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 616 | |
| 617 | for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) { |
| 618 | for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) { |
| 619 | size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride; |
| 620 | char *update_entry = (char *)(pData) + offset; |
| 621 | |
| 622 | switch (create_info.pDescriptorUpdateEntries[i].descriptorType) { |
| 623 | case VK_DESCRIPTOR_TYPE_SAMPLER: |
| 624 | case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: |
| 625 | case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: |
| 626 | case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: |
| 627 | case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: { |
| 628 | auto image_entry = reinterpret_cast<VkDescriptorImageInfo *>(update_entry); |
| 629 | allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorImageInfo)); |
| 630 | |
| 631 | VkDescriptorImageInfo *wrapped_entry = new VkDescriptorImageInfo(*image_entry); |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 632 | wrapped_entry->sampler = Unwrap(dev_data, image_entry->sampler); |
| 633 | wrapped_entry->imageView = Unwrap(dev_data, image_entry->imageView); |
Mark Lobodzinski | 2d9de65 | 2017-04-24 08:58:52 -0600 | [diff] [blame] | 634 | template_entries.emplace_back(offset, kVulkanObjectTypeImage, reinterpret_cast<void *>(wrapped_entry)); |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 635 | } break; |
| 636 | |
| 637 | case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: |
| 638 | case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: |
| 639 | case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: |
| 640 | case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { |
| 641 | auto buffer_entry = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry); |
| 642 | allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorBufferInfo)); |
| 643 | |
| 644 | VkDescriptorBufferInfo *wrapped_entry = new VkDescriptorBufferInfo(*buffer_entry); |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 645 | wrapped_entry->buffer = Unwrap(dev_data, buffer_entry->buffer); |
Mark Lobodzinski | 2d9de65 | 2017-04-24 08:58:52 -0600 | [diff] [blame] | 646 | template_entries.emplace_back(offset, kVulkanObjectTypeBuffer, reinterpret_cast<void *>(wrapped_entry)); |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 647 | } break; |
| 648 | |
| 649 | case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: |
| 650 | case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: { |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 651 | auto buffer_view_handle = reinterpret_cast<VkBufferView *>(update_entry); |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 652 | allocation_size = std::max(allocation_size, offset + sizeof(VkBufferView)); |
| 653 | |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 654 | VkBufferView wrapped_entry = Unwrap(dev_data, *buffer_view_handle); |
Mark Lobodzinski | 2d9de65 | 2017-04-24 08:58:52 -0600 | [diff] [blame] | 655 | template_entries.emplace_back(offset, kVulkanObjectTypeBufferView, reinterpret_cast<void *>(wrapped_entry)); |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 656 | } break; |
| 657 | default: |
| 658 | assert(0); |
| 659 | break; |
| 660 | } |
| 661 | } |
| 662 | } |
| 663 | // Allocate required buffer size and populate with source/unwrapped data |
| 664 | void *unwrapped_data = malloc(allocation_size); |
| 665 | for (auto &this_entry : template_entries) { |
Mark Lobodzinski | 2d9de65 | 2017-04-24 08:58:52 -0600 | [diff] [blame] | 666 | VulkanObjectType type = std::get<1>(this_entry); |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 667 | void *destination = (char *)unwrapped_data + std::get<0>(this_entry); |
| 668 | void *source = (char *)std::get<2>(this_entry); |
| 669 | |
| 670 | switch (type) { |
Mark Lobodzinski | 2d9de65 | 2017-04-24 08:58:52 -0600 | [diff] [blame] | 671 | case kVulkanObjectTypeImage: |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 672 | *(reinterpret_cast<VkDescriptorImageInfo *>(destination)) = *(reinterpret_cast<VkDescriptorImageInfo *>(source)); |
| 673 | delete reinterpret_cast<VkDescriptorImageInfo *>(source); |
| 674 | break; |
Mark Lobodzinski | 2d9de65 | 2017-04-24 08:58:52 -0600 | [diff] [blame] | 675 | case kVulkanObjectTypeBuffer: |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 676 | *(reinterpret_cast<VkDescriptorBufferInfo *>(destination)) = *(reinterpret_cast<VkDescriptorBufferInfo *>(source)); |
| 677 | delete reinterpret_cast<VkDescriptorBufferInfo *>(source); |
| 678 | break; |
Mark Lobodzinski | 2d9de65 | 2017-04-24 08:58:52 -0600 | [diff] [blame] | 679 | case kVulkanObjectTypeBufferView: |
Mark Lobodzinski | d5197d0 | 2017-03-15 13:13:49 -0600 | [diff] [blame] | 680 | *(reinterpret_cast<VkBufferView *>(destination)) = reinterpret_cast<VkBufferView>(source); |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 681 | break; |
| 682 | default: |
| 683 | assert(0); |
| 684 | break; |
| 685 | } |
| 686 | } |
| 687 | return (void *)unwrapped_data; |
| 688 | } |
| 689 | |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 690 | VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet, |
| 691 | VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate, |
| 692 | const void *pData) { |
| 693 | layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
Mark Lobodzinski | 94d9e8c | 2017-03-06 16:18:19 -0700 | [diff] [blame] | 694 | uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate); |
Mark Lobodzinski | c8a0c9b | 2017-11-13 09:42:58 -0700 | [diff] [blame] | 695 | void *unwrapped_buffer = nullptr; |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 696 | { |
| 697 | std::lock_guard<std::mutex> lock(global_lock); |
Chris Forbes | 6956a31 | 2017-05-02 17:36:28 -0700 | [diff] [blame] | 698 | descriptorSet = Unwrap(dev_data, descriptorSet); |
Mark Lobodzinski | 94d9e8c | 2017-03-06 16:18:19 -0700 | [diff] [blame] | 699 | descriptorUpdateTemplate = (VkDescriptorUpdateTemplateKHR)dev_data->unique_id_mapping[template_handle]; |
Mark Lobodzinski | c8a0c9b | 2017-11-13 09:42:58 -0700 | [diff] [blame] | 700 | unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(dev_data, template_handle, pData); |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 701 | } |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 702 | dev_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, unwrapped_buffer); |
Mark Lobodzinski | b523f7c | 2017-03-06 09:00:21 -0700 | [diff] [blame] | 703 | free(unwrapped_buffer); |
Mark Lobodzinski | 71703a5 | 2017-03-03 08:40:16 -0700 | [diff] [blame] | 704 | } |
| 705 | |
// Intercepts vkCmdPushDescriptorSetWithTemplateKHR: unwraps the template and
// pipeline-layout handles, rebuilds the raw update buffer with driver handles,
// and records the command down the chain.
VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    // Capture the wrapped id before unwrapping; desc_template_map is keyed by it.
    uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    void *unwrapped_buffer = nullptr;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        descriptorUpdateTemplate = Unwrap(dev_data, descriptorUpdateTemplate);
        layout = Unwrap(dev_data, layout);
        unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(dev_data, template_handle, pData);
    }
    dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set,
                                                                 unwrapped_buffer);
    // Buffer was malloc()'d by BuildUnwrappedUpdateTemplateBuffer.
    free(unwrapped_buffer);
}
| 722 | |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 723 | #ifndef __ANDROID__ |
| 724 | VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, |
| 725 | VkDisplayPropertiesKHR *pProperties) { |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 726 | instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 727 | |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 728 | VkResult result = |
| 729 | my_map_data->dispatch_table.GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties); |
Chris Forbes | f1e49bf | 2017-05-02 17:36:57 -0700 | [diff] [blame] | 730 | if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { |
| 731 | std::lock_guard<std::mutex> lock(global_lock); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 732 | for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
Chris Forbes | f1e49bf | 2017-05-02 17:36:57 -0700 | [diff] [blame] | 733 | pProperties[idx0].display = WrapNew(my_map_data, pProperties[idx0].display); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 734 | } |
| 735 | } |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 736 | return result; |
| 737 | } |
| 738 | |
// Intercepts vkGetDisplayPlaneSupportedDisplaysKHR: calls down, then attempts to
// translate the returned driver display handles back to the layer's handles.
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
    instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map);
    VkResult result =
        my_map_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays);
    if (VK_SUCCESS == result) {
        if ((*pDisplayCount > 0) && pDisplays) {
            std::lock_guard<std::mutex> lock(global_lock);
            for (uint32_t i = 0; i < *pDisplayCount; i++) {
                // TODO: this looks like it really wants a /reverse/ mapping. What's going on here?
                // NOTE(review): unique_id_mapping maps unique->driver, but pDisplays[i]
                // here is a driver handle, so this find only succeeds by coincidence of
                // values. Also, if the find fails, the assert compiles out in release
                // builds and it->second dereferences end() (UB). Needs a proper
                // driver->unique lookup (or WrapNew for unseen displays) — confirm
                // against how GetPhysicalDeviceDisplayPropertiesKHR wraps displays.
                auto it = my_map_data->unique_id_mapping.find(reinterpret_cast<const uint64_t &>(pDisplays[i]));
                assert(it != my_map_data->unique_id_mapping.end());
                pDisplays[i] = reinterpret_cast<VkDisplayKHR &>(it->second);
            }
        }
    }
    return result;
}
| 757 | |
| 758 | VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, |
| 759 | uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) { |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 760 | instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 761 | { |
| 762 | std::lock_guard<std::mutex> lock(global_lock); |
Chris Forbes | ef35afd | 2017-05-02 17:45:45 -0700 | [diff] [blame] | 763 | display = Unwrap(my_map_data, display); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 764 | } |
| 765 | |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 766 | VkResult result = my_map_data->dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 767 | if (result == VK_SUCCESS && pProperties) { |
Chris Forbes | ef35afd | 2017-05-02 17:45:45 -0700 | [diff] [blame] | 768 | std::lock_guard<std::mutex> lock(global_lock); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 769 | for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
Chris Forbes | ef35afd | 2017-05-02 17:45:45 -0700 | [diff] [blame] | 770 | pProperties[idx0].displayMode = WrapNew(my_map_data, pProperties[idx0].displayMode); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 771 | } |
| 772 | } |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 773 | return result; |
| 774 | } |
Norbert Nopper | 1dec9a5 | 2016-11-25 07:55:13 +0100 | [diff] [blame] | 775 | |
| 776 | VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, |
| 777 | uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) { |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 778 | instance_layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map); |
Norbert Nopper | 1dec9a5 | 2016-11-25 07:55:13 +0100 | [diff] [blame] | 779 | { |
| 780 | std::lock_guard<std::mutex> lock(global_lock); |
Chris Forbes | 65029c1 | 2017-05-02 17:48:20 -0700 | [diff] [blame] | 781 | mode = Unwrap(dev_data, mode); |
Norbert Nopper | 1dec9a5 | 2016-11-25 07:55:13 +0100 | [diff] [blame] | 782 | } |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 783 | VkResult result = dev_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities); |
Norbert Nopper | 1dec9a5 | 2016-11-25 07:55:13 +0100 | [diff] [blame] | 784 | return result; |
| 785 | } |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 786 | #endif |
| 787 | |
Mark Lobodzinski | e4f2c5f | 2017-07-17 14:26:47 -0600 | [diff] [blame] | 788 | VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, const VkDebugMarkerObjectTagInfoEXT *pTagInfo) { |
Mark Lobodzinski | a096c12 | 2017-03-16 11:54:35 -0600 | [diff] [blame] | 789 | layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| 790 | auto local_tag_info = new safe_VkDebugMarkerObjectTagInfoEXT(pTagInfo); |
| 791 | { |
| 792 | std::lock_guard<std::mutex> lock(global_lock); |
| 793 | auto it = device_data->unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_tag_info->object)); |
| 794 | if (it != device_data->unique_id_mapping.end()) { |
| 795 | local_tag_info->object = it->second; |
| 796 | } |
| 797 | } |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 798 | VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT( |
Mark Lobodzinski | a096c12 | 2017-03-16 11:54:35 -0600 | [diff] [blame] | 799 | device, reinterpret_cast<VkDebugMarkerObjectTagInfoEXT *>(local_tag_info)); |
| 800 | return result; |
| 801 | } |
| 802 | |
Mark Lobodzinski | e4f2c5f | 2017-07-17 14:26:47 -0600 | [diff] [blame] | 803 | VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) { |
Mark Lobodzinski | a096c12 | 2017-03-16 11:54:35 -0600 | [diff] [blame] | 804 | layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| 805 | auto local_name_info = new safe_VkDebugMarkerObjectNameInfoEXT(pNameInfo); |
| 806 | { |
| 807 | std::lock_guard<std::mutex> lock(global_lock); |
| 808 | auto it = device_data->unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_name_info->object)); |
| 809 | if (it != device_data->unique_id_mapping.end()) { |
| 810 | local_name_info->object = it->second; |
| 811 | } |
| 812 | } |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 813 | VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT( |
Mark Lobodzinski | a096c12 | 2017-03-16 11:54:35 -0600 | [diff] [blame] | 814 | device, reinterpret_cast<VkDebugMarkerObjectNameInfoEXT *>(local_name_info)); |
| 815 | return result; |
| 816 | } |
| 817 | |
Mark Lobodzinski | 64318ba | 2017-01-26 13:34:13 -0700 | [diff] [blame] | 818 | } // namespace unique_objects |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 819 | |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 820 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, |
| 821 | VkExtensionProperties *pProperties) { |
| 822 | return unique_objects::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties); |
| 823 | } |
| 824 | |
// Loader-visible export: forwards instance-layer enumeration into the layer's namespace.
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                 VkLayerProperties *pProperties) {
    return unique_objects::EnumerateInstanceLayerProperties(pCount, pProperties);
}
| 829 | |
| 830 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, |
| 831 | VkLayerProperties *pProperties) { |
| 832 | assert(physicalDevice == VK_NULL_HANDLE); |
| 833 | return unique_objects::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties); |
| 834 | } |
| 835 | |
| 836 | VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) { |
| 837 | return unique_objects::GetDeviceProcAddr(dev, funcName); |
| 838 | } |
| 839 | |
| 840 | VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) { |
| 841 | return unique_objects::GetInstanceProcAddr(instance, funcName); |
| 842 | } |
| 843 | |
| 844 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, |
| 845 | const char *pLayerName, uint32_t *pCount, |
| 846 | VkExtensionProperties *pProperties) { |
| 847 | assert(physicalDevice == VK_NULL_HANDLE); |
| 848 | return unique_objects::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties); |
| 849 | } |
Mark Young | 3938987 | 2017-01-19 21:10:49 -0700 | [diff] [blame] | 850 | |
Mark Lobodzinski | 729a8d3 | 2017-01-26 12:16:30 -0700 | [diff] [blame] | 851 | VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance, |
| 852 | const char *funcName) { |
Mark Young | 3938987 | 2017-01-19 21:10:49 -0700 | [diff] [blame] | 853 | return unique_objects::GetPhysicalDeviceProcAddr(instance, funcName); |
| 854 | } |
| 855 | |
| 856 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) { |
| 857 | assert(pVersionStruct != NULL); |
| 858 | assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT); |
| 859 | |
| 860 | // Fill in the function pointers if our version is at least capable of having the structure contain them. |
| 861 | if (pVersionStruct->loaderLayerInterfaceVersion >= 2) { |
| 862 | pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr; |
| 863 | pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr; |
| 864 | pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr; |
| 865 | } |
| 866 | |
| 867 | if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) { |
| 868 | unique_objects::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion; |
| 869 | } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) { |
| 870 | pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION; |
| 871 | } |
| 872 | |
| 873 | return VK_SUCCESS; |
| 874 | } |