/*
 * Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (c) 2015-2016 Google, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 */

#define NOMINMAX

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unordered_map>
#include <vector>
#include <list>
#include <memory>
#include <algorithm>

// For Windows, this #include must come before other Vk headers.
#include "vk_loader_platform.h"

#include "unique_objects.h"
#include "vk_dispatch_table_helper.h"
#include "vk_layer_config.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_logging.h"
#include "vk_layer_table.h"
#include "vk_layer_utils.h"
#include "vk_enum_string_helper.h"
#include "vk_validation_error_messages.h"
#include "vk_object_types.h"
#include "vk_extension_helper.h"
#include "vulkan/vk_layer.h"

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

#include "unique_objects_wrappers.h"

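// Layer overview: unique_objects replaces the handles returned by the driver with layer-generated unique IDs and keeps
// the actual handles in unique_id_mapping. New handles are registered with WrapNew and translated back with Unwrap
// before calls are passed down the chain. Most entry points are handled by the wrappers included from
// unique_objects_wrappers.h; the functions below are the hand-written cases that need special treatment.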
namespace unique_objects {

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static void initUniqueObjects(instance_layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "google_unique_objects");
}

// Check enabled instance extensions against supported instance extension whitelist
static void InstanceExtensionWhitelist(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        // Check for recognized instance extensions
        if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kInstanceExtensionNames)) {
            log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                    VALIDATION_ERROR_UNDEFINED, "UniqueObjects",
                    "Instance Extension %s is not supported by this layer. Using this extension may adversely affect validation "
                    "results and/or produce undefined behavior.",
                    pCreateInfo->ppEnabledExtensionNames[i]);
        }
    }
}

// Check enabled device extensions against supported device extension whitelist
static void DeviceExtensionWhitelist(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        // Check for recognized device extensions
        if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kDeviceExtensionNames)) {
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0, __LINE__,
                    VALIDATION_ERROR_UNDEFINED, "UniqueObjects",
                    "Device Extension %s is not supported by this layer. Using this extension may adversely affect validation "
                    "results and/or produce undefined behavior.",
                    pCreateInfo->ppEnabledExtensionNames[i]);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), instance_layer_data_map);
    instance_data->instance = *pInstance;
    layer_init_instance_dispatch_table(*pInstance, &instance_data->dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data = debug_report_create_instance(
        &instance_data->dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);

    // Set up temporary debug callbacks to output messages at CreateInstance-time
    if (!layer_copy_tmp_callbacks(pCreateInfo->pNext, &instance_data->num_tmp_callbacks, &instance_data->tmp_dbg_create_infos,
                                  &instance_data->tmp_callbacks)) {
        if (instance_data->num_tmp_callbacks > 0) {
            if (layer_enable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks,
                                           instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks)) {
                layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks);
                instance_data->num_tmp_callbacks = 0;
            }
        }
    }

    initUniqueObjects(instance_data, pAllocator);
    InstanceExtensionWhitelist(pCreateInfo, *pInstance);

    // Disable and free tmp callbacks, no longer necessary
    if (instance_data->num_tmp_callbacks > 0) {
        layer_disable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks, instance_data->tmp_callbacks);
        layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks);
        instance_data->num_tmp_callbacks = 0;
    }

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(instance);
    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
    VkLayerInstanceDispatchTable *disp_table = &instance_data->dispatch_table;
    disp_table->DestroyInstance(instance, pAllocator);

    // Clean up logging callback, if any
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(instance_data->report_data);
    FreeLayerDataPtr(key, instance_layer_data_map);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    instance_layer_data *my_instance_data = GetLayerDataPtr(get_dispatch_key(gpu), instance_layer_data_map);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);

    // Setup layer's device dispatch table
    layer_init_device_dispatch_table(*pDevice, &my_device_data->dispatch_table, fpGetDeviceProcAddr);

    DeviceExtensionWhitelist(pCreateInfo, *pDevice);

    // Save the instance-level layer data so that objects mapped at instance level (e.g., surfaces) can be reached from device calls
    my_device_data->instance_data = my_instance_data;

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);

    layer_debug_report_destroy_device(device);
    dev_data->dispatch_table.DestroyDevice(device, pAllocator);

    FreeLayerDataPtr(key, layer_data_map);
}

static const VkLayerProperties globalLayerProps = {"VK_LAYER_GOOGLE_unique_objects",
                                                   VK_LAYER_API_VERSION,  // specVersion
                                                   1,                     // implementationVersion
                                                   "Google Validation Layer"};

/// Declare prototype for this function
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(0, nullptr, pCount, pProperties);

    assert(physicalDevice);

    dispatch_key key = get_dispatch_key(physicalDevice);
    instance_layer_data *instance_data = GetLayerDataPtr(key, instance_layer_data_map);
    return instance_data->dispatch_table.EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

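// The *ProcAddr entry points first consult name_to_funcptr_map, which holds this layer's intercepted functions; any
// name not found there is forwarded to the next layer in the chain via the stored dispatch table.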
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    const auto &table = device_data->dispatch_table;
    if (!table.GetDeviceProcAddr) return nullptr;
    return table.GetDeviceProcAddr(device, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    const auto &table = instance_data->dispatch_table;
    if (!table.GetInstanceProcAddr) return nullptr;
    return table.GetInstanceProcAddr(instance, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    instance_layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), instance_layer_data_map);
    VkLayerInstanceDispatchTable *disp_table = &instance_data->dispatch_table;
    if (disp_table->GetPhysicalDeviceProcAddr == NULL) {
        return NULL;
    }
    return disp_table->GetPhysicalDeviceProcAddr(instance, funcName);
}

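// Pipeline creation follows the common pattern for the hand-written wrappers: deep-copy the create infos into safe_*
// structs, unwrap every embedded handle (pipeline cache, layouts, shader modules, base pipelines) under the global
// lock, call down the chain, then wrap each successfully created pipeline. Entries left as VK_NULL_HANDLE by a failed
// creation are skipped.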
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkComputePipelineCreateInfo *local_pCreateInfos = NULL;
    if (pCreateInfos) {
        std::lock_guard<std::mutex> lock(global_lock);
        local_pCreateInfos = new safe_VkComputePipelineCreateInfo[createInfoCount];
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle = Unwrap(device_data, pCreateInfos[idx0].basePipelineHandle);
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout = Unwrap(device_data, pCreateInfos[idx0].layout);
            }
            if (pCreateInfos[idx0].stage.module) {
                local_pCreateInfos[idx0].stage.module = Unwrap(device_data, pCreateInfos[idx0].stage.module);
            }
        }
    }
    if (pipelineCache) {
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = Unwrap(device_data, pipelineCache);
    }

    VkResult result = device_data->dispatch_table.CreateComputePipelines(device, pipelineCache, createInfoCount,
                                                                         local_pCreateInfos->ptr(), pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            if (pPipelines[i] != VK_NULL_HANDLE) {
                pPipelines[i] = WrapNew(device_data, pPipelines[i]);
            }
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = nullptr;
    if (pCreateInfos) {
        local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount];
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            bool uses_color_attachment = false;
            bool uses_depthstencil_attachment = false;
            {
                const auto subpasses_uses_it =
                    device_data->renderpasses_states.find(Unwrap(device_data, pCreateInfos[idx0].renderPass));
                if (subpasses_uses_it != device_data->renderpasses_states.end()) {
                    const auto &subpasses_uses = subpasses_uses_it->second;
                    if (subpasses_uses.subpasses_using_color_attachment.count(pCreateInfos[idx0].subpass))
                        uses_color_attachment = true;
                    if (subpasses_uses.subpasses_using_depthstencil_attachment.count(pCreateInfos[idx0].subpass))
                        uses_depthstencil_attachment = true;
                }
            }

            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0], uses_color_attachment, uses_depthstencil_attachment);

            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle = Unwrap(device_data, pCreateInfos[idx0].basePipelineHandle);
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout = Unwrap(device_data, pCreateInfos[idx0].layout);
            }
            if (pCreateInfos[idx0].pStages) {
                for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
                    if (pCreateInfos[idx0].pStages[idx1].module) {
                        local_pCreateInfos[idx0].pStages[idx1].module =
                            Unwrap(device_data, pCreateInfos[idx0].pStages[idx1].module);
                    }
                }
            }
            if (pCreateInfos[idx0].renderPass) {
                local_pCreateInfos[idx0].renderPass = Unwrap(device_data, pCreateInfos[idx0].renderPass);
            }
        }
    }
    if (pipelineCache) {
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = Unwrap(device_data, pipelineCache);
    }

    VkResult result = device_data->dispatch_table.CreateGraphicsPipelines(device, pipelineCache, createInfoCount,
                                                                          local_pCreateInfos->ptr(), pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            if (pPipelines[i] != VK_NULL_HANDLE) {
                pPipelines[i] = WrapNew(device_data, pPipelines[i]);
            }
        }
    }
    return result;
}

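// Render pass state is tracked here so that CreateGraphicsPipelines (above) can tell whether a pipeline's subpass
// actually uses color and/or depth/stencil attachments; the safe create-info initialization takes those flags when
// deciding which per-attachment state to copy.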
static void PostCallCreateRenderPass(layer_data *dev_data, const VkRenderPassCreateInfo *pCreateInfo, VkRenderPass renderPass) {
    auto &renderpass_state = dev_data->renderpasses_states[renderPass];

    for (uint32_t subpass = 0; subpass < pCreateInfo->subpassCount; ++subpass) {
        bool uses_color = false;
        for (uint32_t i = 0; i < pCreateInfo->pSubpasses[subpass].colorAttachmentCount && !uses_color; ++i)
            if (pCreateInfo->pSubpasses[subpass].pColorAttachments[i].attachment != VK_ATTACHMENT_UNUSED) uses_color = true;

        bool uses_depthstencil = false;
        if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment)
            if (pCreateInfo->pSubpasses[subpass].pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)
                uses_depthstencil = true;

        if (uses_color) renderpass_state.subpasses_using_color_attachment.insert(subpass);
        if (uses_depthstencil) renderpass_state.subpasses_using_depthstencil_attachment.insert(subpass);
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateRenderPass(VkDevice device, const VkRenderPassCreateInfo *pCreateInfo,
                                                const VkAllocationCallbacks *pAllocator, VkRenderPass *pRenderPass) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.CreateRenderPass(device, pCreateInfo, pAllocator, pRenderPass);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);

        PostCallCreateRenderPass(dev_data, pCreateInfo, *pRenderPass);

        *pRenderPass = WrapNew(dev_data, *pRenderPass);
    }
    return result;
}

static void PostCallDestroyRenderPass(layer_data *dev_data, VkRenderPass renderPass) {
    dev_data->renderpasses_states.erase(renderPass);
}

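// Destroy calls look up the driver handle in unique_id_mapping, erase the unique ID, and only then call down the chain
// with the real handle; the same pattern is used by DestroySwapchainKHR and DestroyDescriptorUpdateTemplateKHR below.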
VKAPI_ATTR void VKAPI_CALL DestroyRenderPass(VkDevice device, VkRenderPass renderPass, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    uint64_t renderPass_id = reinterpret_cast<uint64_t &>(renderPass);
    renderPass = (VkRenderPass)dev_data->unique_id_mapping[renderPass_id];
    dev_data->unique_id_mapping.erase(renderPass_id);
    lock.unlock();
    dev_data->dispatch_table.DestroyRenderPass(device, renderPass, pAllocator);

    lock.lock();
    PostCallDestroyRenderPass(dev_data, renderPass);
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
    layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkSwapchainCreateInfoKHR *local_pCreateInfo = NULL;
    if (pCreateInfo) {
        std::lock_guard<std::mutex> lock(global_lock);
        local_pCreateInfo = new safe_VkSwapchainCreateInfoKHR(pCreateInfo);
        local_pCreateInfo->oldSwapchain = Unwrap(my_map_data, pCreateInfo->oldSwapchain);
        // Surface is instance-level object
        local_pCreateInfo->surface = Unwrap(my_map_data->instance_data, pCreateInfo->surface);
    }

    VkResult result = my_map_data->dispatch_table.CreateSwapchainKHR(device, local_pCreateInfo->ptr(), pAllocator, pSwapchain);
    if (local_pCreateInfo) {
        delete local_pCreateInfo;
    }
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        *pSwapchain = WrapNew(my_map_data, *pSwapchain);
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateSharedSwapchainsKHR(VkDevice device, uint32_t swapchainCount,
                                                         const VkSwapchainCreateInfoKHR *pCreateInfos,
                                                         const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchains) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkSwapchainCreateInfoKHR *local_pCreateInfos = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pCreateInfos) {
            local_pCreateInfos = new safe_VkSwapchainCreateInfoKHR[swapchainCount];
            for (uint32_t i = 0; i < swapchainCount; ++i) {
                local_pCreateInfos[i].initialize(&pCreateInfos[i]);
                if (pCreateInfos[i].surface) {
                    // Surface is instance-level object
                    local_pCreateInfos[i].surface = Unwrap(dev_data->instance_data, pCreateInfos[i].surface);
                }
                if (pCreateInfos[i].oldSwapchain) {
                    local_pCreateInfos[i].oldSwapchain = Unwrap(dev_data, pCreateInfos[i].oldSwapchain);
                }
            }
        }
    }
    VkResult result = dev_data->dispatch_table.CreateSharedSwapchainsKHR(device, swapchainCount, local_pCreateInfos->ptr(),
                                                                         pAllocator, pSwapchains);
    if (local_pCreateInfos) delete[] local_pCreateInfos;
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < swapchainCount; i++) {
            pSwapchains[i] = WrapNew(dev_data, pSwapchains[i]);
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages) {
    layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_NULL_HANDLE != swapchain) {
        std::lock_guard<std::mutex> lock(global_lock);
        swapchain = Unwrap(my_device_data, swapchain);
    }
    VkResult result =
        my_device_data->dispatch_table.GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    // TODO : Need to add corresponding code to delete these images
    if (VK_SUCCESS == result) {
        if ((*pSwapchainImageCount > 0) && pSwapchainImages) {
            std::lock_guard<std::mutex> lock(global_lock);
            for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
                pSwapchainImages[i] = WrapNew(my_device_data, pSwapchainImages[i]);
            }
        }
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    uint64_t swapchain_id = reinterpret_cast<uint64_t &>(swapchain);
    swapchain = (VkSwapchainKHR)dev_data->unique_id_mapping[swapchain_id];
    dev_data->unique_id_mapping.erase(swapchain_id);
    lock.unlock();
    dev_data->dispatch_table.DestroySwapchainKHR(device, swapchain, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL QueuePresentKHR(VkQueue queue, const VkPresentInfoKHR *pPresentInfo) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    safe_VkPresentInfoKHR *local_pPresentInfo = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pPresentInfo) {
            local_pPresentInfo = new safe_VkPresentInfoKHR(pPresentInfo);
            if (local_pPresentInfo->pWaitSemaphores) {
                for (uint32_t index1 = 0; index1 < local_pPresentInfo->waitSemaphoreCount; ++index1) {
                    local_pPresentInfo->pWaitSemaphores[index1] = Unwrap(dev_data, pPresentInfo->pWaitSemaphores[index1]);
                }
            }
            if (local_pPresentInfo->pSwapchains) {
                for (uint32_t index1 = 0; index1 < local_pPresentInfo->swapchainCount; ++index1) {
                    local_pPresentInfo->pSwapchains[index1] = Unwrap(dev_data, pPresentInfo->pSwapchains[index1]);
                }
            }
        }
    }
    VkResult result = dev_data->dispatch_table.QueuePresentKHR(queue, local_pPresentInfo->ptr());

    // pResults is an output array embedded in a structure. The code generator neglects to copy back from the safe_* version,
    // so handle it as a special case here:
    if (pPresentInfo && pPresentInfo->pResults) {
        for (uint32_t i = 0; i < pPresentInfo->swapchainCount; i++) {
            pPresentInfo->pResults[i] = local_pPresentInfo->pResults[i];
        }
    }

    if (local_pPresentInfo) delete local_pPresentInfo;
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator,
                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pCreateInfo) {
            local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo);
            if (pCreateInfo->descriptorSetLayout) {
                local_create_info->descriptorSetLayout = Unwrap(dev_data, pCreateInfo->descriptorSetLayout);
            }
            if (pCreateInfo->pipelineLayout) {
                local_create_info->pipelineLayout = Unwrap(dev_data, pCreateInfo->pipelineLayout);
            }
        }
    }
    VkResult result = dev_data->dispatch_table.CreateDescriptorUpdateTemplateKHR(device, local_create_info->ptr(), pAllocator,
                                                                                 pDescriptorUpdateTemplate);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        *pDescriptorUpdateTemplate = WrapNew(dev_data, *pDescriptorUpdateTemplate);

        // Shadow template createInfo for later updates
        std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
        dev_data->desc_template_map[(uint64_t)*pDescriptorUpdateTemplate] = std::move(template_state);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    uint64_t descriptor_update_template_id = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    dev_data->desc_template_map.erase(descriptor_update_template_id);
    descriptorUpdateTemplate = (VkDescriptorUpdateTemplateKHR)dev_data->unique_id_mapping[descriptor_update_template_id];
    dev_data->unique_id_mapping.erase(descriptor_update_template_id);
    lock.unlock();
    dev_data->dispatch_table.DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
}

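// Descriptor update template data arrives as an opaque byte buffer laid out according to the shadowed template create
// info. This helper walks every template entry, computes the required size, and builds a malloc'd copy in which each
// VkDescriptorImageInfo/VkDescriptorBufferInfo/VkBufferView handle has been unwrapped; callers free the returned buffer.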
void *BuildUnwrappedUpdateTemplateBuffer(layer_data *dev_data, uint64_t descriptorUpdateTemplate, const void *pData) {
    auto const template_map_entry = dev_data->desc_template_map.find(descriptorUpdateTemplate);
    if (template_map_entry == dev_data->desc_template_map.end()) {
        assert(0);
    }
    auto const &create_info = template_map_entry->second->create_info;
    size_t allocation_size = 0;
    std::vector<std::tuple<size_t, VulkanObjectType, void *>> template_entries;

    for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) {
        for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) {
            size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride;
            char *update_entry = (char *)(pData) + offset;

            switch (create_info.pDescriptorUpdateEntries[i].descriptorType) {
                case VK_DESCRIPTOR_TYPE_SAMPLER:
                case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
                    auto image_entry = reinterpret_cast<VkDescriptorImageInfo *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorImageInfo));

                    VkDescriptorImageInfo *wrapped_entry = new VkDescriptorImageInfo(*image_entry);
                    wrapped_entry->sampler = Unwrap(dev_data, image_entry->sampler);
                    wrapped_entry->imageView = Unwrap(dev_data, image_entry->imageView);
                    template_entries.emplace_back(offset, kVulkanObjectTypeImage, reinterpret_cast<void *>(wrapped_entry));
                } break;

                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                    auto buffer_entry = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorBufferInfo));

                    VkDescriptorBufferInfo *wrapped_entry = new VkDescriptorBufferInfo(*buffer_entry);
                    wrapped_entry->buffer = Unwrap(dev_data, buffer_entry->buffer);
                    template_entries.emplace_back(offset, kVulkanObjectTypeBuffer, reinterpret_cast<void *>(wrapped_entry));
                } break;

                case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
                    auto buffer_view_handle = reinterpret_cast<VkBufferView *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkBufferView));

                    VkBufferView wrapped_entry = Unwrap(dev_data, *buffer_view_handle);
                    template_entries.emplace_back(offset, kVulkanObjectTypeBufferView, reinterpret_cast<void *>(wrapped_entry));
                } break;
                default:
                    assert(0);
                    break;
            }
        }
    }
    // Allocate required buffer size and populate with source/unwrapped data
    void *unwrapped_data = malloc(allocation_size);
    for (auto &this_entry : template_entries) {
        VulkanObjectType type = std::get<1>(this_entry);
        void *destination = (char *)unwrapped_data + std::get<0>(this_entry);
        void *source = (char *)std::get<2>(this_entry);

        switch (type) {
            case kVulkanObjectTypeImage:
                *(reinterpret_cast<VkDescriptorImageInfo *>(destination)) = *(reinterpret_cast<VkDescriptorImageInfo *>(source));
                delete reinterpret_cast<VkDescriptorImageInfo *>(source);
                break;
            case kVulkanObjectTypeBuffer:
                *(reinterpret_cast<VkDescriptorBufferInfo *>(destination)) = *(reinterpret_cast<VkDescriptorBufferInfo *>(source));
                delete reinterpret_cast<VkDescriptorBufferInfo *>(source);
                break;
            case kVulkanObjectTypeBufferView:
                *(reinterpret_cast<VkBufferView *>(destination)) = reinterpret_cast<VkBufferView>(source);
                break;
            default:
                assert(0);
                break;
        }
    }
    return (void *)unwrapped_data;
}

VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    void *unwrapped_buffer = nullptr;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        descriptorSet = Unwrap(dev_data, descriptorSet);
        descriptorUpdateTemplate = (VkDescriptorUpdateTemplateKHR)dev_data->unique_id_mapping[template_handle];
        unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(dev_data, template_handle, pData);
    }
    dev_data->dispatch_table.UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate, unwrapped_buffer);
    free(unwrapped_buffer);
}

VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    void *unwrapped_buffer = nullptr;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        descriptorUpdateTemplate = Unwrap(dev_data, descriptorUpdateTemplate);
        layout = Unwrap(dev_data, layout);
        unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(dev_data, template_handle, pData);
    }
    dev_data->dispatch_table.CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set,
                                                                 unwrapped_buffer);
    free(unwrapped_buffer);
}

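// Display and display-mode handles are owned by the physical device, so they are wrapped and unwrapped through the
// instance-level layer data rather than a device's map.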
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 712 | #ifndef __ANDROID__ |
| 713 | VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount, |
| 714 | VkDisplayPropertiesKHR *pProperties) { |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 715 | instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 716 | |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 717 | VkResult result = |
| 718 | my_map_data->dispatch_table.GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties); |
Chris Forbes | f1e49bf | 2017-05-02 17:36:57 -0700 | [diff] [blame] | 719 | if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { |
| 720 | std::lock_guard<std::mutex> lock(global_lock); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 721 | for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
Chris Forbes | f1e49bf | 2017-05-02 17:36:57 -0700 | [diff] [blame] | 722 | pProperties[idx0].display = WrapNew(my_map_data, pProperties[idx0].display); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 723 | } |
| 724 | } |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 725 | return result; |
| 726 | } |
| 727 | |
| 728 | VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex, |
| 729 | uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) { |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 730 | instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map); |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 731 | VkResult result = |
| 732 | my_map_data->dispatch_table.GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex, pDisplayCount, pDisplays); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 733 | if (VK_SUCCESS == result) { |
| 734 | if ((*pDisplayCount > 0) && pDisplays) { |
| 735 | std::lock_guard<std::mutex> lock(global_lock); |
| 736 | for (uint32_t i = 0; i < *pDisplayCount; i++) { |
Chris Forbes | 824c117 | 2017-05-02 17:45:29 -0700 | [diff] [blame] | 737 | // TODO: this looks like it really wants a /reverse/ mapping. What's going on here? |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 738 | auto it = my_map_data->unique_id_mapping.find(reinterpret_cast<const uint64_t &>(pDisplays[i])); |
| 739 | assert(it != my_map_data->unique_id_mapping.end()); |
| 740 | pDisplays[i] = reinterpret_cast<VkDisplayKHR &>(it->second); |
| 741 | } |
| 742 | } |
| 743 | } |
| 744 | return result; |
| 745 | } |
| 746 | |
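// GetDisplayModePropertiesKHR: unwrap the incoming display handle, dispatch down the chain, then wrap each
// returned display mode with a fresh unique ID so the application only ever sees layer-owned handles.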
| 747 | VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, |
| 748 | uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) { |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 749 | instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 750 | { |
| 751 | std::lock_guard<std::mutex> lock(global_lock); |
Chris Forbes | ef35afd | 2017-05-02 17:45:45 -0700 | [diff] [blame] | 752 | display = Unwrap(my_map_data, display); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 753 | } |
| 754 | |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 755 | VkResult result = my_map_data->dispatch_table.GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 756 | if ((result == VK_SUCCESS || result == VK_INCOMPLETE) && pProperties) { |
Chris Forbes | ef35afd | 2017-05-02 17:45:45 -0700 | [diff] [blame] | 757 | std::lock_guard<std::mutex> lock(global_lock); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 758 | for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) { |
Chris Forbes | ef35afd | 2017-05-02 17:45:45 -0700 | [diff] [blame] | 759 | pProperties[idx0].displayMode = WrapNew(my_map_data, pProperties[idx0].displayMode); |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 760 | } |
| 761 | } |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 762 | return result; |
| 763 | } |
Norbert Nopper | 1dec9a5 | 2016-11-25 07:55:13 +0100 | [diff] [blame] | 764 | |
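// GetDisplayPlaneCapabilitiesKHR: only the display mode handle needs unwrapping; the returned capabilities
// structure carries no handles, so no wrapping is required on the way back out.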
| 765 | VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode, |
| 766 | uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) { |
Chris Forbes | 5279a8c | 2017-05-02 16:26:23 -0700 | [diff] [blame] | 767 | instance_layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), instance_layer_data_map); |
Norbert Nopper | 1dec9a5 | 2016-11-25 07:55:13 +0100 | [diff] [blame] | 768 | { |
| 769 | std::lock_guard<std::mutex> lock(global_lock); |
Chris Forbes | 65029c1 | 2017-05-02 17:48:20 -0700 | [diff] [blame] | 770 | mode = Unwrap(my_map_data, mode); |
Norbert Nopper | 1dec9a5 | 2016-11-25 07:55:13 +0100 | [diff] [blame] | 771 | } |
Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 772 | VkResult result = my_map_data->dispatch_table.GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities); |
Norbert Nopper | 1dec9a5 | 2016-11-25 07:55:13 +0100 | [diff] [blame] | 773 | return result; |
| 774 | } |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 775 | #endif |
| 776 | |
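// VK_EXT_debug_marker: the tag/name info structs carry a raw 64-bit object handle supplied by the
// application. Since the application only ever sees wrapped handles, that handle must be translated back
// to the driver handle before the call is dispatched. A local safe_* copy of the struct is patched so the
// caller's const input is left untouched.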
Mark Lobodzinski | e4f2c5f | 2017-07-17 14:26:47 -0600 | [diff] [blame] | 777 | VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectTagEXT(VkDevice device, const VkDebugMarkerObjectTagInfoEXT *pTagInfo) { |
Mark Lobodzinski | a096c12 | 2017-03-16 11:54:35 -0600 | [diff] [blame] | 778 | layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| 779 | std::unique_ptr<safe_VkDebugMarkerObjectTagInfoEXT> local_tag_info(new safe_VkDebugMarkerObjectTagInfoEXT(pTagInfo)); |
| 780 | { |
| 781 | std::lock_guard<std::mutex> lock(global_lock); |
| 782 | auto it = device_data->unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_tag_info->object)); |
| 783 | if (it != device_data->unique_id_mapping.end()) { |
| 784 | local_tag_info->object = it->second; |
| 785 | } |
| 786 | } |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 787 | VkResult result = device_data->dispatch_table.DebugMarkerSetObjectTagEXT( |
Mark Lobodzinski | a096c12 | 2017-03-16 11:54:35 -0600 | [diff] [blame] | 788 | device, reinterpret_cast<VkDebugMarkerObjectTagInfoEXT *>(local_tag_info.get())); |
| 789 | return result; |
| 790 | } |
| 791 | |
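// DebugMarkerSetObjectNameEXT follows the same pattern as DebugMarkerSetObjectTagEXT above: copy the info
// struct, swap the wrapped object handle for the driver handle under global_lock, then dispatch.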
Mark Lobodzinski | e4f2c5f | 2017-07-17 14:26:47 -0600 | [diff] [blame] | 792 | VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) { |
Mark Lobodzinski | a096c12 | 2017-03-16 11:54:35 -0600 | [diff] [blame] | 793 | layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map); |
| 794 | std::unique_ptr<safe_VkDebugMarkerObjectNameInfoEXT> local_name_info(new safe_VkDebugMarkerObjectNameInfoEXT(pNameInfo)); |
| 795 | { |
| 796 | std::lock_guard<std::mutex> lock(global_lock); |
| 797 | auto it = device_data->unique_id_mapping.find(reinterpret_cast<uint64_t &>(local_name_info->object)); |
| 798 | if (it != device_data->unique_id_mapping.end()) { |
| 799 | local_name_info->object = it->second; |
| 800 | } |
| 801 | } |
Chris Forbes | 44c0530 | 2017-05-02 16:42:55 -0700 | [diff] [blame] | 802 | VkResult result = device_data->dispatch_table.DebugMarkerSetObjectNameEXT( |
Mark Lobodzinski | a096c12 | 2017-03-16 11:54:35 -0600 | [diff] [blame] | 803 | device, reinterpret_cast<VkDebugMarkerObjectNameInfoEXT *>(local_name_info.get())); |
| 804 | return result; |
| 805 | } |
| 806 | |
Mark Lobodzinski | 64318ba | 2017-01-26 13:34:13 -0700 | [diff] [blame] | 807 | } // namespace unique_objects |
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 808 | |
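// Loader-visible exports. These are the symbols the loader resolves from the layer library; each simply
// forwards into the unique_objects implementation above. The VK_NULL_HANDLE asserts reflect the
// expectation that the loader only calls these device-level enumeration exports to query the layer itself,
// so physicalDevice is always VK_NULL_HANDLE.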
Mark Lobodzinski | dc3bd85 | 2016-09-06 16:12:23 -0600 | [diff] [blame] | 809 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount, |
| 810 | VkExtensionProperties *pProperties) { |
| 811 | return unique_objects::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties); |
| 812 | } |
| 813 | |
| 814 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount, |
| 815 | VkLayerProperties *pProperties) { |
| 816 | return unique_objects::EnumerateInstanceLayerProperties(pCount, pProperties); |
| 817 | } |
| 818 | |
| 819 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount, |
| 820 | VkLayerProperties *pProperties) { |
| 821 | assert(physicalDevice == VK_NULL_HANDLE); |
| 822 | return unique_objects::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties); |
| 823 | } |
| 824 | |
| 825 | VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) { |
| 826 | return unique_objects::GetDeviceProcAddr(dev, funcName); |
| 827 | } |
| 828 | |
| 829 | VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) { |
| 830 | return unique_objects::GetInstanceProcAddr(instance, funcName); |
| 831 | } |
| 832 | |
| 833 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, |
| 834 | const char *pLayerName, uint32_t *pCount, |
| 835 | VkExtensionProperties *pProperties) { |
| 836 | assert(physicalDevice == VK_NULL_HANDLE); |
| 837 | return unique_objects::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties); |
| 838 | } |
Mark Young | 3938987 | 2017-01-19 21:10:49 -0700 | [diff] [blame] | 839 | |
Mark Lobodzinski | 729a8d3 | 2017-01-26 12:16:30 -0700 | [diff] [blame] | 840 | VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance, |
| 841 | const char *funcName) { |
Mark Young | 3938987 | 2017-01-19 21:10:49 -0700 | [diff] [blame] | 842 | return unique_objects::GetPhysicalDeviceProcAddr(instance, funcName); |
| 843 | } |
| 844 | |
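// Loader/layer interface negotiation. If the loader reports interface version 2 or newer, the layer
// publishes its GetProcAddr-style entry points through the structure; the two sides then settle on the
// smaller of the loader's version and CURRENT_LOADER_LAYER_INTERFACE_VERSION.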
| 845 | VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) { |
| 846 | assert(pVersionStruct != NULL); |
| 847 | assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT); |
| 848 | |
| 849 | // Fill in the function pointers if our version is at least capable of having the structure contain them. |
| 850 | if (pVersionStruct->loaderLayerInterfaceVersion >= 2) { |
| 851 | pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr; |
| 852 | pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr; |
| 853 | pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr; |
| 854 | } |
| 855 | |
| 856 | if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) { |
| 857 | unique_objects::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion; |
| 858 | } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) { |
| 859 | pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION; |
| 860 | } |
| 861 | |
| 862 | return VK_SUCCESS; |
| 863 | } |