/*
 * Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (c) 2015-2016 Google, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 */

#define NOMINMAX

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unordered_map>
#include <vector>
#include <list>
#include <memory>
#include <algorithm>

// For Windows, this #include must come before other Vk headers.
#include "vk_loader_platform.h"

#include "unique_objects.h"
#include "vk_dispatch_table_helper.h"
#include "vk_layer_config.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_logging.h"
#include "vk_layer_table.h"
#include "vk_layer_utils.h"
#include "vk_enum_string_helper.h"
#include "vk_validation_error_messages.h"
#include "vulkan/vk_layer.h"

// This intentionally includes a cpp file
#include "vk_safe_struct.cpp"

#include "unique_objects_wrappers.h"

namespace unique_objects {

static uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

static void initUniqueObjects(layer_data *instance_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_actions(instance_data->report_data, instance_data->logging_callback, pAllocator, "google_unique_objects");
}

// Handle CreateInstance Extensions
static void checkInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
    uint32_t i;
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *disp_table = instance_data->instance_dispatch_table;
    instance_ext_map[disp_table] = {};

    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0) {
            instance_ext_map[disp_table].wsi_enabled = true;
        }
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_DISPLAY_EXTENSION_NAME) == 0) {
            instance_ext_map[disp_table].display_enabled = true;
        }
#ifdef VK_USE_PLATFORM_XLIB_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME) == 0) {
            instance_ext_map[disp_table].xlib_enabled = true;
        }
#endif
#ifdef VK_USE_PLATFORM_XCB_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME) == 0) {
            instance_ext_map[disp_table].xcb_enabled = true;
        }
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME) == 0) {
            instance_ext_map[disp_table].wayland_enabled = true;
        }
#endif
#ifdef VK_USE_PLATFORM_MIR_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME) == 0) {
            instance_ext_map[disp_table].mir_enabled = true;
        }
#endif
#ifdef VK_USE_PLATFORM_ANDROID_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME) == 0) {
            instance_ext_map[disp_table].android_enabled = true;
        }
#endif
#ifdef VK_USE_PLATFORM_WIN32_KHR
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME) == 0) {
            instance_ext_map[disp_table].win32_enabled = true;
        }
#endif

        // Check for recognized instance extensions
        if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kUniqueObjectsSupportedInstanceExtensions)) {
            log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    __LINE__, VALIDATION_ERROR_UNDEFINED, "UniqueObjects",
                    "Instance Extension %s is not supported by this layer. Using this extension may adversely affect "
                    "validation results and/or produce undefined behavior.",
                    pCreateInfo->ppEnabledExtensionNames[i]);
        }
    }
}

// Handle CreateDevice Extensions
static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkLayerDispatchTable *disp_table = device_data->device_dispatch_table;
    PFN_vkGetDeviceProcAddr gpa = disp_table->GetDeviceProcAddr;

    disp_table->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
    disp_table->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
    disp_table->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
    disp_table->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
    disp_table->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
    device_data->wsi_enabled = false;

    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0) {
            device_data->wsi_enabled = true;
        }
        // Check for recognized device extensions
        if (!white_list(pCreateInfo->ppEnabledExtensionNames[i], kUniqueObjectsSupportedDeviceExtensions)) {
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, 0,
                    __LINE__, VALIDATION_ERROR_UNDEFINED, "UniqueObjects",
                    "Device Extension %s is not supported by this layer. Using this extension may adversely affect "
                    "validation results and/or produce undefined behavior.",
                    pCreateInfo->ppEnabledExtensionNames[i]);
        }
    }
}

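// Create the instance down the layer chain, then set up this layer's dispatch table, debug reporting, and extension state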
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), layer_data_map);
    instance_data->instance = *pInstance;
    instance_data->instance_dispatch_table = new VkLayerInstanceDispatchTable;
    layer_init_instance_dispatch_table(*pInstance, instance_data->instance_dispatch_table, fpGetInstanceProcAddr);

    instance_data->report_data =
        debug_report_create_instance(instance_data->instance_dispatch_table, *pInstance, pCreateInfo->enabledExtensionCount,
                                     pCreateInfo->ppEnabledExtensionNames);

    // Set up temporary debug callbacks to output messages at CreateInstance-time
    if (!layer_copy_tmp_callbacks(pCreateInfo->pNext, &instance_data->num_tmp_callbacks, &instance_data->tmp_dbg_create_infos,
                                  &instance_data->tmp_callbacks)) {
        if (instance_data->num_tmp_callbacks > 0) {
            if (layer_enable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks,
                                           instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks)) {
                layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks);
                instance_data->num_tmp_callbacks = 0;
            }
        }
    }

    initUniqueObjects(instance_data, pAllocator);
    checkInstanceRegisterExtensions(pCreateInfo, *pInstance);

    // Disable and free tmp callbacks, no longer necessary
    if (instance_data->num_tmp_callbacks > 0) {
        layer_disable_tmp_callbacks(instance_data->report_data, instance_data->num_tmp_callbacks, instance_data->tmp_callbacks);
        layer_free_tmp_callbacks(instance_data->tmp_dbg_create_infos, instance_data->tmp_callbacks);
        instance_data->num_tmp_callbacks = 0;
    }

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(instance);
    layer_data *instance_data = GetLayerDataPtr(key, layer_data_map);
    VkLayerInstanceDispatchTable *disp_table = instance_data->instance_dispatch_table;
    instance_ext_map.erase(disp_table);
    disp_table->DestroyInstance(instance, pAllocator);

    // Clean up logging callback, if any
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    layer_debug_report_destroy_instance(instance_data->report_data);
    layer_data_map.erase(key);
}

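// Create the device down the layer chain, then set up this layer's device dispatch table and device-level extension state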
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    layer_data *my_instance_data = GetLayerDataPtr(get_dispatch_key(gpu), layer_data_map);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);

    // Setup layer's device dispatch table
    my_device_data->device_dispatch_table = new VkLayerDispatchTable;
    layer_init_device_dispatch_table(*pDevice, my_device_data->device_dispatch_table, fpGetDeviceProcAddr);

    createDeviceRegisterExtensions(pCreateInfo, *pDevice);

    // Set gpu for this device in order to get at any objects mapped at instance level
    my_device_data->gpu = gpu;

    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    dispatch_key key = get_dispatch_key(device);
    layer_data *dev_data = GetLayerDataPtr(key, layer_data_map);

    layer_debug_report_destroy_device(device);
    dev_data->device_dispatch_table->DestroyDevice(device, pAllocator);
    layer_data_map.erase(key);
}

static const VkLayerProperties globalLayerProps = {"VK_LAYER_GOOGLE_unique_objects",
                                                   VK_LAYER_API_VERSION,  // specVersion
                                                   1,                     // implementationVersion
                                                   "Google Validation Layer"};

// Declare prototype for this function
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName);

static inline PFN_vkVoidFunction layer_intercept_proc(const char *name) {
    for (unsigned int i = 0; i < sizeof(procmap) / sizeof(procmap[0]); i++) {
        if (!strcmp(name, procmap[i].name)) return procmap[i].pFunc;
    }
    if (0 == strcmp(name, "vk_layerGetPhysicalDeviceProcAddr")) {
        return (PFN_vkVoidFunction)GetPhysicalDeviceProcAddr;
    }
    return NULL;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(0, NULL, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(0, nullptr, pCount, pProperties);

    assert(physicalDevice);

    dispatch_key key = get_dispatch_key(physicalDevice);
    layer_data *instance_data = GetLayerDataPtr(key, layer_data_map);
    return instance_data->instance_dispatch_table->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    PFN_vkVoidFunction addr;
    assert(device);
    addr = layer_intercept_proc(funcName);
    if (addr) {
        return addr;
    }

    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkLayerDispatchTable *disp_table = dev_data->device_dispatch_table;
    if (disp_table->GetDeviceProcAddr == NULL) {
        return NULL;
    }
    return disp_table->GetDeviceProcAddr(device, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    PFN_vkVoidFunction addr;

    addr = layer_intercept_proc(funcName);
    if (addr) {
        return addr;
    }
    assert(instance);

    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    addr = debug_report_get_instance_proc_addr(instance_data->report_data, funcName);
    if (addr) {
        return addr;
    }

    VkLayerInstanceDispatchTable *disp_table = instance_data->instance_dispatch_table;
    if (disp_table->GetInstanceProcAddr == NULL) {
        return NULL;
    }
    return disp_table->GetInstanceProcAddr(instance, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    assert(instance);

    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    VkLayerInstanceDispatchTable *disp_table = instance_data->instance_dispatch_table;
    if (disp_table->GetPhysicalDeviceProcAddr == NULL) {
        return NULL;
    }
    return disp_table->GetPhysicalDeviceProcAddr(instance, funcName);
}

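// Unwrap any handles in the dedicated-allocation extension struct, allocate the memory, then wrap the returned memory handle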
VKAPI_ATTR VkResult VKAPI_CALL AllocateMemory(VkDevice device, const VkMemoryAllocateInfo *pAllocateInfo,
                                              const VkAllocationCallbacks *pAllocator, VkDeviceMemory *pMemory) {
    const VkMemoryAllocateInfo *input_allocate_info = pAllocateInfo;
    std::unique_ptr<safe_VkMemoryAllocateInfo> safe_allocate_info;
    std::unique_ptr<safe_VkDedicatedAllocationMemoryAllocateInfoNV> safe_dedicated_allocate_info;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    if ((pAllocateInfo != nullptr) &&
        ContainsExtStruct(pAllocateInfo, VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV)) {
        // Assuming there is only one extension struct of this type in the list for now
        safe_dedicated_allocate_info =
            std::unique_ptr<safe_VkDedicatedAllocationMemoryAllocateInfoNV>(new safe_VkDedicatedAllocationMemoryAllocateInfoNV);
        safe_allocate_info = std::unique_ptr<safe_VkMemoryAllocateInfo>(new safe_VkMemoryAllocateInfo(pAllocateInfo));
        input_allocate_info = reinterpret_cast<const VkMemoryAllocateInfo *>(safe_allocate_info.get());

        const GenericHeader *orig_pnext = reinterpret_cast<const GenericHeader *>(pAllocateInfo->pNext);
        GenericHeader *input_pnext = reinterpret_cast<GenericHeader *>(safe_allocate_info.get());
        while (orig_pnext != nullptr) {
            if (orig_pnext->sType == VK_STRUCTURE_TYPE_DEDICATED_ALLOCATION_MEMORY_ALLOCATE_INFO_NV) {
                safe_dedicated_allocate_info->initialize(
                    reinterpret_cast<const VkDedicatedAllocationMemoryAllocateInfoNV *>(orig_pnext));

                std::unique_lock<std::mutex> lock(global_lock);

                if (safe_dedicated_allocate_info->buffer != VK_NULL_HANDLE) {
                    uint64_t local_buffer = reinterpret_cast<uint64_t &>(safe_dedicated_allocate_info->buffer);
                    safe_dedicated_allocate_info->buffer =
                        reinterpret_cast<VkBuffer &>(device_data->unique_id_mapping[local_buffer]);
                }

                if (safe_dedicated_allocate_info->image != VK_NULL_HANDLE) {
                    uint64_t local_image = reinterpret_cast<uint64_t &>(safe_dedicated_allocate_info->image);
                    safe_dedicated_allocate_info->image = reinterpret_cast<VkImage &>(device_data->unique_id_mapping[local_image]);
                }

                lock.unlock();

                input_pnext->pNext = reinterpret_cast<GenericHeader *>(safe_dedicated_allocate_info.get());
                input_pnext = reinterpret_cast<GenericHeader *>(input_pnext->pNext);
            } else {
                // TODO: generic handling of pNext copies
            }

            orig_pnext = reinterpret_cast<const GenericHeader *>(orig_pnext->pNext);
        }
    }

    VkResult result = device_data->device_dispatch_table->AllocateMemory(device, input_allocate_info, pAllocator, pMemory);

    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        uint64_t unique_id = global_unique_id++;
        device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(*pMemory);
        *pMemory = reinterpret_cast<VkDeviceMemory &>(unique_id);
    }

    return result;
}

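// Unwrap the handles referenced by each compute pipeline create info, create the pipelines, then wrap the returned pipelines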
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkComputePipelineCreateInfo *local_pCreateInfos = NULL;
    if (pCreateInfos) {
        std::lock_guard<std::mutex> lock(global_lock);
        local_pCreateInfos = new safe_VkComputePipelineCreateInfo[createInfoCount];
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle =
                    (VkPipeline)my_device_data
                        ->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].basePipelineHandle)];
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout =
                    (VkPipelineLayout)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].layout)];
            }
            if (pCreateInfos[idx0].stage.module) {
                local_pCreateInfos[idx0].stage.module =
                    (VkShaderModule)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].stage.module)];
            }
        }
    }
    if (pipelineCache) {
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = (VkPipelineCache)my_device_data->unique_id_mapping[reinterpret_cast<uint64_t &>(pipelineCache)];
    }

    VkResult result = my_device_data->device_dispatch_table->CreateComputePipelines(
        device, pipelineCache, createInfoCount, (const VkComputePipelineCreateInfo *)local_pCreateInfos, pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    {
        uint64_t unique_id = 0;
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            if (pPipelines[i] != VK_NULL_HANDLE) {
                unique_id = global_unique_id++;
                my_device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(pPipelines[i]);
                pPipelines[i] = reinterpret_cast<VkPipeline &>(unique_id);
            }
        }
    }
    return result;
}

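// Unwrap handles in each graphics pipeline create info (including per-stage shader modules), then wrap the resulting pipelines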
VKAPI_ATTR VkResult VKAPI_CALL CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                       const VkGraphicsPipelineCreateInfo *pCreateInfos,
                                                       const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkGraphicsPipelineCreateInfo *local_pCreateInfos = NULL;
    if (pCreateInfos) {
        local_pCreateInfos = new safe_VkGraphicsPipelineCreateInfo[createInfoCount];
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            local_pCreateInfos[idx0].initialize(&pCreateInfos[idx0]);
            if (pCreateInfos[idx0].basePipelineHandle) {
                local_pCreateInfos[idx0].basePipelineHandle =
                    (VkPipeline)my_device_data
                        ->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].basePipelineHandle)];
            }
            if (pCreateInfos[idx0].layout) {
                local_pCreateInfos[idx0].layout =
                    (VkPipelineLayout)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].layout)];
            }
            if (pCreateInfos[idx0].pStages) {
                for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
                    if (pCreateInfos[idx0].pStages[idx1].module) {
                        local_pCreateInfos[idx0].pStages[idx1].module =
                            (VkShaderModule)my_device_data
                                ->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].pStages[idx1].module)];
                    }
                }
            }
            if (pCreateInfos[idx0].renderPass) {
                local_pCreateInfos[idx0].renderPass =
                    (VkRenderPass)
                        my_device_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfos[idx0].renderPass)];
            }
        }
    }
    if (pipelineCache) {
        std::lock_guard<std::mutex> lock(global_lock);
        pipelineCache = (VkPipelineCache)my_device_data->unique_id_mapping[reinterpret_cast<uint64_t &>(pipelineCache)];
    }

    VkResult result = my_device_data->device_dispatch_table->CreateGraphicsPipelines(
        device, pipelineCache, createInfoCount, (const VkGraphicsPipelineCreateInfo *)local_pCreateInfos, pAllocator, pPipelines);
    delete[] local_pCreateInfos;
    {
        uint64_t unique_id = 0;
        std::lock_guard<std::mutex> lock(global_lock);
        for (uint32_t i = 0; i < createInfoCount; ++i) {
            if (pPipelines[i] != VK_NULL_HANDLE) {
                unique_id = global_unique_id++;
                my_device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(pPipelines[i]);
                pPipelines[i] = reinterpret_cast<VkPipeline &>(unique_id);
            }
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkDebugReportCallbackEXT *pMsgCallback) {
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    VkResult result =
        instance_data->instance_dispatch_table->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);

    if (VK_SUCCESS == result) {
        result = layer_create_msg_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMsgCallback);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT callback,
                                                         const VkAllocationCallbacks *pAllocator) {
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    instance_data->instance_dispatch_table->DestroyDebugReportCallbackEXT(instance, callback, pAllocator);
    layer_destroy_msg_callback(instance_data->report_data, callback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    instance_data->instance_dispatch_table->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode,
                                                                  pLayerPrefix, pMsg);
}

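// Unwrap the surface and oldSwapchain handles in the create info, create the swapchain, then wrap the new swapchain handle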
VKAPI_ATTR VkResult VKAPI_CALL CreateSwapchainKHR(VkDevice device, const VkSwapchainCreateInfoKHR *pCreateInfo,
                                                  const VkAllocationCallbacks *pAllocator, VkSwapchainKHR *pSwapchain) {
    layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkSwapchainCreateInfoKHR *local_pCreateInfo = NULL;
    if (pCreateInfo) {
        std::lock_guard<std::mutex> lock(global_lock);
        local_pCreateInfo = new safe_VkSwapchainCreateInfoKHR(pCreateInfo);
        local_pCreateInfo->oldSwapchain =
            (VkSwapchainKHR)my_map_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfo->oldSwapchain)];
        // Need to pull surface mapping from the instance-level map
        layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(my_map_data->gpu), layer_data_map);
        local_pCreateInfo->surface =
            (VkSurfaceKHR)instance_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfo->surface)];
    }

    VkResult result = my_map_data->device_dispatch_table->CreateSwapchainKHR(
        device, (const VkSwapchainCreateInfoKHR *)local_pCreateInfo, pAllocator, pSwapchain);
    if (local_pCreateInfo) {
        delete local_pCreateInfo;
    }
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        uint64_t unique_id = global_unique_id++;
        my_map_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(*pSwapchain);
        *pSwapchain = reinterpret_cast<VkSwapchainKHR &>(unique_id);
    }
    return result;
}

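// Unwrap the swapchain handle, query the images, then wrap each returned swapchain image handle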
VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages) {
    layer_data *my_device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (VK_NULL_HANDLE != swapchain) {
        std::lock_guard<std::mutex> lock(global_lock);
        swapchain = (VkSwapchainKHR)my_device_data->unique_id_mapping[reinterpret_cast<uint64_t &>(swapchain)];
    }
    VkResult result =
        my_device_data->device_dispatch_table->GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    // TODO : Need to add corresponding code to delete these images
    if (VK_SUCCESS == result) {
        if ((*pSwapchainImageCount > 0) && pSwapchainImages) {
            uint64_t unique_id = 0;
            std::lock_guard<std::mutex> lock(global_lock);
            for (uint32_t i = 0; i < *pSwapchainImageCount; ++i) {
                unique_id = global_unique_id++;
                my_device_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(pSwapchainImages[i]);
                pSwapchainImages[i] = reinterpret_cast<VkImage &>(unique_id);
            }
        }
    }
    return result;
}

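// Unwrap the layouts referenced by the template create info, create the template, wrap the new handle, and shadow the
// create info so that UpdateDescriptorSetWithTemplateKHR can later unwrap the handles packed into its pData blob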
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorUpdateTemplateKHR(VkDevice device,
                                                                 const VkDescriptorUpdateTemplateCreateInfoKHR *pCreateInfo,
                                                                 const VkAllocationCallbacks *pAllocator,
                                                                 VkDescriptorUpdateTemplateKHR *pDescriptorUpdateTemplate) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    safe_VkDescriptorUpdateTemplateCreateInfoKHR *local_create_info = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pCreateInfo) {
            local_create_info = new safe_VkDescriptorUpdateTemplateCreateInfoKHR(pCreateInfo);
            if (pCreateInfo->descriptorSetLayout) {
                local_create_info->descriptorSetLayout =
                    (VkDescriptorSetLayout)
                        dev_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfo->descriptorSetLayout)];
            }
            if (pCreateInfo->pipelineLayout) {
                local_create_info->pipelineLayout =
                    (VkPipelineLayout)dev_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pCreateInfo->pipelineLayout)];
            }
        }
    }
    VkResult result = dev_data->device_dispatch_table->CreateDescriptorUpdateTemplateKHR(
        device, (const VkDescriptorUpdateTemplateCreateInfoKHR *)local_create_info, pAllocator, pDescriptorUpdateTemplate);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        uint64_t unique_id = global_unique_id++;
        dev_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(*pDescriptorUpdateTemplate);
        *pDescriptorUpdateTemplate = reinterpret_cast<VkDescriptorUpdateTemplateKHR &>(unique_id);

        // Shadow template createInfo for later updates
        std::unique_ptr<TEMPLATE_STATE> template_state(new TEMPLATE_STATE(*pDescriptorUpdateTemplate, local_create_info));
        dev_data->desc_template_map[unique_id] = std::move(template_state);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorUpdateTemplateKHR(VkDevice device,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const VkAllocationCallbacks *pAllocator) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    uint64_t descriptor_update_template_id = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    dev_data->desc_template_map.erase(descriptor_update_template_id);
    descriptorUpdateTemplate = (VkDescriptorUpdateTemplateKHR)dev_data->unique_id_mapping[descriptor_update_template_id];
    dev_data->unique_id_mapping.erase(descriptor_update_template_id);
    lock.unlock();
    dev_data->device_dispatch_table->DestroyDescriptorUpdateTemplateKHR(device, descriptorUpdateTemplate, pAllocator);
}

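// Walk the shadowed template create info and build a deep copy of the caller's pData blob in which every wrapped
// image, buffer, and buffer-view handle has been replaced with the corresponding driver handle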
void *BuildUnwrappedUpdateTemplateBuffer(layer_data *dev_data, uint64_t descriptorUpdateTemplate, const void *pData) {
    auto const template_map_entry = dev_data->desc_template_map.find(descriptorUpdateTemplate);
    if (template_map_entry == dev_data->desc_template_map.end()) {
        assert(0);
    }
    auto const &create_info = template_map_entry->second->create_info;
    size_t allocation_size = 0;
    std::vector<std::tuple<size_t, VkDebugReportObjectTypeEXT, void *>> template_entries;

    for (uint32_t i = 0; i < create_info.descriptorUpdateEntryCount; i++) {
        for (uint32_t j = 0; j < create_info.pDescriptorUpdateEntries[i].descriptorCount; j++) {
            size_t offset = create_info.pDescriptorUpdateEntries[i].offset + j * create_info.pDescriptorUpdateEntries[i].stride;
            char *update_entry = (char *)(pData) + offset;

            switch (create_info.pDescriptorUpdateEntries[i].descriptorType) {
                case VK_DESCRIPTOR_TYPE_SAMPLER:
                case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
                case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
                case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
                case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: {
                    auto image_entry = reinterpret_cast<VkDescriptorImageInfo *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorImageInfo));

                    VkDescriptorImageInfo *wrapped_entry = new VkDescriptorImageInfo(*image_entry);
                    wrapped_entry->sampler = reinterpret_cast<VkSampler &>(
                        dev_data->unique_id_mapping[reinterpret_cast<uint64_t &>(image_entry->sampler)]);
                    wrapped_entry->imageView = reinterpret_cast<VkImageView &>(
                        dev_data->unique_id_mapping[reinterpret_cast<uint64_t &>(image_entry->imageView)]);
                    template_entries.emplace_back(offset, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
                                                  reinterpret_cast<void *>(wrapped_entry));
                } break;

                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
                case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
                case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
                    auto buffer_entry = reinterpret_cast<VkDescriptorBufferInfo *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkDescriptorBufferInfo));

                    VkDescriptorBufferInfo *wrapped_entry = new VkDescriptorBufferInfo(*buffer_entry);
                    wrapped_entry->buffer = reinterpret_cast<VkBuffer &>(
                        dev_data->unique_id_mapping[reinterpret_cast<uint64_t &>(buffer_entry->buffer)]);
                    template_entries.emplace_back(offset, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
                                                  reinterpret_cast<void *>(wrapped_entry));
                } break;

                case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
                case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
                    auto buffer_view_handle = reinterpret_cast<uint64_t *>(update_entry);
                    allocation_size = std::max(allocation_size, offset + sizeof(VkBufferView));

                    uint64_t wrapped_entry = dev_data->unique_id_mapping[reinterpret_cast<uint64_t &>(*buffer_view_handle)];
                    template_entries.emplace_back(offset, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT,
                                                  reinterpret_cast<void *>(wrapped_entry));
                } break;
                default:
                    assert(0);
                    break;
            }
        }
    }
    // Allocate required buffer size and populate with source/unwrapped data
    void *unwrapped_data = malloc(allocation_size);
    for (auto &this_entry : template_entries) {
        VkDebugReportObjectTypeEXT type = std::get<1>(this_entry);
        void *destination = (char *)unwrapped_data + std::get<0>(this_entry);
        void *source = (char *)std::get<2>(this_entry);

        switch (type) {
            case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
                *(reinterpret_cast<VkDescriptorImageInfo *>(destination)) = *(reinterpret_cast<VkDescriptorImageInfo *>(source));
                delete reinterpret_cast<VkDescriptorImageInfo *>(source);
                break;
            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
                *(reinterpret_cast<VkDescriptorBufferInfo *>(destination)) = *(reinterpret_cast<VkDescriptorBufferInfo *>(source));
                delete reinterpret_cast<VkDescriptorBufferInfo *>(source);
                break;
            case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT:
                *(reinterpret_cast<VkBufferView *>(destination)) = reinterpret_cast<VkBufferView>(source);
                break;
            default:
                assert(0);
                break;
        }
    }
    return (void *)unwrapped_data;
}

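// Unwrap the descriptor set and template handles, then pass an unwrapped copy of pData down the chain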
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSetWithTemplateKHR(VkDevice device, VkDescriptorSet descriptorSet,
                                                              VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                              const void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    uint64_t template_handle = reinterpret_cast<uint64_t &>(descriptorUpdateTemplate);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        descriptorSet = (VkDescriptorSet)dev_data->unique_id_mapping[reinterpret_cast<uint64_t &>(descriptorSet)];
        descriptorUpdateTemplate = (VkDescriptorUpdateTemplateKHR)dev_data->unique_id_mapping[template_handle];
    }
    void *unwrapped_buffer = BuildUnwrappedUpdateTemplateBuffer(dev_data, template_handle, pData);
    dev_data->device_dispatch_table->UpdateDescriptorSetWithTemplateKHR(device, descriptorSet, descriptorUpdateTemplate,
                                                                        unwrapped_buffer);
    free(unwrapped_buffer);
}

VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetWithTemplateKHR(VkCommandBuffer commandBuffer,
                                                               VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
                                                               VkPipelineLayout layout, uint32_t set, const void *pData) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        descriptorUpdateTemplate =
            (VkDescriptorUpdateTemplateKHR)dev_data->unique_id_mapping[reinterpret_cast<uint64_t &>(descriptorUpdateTemplate)];
        layout = (VkPipelineLayout)dev_data->unique_id_mapping[reinterpret_cast<uint64_t &>(layout)];
    }
    dev_data->device_dispatch_table->CmdPushDescriptorSetWithTemplateKHR(commandBuffer, descriptorUpdateTemplate, layout, set,
                                                                         pData);
}

#ifndef __ANDROID__
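// Query display properties from the driver, then wrap each returned VkDisplayKHR handle before handing results to the app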
VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                     VkDisplayPropertiesKHR *pProperties) {
    layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    safe_VkDisplayPropertiesKHR *local_pProperties = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        if (pProperties) {
            local_pProperties = new safe_VkDisplayPropertiesKHR[*pPropertyCount];
            for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
                local_pProperties[idx0].initialize(&pProperties[idx0]);
                if (pProperties[idx0].display) {
                    local_pProperties[idx0].display =
                        (VkDisplayKHR)
                            my_map_data->unique_id_mapping[reinterpret_cast<const uint64_t &>(pProperties[idx0].display)];
                }
            }
        }
    }

    VkResult result = my_map_data->instance_dispatch_table->GetPhysicalDeviceDisplayPropertiesKHR(
        physicalDevice, pPropertyCount, (VkDisplayPropertiesKHR *)local_pProperties);
    if (result == VK_SUCCESS && pProperties) {
        for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
            std::lock_guard<std::mutex> lock(global_lock);

            uint64_t unique_id = global_unique_id++;
            my_map_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(local_pProperties[idx0].display);
            pProperties[idx0].display = reinterpret_cast<VkDisplayKHR &>(unique_id);
            pProperties[idx0].displayName = local_pProperties[idx0].displayName;
            pProperties[idx0].physicalDimensions = local_pProperties[idx0].physicalDimensions;
            pProperties[idx0].physicalResolution = local_pProperties[idx0].physicalResolution;
            pProperties[idx0].supportedTransforms = local_pProperties[idx0].supportedTransforms;
            pProperties[idx0].planeReorderPossible = local_pProperties[idx0].planeReorderPossible;
            pProperties[idx0].persistentContent = local_pProperties[idx0].persistentContent;
        }
    }
    if (local_pProperties) {
        delete[] local_pProperties;
    }
    return result;
}

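// Replace each display handle returned by the driver with its entry from the layer's unique-ID map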
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneSupportedDisplaysKHR(VkPhysicalDevice physicalDevice, uint32_t planeIndex,
                                                                   uint32_t *pDisplayCount, VkDisplayKHR *pDisplays) {
    layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkResult result = my_map_data->instance_dispatch_table->GetDisplayPlaneSupportedDisplaysKHR(physicalDevice, planeIndex,
                                                                                                pDisplayCount, pDisplays);
    if (VK_SUCCESS == result) {
        if ((*pDisplayCount > 0) && pDisplays) {
            std::lock_guard<std::mutex> lock(global_lock);
            for (uint32_t i = 0; i < *pDisplayCount; i++) {
                auto it = my_map_data->unique_id_mapping.find(reinterpret_cast<const uint64_t &>(pDisplays[i]));
                assert(it != my_map_data->unique_id_mapping.end());
                pDisplays[i] = reinterpret_cast<VkDisplayKHR &>(it->second);
            }
        }
    }
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
                                                           uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) {
    layer_data *my_map_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkDisplayModePropertiesKHR *local_pProperties = NULL;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        display = (VkDisplayKHR)my_map_data->unique_id_mapping[reinterpret_cast<uint64_t &>(display)];
        if (pProperties) {
            local_pProperties = new VkDisplayModePropertiesKHR[*pPropertyCount];
        }
    }

    VkResult result = my_map_data->instance_dispatch_table->GetDisplayModePropertiesKHR(
        physicalDevice, display, pPropertyCount, (VkDisplayModePropertiesKHR *)local_pProperties);
    if (result == VK_SUCCESS && pProperties) {
        for (uint32_t idx0 = 0; idx0 < *pPropertyCount; ++idx0) {
            std::lock_guard<std::mutex> lock(global_lock);

            uint64_t unique_id = global_unique_id++;
            my_map_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(local_pProperties[idx0].displayMode);
            pProperties[idx0].displayMode = reinterpret_cast<VkDisplayModeKHR &>(unique_id);
            pProperties[idx0].parameters.visibleRegion.width = local_pProperties[idx0].parameters.visibleRegion.width;
            pProperties[idx0].parameters.visibleRegion.height = local_pProperties[idx0].parameters.visibleRegion.height;
            pProperties[idx0].parameters.refreshRate = local_pProperties[idx0].parameters.refreshRate;
        }
    }
    if (local_pProperties) {
        delete[] local_pProperties;
    }
    return result;
}

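// Translate the incoming display mode through the unique-ID map, registering the handle if it has not been seen before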
VKAPI_ATTR VkResult VKAPI_CALL GetDisplayPlaneCapabilitiesKHR(VkPhysicalDevice physicalDevice, VkDisplayModeKHR mode,
                                                              uint32_t planeIndex, VkDisplayPlaneCapabilitiesKHR *pCapabilities) {
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    {
        std::lock_guard<std::mutex> lock(global_lock);
        auto it = dev_data->unique_id_mapping.find(reinterpret_cast<uint64_t &>(mode));
        if (it == dev_data->unique_id_mapping.end()) {
            uint64_t unique_id = global_unique_id++;
            dev_data->unique_id_mapping[unique_id] = reinterpret_cast<uint64_t &>(mode);

            mode = reinterpret_cast<VkDisplayModeKHR &>(unique_id);
        } else {
            mode = reinterpret_cast<VkDisplayModeKHR &>(it->second);
        }
    }
    VkResult result =
        dev_data->instance_dispatch_table->GetDisplayPlaneCapabilitiesKHR(physicalDevice, mode, planeIndex, pCapabilities);
    return result;
}
#endif

}  // namespace unique_objects

// vk_layer_logging.h expects these to be defined
VKAPI_ATTR VkResult VKAPI_CALL vkCreateDebugReportCallbackEXT(VkInstance instance,
                                                              const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                                                              const VkAllocationCallbacks *pAllocator,
                                                              VkDebugReportCallbackEXT *pMsgCallback) {
    return unique_objects::CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pMsgCallback);
}

VKAPI_ATTR void VKAPI_CALL vkDestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                                           const VkAllocationCallbacks *pAllocator) {
    unique_objects::DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
}

VKAPI_ATTR void VKAPI_CALL vkDebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
                                                   VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
                                                   int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    unique_objects::DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return unique_objects::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return unique_objects::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    assert(physicalDevice == VK_NULL_HANDLE);
    return unique_objects::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return unique_objects::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return unique_objects::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    assert(physicalDevice == VK_NULL_HANDLE);
    return unique_objects::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return unique_objects::GetPhysicalDeviceProcAddr(instance, funcName);
}

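// Loader/layer interface negotiation: report this layer's entry points and record the interface version the loader supports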
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if our version is at least capable of having the structure contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        unique_objects::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}