blob: 6463490190006743107fae5041fc08ac713ae885 [file] [log] [blame]
/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Tobin Ehlis <tobin@lunarg.com>
 */
22
#include <cstdlib> // realloc/free for the queue-family-properties cache
#include <cstring> // memset/memcpy
#include <mutex>
#include <new>     // std::nothrow

#include "vulkan/vk_layer.h"
#include "vk_layer_extension_utils.h"
#include "vk_enum_string_helper.h"
#include "vk_layer_table.h"
#include "vk_layer_utils.h"
Mark Lobodzinskifae78852015-06-23 11:35:12 -060030
// Object Tracker ERROR codes -- carried in the msgCode field of debug-report
// messages emitted by this layer so tests/tools can match on them.
// NOTE(review): the leading-underscore-plus-capital tag name is a reserved
// identifier in C++; only the typedef name is used by callers.
typedef enum _OBJECT_TRACK_ERROR {
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_DESTROY_OBJECT_FAILED,    // Couldn't find object to be destroyed
    OBJTRACK_OBJECT_LEAK,              // OBJECT was not correctly freed/destroyed
    OBJTRACK_OBJCOUNT_MAX_EXCEEDED,    // Request for Object data in excess of max obj count
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
    OBJTRACK_DESCRIPTOR_POOL_MISMATCH, // Descriptor Pools specified incorrectly
    OBJTRACK_COMMAND_POOL_MISMATCH,    // Command Pools specified incorrectly
} OBJECT_TRACK_ERROR;
43
// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits {
    OBJSTATUS_NONE = 0x00000000,                     // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED = 0x00000001,       // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND = 0x00000002,           // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND = 0x00000004,             // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND = 0x00000008,        // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010,      // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED = 0x00000020,           // Memory object is currently mapped
    OBJSTATUS_COMMAND_BUFFER_SECONDARY = 0x00000040, // Command Buffer is of type SECONDARY
} ObjectStatusFlagBits;
Chia-I Wuf8693382015-04-16 22:02:10 +080056
// Bookkeeping record kept for every tracked Vulkan object; stored (by handle)
// in the per-type unordered_maps declared later in this file.
typedef struct _OBJTRACK_NODE {
    uint64_t vkObj;                     // Object handle
    VkDebugReportObjectTypeEXT objType; // Object type identifier
    ObjectStatusFlags status;           // Object state (ObjectStatusFlagBits)
    uint64_t parentObj;                 // Parent object (e.g. pool a command buffer came from)
    uint64_t belongsTo;                 // Object Scope -- owning device/instance
} OBJTRACK_NODE;
Mark Lobodzinskiaae93e52015-02-09 10:20:53 -060064
// Prototypes for the layer's non-standard "OBJTRACK_EXTENSIONS" entry points:
// report the number of currently tracked objects (total, or for one type).
uint64_t objTrackGetObjectCount(VkDevice device);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkDebugReportObjectTypeEXT type);

// Function-pointer typedefs applications use to call the entry points above.
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkDebugReportObjectTypeEXT);
Mark Lobodzinskifae78852015-06-23 11:35:12 -060072
Cody Northrop55443ef2015-09-28 15:09:32 -060073struct layer_data {
Mark Lobodzinskifae78852015-06-23 11:35:12 -060074 debug_report_data *report_data;
Cody Northrop9c93ec52016-04-28 09:55:08 -060075 // TODO: put instance data here
76 std::vector<VkDebugReportCallbackEXT> logging_callback;
77 bool wsi_enabled;
78 bool objtrack_extensions_enabled;
Ian Elliotted6b5ac2016-04-28 09:08:13 -060079 // The following are for keeping track of the temporary callbacks that can
80 // be used in vkCreateInstance and vkDestroyInstance:
81 uint32_t num_tmp_callbacks;
82 VkDebugReportCallbackCreateInfoEXT *tmp_dbg_create_infos;
83 VkDebugReportCallbackEXT *tmp_callbacks;
Cody Northrop55443ef2015-09-28 15:09:32 -060084
Ian Elliotted6b5ac2016-04-28 09:08:13 -060085 layer_data()
86 : report_data(nullptr), wsi_enabled(false), objtrack_extensions_enabled(false), num_tmp_callbacks(0),
87 tmp_dbg_create_infos(nullptr), tmp_callbacks(nullptr){};
Cody Northrop55443ef2015-09-28 15:09:32 -060088};
Mark Lobodzinskifae78852015-06-23 11:35:12 -060089
// Per-instance record of which instance-level extensions were enabled.
struct instExts {
    bool wsi_enabled;
};

// Keyed by the instance's dispatch-table pointer.
static std::unordered_map<void *, struct instExts> instanceExtMap;
static std::unordered_map<void *, layer_data *> layer_data_map;
static device_table_map object_tracker_device_table_map;
static instance_table_map object_tracker_instance_table_map;

// We need additionally validate image usage using a separate map
// of swapchain-created images
static std::unordered_map<uint64_t, OBJTRACK_NODE *> swapchainImageMap;

// Monotonically increasing id stamped into each "CREATE" log message.
static long long unsigned int object_track_index = 0;
// Guards the layer's global tracking state.
static std::mutex global_lock;

// Array size for the per-type object counters below.
#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT + 1)

static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};  // Live object count, per object type
static uint64_t numTotalObjs = 0;                 // Live object count across all types
static VkQueueFamilyProperties *queueInfo = NULL; // Cached queue family props (see setGpuQueueInfoState)
static uint32_t queueCount = 0;                   // Number of entries in queueInfo

// Explicit instantiation of the layer_data lookup helper for this layer's map.
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);
Mark Lobodzinski2eeb3c62015-09-01 08:52:55 -0600114
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600115//
116// Internal Object Tracker Functions
117//
118
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700119static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700120 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
121 VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);
Jon Ashburn8acd2332015-09-16 18:08:32 -0600122 PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700123 pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
124 pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
125 pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
126 pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
127 pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
Ian Elliott1064fe32015-07-06 14:31:32 -0600128 my_device_data->wsi_enabled = false;
Jon Ashburnf19916e2016-01-11 13:12:43 -0700129 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700130 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
Ian Elliott1064fe32015-07-06 14:31:32 -0600131 my_device_data->wsi_enabled = true;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600132
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700133 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], "OBJTRACK_EXTENSIONS") == 0)
Courtney Goeltzenleuchterfce8cd22015-07-05 22:13:43 -0600134 my_device_data->objtrack_extensions_enabled = true;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600135 }
136}
137
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700138static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
Jon Ashburn3dc39382015-09-17 10:00:32 -0600139 uint32_t i;
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700140 VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(object_tracker_instance_table_map, instance);
Jon Ashburn3dc39382015-09-17 10:00:32 -0600141 PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;
Michael Lentine56512bb2016-03-02 17:28:55 -0600142
143 pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700144 pDisp->GetPhysicalDeviceSurfaceSupportKHR =
145 (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
146 pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
147 (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
148 pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
149 (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
150 pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
151 (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
Mark Lobodzinskie86e1382015-11-24 15:50:44 -0700152
153#if VK_USE_PLATFORM_WIN32_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700154 pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
155 pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
156 (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700157#endif // VK_USE_PLATFORM_WIN32_KHR
158#ifdef VK_USE_PLATFORM_XCB_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700159 pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
160 pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
161 (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700162#endif // VK_USE_PLATFORM_XCB_KHR
163#ifdef VK_USE_PLATFORM_XLIB_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700164 pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
165 pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
166 (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700167#endif // VK_USE_PLATFORM_XLIB_KHR
168#ifdef VK_USE_PLATFORM_MIR_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700169 pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
170 pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
171 (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700172#endif // VK_USE_PLATFORM_MIR_KHR
173#ifdef VK_USE_PLATFORM_WAYLAND_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700174 pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
175 pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
176 (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700177#endif // VK_USE_PLATFORM_WAYLAND_KHR
178#ifdef VK_USE_PLATFORM_ANDROID_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700179 pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700180#endif // VK_USE_PLATFORM_ANDROID_KHR
Mark Lobodzinskie86e1382015-11-24 15:50:44 -0700181
Jon Ashburn3dc39382015-09-17 10:00:32 -0600182 instanceExtMap[pDisp].wsi_enabled = false;
Jon Ashburnf19916e2016-01-11 13:12:43 -0700183 for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700184 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
Jon Ashburn3dc39382015-09-17 10:00:32 -0600185 instanceExtMap[pDisp].wsi_enabled = true;
Jon Ashburn3dc39382015-09-17 10:00:32 -0600186 }
187}
188
// Indicate device or instance dispatch table type
typedef enum _DispTableType {
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;
194
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700195debug_report_data *mdd(const void *object) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600196 dispatch_key key = get_dispatch_key(object);
197 layer_data *my_data = get_my_data_ptr(key, layer_data_map);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600198 return my_data->report_data;
199}
200
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700201debug_report_data *mid(VkInstance object) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600202 dispatch_key key = get_dispatch_key(object);
203 layer_data *my_data = get_my_data_ptr(key, layer_data_map);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600204 return my_data->report_data;
205}
206
// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory mem;          // Referenced memory object
    struct _OT_MEM_INFO *pNextMI; // Next node in this queue's list
    struct _OT_MEM_INFO *pPrevMI; // Previous node in this queue's list

} OT_MEM_INFO;
214
// Track Queue information
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO *pMemRefList;      // Head of this queue's mem-ref list
    struct _OT_QUEUE_INFO *pNextQI; // Next queue in the global singly-linked list
    uint32_t queueNodeIndex;       // Queue family index (indexes queueInfo)
    VkQueue queue;                 // The queue handle this record tracks
    uint32_t refCount;
} OT_QUEUE_INFO;

// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;
226
// Convert an object type enum to an object type array index.
// The VkDebugReportObjectTypeEXT values are dense starting at zero, so the
// enum value itself is the index into the per-type counter array.
static uint32_t objTypeToIndex(uint32_t objType) { return objType; }
232
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600233// Add new queue to head of global queue list
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700234static void addQueueInfo(uint32_t queueNodeIndex, VkQueue queue) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600235 OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;
236
237 if (pQueueInfo != NULL) {
238 memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700239 pQueueInfo->queue = queue;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600240 pQueueInfo->queueNodeIndex = queueNodeIndex;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700241 pQueueInfo->pNextQI = g_pQueueInfo;
242 g_pQueueInfo = pQueueInfo;
243 } else {
244 log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue),
245 __LINE__, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
246 "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600247 }
248}
249
250// Destroy memRef lists and free all memory
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700251static void destroyQueueMemRefLists(void) {
252 OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600253 OT_QUEUE_INFO *pDelQueueInfo = NULL;
254 while (pQueueInfo != NULL) {
255 OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
256 while (pMemInfo != NULL) {
257 OT_MEM_INFO *pDelMemInfo = pMemInfo;
258 pMemInfo = pMemInfo->pNextMI;
259 delete pDelMemInfo;
260 }
261 pDelQueueInfo = pQueueInfo;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700262 pQueueInfo = pQueueInfo->pNextQI;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600263 delete pDelQueueInfo;
264 }
265 g_pQueueInfo = pQueueInfo;
266}
267
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700268static void setGpuQueueInfoState(uint32_t count, void *pData) {
Tony Barbour59a47322015-06-24 16:06:58 -0600269 queueCount = count;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700270 queueInfo = (VkQueueFamilyProperties *)realloc((void *)queueInfo, count * sizeof(VkQueueFamilyProperties));
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600271 if (queueInfo != NULL) {
Cody Northropd0802882015-08-03 17:04:53 -0600272 memcpy(queueInfo, pData, count * sizeof(VkQueueFamilyProperties));
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600273 }
274}
275
276// Check Queue type flags for selected queue operations
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700277static void validateQueueFlags(VkQueue queue, const char *function) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600278 OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
279 while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
280 pQueueInfo = pQueueInfo->pNextQI;
281 }
282 if (pQueueInfo != NULL) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700283 if ((queueInfo != NULL) && (queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) == 0) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700284 log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
285 reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
286 "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600287 }
288 }
289}
290
/* TODO: Port to new type safety */
#if 0
// NOTE: compiled out -- kept as a reference implementation for the pending
// port of per-object status validation to the type-safe handle wrappers.
// Check object status for selected flag state
static VkBool32
validate_status(
    VkObject dispatchable_object,
    VkObject vkObj,
    VkObjectType objType,
    ObjectStatusFlags status_mask,
    ObjectStatusFlags status_flag,
    VkFlags msg_flags,
    OBJECT_TRACK_ERROR error_code,
    const char *fail_msg)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        if ((pNode->status & status_mask) != status_flag) {
            char str[1024];
            log_msg(mdd(dispatchable_object), msg_flags, pNode->objType, vkObj, __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "OBJECT VALIDATION WARNING: %s object 0x%" PRIxLEAST64 ": %s", string_VkObjectType(objType),
                static_cast<uint64_t>(vkObj), fail_msg);
            return VK_FALSE;
        }
        return VK_TRUE;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), msg_flags, (VkObjectType) 0, vkObj, __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
            "Unable to obtain status for non-existent object 0x%" PRIxLEAST64 " of %s type",
            static_cast<uint64_t>(vkObj), string_VkObjectType(objType));
        return VK_FALSE;
    }
}
#endif
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600325
326#include "vk_dispatch_table_helper.h"
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600327
// Initialize this layer's debug-report logging: reads the
// "lunarg_object_tracker" layer settings and registers any callbacks they
// request into my_data->logging_callback.
static void init_object_tracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {

    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");
}
332
//
// Forward declarations
//

// create_*: register a newly created handle; validate_*: check a handle is
// live (optionally allowing VK_NULL_HANDLE); destroy_*: unregister a handle.
static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType);
static void create_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType);
static void create_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_device(VkPhysicalDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType);
static VkBool32 validate_image(VkQueue dispatchable_object, VkImage object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType,
                                  bool null_allowed);
static VkBool32 validate_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType,
                                bool null_allowed);
static VkBool32 validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object, VkDebugReportObjectTypeEXT objType,
                                         bool null_allowed);
static VkBool32 validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object,
                                               VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_command_pool(VkDevice dispatchable_object, VkCommandPool object, VkDebugReportObjectTypeEXT objType,
                                      bool null_allowed);
static VkBool32 validate_buffer(VkQueue dispatchable_object, VkBuffer object, VkDebugReportObjectTypeEXT objType,
                                bool null_allowed);
static void create_pipeline(VkDevice dispatchable_object, VkPipeline vkObj, VkDebugReportObjectTypeEXT objType);
static VkBool32 validate_pipeline_cache(VkDevice dispatchable_object, VkPipelineCache object, VkDebugReportObjectTypeEXT objType,
                                        bool null_allowed);
static VkBool32 validate_render_pass(VkDevice dispatchable_object, VkRenderPass object, VkDebugReportObjectTypeEXT objType,
                                     bool null_allowed);
static VkBool32 validate_shader_module(VkDevice dispatchable_object, VkShaderModule object, VkDebugReportObjectTypeEXT objType,
                                       bool null_allowed);
static VkBool32 validate_pipeline_layout(VkDevice dispatchable_object, VkPipelineLayout object, VkDebugReportObjectTypeEXT objType,
                                         bool null_allowed);
static VkBool32 validate_pipeline(VkDevice dispatchable_object, VkPipeline object, VkDebugReportObjectTypeEXT objType,
                                  bool null_allowed);
static void destroy_command_pool(VkDevice dispatchable_object, VkCommandPool object);
static void destroy_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
static void destroy_descriptor_set(VkDevice dispatchable_object, VkDescriptorSet object);
static void destroy_device_memory(VkDevice dispatchable_object, VkDeviceMemory object);
static void destroy_swapchain_khr(VkDevice dispatchable_object, VkSwapchainKHR object);
static VkBool32 set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
                                         ObjectStatusFlags status_flag);
static VkBool32 reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
                                           ObjectStatusFlags status_flag);
#if 0
static VkBool32 validate_status(VkDevice dispatchable_object, VkFence object, VkDebugReportObjectTypeEXT objType,
                                ObjectStatusFlags status_mask, ObjectStatusFlags status_flag, VkFlags msg_flags,
                                OBJECT_TRACK_ERROR error_code, const char *fail_msg);
#endif
// Per-handle tracking maps, one per tracked Vulkan object type; defined
// elsewhere in this layer (hence extern).
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkPhysicalDeviceMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkDeviceMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkImageMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkQueueMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkDescriptorSetMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkBufferMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkFenceMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSemaphoreMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandPoolMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandBufferMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSwapchainKHRMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSurfaceKHRMap;
Tony Barboura05dbaa2015-07-09 17:31:46 -0600392
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700393static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType) {
394 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
395 OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
396 string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
Tobin Ehlisec598302015-09-15 15:02:17 -0600397
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700398 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
Tobin Ehlisec598302015-09-15 15:02:17 -0600399 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700400 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700401 pNewObjNode->status = OBJSTATUS_NONE;
402 pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
Michael Lentine13803dc2015-11-04 14:35:12 -0800403 VkPhysicalDeviceMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
Tobin Ehlisec598302015-09-15 15:02:17 -0600404 uint32_t objIndex = objTypeToIndex(objType);
405 numObjs[objIndex]++;
406 numTotalObjs++;
407}
408
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700409static void create_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR vkObj, VkDebugReportObjectTypeEXT objType) {
Mark Lobodzinskib49b6e52015-11-26 10:59:58 -0700410 // TODO: Add tracking of surface objects
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700411 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
412 "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
413 string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));
Tobin Ehlis86684f92016-01-05 10:33:58 -0700414
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700415 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
Tobin Ehlis86684f92016-01-05 10:33:58 -0700416 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700417 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700418 pNewObjNode->status = OBJSTATUS_NONE;
419 pNewObjNode->vkObj = (uint64_t)(vkObj);
Tobin Ehlis86684f92016-01-05 10:33:58 -0700420 VkSurfaceKHRMap[(uint64_t)vkObj] = pNewObjNode;
421 uint32_t objIndex = objTypeToIndex(objType);
422 numObjs[objIndex]++;
423 numTotalObjs++;
Mark Lobodzinskib49b6e52015-11-26 10:59:58 -0700424}
425
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700426static void destroy_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR object) {
Mark Young93ecb1d2016-01-13 13:47:16 -0700427 uint64_t object_handle = (uint64_t)(object);
Tobin Ehlis86684f92016-01-05 10:33:58 -0700428 if (VkSurfaceKHRMap.find(object_handle) != VkSurfaceKHRMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700429 OBJTRACK_NODE *pNode = VkSurfaceKHRMap[(uint64_t)object];
Tobin Ehlis86684f92016-01-05 10:33:58 -0700430 uint32_t objIndex = objTypeToIndex(pNode->objType);
431 assert(numTotalObjs > 0);
432 numTotalObjs--;
433 assert(numObjs[objIndex] > 0);
434 numObjs[objIndex]--;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700435 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__,
436 OBJTRACK_NONE, "OBJTRACK",
437 "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
438 string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(object), numTotalObjs, numObjs[objIndex],
439 string_VkDebugReportObjectTypeEXT(pNode->objType));
Tobin Ehlis86684f92016-01-05 10:33:58 -0700440 delete pNode;
441 VkSurfaceKHRMap.erase(object_handle);
442 } else {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700443 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__,
444 OBJTRACK_NONE, "OBJTRACK",
445 "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
Tobin Ehlis86684f92016-01-05 10:33:58 -0700446 }
Mark Lobodzinskib49b6e52015-11-26 10:59:58 -0700447}
448
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700449static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj,
450 VkDebugReportObjectTypeEXT objType, VkCommandBufferLevel level) {
451 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE,
452 "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
453 string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
Tony Barboura05dbaa2015-07-09 17:31:46 -0600454
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700455 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
456 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700457 pNewObjNode->belongsTo = (uint64_t)device;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700458 pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
459 pNewObjNode->parentObj = (uint64_t)commandPool;
Mark Lobodzinski2fba0322016-01-23 18:31:23 -0700460 if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
461 pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
462 } else {
463 pNewObjNode->status = OBJSTATUS_NONE;
464 }
Michael Lentine13803dc2015-11-04 14:35:12 -0800465 VkCommandBufferMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
Tony Barboura05dbaa2015-07-09 17:31:46 -0600466 uint32_t objIndex = objTypeToIndex(objType);
467 numObjs[objIndex]++;
468 numTotalObjs++;
469}
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700470
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700471static void free_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer) {
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700472 uint64_t object_handle = reinterpret_cast<uint64_t>(commandBuffer);
473 if (VkCommandBufferMap.find(object_handle) != VkCommandBufferMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700474 OBJTRACK_NODE *pNode = VkCommandBufferMap[(uint64_t)commandBuffer];
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700475
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700476 if (pNode->parentObj != (uint64_t)(commandPool)) {
477 log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
478 OBJTRACK_COMMAND_POOL_MISMATCH, "OBJTRACK",
479 "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
480 " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
481 reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, (uint64_t)(commandPool));
482 } else {
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700483
484 uint32_t objIndex = objTypeToIndex(pNode->objType);
485 assert(numTotalObjs > 0);
486 numTotalObjs--;
487 assert(numObjs[objIndex] > 0);
488 numObjs[objIndex]--;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700489 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE,
490 "OBJTRACK", "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
491 string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer), numTotalObjs,
492 numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700493 delete pNode;
494 VkCommandBufferMap.erase(object_handle);
495 }
496 } else {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700497 log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
498 "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
499 object_handle);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700500 }
501}
502
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700503static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj,
504 VkDebugReportObjectTypeEXT objType) {
Mark Lobodzinski510e20d2016-02-11 09:26:16 -0700505 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700506 "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
507 (uint64_t)(vkObj));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700508
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700509 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
510 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700511 pNewObjNode->belongsTo = (uint64_t)device;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700512 pNewObjNode->status = OBJSTATUS_NONE;
513 pNewObjNode->vkObj = (uint64_t)(vkObj);
514 pNewObjNode->parentObj = (uint64_t)descriptorPool;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700515 VkDescriptorSetMap[(uint64_t)vkObj] = pNewObjNode;
516 uint32_t objIndex = objTypeToIndex(objType);
517 numObjs[objIndex]++;
518 numTotalObjs++;
519}
520
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700521static void free_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet) {
Mark Young93ecb1d2016-01-13 13:47:16 -0700522 uint64_t object_handle = (uint64_t)(descriptorSet);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700523 if (VkDescriptorSetMap.find(object_handle) != VkDescriptorSetMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700524 OBJTRACK_NODE *pNode = VkDescriptorSetMap[(uint64_t)descriptorSet];
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700525
Mark Young93ecb1d2016-01-13 13:47:16 -0700526 if (pNode->parentObj != (uint64_t)(descriptorPool)) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700527 log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
528 OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
529 "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
530 " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
531 (uint64_t)(descriptorSet), pNode->parentObj, (uint64_t)(descriptorPool));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700532 } else {
533 uint32_t objIndex = objTypeToIndex(pNode->objType);
534 assert(numTotalObjs > 0);
535 numTotalObjs--;
536 assert(numObjs[objIndex] > 0);
537 numObjs[objIndex]--;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700538 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE,
539 "OBJTRACK", "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
540 string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(descriptorSet), numTotalObjs, numObjs[objIndex],
541 string_VkDebugReportObjectTypeEXT(pNode->objType));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700542 delete pNode;
543 VkDescriptorSetMap.erase(object_handle);
544 }
545 } else {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700546 log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
547 "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
548 object_handle);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700549 }
550}
551
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700552static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType) {
553 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
554 OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
555 string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
Tobin Ehlisec598302015-09-15 15:02:17 -0600556
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700557 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
Tobin Ehlisec598302015-09-15 15:02:17 -0600558 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700559 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700560 pNewObjNode->status = OBJSTATUS_NONE;
561 pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
Michael Lentine13803dc2015-11-04 14:35:12 -0800562 VkQueueMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
Tobin Ehlisec598302015-09-15 15:02:17 -0600563 uint32_t objIndex = objTypeToIndex(objType);
564 numObjs[objIndex]++;
565 numTotalObjs++;
566}
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700567static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain) {
568 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)vkObj,
569 __LINE__, OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
570 "SwapchainImage", (uint64_t)(vkObj));
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600571
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700572 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
573 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
574 pNewObjNode->objType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
575 pNewObjNode->status = OBJSTATUS_NONE;
576 pNewObjNode->vkObj = (uint64_t)vkObj;
577 pNewObjNode->parentObj = (uint64_t)swapchain;
Mark Young93ecb1d2016-01-13 13:47:16 -0700578 swapchainImageMap[(uint64_t)(vkObj)] = pNewObjNode;
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600579}
580
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700581static void create_device(VkInstance dispatchable_object, VkDevice vkObj, VkDebugReportObjectTypeEXT objType) {
582 log_msg(mid(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
583 "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
584 string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700585
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700586 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700587 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
588 pNewObjNode->objType = objType;
589 pNewObjNode->status = OBJSTATUS_NONE;
590 pNewObjNode->vkObj = (uint64_t)(vkObj);
591 VkDeviceMap[(uint64_t)vkObj] = pNewObjNode;
592 uint32_t objIndex = objTypeToIndex(objType);
593 numObjs[objIndex]++;
594 numTotalObjs++;
595}
596
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600597//
598// Non-auto-generated API functions called by generated code
599//
// Layer intercept for vkCreateInstance.
// Walks the loader's layer chain to find the next vkCreateInstance, calls it,
// then initializes this layer's per-instance state: dispatch table, temporary
// debug-report callbacks, report data, and the tracking node for the new
// VkInstance.  Returns the downstream result (or
// VK_ERROR_INITIALIZATION_FAILED if the chain is broken).
VkResult explicit_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                 VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    // Resolve the next layer's vkCreateInstance before advancing the chain.
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    initInstanceTable(*pInstance, fpGetInstanceProcAddr, object_tracker_instance_table_map);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(object_tracker_instance_table_map, *pInstance);

    // Look for one or more debug report create info structures, and copy the
    // callback(s) for each one found (for use by vkDestroyInstance)
    layer_copy_tmp_callbacks(pCreateInfo->pNext, &my_data->num_tmp_callbacks, &my_data->tmp_dbg_create_infos,
                             &my_data->tmp_callbacks);

    my_data->report_data = debug_report_create_instance(pInstanceTable, *pInstance, pCreateInfo->enabledExtensionCount,
                                                        pCreateInfo->ppEnabledExtensionNames);

    init_object_tracker(my_data, pAllocator);
    createInstanceRegisterExtensions(pCreateInfo, *pInstance);

    // Track the new instance; it is its own dispatchable parent.
    create_instance(*pInstance, *pInstance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT);

    return result;
}
638
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700639void explicit_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice gpu, uint32_t *pCount, VkQueueFamilyProperties *pProperties) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700640 get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);
Tony Barbour59a47322015-06-24 16:06:58 -0600641
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600642 std::lock_guard<std::mutex> lock(global_lock);
643 if (pProperties != NULL) {
Cody Northropd0802882015-08-03 17:04:53 -0600644 setGpuQueueInfoState(*pCount, pProperties);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600645 }
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600646}
647
// Layer intercept for vkCreateDevice.
// Holds the global lock for the whole call, walks the loader's layer chain to
// the next vkCreateDevice, calls it, then sets up this layer's per-device
// state (report data, dispatch table, extension dispatch) and registers the
// new VkDevice under the instance that owns the physical device.
VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                               VkDevice *pDevice) {
    std::lock_guard<std::mutex> lock(global_lock);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    // Resolve the next layer's vkCreateDevice before advancing the chain.
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);

    initDeviceTable(*pDevice, fpGetDeviceProcAddr, object_tracker_device_table_map);

    createDeviceRegisterExtensions(pCreateInfo, *pDevice);

    // Track the new device under the instance that owns this physical device
    // (only possible if the physical device itself is tracked).
    if (VkPhysicalDeviceMap.find((uint64_t)gpu) != VkPhysicalDeviceMap.end()) {
        OBJTRACK_NODE *pNewObjNode = VkPhysicalDeviceMap[(uint64_t)gpu];
        create_device((VkInstance)pNewObjNode->belongsTo, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
    }

    return result;
}
684
// Layer intercept for vkEnumeratePhysicalDevices.
// Validates the instance handle, forwards the call, then (on success, and
// only when the caller supplied an output array) registers each returned
// physical device with the object tracker.
VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                           VkPhysicalDevice *pPhysicalDevices) {
    VkBool32 skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    // Lock is released around the downstream call to avoid holding it across
    // the driver.
    VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)
                          ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    lock.lock();
    if (result == VK_SUCCESS) {
        // pPhysicalDevices is NULL on the count-query call; nothing to track.
        if (pPhysicalDevices) {
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                create_physical_device(instance, pPhysicalDevices[i], VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT);
            }
        }
    }
    lock.unlock();
    return result;
}
706
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700707void explicit_GetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue *pQueue) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600708 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700709 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600710 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600711
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700712 get_dispatch_table(object_tracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600713
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600714 lock.lock();
Courtney Goeltzenleuchter06d89472015-10-20 16:40:38 -0600715 addQueueInfo(queueNodeIndex, *pQueue);
Courtney Goeltzenleuchter7415d5a2015-12-09 15:48:16 -0700716 create_queue(device, *pQueue, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600717}
718
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700719VkResult explicit_MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
720 void **ppData) {
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600721 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600722 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700723 skipCall |= set_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
724 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600725 lock.unlock();
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600726 if (skipCall == VK_TRUE)
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700727 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600728
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700729 VkResult result =
730 get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600731
732 return result;
733}
734
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700735void explicit_UnmapMemory(VkDevice device, VkDeviceMemory mem) {
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600736 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600737 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700738 skipCall |= reset_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
739 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600740 lock.unlock();
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600741 if (skipCall == VK_TRUE)
742 return;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600743
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700744 get_dispatch_table(object_tracker_device_table_map, device)->UnmapMemory(device, mem);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600745}
746
// Layer intercept for vkQueueBindSparse.
// Checks that the queue supports sparse binding and validates every buffer
// and image handle referenced by the bind infos, then forwards the call.
// Note: validation results are reported but do not block the downstream call.
VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    std::unique_lock<std::mutex> lock(global_lock);
    validateQueueFlags(queue, "QueueBindSparse");

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; j++)
            validate_buffer(queue, pBindInfo[i].pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, false);
        for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; j++)
            validate_image(queue, pBindInfo[i].pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
        for (uint32_t j = 0; j < pBindInfo[i].imageBindCount; j++)
            validate_image(queue, pBindInfo[i].pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
    }
    lock.unlock();

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
    return result;
}
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600765
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700766VkResult explicit_AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
767 VkCommandBuffer *pCommandBuffers) {
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700768 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600769 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700770 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
771 skipCall |= validate_command_pool(device, pAllocateInfo->commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600772 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700773
774 if (skipCall) {
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700775 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700776 }
777
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700778 VkResult result =
779 get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700780
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600781 lock.lock();
Jon Ashburnf19916e2016-01-11 13:12:43 -0700782 for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700783 alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
784 pAllocateInfo->level);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700785 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600786 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700787
788 return result;
789}
790
// Layer intercept for vkAllocateDescriptorSets.
// Validates the device, the descriptor pool, and every set layout in the
// allocate info; forwards the call; then, on success, registers each
// returned descriptor set with the object tracker.
VkResult explicit_AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                         VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |=
        validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
        skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i],
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
    }
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    // Only track the sets if the downstream allocation actually succeeded.
    if (VK_SUCCESS == result) {
        lock.lock();
        for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
            alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i],
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
        }
        lock.unlock();
    }

    return result;
}
820
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700821void explicit_FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
822 const VkCommandBuffer *pCommandBuffers) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600823 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700824 validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
825 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600826 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700827
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700828 get_dispatch_table(object_tracker_device_table_map, device)
829 ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700830
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600831 lock.lock();
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700832 for (uint32_t i = 0; i < commandBufferCount; i++) {
Michael Lentinefc6aa762015-11-20 12:11:42 -0800833 free_command_buffer(device, commandPool, *pCommandBuffers);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700834 pCommandBuffers++;
835 }
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700836}
837
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700838void explicit_DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600839 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700840 // A swapchain's images are implicitly deleted when the swapchain is deleted.
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600841 // Remove this swapchain's images from our map of such images.
Mark Lobodzinskif93272b2016-05-02 12:08:24 -0600842 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = swapchainImageMap.begin();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600843 while (itr != swapchainImageMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700844 OBJTRACK_NODE *pNode = (*itr).second;
Mark Young93ecb1d2016-01-13 13:47:16 -0700845 if (pNode->parentObj == (uint64_t)(swapchain)) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700846 swapchainImageMap.erase(itr++);
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600847 } else {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700848 ++itr;
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600849 }
850 }
Tobin Ehlis86684f92016-01-05 10:33:58 -0700851 destroy_swapchain_khr(device, swapchain);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600852 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600853
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700854 get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600855}
856
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700857void explicit_FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600858 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700859 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600860 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600861
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700862 get_dispatch_table(object_tracker_device_table_map, device)->FreeMemory(device, mem, pAllocator);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600863
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600864 lock.lock();
Michael Lentine13803dc2015-11-04 14:35:12 -0800865 destroy_device_memory(device, mem);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600866}
Tony Barboura05dbaa2015-07-09 17:31:46 -0600867
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700868VkResult explicit_FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
869 const VkDescriptorSet *pDescriptorSets) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600870 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700871 validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
872 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600873 lock.unlock();
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700874 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
875 ->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
Tony Barbour770f80d2015-07-20 10:52:13 -0600876
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600877 lock.lock();
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700878 for (uint32_t i = 0; i < count; i++) {
Michael Lentinefc6aa762015-11-20 12:11:42 -0800879 free_descriptor_set(device, descriptorPool, *pDescriptorSets++);
Tony Barbour770f80d2015-07-20 10:52:13 -0600880 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600881 lock.unlock();
Tony Barbour770f80d2015-07-20 10:52:13 -0600882 return result;
883}
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600884
// Layer intercept for vkDestroyDescriptorPool.
// Validates the device and pool; if validation fails the destroy is not
// forwarded.  Otherwise removes all descriptor sets owned by the pool (they
// are implicitly freed with it), removes the pool itself, and forwards the
// destroy down the chain.
void explicit_DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    VkBool32 skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    lock.unlock();
    if (skipCall) {
        return;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
    // Remove this pool's descriptor sets from our descriptorSet map.
    lock.lock();
    std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkDescriptorSetMap.begin();
    while (itr != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        // Advance before a possible erase: destroy_descriptor_set removes the
        // entry del_itr points at, which would invalidate a live iterator.
        auto del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(descriptorPool)) {
            destroy_descriptor_set(device, (VkDescriptorSet)((*del_itr).first));
        }
    }
    destroy_descriptor_pool(device, descriptorPool);
    lock.unlock();
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
}
909
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700910void explicit_DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700911 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600912 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700913 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
914 skipCall |= validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600915 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700916 if (skipCall) {
917 return;
918 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600919 lock.lock();
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700920 // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700921 // Remove this pool's cmdBuffers from our cmd buffer map.
Mark Lobodzinskif93272b2016-05-02 12:08:24 -0600922 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkCommandBufferMap.begin();
923 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator del_itr;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700924 while (itr != VkCommandBufferMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700925 OBJTRACK_NODE *pNode = (*itr).second;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700926 del_itr = itr++;
Mark Young93ecb1d2016-01-13 13:47:16 -0700927 if (pNode->parentObj == (uint64_t)(commandPool)) {
Tobin Ehlis4192fdf2016-04-18 15:40:59 -0600928 free_command_buffer(device, commandPool, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700929 }
930 }
931 destroy_command_pool(device, commandPool);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600932 lock.unlock();
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700933 get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700934}
935
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700936VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600937 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600938 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700939 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600940 lock.unlock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600941 if (skipCall)
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700942 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600943
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700944 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
945 ->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600946
947 if (pSwapchainImages != NULL) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600948 lock.lock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600949 for (uint32_t i = 0; i < *pCount; i++) {
950 create_swapchain_image_obj(device, pSwapchainImages[i], swapchain);
951 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600952 lock.unlock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600953 }
954 return result;
955}
956
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700957// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700958VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
959 const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
960 VkPipeline *pPipelines) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700961 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600962 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700963 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700964 if (pCreateInfos) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700965 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700966 if (pCreateInfos[idx0].basePipelineHandle) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700967 skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
968 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700969 }
970 if (pCreateInfos[idx0].layout) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700971 skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
972 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700973 }
974 if (pCreateInfos[idx0].pStages) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700975 for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700976 if (pCreateInfos[idx0].pStages[idx1].module) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700977 skipCall |= validate_shader_module(device, pCreateInfos[idx0].pStages[idx1].module,
978 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700979 }
980 }
981 }
982 if (pCreateInfos[idx0].renderPass) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700983 skipCall |=
984 validate_render_pass(device, pCreateInfos[idx0].renderPass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700985 }
986 }
987 }
988 if (pipelineCache) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700989 skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700990 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600991 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700992 if (skipCall)
993 return VK_ERROR_VALIDATION_FAILED_EXT;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700994 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
995 ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600996 lock.lock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700997 if (result == VK_SUCCESS) {
998 for (uint32_t idx2 = 0; idx2 < createInfoCount; ++idx2) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700999 create_pipeline(device, pPipelines[idx2], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001000 }
1001 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001002 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001003 return result;
1004}
1005
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001006// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001007VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
1008 const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
1009 VkPipeline *pPipelines) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001010 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001011 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001012 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001013 if (pCreateInfos) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001014 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001015 if (pCreateInfos[idx0].basePipelineHandle) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001016 skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
1017 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001018 }
1019 if (pCreateInfos[idx0].layout) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001020 skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
1021 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001022 }
1023 if (pCreateInfos[idx0].stage.module) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001024 skipCall |= validate_shader_module(device, pCreateInfos[idx0].stage.module,
1025 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001026 }
1027 }
1028 }
1029 if (pipelineCache) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001030 skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001031 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001032 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001033 if (skipCall)
1034 return VK_ERROR_VALIDATION_FAILED_EXT;
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001035 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
1036 ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001037 lock.lock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001038 if (result == VK_SUCCESS) {
1039 for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001040 create_pipeline(device, pPipelines[idx1], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001041 }
1042 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001043 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001044 return result;
1045}