/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Tobin Ehlis <tobin@lunarg.com>
 */

#include <unordered_map> // for the std::unordered_map containers used below

#include "vulkan/vk_layer.h"
#include "vk_layer_extension_utils.h"
#include "vk_enum_string_helper.h"
#include "vk_layer_table.h"

// Object Tracker ERROR codes
typedef enum _OBJECT_TRACK_ERROR {
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_DESTROY_OBJECT_FAILED,    // Couldn't find object to be destroyed
    OBJTRACK_OBJECT_LEAK,              // OBJECT was not correctly freed/destroyed
    OBJTRACK_OBJCOUNT_MAX_EXCEEDED,    // Request for Object data in excess of max obj count
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
    OBJTRACK_DESCRIPTOR_POOL_MISMATCH, // Descriptor Pools specified incorrectly
    OBJTRACK_COMMAND_POOL_MISMATCH,    // Command Pools specified incorrectly
} OBJECT_TRACK_ERROR;

// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits {
    OBJSTATUS_NONE = 0x00000000,                     // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED = 0x00000001,       // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND = 0x00000002,           // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND = 0x00000004,             // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND = 0x00000008,        // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010,      // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED = 0x00000020,           // Memory object is currently mapped
    OBJSTATUS_COMMAND_BUFFER_SECONDARY = 0x00000040, // Command Buffer is of type SECONDARY
} ObjectStatusFlagBits;
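// Illustrative sketch (not part of the layer's API): status bits are ORed into an
// OBJTRACK_NODE's status field when a state transition is observed and later tested
// against a mask, e.g.
//     pNode->status |= OBJSTATUS_GPU_MEM_MAPPED;             // set on vkMapMemory
//     pNode->status &= ~OBJSTATUS_GPU_MEM_MAPPED;            // cleared on vkUnmapMemory
//     if ((pNode->status & status_mask) != status_flag) ...  // reported as a warning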

typedef struct _OBJTRACK_NODE {
    uint64_t vkObj;                     // Object handle
    VkDebugReportObjectTypeEXT objType; // Object type identifier
    ObjectStatusFlags status;           // Object state
    uint64_t parentObj;                 // Parent object
    uint64_t belongsTo;                 // Object Scope -- owning device/instance
} OBJTRACK_NODE;
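// One OBJTRACK_NODE is allocated per tracked Vulkan handle and stored in a per-type
// unordered_map keyed by the 64-bit handle value (see the extern map declarations below).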

// prototype for extension functions
uint64_t objTrackGetObjectCount(VkDevice device);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkDebugReportObjectTypeEXT type);

// Func ptr typedefs
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkDebugReportObjectTypeEXT);
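// Sketch of how an application might reach these counters (assumes they are returned
// through vkGetDeviceProcAddr once the "OBJTRACK_EXTENSIONS" string has been passed at
// device creation -- see objtrack_extensions_enabled below):
//     OBJ_TRACK_GET_OBJECT_COUNT pfnGetCount =
//         (OBJ_TRACK_GET_OBJECT_COUNT)vkGetDeviceProcAddr(device, "objTrackGetObjectCount");
//     uint64_t live = pfnGetCount ? pfnGetCount(device) : 0;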

struct layer_data {
    debug_report_data *report_data;
    // TODO: put instance data here
    VkDebugReportCallbackEXT logging_callback;
    bool wsi_enabled;
    bool objtrack_extensions_enabled;

    layer_data() : report_data(nullptr), logging_callback(VK_NULL_HANDLE), wsi_enabled(false), objtrack_extensions_enabled(false){};
};

struct instExts {
    bool wsi_enabled;
};

static std::unordered_map<void *, struct instExts> instanceExtMap;
static std::unordered_map<void *, layer_data *> layer_data_map;
static device_table_map object_tracker_device_table_map;
static instance_table_map object_tracker_instance_table_map;

// We need to additionally validate image usage using a separate map
// of swapchain-created images
static unordered_map<uint64_t, OBJTRACK_NODE *> swapchainImageMap;

static long long unsigned int object_track_index = 0;
static int objLockInitialized = 0;
static loader_platform_thread_mutex objLock;
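// Note: objLock guards all of the global tracking state above (the per-type object maps,
// the object counters, and the queue list); the intercepted entry points below take it
// before touching that state.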

// Objects stored in a global map w/ struct containing basic info
// unordered_map<const void*, OBJTRACK_NODE*> objMap;

#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT + 1)

static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};
static uint64_t numTotalObjs = 0;
static VkQueueFamilyProperties *queueInfo = NULL;
static uint32_t queueCount = 0;

template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

//
// Internal Object Tracker Functions
//

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);
    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
    my_device_data->wsi_enabled = false;
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            my_device_data->wsi_enabled = true;

        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], "OBJTRACK_EXTENSIONS") == 0)
            my_device_data->objtrack_extensions_enabled = true;
    }
}

static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
    uint32_t i;
    VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(object_tracker_instance_table_map, instance);
    PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;

    pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
    pDisp->GetPhysicalDeviceSurfaceSupportKHR =
        (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
    pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
        (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
    pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
        (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
    pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
        (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");

#ifdef VK_USE_PLATFORM_WIN32_KHR
    pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
    pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
    pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
    pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
    pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
    pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XLIB_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
    pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
    pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
    pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
#endif // VK_USE_PLATFORM_ANDROID_KHR

    instanceExtMap[pDisp].wsi_enabled = false;
    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
            instanceExtMap[pDisp].wsi_enabled = true;
    }
}

// Indicate device or instance dispatch table type
typedef enum _DispTableType {
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;

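// mdd() and mid() look up the layer_data associated with a dispatchable object's dispatch
// key and return its debug_report_data, so messages are routed to the callbacks registered
// for the owning instance or device.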
debug_report_data *mdd(const void *object) {
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

debug_report_data *mid(VkInstance object) {
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory mem;
    struct _OT_MEM_INFO *pNextMI;
    struct _OT_MEM_INFO *pPrevMI;

} OT_MEM_INFO;

// Track Queue information
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO *pMemRefList;
    struct _OT_QUEUE_INFO *pNextQI;
    uint32_t queueNodeIndex;
    VkQueue queue;
    uint32_t refCount;
} OT_QUEUE_INFO;

// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;

// Convert an object type enum to an object type array index
static uint32_t objTypeToIndex(uint32_t objType) {
    uint32_t index = objType;
    return index;
}
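// The mapping is currently the identity because VkDebugReportObjectTypeEXT values are
// zero-based and contiguous, which is also what sizes the numObjs[NUM_OBJECT_TYPES] array.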

// Add new queue to head of global queue list
static void addQueueInfo(uint32_t queueNodeIndex, VkQueue queue) {
    OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;

    if (pQueueInfo != NULL) {
        memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
        pQueueInfo->queue = queue;
        pQueueInfo->queueNodeIndex = queueNodeIndex;
        pQueueInfo->pNextQI = g_pQueueInfo;
        g_pQueueInfo = pQueueInfo;
    } else {
        log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue),
                __LINE__, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
    }
}

// Destroy memRef lists and free all memory
static void destroyQueueMemRefLists(void) {
    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
    OT_QUEUE_INFO *pDelQueueInfo = NULL;
    while (pQueueInfo != NULL) {
        OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
        while (pMemInfo != NULL) {
            OT_MEM_INFO *pDelMemInfo = pMemInfo;
            pMemInfo = pMemInfo->pNextMI;
            delete pDelMemInfo;
        }
        pDelQueueInfo = pQueueInfo;
        pQueueInfo = pQueueInfo->pNextQI;
        delete pDelQueueInfo;
    }
    g_pQueueInfo = pQueueInfo;
}

static void setGpuQueueInfoState(uint32_t count, void *pData) {
    queueCount = count;
    queueInfo = (VkQueueFamilyProperties *)realloc((void *)queueInfo, count * sizeof(VkQueueFamilyProperties));
    if (queueInfo != NULL) {
        memcpy(queueInfo, pData, count * sizeof(VkQueueFamilyProperties));
    }
}

// Check Queue type flags for selected queue operations
static void validateQueueFlags(VkQueue queue, const char *function) {
    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
    while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
        pQueueInfo = pQueueInfo->pNextQI;
    }
    if (pQueueInfo != NULL) {
        if ((queueInfo != NULL) && (queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) == 0) {
            log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                    reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                    "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
        }
    }
}
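// validateQueueFlags() is used for sparse-binding style operations: it walks the queue list
// built by addQueueInfo() and reports an error if the queue's family was not created with
// VK_QUEUE_SPARSE_BINDING_BIT (see explicit_QueueBindSparse below).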

/* TODO: Port to new type safety */
#if 0
// Check object status for selected flag state
static VkBool32
validate_status(
    VkObject            dispatchable_object,
    VkObject            vkObj,
    VkObjectType        objType,
    ObjectStatusFlags   status_mask,
    ObjectStatusFlags   status_flag,
    VkFlags             msg_flags,
    OBJECT_TRACK_ERROR  error_code,
    const char         *fail_msg)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        if ((pNode->status & status_mask) != status_flag) {
            char str[1024];
            log_msg(mdd(dispatchable_object), msg_flags, pNode->objType, vkObj, __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                    "OBJECT VALIDATION WARNING: %s object 0x%" PRIxLEAST64 ": %s", string_VkObjectType(objType),
                    static_cast<uint64_t>(vkObj), fail_msg);
            return VK_FALSE;
        }
        return VK_TRUE;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), msg_flags, (VkObjectType) 0, vkObj, __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Unable to obtain status for non-existent object 0x%" PRIxLEAST64 " of %s type",
                static_cast<uint64_t>(vkObj), string_VkObjectType(objType));
        return VK_FALSE;
    }
}
#endif

#include "vk_dispatch_table_helper.h"
static void initObjectTracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
    uint32_t report_flags = 0;
    uint32_t debug_action = 0;
    FILE *log_output = NULL;
    const char *option_str;
    // initialize object_tracker options
    report_flags = getLayerOptionFlags("lunarg_object_tracker.report_flags", 0);
    getLayerOptionEnum("lunarg_object_tracker.debug_action", (uint32_t *)&debug_action);

    if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG) {
        option_str = getLayerOption("lunarg_object_tracker.log_filename");
        log_output = getLayerLogOutput(option_str, "lunarg_object_tracker");
        VkDebugReportCallbackCreateInfoEXT dbgInfo;
        memset(&dbgInfo, 0, sizeof(dbgInfo));
        dbgInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CREATE_INFO_EXT;
        dbgInfo.pfnCallback = log_callback;
        dbgInfo.pUserData = log_output;
        dbgInfo.flags = report_flags;
        layer_create_msg_callback(my_data->report_data, &dbgInfo, pAllocator, &my_data->logging_callback);
    }

    if (!objLockInitialized) {
        // TODO/TBD: Need to delete this mutex sometime. How??? One
        // suggestion is to call this during vkCreateInstance(), and then we
        // can clean it up during vkDestroyInstance(). However, that requires
        // that the layer have per-instance locks. We need to come back and
        // address this soon.
        loader_platform_thread_create_mutex(&objLock);
        objLockInitialized = 1;
    }
}

//
// Forward declarations
//

static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType);
static void create_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType);
static void create_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_device(VkPhysicalDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType);
static VkBool32 validate_image(VkQueue dispatchable_object, VkImage object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType,
                                  bool null_allowed);
static VkBool32 validate_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType,
                                bool null_allowed);
static VkBool32 validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object, VkDebugReportObjectTypeEXT objType,
                                         bool null_allowed);
static VkBool32 validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object,
                                               VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_command_pool(VkDevice dispatchable_object, VkCommandPool object, VkDebugReportObjectTypeEXT objType,
                                      bool null_allowed);
static VkBool32 validate_buffer(VkQueue dispatchable_object, VkBuffer object, VkDebugReportObjectTypeEXT objType,
                                bool null_allowed);
static void create_pipeline(VkDevice dispatchable_object, VkPipeline vkObj, VkDebugReportObjectTypeEXT objType);
static VkBool32 validate_pipeline_cache(VkDevice dispatchable_object, VkPipelineCache object, VkDebugReportObjectTypeEXT objType,
                                        bool null_allowed);
static VkBool32 validate_render_pass(VkDevice dispatchable_object, VkRenderPass object, VkDebugReportObjectTypeEXT objType,
                                     bool null_allowed);
static VkBool32 validate_shader_module(VkDevice dispatchable_object, VkShaderModule object, VkDebugReportObjectTypeEXT objType,
                                       bool null_allowed);
static VkBool32 validate_pipeline_layout(VkDevice dispatchable_object, VkPipelineLayout object, VkDebugReportObjectTypeEXT objType,
                                         bool null_allowed);
static VkBool32 validate_pipeline(VkDevice dispatchable_object, VkPipeline object, VkDebugReportObjectTypeEXT objType,
                                  bool null_allowed);
static void destroy_command_pool(VkDevice dispatchable_object, VkCommandPool object);
static void destroy_command_buffer(VkCommandBuffer dispatchable_object, VkCommandBuffer object);
static void destroy_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
static void destroy_descriptor_set(VkDevice dispatchable_object, VkDescriptorSet object);
static void destroy_device_memory(VkDevice dispatchable_object, VkDeviceMemory object);
static void destroy_swapchain_khr(VkDevice dispatchable_object, VkSwapchainKHR object);
static VkBool32 set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
                                         ObjectStatusFlags status_flag);
static VkBool32 reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
                                           ObjectStatusFlags status_flag);
#if 0
static VkBool32 validate_status(VkDevice dispatchable_object, VkFence object, VkDebugReportObjectTypeEXT objType,
                                ObjectStatusFlags status_mask, ObjectStatusFlags status_flag, VkFlags msg_flags,
                                OBJECT_TRACK_ERROR error_code, const char *fail_msg);
#endif
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkPhysicalDeviceMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkDeviceMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkImageMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkQueueMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkDescriptorSetMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkBufferMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkFenceMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSemaphoreMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandPoolMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandBufferMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSwapchainKHRMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSurfaceKHRMap;
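
// Note: the per-type maps declared extern above are assumed to be defined in the generated
// object tracker source that includes this file; only swapchainImageMap is defined locally.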

static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
            OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
    VkPhysicalDeviceMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void create_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR vkObj, VkDebugReportObjectTypeEXT objType) {
    // TODO: Add tracking of surface objects
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    VkSurfaceKHRMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void destroy_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR object) {
    uint64_t object_handle = (uint64_t)(object);
    if (VkSurfaceKHRMap.find(object_handle) != VkSurfaceKHRMap.end()) {
        OBJTRACK_NODE *pNode = VkSurfaceKHRMap[(uint64_t)object];
        uint32_t objIndex = objTypeToIndex(pNode->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[objIndex] > 0);
        numObjs[objIndex]--;
        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__,
                OBJTRACK_NONE, "OBJTRACK",
                "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(object), numTotalObjs, numObjs[objIndex],
                string_VkDebugReportObjectTypeEXT(pNode->objType));
        delete pNode;
        VkSurfaceKHRMap.erase(object_handle);
    } else {
        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__,
                OBJTRACK_NONE, "OBJTRACK",
                "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
    }
}

static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj,
                                 VkDebugReportObjectTypeEXT objType, VkCommandBufferLevel level) {
    log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)device;
    pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
    pNewObjNode->parentObj = (uint64_t)commandPool;
    if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
        pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
    } else {
        pNewObjNode->status = OBJSTATUS_NONE;
    }
    VkCommandBufferMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void free_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer) {
    uint64_t object_handle = reinterpret_cast<uint64_t>(commandBuffer);
    if (VkCommandBufferMap.find(object_handle) != VkCommandBufferMap.end()) {
        OBJTRACK_NODE *pNode = VkCommandBufferMap[(uint64_t)commandBuffer];

        if (pNode->parentObj != (uint64_t)(commandPool)) {
            log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
                    OBJTRACK_COMMAND_POOL_MISMATCH, "OBJTRACK",
                    "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
                    " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
                    reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, (uint64_t)(commandPool));
        } else {

            uint32_t objIndex = objTypeToIndex(pNode->objType);
            assert(numTotalObjs > 0);
            numTotalObjs--;
            assert(numObjs[objIndex] > 0);
            numObjs[objIndex]--;
            log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE,
                    "OBJTRACK", "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                    string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer), numTotalObjs,
                    numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
            delete pNode;
            VkCommandBufferMap.erase(object_handle);
        }
    } else {
        log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
                "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
                object_handle);
    }
}

static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj,
                                 VkDebugReportObjectTypeEXT objType) {
    log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
            "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
            (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)device;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    pNewObjNode->parentObj = (uint64_t)descriptorPool;
    VkDescriptorSetMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void free_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet) {
    uint64_t object_handle = (uint64_t)(descriptorSet);
    if (VkDescriptorSetMap.find(object_handle) != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE *pNode = VkDescriptorSetMap[(uint64_t)descriptorSet];

        if (pNode->parentObj != (uint64_t)(descriptorPool)) {
            log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
                    OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
                    "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
                    " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
                    (uint64_t)(descriptorSet), pNode->parentObj, (uint64_t)(descriptorPool));
        } else {
            uint32_t objIndex = objTypeToIndex(pNode->objType);
            assert(numTotalObjs > 0);
            numTotalObjs--;
            assert(numObjs[objIndex] > 0);
            numObjs[objIndex]--;
            log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE,
                    "OBJTRACK", "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                    string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(descriptorSet), numTotalObjs, numObjs[objIndex],
                    string_VkDebugReportObjectTypeEXT(pNode->objType));
            delete pNode;
            VkDescriptorSetMap.erase(object_handle);
        }
    } else {
        log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
                "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
                object_handle);
    }
}

static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
            OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
    VkQueueMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}
static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain) {
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)vkObj,
            __LINE__, OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            "SwapchainImage", (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->objType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)vkObj;
    pNewObjNode->parentObj = (uint64_t)swapchain;
    swapchainImageMap[(uint64_t)(vkObj)] = pNewObjNode;
}

static void create_device(VkInstance dispatchable_object, VkDevice vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mid(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->objType = objType;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    VkDeviceMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

//
// Non-auto-generated API functions called by generated code
//
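// The explicit_* entry points below follow the standard Vulkan layer chaining pattern:
// fetch the next layer's GetInstanceProcAddr/GetDeviceProcAddr from the VkLayer*CreateInfo
// chain info, advance the chain link, call down the chain, and only then initialize this
// layer's dispatch tables and tracking state for the newly created handle.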
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700628VkResult explicit_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
629 VkInstance *pInstance) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700630 VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
David Pinedoc0fa1ab2015-07-31 10:46:25 -0600631
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700632 assert(chain_info->u.pLayerInfo);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700633 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700634 PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700635 if (fpCreateInstance == NULL) {
636 return VK_ERROR_INITIALIZATION_FAILED;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600637 }
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700638
639 // Advance the link info for the next element on the chain
640 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
641
642 VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
643 if (result != VK_SUCCESS) {
644 return result;
645 }
646
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700647 layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
648 initInstanceTable(*pInstance, fpGetInstanceProcAddr, object_tracker_instance_table_map);
649 VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(object_tracker_instance_table_map, *pInstance);
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700650
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700651 my_data->report_data = debug_report_create_instance(pInstanceTable, *pInstance, pCreateInfo->enabledExtensionCount,
652 pCreateInfo->ppEnabledExtensionNames);
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700653
654 initObjectTracker(my_data, pAllocator);
655 createInstanceRegisterExtensions(pCreateInfo, *pInstance);
656
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700657 create_instance(*pInstance, *pInstance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT);
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700658
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600659 return result;
660}
661
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700662void explicit_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice gpu, uint32_t *pCount, VkQueueFamilyProperties *pProperties) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700663 get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);
Tony Barbour59a47322015-06-24 16:06:58 -0600664
665 loader_platform_thread_lock_mutex(&objLock);
Cody Northropd0802882015-08-03 17:04:53 -0600666 if (pProperties != NULL)
667 setGpuQueueInfoState(*pCount, pProperties);
Tony Barbour59a47322015-06-24 16:06:58 -0600668 loader_platform_thread_unlock_mutex(&objLock);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600669}
670
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700671VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
672 VkDevice *pDevice) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600673 loader_platform_thread_lock_mutex(&objLock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700674 VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700675
676 assert(chain_info->u.pLayerInfo);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700677 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
678 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700679 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700680 if (fpCreateDevice == NULL) {
681 loader_platform_thread_unlock_mutex(&objLock);
682 return VK_ERROR_INITIALIZATION_FAILED;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600683 }
684
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700685 // Advance the link info for the next element on the chain
686 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
687
688 VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
689 if (result != VK_SUCCESS) {
690 loader_platform_thread_unlock_mutex(&objLock);
691 return result;
692 }
693
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700694 layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
695 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
696 my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700697
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700698 initDeviceTable(*pDevice, fpGetDeviceProcAddr, object_tracker_device_table_map);
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700699
700 createDeviceRegisterExtensions(pCreateInfo, *pDevice);
701
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700702 if (VkPhysicalDeviceMap.find((uint64_t)gpu) != VkPhysicalDeviceMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700703 OBJTRACK_NODE *pNewObjNode = VkPhysicalDeviceMap[(uint64_t)gpu];
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700704 create_device((VkInstance)pNewObjNode->belongsTo, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
705 }
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700706
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600707 loader_platform_thread_unlock_mutex(&objLock);
708 return result;
709}
710
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700711VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
712 VkPhysicalDevice *pPhysicalDevices) {
Tobin Ehlisec598302015-09-15 15:02:17 -0600713 VkBool32 skipCall = VK_FALSE;
714 loader_platform_thread_lock_mutex(&objLock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700715 skipCall |= validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);
Tobin Ehlisec598302015-09-15 15:02:17 -0600716 loader_platform_thread_unlock_mutex(&objLock);
717 if (skipCall)
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700718 return VK_ERROR_VALIDATION_FAILED_EXT;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700719 VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)
720 ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
Tobin Ehlisec598302015-09-15 15:02:17 -0600721 loader_platform_thread_lock_mutex(&objLock);
722 if (result == VK_SUCCESS) {
723 if (pPhysicalDevices) {
724 for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700725 create_physical_device(instance, pPhysicalDevices[i], VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT);
Tobin Ehlisec598302015-09-15 15:02:17 -0600726 }
727 }
728 }
729 loader_platform_thread_unlock_mutex(&objLock);
730 return result;
731}
732
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700733void explicit_GetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue *pQueue) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600734 loader_platform_thread_lock_mutex(&objLock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700735 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600736 loader_platform_thread_unlock_mutex(&objLock);
737
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700738 get_dispatch_table(object_tracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600739
740 loader_platform_thread_lock_mutex(&objLock);
Courtney Goeltzenleuchter06d89472015-10-20 16:40:38 -0600741 addQueueInfo(queueNodeIndex, *pQueue);
Courtney Goeltzenleuchter7415d5a2015-12-09 15:48:16 -0700742 create_queue(device, *pQueue, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600743 loader_platform_thread_unlock_mutex(&objLock);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600744}
745
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700746VkResult explicit_MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
747 void **ppData) {
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600748 VkBool32 skipCall = VK_FALSE;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600749 loader_platform_thread_lock_mutex(&objLock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700750 skipCall |= set_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
751 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600752 loader_platform_thread_unlock_mutex(&objLock);
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600753 if (skipCall == VK_TRUE)
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700754 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600755
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700756 VkResult result =
757 get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600758
759 return result;
760}
761
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700762void explicit_UnmapMemory(VkDevice device, VkDeviceMemory mem) {
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600763 VkBool32 skipCall = VK_FALSE;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600764 loader_platform_thread_lock_mutex(&objLock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700765 skipCall |= reset_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
766 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600767 loader_platform_thread_unlock_mutex(&objLock);
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600768 if (skipCall == VK_TRUE)
769 return;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600770
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700771 get_dispatch_table(object_tracker_device_table_map, device)->UnmapMemory(device, mem);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600772}
773
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700774VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600775 loader_platform_thread_lock_mutex(&objLock);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800776 validateQueueFlags(queue, "QueueBindSparse");
777
778 for (uint32_t i = 0; i < bindInfoCount; i++) {
779 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; j++)
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700780 validate_buffer(queue, pBindInfo[i].pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, false);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800781 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; j++)
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700782 validate_image(queue, pBindInfo[i].pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800783 for (uint32_t j = 0; j < pBindInfo[i].imageBindCount; j++)
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700784 validate_image(queue, pBindInfo[i].pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800785 }
786
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600787 loader_platform_thread_unlock_mutex(&objLock);
788
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700789 VkResult result =
790 get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
Mark Lobodzinski16e8bef2015-07-03 15:58:09 -0600791 return result;
792}
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600793
VkResult explicit_AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
                                         VkCommandBuffer *pCommandBuffers) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_command_pool(device, pAllocateInfo->commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);

    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
        alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             pAllocateInfo->level);
    }
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

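// Explicit Object Tracker entry point for vkAllocateDescriptorSets: validates the device, the
// descriptor pool, and every set layout in pAllocateInfo, then, on success, begins tracking
// each allocated descriptor set under its parent pool.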
VkResult explicit_AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                         VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |=
        validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
        skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i],
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&objLock);
        for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
            alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i],
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
        }
        loader_platform_thread_unlock_mutex(&objLock);
    }

    return result;
}

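// Explicit Object Tracker entry point for vkFreeCommandBuffers: validates the pool and device,
// forwards the free, then stops tracking each freed command buffer.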
void explicit_FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                 const VkCommandBuffer *pCommandBuffers) {
    loader_platform_thread_lock_mutex(&objLock);
    validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);

    get_dispatch_table(object_tracker_device_table_map, device)
        ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        free_command_buffer(device, commandPool, *pCommandBuffers);
        pCommandBuffers++;
    }
    loader_platform_thread_unlock_mutex(&objLock);
}

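// Explicit Object Tracker entry point for vkDestroySwapchainKHR: removes every tracked image
// whose parent is this swapchain, stops tracking the swapchain itself, and forwards the call.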
void explicit_DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    loader_platform_thread_lock_mutex(&objLock);
    // A swapchain's images are implicitly deleted when the swapchain is deleted.
    // Remove this swapchain's images from our map of such images.
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = swapchainImageMap.begin();
    while (itr != swapchainImageMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        if (pNode->parentObj == (uint64_t)(swapchain)) {
            swapchainImageMap.erase(itr++);
        } else {
            ++itr;
        }
    }
    destroy_swapchain_khr(device, swapchain);
    loader_platform_thread_unlock_mutex(&objLock);

    get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
}

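// Explicit Object Tracker entry point for vkFreeMemory: validates the device, forwards the
// free, then stops tracking the VkDeviceMemory object.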
void explicit_FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    loader_platform_thread_lock_mutex(&objLock);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);

    get_dispatch_table(object_tracker_device_table_map, device)->FreeMemory(device, mem, pAllocator);

    loader_platform_thread_lock_mutex(&objLock);
    destroy_device_memory(device, mem);
    loader_platform_thread_unlock_mutex(&objLock);
}

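// Explicit Object Tracker entry point for vkFreeDescriptorSets: validates the pool and device,
// forwards the free, then stops tracking each descriptor set returned to the pool.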
VkResult explicit_FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                     const VkDescriptorSet *pDescriptorSets) {
    loader_platform_thread_lock_mutex(&objLock);
    validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < count; i++) {
        free_descriptor_set(device, descriptorPool, *pDescriptorSets++);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

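// Explicit Object Tracker entry point for vkDestroyDescriptorPool: because destroying a pool
// implicitly frees its descriptor sets, every tracked set parented to this pool is removed
// before the pool itself is untracked and the call is forwarded.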
void explicit_DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall) {
        return;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
    // Remove this pool's descriptor sets from our descriptorSet map.
    loader_platform_thread_lock_mutex(&objLock);
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkDescriptorSetMap.begin();
    while (itr != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        auto del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(descriptorPool)) {
            destroy_descriptor_set(device, (VkDescriptorSet)((*del_itr).first));
        }
    }
    destroy_descriptor_pool(device, descriptorPool);
    loader_platform_thread_unlock_mutex(&objLock);
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
}

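// Explicit Object Tracker entry point for vkDestroyCommandPool: because destroying a pool
// implicitly frees its command buffers, every tracked command buffer parented to this pool is
// removed before the pool itself is untracked and the call is forwarded.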
void explicit_DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall) {
        return;
    }
    loader_platform_thread_lock_mutex(&objLock);
    // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
    // Remove this pool's cmdBuffers from our cmd buffer map.
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkCommandBufferMap.begin();
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator del_itr;
    while (itr != VkCommandBufferMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(commandPool)) {
            destroy_command_buffer(reinterpret_cast<VkCommandBuffer>((*del_itr).first),
                                   reinterpret_cast<VkCommandBuffer>((*del_itr).first));
        }
    }
    destroy_command_pool(device, commandPool);
    loader_platform_thread_unlock_mutex(&objLock);
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
}

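// Explicit Object Tracker entry point for vkGetSwapchainImagesKHR: validates the device,
// forwards the query, and records each returned image as a child of the swapchain so it can be
// cleaned up when the swapchain is destroyed.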
VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    if (pSwapchainImages != NULL) {
        loader_platform_thread_lock_mutex(&objLock);
        for (uint32_t i = 0; i < *pCount; i++) {
            create_swapchain_image_obj(device, pSwapchainImages[i], swapchain);
        }
        loader_platform_thread_unlock_mutex(&objLock);
    }
    return result;
}

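// Explicit Object Tracker entry point for vkCreateGraphicsPipelines: validates the pipeline
// cache and, for every element of pCreateInfos, the base pipeline handle, layout, shader
// modules, and render pass, then tracks each pipeline created on success.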
// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                                          VkPipeline *pPipelines) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    if (pCreateInfos) {
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            if (pCreateInfos[idx0].basePipelineHandle) {
                skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
                                              VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
            }
            if (pCreateInfos[idx0].layout) {
                skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
                                                     VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
            }
            if (pCreateInfos[idx0].pStages) {
                for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
                    if (pCreateInfos[idx0].pStages[idx1].module) {
                        skipCall |= validate_shader_module(device, pCreateInfos[idx0].pStages[idx1].module,
                                                           VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
                    }
                }
            }
            if (pCreateInfos[idx0].renderPass) {
                skipCall |=
                    validate_render_pass(device, pCreateInfos[idx0].renderPass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, false);
            }
        }
    }
    if (pipelineCache) {
        skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    loader_platform_thread_lock_mutex(&objLock);
    if (result == VK_SUCCESS) {
        for (uint32_t idx2 = 0; idx2 < createInfoCount; ++idx2) {
            create_pipeline(device, pPipelines[idx2], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
        }
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

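// Explicit Object Tracker entry point for vkCreateComputePipelines: validates the pipeline
// cache and, for every element of pCreateInfos, the base pipeline handle, layout, and compute
// shader stage module, then tracks each pipeline created on success.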
// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                                         VkPipeline *pPipelines) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    if (pCreateInfos) {
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            if (pCreateInfos[idx0].basePipelineHandle) {
                skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
                                              VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
            }
            if (pCreateInfos[idx0].layout) {
                skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
                                                     VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
            }
            if (pCreateInfos[idx0].stage.module) {
                skipCall |= validate_shader_module(device, pCreateInfos[idx0].stage.module,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
            }
        }
    }
    if (pipelineCache) {
        skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    loader_platform_thread_lock_mutex(&objLock);
    if (result == VK_SUCCESS) {
        for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
            create_pipeline(device, pPipelines[idx1], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
        }
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}