/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 * Copyright (C) 2015-2016 Google Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Tobin Ehlis <tobin@lunarg.com>
 */

#include "vulkan/vk_layer.h"
#include "vk_layer_extension_utils.h"
#include "vk_enum_string_helper.h"
#include "vk_layer_table.h"
#include "vk_layer_utils.h"

// Object Tracker ERROR codes
typedef enum _OBJECT_TRACK_ERROR {
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_DESTROY_OBJECT_FAILED,    // Couldn't find object to be destroyed
    OBJTRACK_OBJECT_LEAK,              // Object was not correctly freed/destroyed
    OBJTRACK_OBJCOUNT_MAX_EXCEEDED,    // Request for Object data in excess of max obj count
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
    OBJTRACK_DESCRIPTOR_POOL_MISMATCH, // Descriptor Pools specified incorrectly
    OBJTRACK_COMMAND_POOL_MISMATCH,    // Command Pools specified incorrectly
} OBJECT_TRACK_ERROR;

// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits {
    OBJSTATUS_NONE = 0x00000000,                     // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED = 0x00000001,       // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND = 0x00000002,           // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND = 0x00000004,             // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND = 0x00000008,        // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010,      // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED = 0x00000020,           // Memory object is currently mapped
    OBJSTATUS_COMMAND_BUFFER_SECONDARY = 0x00000040, // Command Buffer is of type SECONDARY
} ObjectStatusFlagBits;

typedef struct _OBJTRACK_NODE {
    uint64_t vkObj;                     // Object handle
    VkDebugReportObjectTypeEXT objType; // Object type identifier
    ObjectStatusFlags status;           // Object state
    uint64_t parentObj;                 // Parent object
    uint64_t belongsTo;                 // Object Scope -- owning device/instance
} OBJTRACK_NODE;
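
// Illustrative sketch only (it mirrors the create_*/alloc_* helpers later in this file):
// each tracked handle gets one OBJTRACK_NODE, stored in a per-type map keyed by the raw
// 64-bit handle value, with the per-type and total object counters bumped alongside it.
//
//     OBJTRACK_NODE *pNode = new OBJTRACK_NODE;
//     pNode->objType   = VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT;
//     pNode->belongsTo = (uint64_t)device;
//     pNode->status    = OBJSTATUS_NONE;
//     pNode->vkObj     = reinterpret_cast<uint64_t>(queue);
//     VkQueueMap[pNode->vkObj] = pNode;
//     numObjs[objTypeToIndex(pNode->objType)]++;
//     numTotalObjs++;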

// prototype for extension functions
uint64_t objTrackGetObjectCount(VkDevice device);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkDebugReportObjectTypeEXT type);

// Func ptr typedefs
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkDebugReportObjectTypeEXT);

struct layer_data {
    debug_report_data *report_data;
    // TODO: put instance data here
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    bool wsi_enabled;
    bool objtrack_extensions_enabled;

    layer_data() : report_data(nullptr), wsi_enabled(false), objtrack_extensions_enabled(false){};
};

struct instExts {
    bool wsi_enabled;
};

static std::unordered_map<void *, struct instExts> instanceExtMap;
static std::unordered_map<void *, layer_data *> layer_data_map;
static device_table_map object_tracker_device_table_map;
static instance_table_map object_tracker_instance_table_map;

// We additionally need to validate image usage using a separate map
// of swapchain-created images
static unordered_map<uint64_t, OBJTRACK_NODE *> swapchainImageMap;

static long long unsigned int object_track_index = 0;
static int objLockInitialized = 0;
static loader_platform_thread_mutex objLock;

// Objects stored in a global map w/ struct containing basic info
// unordered_map<const void*, OBJTRACK_NODE*> objMap;

#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT + 1)

static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};
static uint64_t numTotalObjs = 0;
static VkQueueFamilyProperties *queueInfo = NULL;
static uint32_t queueCount = 0;

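// Explicit instantiation of the layer-data lookup helper for this layer's layer_data type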
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);

//
// Internal Object Tracker Functions
//

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
    VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);
    PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
    pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
    pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
    pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
    pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
    pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
    my_device_data->wsi_enabled = false;
    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
            my_device_data->wsi_enabled = true;

        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], "OBJTRACK_EXTENSIONS") == 0)
            my_device_data->objtrack_extensions_enabled = true;
    }
}

static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
    uint32_t i;
    VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(object_tracker_instance_table_map, instance);
    PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;

    pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
    pDisp->GetPhysicalDeviceSurfaceSupportKHR =
        (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
    pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
        (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
    pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
        (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
    pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
        (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");

#ifdef VK_USE_PLATFORM_WIN32_KHR
    pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
    pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
    pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
    pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
    pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
    pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
#endif // VK_USE_PLATFORM_XLIB_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
    pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
    pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
    pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
    pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
        (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_ANDROID_KHR
    pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
#endif // VK_USE_PLATFORM_ANDROID_KHR

    instanceExtMap[pDisp].wsi_enabled = false;
    for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
        if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
            instanceExtMap[pDisp].wsi_enabled = true;
    }
}

// Indicate device or instance dispatch table type
typedef enum _DispTableType {
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;

debug_report_data *mdd(const void *object) {
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

debug_report_data *mid(VkInstance object) {
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory mem;
    struct _OT_MEM_INFO *pNextMI;
    struct _OT_MEM_INFO *pPrevMI;

} OT_MEM_INFO;

// Track Queue information
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO *pMemRefList;
    struct _OT_QUEUE_INFO *pNextQI;
    uint32_t queueNodeIndex;
    VkQueue queue;
    uint32_t refCount;
} OT_QUEUE_INFO;

// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;

// Convert an object type enum to an object type array index
static uint32_t objTypeToIndex(uint32_t objType) {
    uint32_t index = objType;
    return index;
}

// Add new queue to head of global queue list
static void addQueueInfo(uint32_t queueNodeIndex, VkQueue queue) {
    OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;

    if (pQueueInfo != NULL) {
        memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
        pQueueInfo->queue = queue;
        pQueueInfo->queueNodeIndex = queueNodeIndex;
        pQueueInfo->pNextQI = g_pQueueInfo;
        g_pQueueInfo = pQueueInfo;
    } else {
        log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue),
                __LINE__, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
    }
}

// Destroy memRef lists and free all memory
static void destroyQueueMemRefLists(void) {
    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
    OT_QUEUE_INFO *pDelQueueInfo = NULL;
    while (pQueueInfo != NULL) {
        OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
        while (pMemInfo != NULL) {
            OT_MEM_INFO *pDelMemInfo = pMemInfo;
            pMemInfo = pMemInfo->pNextMI;
            delete pDelMemInfo;
        }
        pDelQueueInfo = pQueueInfo;
        pQueueInfo = pQueueInfo->pNextQI;
        delete pDelQueueInfo;
    }
    g_pQueueInfo = pQueueInfo;
}

static void setGpuQueueInfoState(uint32_t count, void *pData) {
    queueCount = count;
    queueInfo = (VkQueueFamilyProperties *)realloc((void *)queueInfo, count * sizeof(VkQueueFamilyProperties));
    if (queueInfo != NULL) {
        memcpy(queueInfo, pData, count * sizeof(VkQueueFamilyProperties));
    }
}

// Check Queue type flags for selected queue operations
static void validateQueueFlags(VkQueue queue, const char *function) {
    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
    while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
        pQueueInfo = pQueueInfo->pNextQI;
    }
    if (pQueueInfo != NULL) {
        if ((queueInfo != NULL) && (queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) == 0) {
            log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                    reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                    "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
        }
    }
}
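
// Usage sketch (see explicit_QueueBindSparse below): callers take objLock and pass the
// API name so it can be reported in the message, e.g.
//     loader_platform_thread_lock_mutex(&objLock);
//     validateQueueFlags(queue, "QueueBindSparse");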

/* TODO: Port to new type safety */
#if 0
// Check object status for selected flag state
static VkBool32
validate_status(
    VkObject            dispatchable_object,
    VkObject            vkObj,
    VkObjectType        objType,
    ObjectStatusFlags   status_mask,
    ObjectStatusFlags   status_flag,
    VkFlags             msg_flags,
    OBJECT_TRACK_ERROR  error_code,
    const char         *fail_msg)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        if ((pNode->status & status_mask) != status_flag) {
            char str[1024];
            log_msg(mdd(dispatchable_object), msg_flags, pNode->objType, vkObj, __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "OBJECT VALIDATION WARNING: %s object 0x%" PRIxLEAST64 ": %s", string_VkObjectType(objType),
                static_cast<uint64_t>(vkObj), fail_msg);
            return VK_FALSE;
        }
        return VK_TRUE;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), msg_flags, (VkObjectType) 0, vkObj, __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
            "Unable to obtain status for non-existent object 0x%" PRIxLEAST64 " of %s type",
            static_cast<uint64_t>(vkObj), string_VkObjectType(objType));
        return VK_FALSE;
    }
}
#endif

#include "vk_dispatch_table_helper.h"

static void init_object_tracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {

    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");

    if (!objLockInitialized) {
        // TODO/TBD: Need to delete this mutex sometime. How??? One
        // suggestion is to call this during vkCreateInstance(), and then we
        // can clean it up during vkDestroyInstance(). However, that requires
        // that the layer have per-instance locks. We need to come back and
        // address this soon.
        loader_platform_thread_create_mutex(&objLock);
        objLockInitialized = 1;
    }
}
344
Tony Barboura05dbaa2015-07-09 17:31:46 -0600345//
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700346// Forward declarations
Tony Barboura05dbaa2015-07-09 17:31:46 -0600347//
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600348
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700349static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType);
350static void create_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType);
351static void create_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700352static void create_device(VkPhysicalDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700353static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType);
354static VkBool32 validate_image(VkQueue dispatchable_object, VkImage object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700355static VkBool32 validate_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType,
356 bool null_allowed);
357static VkBool32 validate_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType,
358 bool null_allowed);
359static VkBool32 validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object, VkDebugReportObjectTypeEXT objType,
360 bool null_allowed);
361static VkBool32 validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object,
362 VkDebugReportObjectTypeEXT objType, bool null_allowed);
363static VkBool32 validate_command_pool(VkDevice dispatchable_object, VkCommandPool object, VkDebugReportObjectTypeEXT objType,
364 bool null_allowed);
365static VkBool32 validate_buffer(VkQueue dispatchable_object, VkBuffer object, VkDebugReportObjectTypeEXT objType,
366 bool null_allowed);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700367static void create_pipeline(VkDevice dispatchable_object, VkPipeline vkObj, VkDebugReportObjectTypeEXT objType);
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700368static VkBool32 validate_pipeline_cache(VkDevice dispatchable_object, VkPipelineCache object, VkDebugReportObjectTypeEXT objType,
369 bool null_allowed);
370static VkBool32 validate_render_pass(VkDevice dispatchable_object, VkRenderPass object, VkDebugReportObjectTypeEXT objType,
371 bool null_allowed);
372static VkBool32 validate_shader_module(VkDevice dispatchable_object, VkShaderModule object, VkDebugReportObjectTypeEXT objType,
373 bool null_allowed);
374static VkBool32 validate_pipeline_layout(VkDevice dispatchable_object, VkPipelineLayout object, VkDebugReportObjectTypeEXT objType,
375 bool null_allowed);
376static VkBool32 validate_pipeline(VkDevice dispatchable_object, VkPipeline object, VkDebugReportObjectTypeEXT objType,
377 bool null_allowed);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700378static void destroy_command_pool(VkDevice dispatchable_object, VkCommandPool object);
379static void destroy_command_buffer(VkCommandBuffer dispatchable_object, VkCommandBuffer object);
380static void destroy_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
381static void destroy_descriptor_set(VkDevice dispatchable_object, VkDescriptorSet object);
382static void destroy_device_memory(VkDevice dispatchable_object, VkDeviceMemory object);
383static void destroy_swapchain_khr(VkDevice dispatchable_object, VkSwapchainKHR object);
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700384static VkBool32 set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
385 ObjectStatusFlags status_flag);
386static VkBool32 reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
387 ObjectStatusFlags status_flag);
Tony Barboura05dbaa2015-07-09 17:31:46 -0600388#if 0
Courtney Goeltzenleuchter7415d5a2015-12-09 15:48:16 -0700389static VkBool32 validate_status(VkDevice dispatchable_object, VkFence object, VkDebugReportObjectTypeEXT objType,
Tony Barboura05dbaa2015-07-09 17:31:46 -0600390 ObjectStatusFlags status_mask, ObjectStatusFlags status_flag, VkFlags msg_flags, OBJECT_TRACK_ERROR error_code,
391 const char *fail_msg);
392#endif
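// One tracking map per handle type, keyed by the raw 64-bit handle value. The maps are
// defined in the generated portion of the layer (this file holds the hand-written helpers
// it calls); they are declared extern here so both halves share the same tracking state.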
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkPhysicalDeviceMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkDeviceMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkImageMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkQueueMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkDescriptorSetMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkBufferMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkFenceMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSemaphoreMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandPoolMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandBufferMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSwapchainKHRMap;
extern unordered_map<uint64_t, OBJTRACK_NODE *> VkSurfaceKHRMap;

static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
            OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
    VkPhysicalDeviceMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void create_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR vkObj, VkDebugReportObjectTypeEXT objType) {
    // TODO: Add tracking of surface objects
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    VkSurfaceKHRMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void destroy_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR object) {
    uint64_t object_handle = (uint64_t)(object);
    if (VkSurfaceKHRMap.find(object_handle) != VkSurfaceKHRMap.end()) {
        OBJTRACK_NODE *pNode = VkSurfaceKHRMap[(uint64_t)object];
        uint32_t objIndex = objTypeToIndex(pNode->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[objIndex] > 0);
        numObjs[objIndex]--;
        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__,
                OBJTRACK_NONE, "OBJTRACK",
                "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(object), numTotalObjs, numObjs[objIndex],
                string_VkDebugReportObjectTypeEXT(pNode->objType));
        delete pNode;
        VkSurfaceKHRMap.erase(object_handle);
    } else {
        log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__,
                OBJTRACK_NONE, "OBJTRACK",
                "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
    }
}

static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj,
                                 VkDebugReportObjectTypeEXT objType, VkCommandBufferLevel level) {
    log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)device;
    pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
    pNewObjNode->parentObj = (uint64_t)commandPool;
    if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
        pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
    } else {
        pNewObjNode->status = OBJSTATUS_NONE;
    }
    VkCommandBufferMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void free_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer) {
    uint64_t object_handle = reinterpret_cast<uint64_t>(commandBuffer);
    if (VkCommandBufferMap.find(object_handle) != VkCommandBufferMap.end()) {
        OBJTRACK_NODE *pNode = VkCommandBufferMap[(uint64_t)commandBuffer];

        if (pNode->parentObj != (uint64_t)(commandPool)) {
            log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
                    OBJTRACK_COMMAND_POOL_MISMATCH, "OBJTRACK",
                    "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
                    " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ".",
                    reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, (uint64_t)(commandPool));
        } else {

            uint32_t objIndex = objTypeToIndex(pNode->objType);
            assert(numTotalObjs > 0);
            numTotalObjs--;
            assert(numObjs[objIndex] > 0);
            numObjs[objIndex]--;
            log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE,
                    "OBJTRACK", "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                    string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer), numTotalObjs,
                    numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
            delete pNode;
            VkCommandBufferMap.erase(object_handle);
        }
    } else {
        log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
                "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
                object_handle);
    }
}

static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj,
                                 VkDebugReportObjectTypeEXT objType) {
    log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
            "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
            (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)device;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    pNewObjNode->parentObj = (uint64_t)descriptorPool;
    VkDescriptorSetMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void free_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet) {
    uint64_t object_handle = (uint64_t)(descriptorSet);
    if (VkDescriptorSetMap.find(object_handle) != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE *pNode = VkDescriptorSetMap[(uint64_t)descriptorSet];

        if (pNode->parentObj != (uint64_t)(descriptorPool)) {
            log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
                    OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
                    "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
                    " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ".",
                    (uint64_t)(descriptorSet), pNode->parentObj, (uint64_t)(descriptorPool));
        } else {
            uint32_t objIndex = objTypeToIndex(pNode->objType);
            assert(numTotalObjs > 0);
            numTotalObjs--;
            assert(numObjs[objIndex] > 0);
            numObjs[objIndex]--;
            log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_NONE,
                    "OBJTRACK", "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                    string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(descriptorSet), numTotalObjs, numObjs[objIndex],
                    string_VkDebugReportObjectTypeEXT(pNode->objType));
            delete pNode;
            VkDescriptorSetMap.erase(object_handle);
        }
    } else {
        log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
                "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
                object_handle);
    }
}

static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
            OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
    VkQueueMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain) {
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)vkObj,
            __LINE__, OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            "SwapchainImage", (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->objType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)vkObj;
    pNewObjNode->parentObj = (uint64_t)swapchain;
    swapchainImageMap[(uint64_t)(vkObj)] = pNewObjNode;
}

static void create_device(VkInstance dispatchable_object, VkDevice vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mid(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->objType = objType;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    VkDeviceMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

//
// Non-auto-generated API functions called by generated code
//
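// Common pattern for the explicit_* entry points below: validate or record tracker state
// while holding objLock, fetch the next layer's entry point (from the layer chain info or
// the stored dispatch table), call down the chain, then create or destroy tracking nodes
// based on the result.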
VkResult explicit_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                 VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    initInstanceTable(*pInstance, fpGetInstanceProcAddr, object_tracker_instance_table_map);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(object_tracker_instance_table_map, *pInstance);

    my_data->report_data = debug_report_create_instance(pInstanceTable, *pInstance, pCreateInfo->enabledExtensionCount,
                                                        pCreateInfo->ppEnabledExtensionNames);

    init_object_tracker(my_data, pAllocator);
    createInstanceRegisterExtensions(pCreateInfo, *pInstance);

    create_instance(*pInstance, *pInstance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT);

    return result;
}

void explicit_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice gpu, uint32_t *pCount, VkQueueFamilyProperties *pProperties) {
    get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);

    loader_platform_thread_lock_mutex(&objLock);
    if (pProperties != NULL)
        setGpuQueueInfoState(*pCount, pProperties);
    loader_platform_thread_unlock_mutex(&objLock);
}

VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                               VkDevice *pDevice) {
    loader_platform_thread_lock_mutex(&objLock);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        loader_platform_thread_unlock_mutex(&objLock);
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        loader_platform_thread_unlock_mutex(&objLock);
        return result;
    }

    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);

    initDeviceTable(*pDevice, fpGetDeviceProcAddr, object_tracker_device_table_map);

    createDeviceRegisterExtensions(pCreateInfo, *pDevice);

    if (VkPhysicalDeviceMap.find((uint64_t)gpu) != VkPhysicalDeviceMap.end()) {
        OBJTRACK_NODE *pNewObjNode = VkPhysicalDeviceMap[(uint64_t)gpu];
        create_device((VkInstance)pNewObjNode->belongsTo, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
    }

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                           VkPhysicalDevice *pPhysicalDevices) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)
                          ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    loader_platform_thread_lock_mutex(&objLock);
    if (result == VK_SUCCESS) {
        if (pPhysicalDevices) {
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                create_physical_device(instance, pPhysicalDevices[i], VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT);
            }
        }
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

void explicit_GetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue *pQueue) {
    loader_platform_thread_lock_mutex(&objLock);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);

    get_dispatch_table(object_tracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);

    loader_platform_thread_lock_mutex(&objLock);
    addQueueInfo(queueNodeIndex, *pQueue);
    create_queue(device, *pQueue, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
    loader_platform_thread_unlock_mutex(&objLock);
}

VkResult explicit_MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
                            void **ppData) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= set_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall == VK_TRUE)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);

    return result;
}

void explicit_UnmapMemory(VkDevice device, VkDeviceMemory mem) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= reset_device_memory_status(device, mem, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, OBJSTATUS_GPU_MEM_MAPPED);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall == VK_TRUE)
        return;

    get_dispatch_table(object_tracker_device_table_map, device)->UnmapMemory(device, mem);
}

VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparse");

    for (uint32_t i = 0; i < bindInfoCount; i++) {
        for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; j++)
            validate_buffer(queue, pBindInfo[i].pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, false);
        for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; j++)
            validate_image(queue, pBindInfo[i].pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
        for (uint32_t j = 0; j < pBindInfo[i].imageBindCount; j++)
            validate_image(queue, pBindInfo[i].pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
    }

    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
    return result;
}

Jon Ashburn5484e0c2016-03-08 17:48:44 -0700779VkResult explicit_AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
780 VkCommandBuffer *pCommandBuffers) {
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700781 VkBool32 skipCall = VK_FALSE;
782 loader_platform_thread_lock_mutex(&objLock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700783 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
784 skipCall |= validate_command_pool(device, pAllocateInfo->commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700785 loader_platform_thread_unlock_mutex(&objLock);
786
787 if (skipCall) {
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700788 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700789 }
790
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700791 VkResult result =
792 get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700793
794 loader_platform_thread_lock_mutex(&objLock);
Jon Ashburnf19916e2016-01-11 13:12:43 -0700795 for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700796 alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
797 pAllocateInfo->level);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700798 }
799 loader_platform_thread_unlock_mutex(&objLock);
800
801 return result;
802}
803
VkResult explicit_AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                         VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |=
        validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
        skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i],
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    // Only track the new descriptor sets if the allocation actually succeeded.
    if (VK_SUCCESS == result) {
        loader_platform_thread_lock_mutex(&objLock);
        for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
            alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i],
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
        }
        loader_platform_thread_unlock_mutex(&objLock);
    }

    return result;
}

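// Illustrative sketch (not part of the original layer source): an application-side descriptor set
// allocation exercising the checks above. The sample_ function name is hypothetical and the pool
// and layout handles are assumed valid.
#if 0 /* sketch only -- never compiled */
static void sample_AllocateDescriptorSets(VkDevice device, VkDescriptorPool pool, VkDescriptorSetLayout layout) {
    VkDescriptorSetAllocateInfo allocInfo = {};
    allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    allocInfo.descriptorPool = pool; // checked by validate_descriptor_pool()
    allocInfo.descriptorSetCount = 1;
    allocInfo.pSetLayouts = &layout; // each entry checked by validate_descriptor_set_layout()

    VkDescriptorSet set = VK_NULL_HANDLE;
    vkAllocateDescriptorSets(device, &allocInfo, &set);
}
#endif
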
void explicit_FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                 const VkCommandBuffer *pCommandBuffers) {
    loader_platform_thread_lock_mutex(&objLock);
    validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);

    get_dispatch_table(object_tracker_device_table_map, device)
        ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        free_command_buffer(device, commandPool, *pCommandBuffers);
        pCommandBuffers++;
    }
    loader_platform_thread_unlock_mutex(&objLock);
}

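// A minimal sketch of the bookkeeping the loop above relies on, under the assumption that
// free_command_buffer() looks the handle up in VkCommandBufferMap, checks the pool it was
// allocated from, and drops the tracking node. The sketch_ name is hypothetical and the node
// ownership handling is an assumption; the real helper is defined elsewhere in this layer.
#if 0 /* sketch only -- never compiled */
static void sketch_free_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer) {
    (void)device; // unused in this sketch
    uint64_t handle = reinterpret_cast<uint64_t>(commandBuffer);
    auto itr = VkCommandBufferMap.find(handle);
    if (itr == VkCommandBufferMap.end()) {
        return; // never created or already freed -- would be reported as OBJTRACK_UNKNOWN_OBJECT
    }
    if (itr->second->parentObj != reinterpret_cast<uint64_t>(commandPool)) {
        // freed against a pool it was not allocated from -- OBJTRACK_COMMAND_POOL_MISMATCH territory
    }
    delete itr->second;           // assumption: tracking nodes are heap-allocated
    VkCommandBufferMap.erase(itr);
}
#endif
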
void explicit_DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    loader_platform_thread_lock_mutex(&objLock);
    // A swapchain's images are implicitly deleted when the swapchain is deleted.
    // Remove this swapchain's images from our map of such images.
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = swapchainImageMap.begin();
    while (itr != swapchainImageMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        if (pNode->parentObj == (uint64_t)(swapchain)) {
            swapchainImageMap.erase(itr++);
        } else {
            ++itr;
        }
    }
    destroy_swapchain_khr(device, swapchain);
    loader_platform_thread_unlock_mutex(&objLock);

    get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
}

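// The erase-while-iterating idiom used above, pulled out as a generic sketch (not layer code):
// the post-increment hands erase() the current element while itr has already moved on, so the
// only invalidated iterator is the one being erased. The sketch_ name is hypothetical.
#if 0 /* sketch only -- never compiled */
static void sketch_erase_children(std::unordered_map<uint64_t, OBJTRACK_NODE *> &objects, uint64_t parentHandle) {
    auto itr = objects.begin();
    while (itr != objects.end()) {
        if (itr->second->parentObj == parentHandle) {
            objects.erase(itr++); // itr advances first; only the erased element's iterator is invalidated
        } else {
            ++itr;
        }
    }
}
#endif
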
void explicit_FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
    loader_platform_thread_lock_mutex(&objLock);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);

    get_dispatch_table(object_tracker_device_table_map, device)->FreeMemory(device, mem, pAllocator);

    loader_platform_thread_lock_mutex(&objLock);
    destroy_device_memory(device, mem);
    loader_platform_thread_unlock_mutex(&objLock);
}

VkResult explicit_FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                     const VkDescriptorSet *pDescriptorSets) {
    loader_platform_thread_lock_mutex(&objLock);
    validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < count; i++) {
        free_descriptor_set(device, descriptorPool, *pDescriptorSets++);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

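// Illustrative sketch (not part of the original layer source): freeing individual descriptor sets
// is only valid when the pool was created with VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
// otherwise sets are reclaimed via vkResetDescriptorPool or vkDestroyDescriptorPool. The sample_
// function name is hypothetical and the handles are assumed valid.
#if 0 /* sketch only -- never compiled */
static void sample_FreeDescriptorSets(VkDevice device, VkDescriptorPool pool, VkDescriptorSet set) {
    // Routed through explicit_FreeDescriptorSets above, which validates the pool and device,
    // then removes the freed handle from the tracker.
    vkFreeDescriptorSets(device, pool, 1, &set);
}
#endif
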
void explicit_DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall) {
        return;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
    // Remove this pool's descriptor sets from our descriptorSet map.
    loader_platform_thread_lock_mutex(&objLock);
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkDescriptorSetMap.begin();
    while (itr != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        auto del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(descriptorPool)) {
            destroy_descriptor_set(device, (VkDescriptorSet)((*del_itr).first));
        }
    }
    destroy_descriptor_pool(device, descriptorPool);
    loader_platform_thread_unlock_mutex(&objLock);
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
}

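// The loop above uses a slightly different erase-safe idiom than explicit_DestroySwapchainKHR:
// the iterator is advanced unconditionally and the saved del_itr is handed to
// destroy_descriptor_set(). That stays valid under the assumption that destroy_descriptor_set()
// erases only that saved entry from VkDescriptorSetMap. A generic sketch of the same shape, with
// a hypothetical sketch_ name and assumed node ownership:
#if 0 /* sketch only -- never compiled */
static void sketch_destroy_pool_children(std::unordered_map<uint64_t, OBJTRACK_NODE *> &objects, uint64_t poolHandle) {
    auto itr = objects.begin();
    while (itr != objects.end()) {
        auto del_itr = itr++;            // advance before any erase can happen
        if (del_itr->second->parentObj == poolHandle) {
            delete del_itr->second;      // assumption: tracking nodes are heap-allocated
            objects.erase(del_itr);      // stands in for the helper erasing the same entry
        }
    }
}
#endif
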
void explicit_DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall) {
        return;
    }
    loader_platform_thread_lock_mutex(&objLock);
    // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
    // Remove this pool's cmdBuffers from our cmd buffer map.
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkCommandBufferMap.begin();
    unordered_map<uint64_t, OBJTRACK_NODE *>::iterator del_itr;
    while (itr != VkCommandBufferMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(commandPool)) {
            destroy_command_buffer(reinterpret_cast<VkCommandBuffer>((*del_itr).first),
                                   reinterpret_cast<VkCommandBuffer>((*del_itr).first));
        }
    }
    destroy_command_pool(device, commandPool);
    loader_platform_thread_unlock_mutex(&objLock);
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
}

VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);

    // pSwapchainImages is NULL on the count-query call; images are only tracked on the call that fills the array.
    if (pSwapchainImages != NULL) {
        loader_platform_thread_lock_mutex(&objLock);
        for (uint32_t i = 0; i < *pCount; i++) {
            create_swapchain_image_obj(device, pSwapchainImages[i], swapchain);
        }
        loader_platform_thread_unlock_mutex(&objLock);
    }
    return result;
}

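// Illustrative sketch (not part of the original layer source): the standard two-call enumeration
// pattern this wrapper sees -- a count query with a NULL array, then the filling call that makes
// the branch above register each presentable image with the swapchain as its parent. The sample_
// name and the fixed-size array are hypothetical simplifications.
#if 0 /* sketch only -- never compiled */
static void sample_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain) {
    uint32_t imageCount = 0;
    vkGetSwapchainImagesKHR(device, swapchain, &imageCount, NULL); // count query: nothing is tracked

    VkImage images[8];
    if (imageCount > 8) {
        imageCount = 8; // a real application would size the array from the first call
    }
    vkGetSwapchainImagesKHR(device, swapchain, &imageCount, images); // each image tracked by create_swapchain_image_obj()
}
#endif
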
// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                          const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                                          VkPipeline *pPipelines) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    if (pCreateInfos) {
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            if (pCreateInfos[idx0].basePipelineHandle) {
                skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
                                              VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
            }
            if (pCreateInfos[idx0].layout) {
                skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
                                                     VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
            }
            if (pCreateInfos[idx0].pStages) {
                for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
                    if (pCreateInfos[idx0].pStages[idx1].module) {
                        skipCall |= validate_shader_module(device, pCreateInfos[idx0].pStages[idx1].module,
                                                           VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
                    }
                }
            }
            if (pCreateInfos[idx0].renderPass) {
                skipCall |=
                    validate_render_pass(device, pCreateInfos[idx0].renderPass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, false);
            }
        }
    }
    if (pipelineCache) {
        skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    loader_platform_thread_lock_mutex(&objLock);
    if (result == VK_SUCCESS) {
        for (uint32_t idx2 = 0; idx2 < createInfoCount; ++idx2) {
            create_pipeline(device, pPipelines[idx2], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
        }
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

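// Illustrative sketch (not part of the original layer source): the subset of
// VkGraphicsPipelineCreateInfo fields the loop above inspects -- basePipelineHandle, layout, the
// pStages modules, and renderPass. Only those fields are populated; a real create info also needs
// the fixed-function state omitted here. The sample_ name and parameter handles are hypothetical.
#if 0 /* sketch only -- never compiled */
static void sample_CreateGraphicsPipeline(VkDevice device, VkPipelineCache cache, VkPipelineLayout layout,
                                          VkRenderPass renderPass, VkShaderModule vertModule) {
    VkPipelineShaderStageCreateInfo stage = {};
    stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    stage.stage = VK_SHADER_STAGE_VERTEX_BIT;
    stage.module = vertModule;                      // checked by validate_shader_module()
    stage.pName = "main";

    VkGraphicsPipelineCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
    createInfo.stageCount = 1;
    createInfo.pStages = &stage;
    createInfo.layout = layout;                     // checked by validate_pipeline_layout()
    createInfo.renderPass = renderPass;             // checked by validate_render_pass()
    createInfo.basePipelineHandle = VK_NULL_HANDLE; // only validated when non-null
    // ...remaining fixed-function state omitted for brevity...

    VkPipeline pipeline = VK_NULL_HANDLE;
    vkCreateGraphicsPipelines(device, cache, 1, &createInfo, NULL, &pipeline);
}
#endif
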
// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                         const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
                                         VkPipeline *pPipelines) {
    VkBool32 skipCall = VK_FALSE;
    loader_platform_thread_lock_mutex(&objLock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    if (pCreateInfos) {
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            if (pCreateInfos[idx0].basePipelineHandle) {
                skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
                                              VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
            }
            if (pCreateInfos[idx0].layout) {
                skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
                                                     VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
            }
            if (pCreateInfos[idx0].stage.module) {
                skipCall |= validate_shader_module(device, pCreateInfos[idx0].stage.module,
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
            }
        }
    }
    if (pipelineCache) {
        skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
                          ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    loader_platform_thread_lock_mutex(&objLock);
    if (result == VK_SUCCESS) {
        for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
            create_pipeline(device, pPipelines[idx1], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
        }
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}
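
// Illustrative sketch (not part of the original layer source): a compute pipeline create info
// touching only the fields validated above -- the single stage's module, the layout, and an
// optional basePipelineHandle. The sample_ name and parameter handles are hypothetical.
#if 0 /* sketch only -- never compiled */
static void sample_CreateComputePipeline(VkDevice device, VkPipelineCache cache, VkPipelineLayout layout,
                                         VkShaderModule computeModule) {
    VkComputePipelineCreateInfo createInfo = {};
    createInfo.sType = VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO;
    createInfo.stage.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
    createInfo.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;
    createInfo.stage.module = computeModule;        // checked by validate_shader_module()
    createInfo.stage.pName = "main";
    createInfo.layout = layout;                     // checked by validate_pipeline_layout()
    createInfo.basePipelineHandle = VK_NULL_HANDLE; // only validated when non-null

    VkPipeline pipeline = VK_NULL_HANDLE;
    vkCreateComputePipelines(device, cache, 1, &createInfo, NULL, &pipeline);
}
#endif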