blob: 0f58bbc5768b6678d6db10bcdd4357cadd692e45 [file] [log] [blame]
Mark Lobodzinski6eda00a2016-02-02 15:55:36 -07001/* Copyright (c) 2015-2016 The Khronos Group Inc.
2 * Copyright (c) 2015-2016 Valve Corporation
3 * Copyright (c) 2015-2016 LunarG, Inc.
4 * Copyright (C) 2015-2016 Google Inc.
Tobin Ehlis42586532014-11-14 13:01:02 -07005 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -06006 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
Tobin Ehlis42586532014-11-14 13:01:02 -07009 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -060010 * http://www.apache.org/licenses/LICENSE-2.0
Tobin Ehlis42586532014-11-14 13:01:02 -070011 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -060012 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
Courtney Goeltzenleuchter05559522015-10-30 11:14:30 -060017 *
18 * Author: Jon Ashburn <jon@lunarg.com>
19 * Author: Mark Lobodzinski <mark@lunarg.com>
20 * Author: Tobin Ehlis <tobin@lunarg.com>
Tobin Ehlis42586532014-11-14 13:01:02 -070021 */
22
Jeremy Hayes2f065b12016-04-13 10:54:17 -060023#include <mutex>
24
David Pinedo9316d3b2015-11-06 12:54:48 -070025#include "vulkan/vk_layer.h"
Courtney Goeltzenleuchterfce8cd22015-07-05 22:13:43 -060026#include "vk_layer_extension_utils.h"
Courtney Goeltzenleuchterf579fa62015-06-10 17:39:03 -060027#include "vk_enum_string_helper.h"
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -070028#include "vk_layer_table.h"
Mark Lobodzinski1079e1b2016-03-15 14:21:59 -060029#include "vk_layer_utils.h"
Mark Lobodzinskifae78852015-06-23 11:35:12 -060030
// Object Tracker ERROR codes -- passed as the msgCode of each log_msg() call so
// tests/users can identify which validation check fired.
// NOTE(review): the tag `_OBJECT_TRACK_ERROR` (leading underscore + capital) is a
// reserved identifier in C++ -- consider renaming when the ABI allows.
typedef enum _OBJECT_TRACK_ERROR {
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_OBJECT_LEAK,              // OBJECT was not correctly freed/destroyed
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
    OBJTRACK_DESCRIPTOR_POOL_MISMATCH, // Descriptor Pools specified incorrectly
    OBJTRACK_COMMAND_POOL_MISMATCH,    // Command Pools specified incorrectly
} OBJECT_TRACK_ERROR;
41
// Object Status -- used to track state of individual objects.
// Stored in OBJTRACK_NODE::status as a bitmask of the flags below.
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits {
    OBJSTATUS_NONE = 0x00000000,                     // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED = 0x00000001,       // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND = 0x00000002,           // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND = 0x00000004,             // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND = 0x00000008,        // Color-blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010,      // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED = 0x00000020,           // Memory object is currently mapped
    OBJSTATUS_COMMAND_BUFFER_SECONDARY = 0x00000040, // Command Buffer is of type SECONDARY
} ObjectStatusFlagBits;
Chia-I Wuf8693382015-04-16 22:02:10 +080054
// Per-object bookkeeping record; one node is allocated for every Vulkan object
// this layer tracks and stored in the per-type handle->node maps below.
typedef struct _OBJTRACK_NODE {
    uint64_t vkObj;                     // Object handle
    VkDebugReportObjectTypeEXT objType; // Object type identifier
    ObjectStatusFlags status;           // Object state (ObjectStatusFlagBits mask)
    uint64_t parentObj;                 // Parent object (e.g. the pool a command buffer came from)
    uint64_t belongsTo;                 // Object Scope -- owning device/instance
} OBJTRACK_NODE;
Mark Lobodzinskiaae93e52015-02-09 10:20:53 -060062
// prototype for extension functions -- exposed when the "OBJTRACK_EXTENSIONS"
// pseudo-extension is enabled at device-create time
uint64_t objTrackGetObjectCount(VkDevice device);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkDebugReportObjectTypeEXT type);

// Func ptr typedefs matching the extension entry points above
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkDebugReportObjectTypeEXT);
Mark Lobodzinskifae78852015-06-23 11:35:12 -060070
Cody Northrop55443ef2015-09-28 15:09:32 -060071struct layer_data {
Mark Lobodzinskifae78852015-06-23 11:35:12 -060072 debug_report_data *report_data;
Cody Northrop9c93ec52016-04-28 09:55:08 -060073 // TODO: put instance data here
74 std::vector<VkDebugReportCallbackEXT> logging_callback;
75 bool wsi_enabled;
76 bool objtrack_extensions_enabled;
Ian Elliotted6b5ac2016-04-28 09:08:13 -060077 // The following are for keeping track of the temporary callbacks that can
78 // be used in vkCreateInstance and vkDestroyInstance:
79 uint32_t num_tmp_callbacks;
80 VkDebugReportCallbackCreateInfoEXT *tmp_dbg_create_infos;
81 VkDebugReportCallbackEXT *tmp_callbacks;
Cody Northrop55443ef2015-09-28 15:09:32 -060082
Ian Elliotted6b5ac2016-04-28 09:08:13 -060083 layer_data()
84 : report_data(nullptr), wsi_enabled(false), objtrack_extensions_enabled(false), num_tmp_callbacks(0),
85 tmp_dbg_create_infos(nullptr), tmp_callbacks(nullptr){};
Cody Northrop55443ef2015-09-28 15:09:32 -060086};
Mark Lobodzinskifae78852015-06-23 11:35:12 -060087
// Per-instance record of which instance-level extensions were enabled
struct instExts {
    bool wsi_enabled;
};

// Keyed by the instance's dispatch table pointer -- populated in
// createInstanceRegisterExtensions()
static std::unordered_map<void *, struct instExts> instanceExtMap;
// Per-dispatch-key layer_data records, for both instances and devices
static std::unordered_map<void *, layer_data *> layer_data_map;
static device_table_map object_tracker_device_table_map;
static instance_table_map object_tracker_instance_table_map;
Mark Lobodzinskifae78852015-06-23 11:35:12 -060096
// We need additionally validate image usage using a separate map
// of swapchain-created images
static std::unordered_map<uint64_t, OBJTRACK_NODE *> swapchainImageMap;

// Monotonically increasing id stamped into each "CREATE" log message
static long long unsigned int object_track_index = 0;
// Guards this layer's global tracking state
static std::mutex global_lock;

// Array size for the per-type object counters (one slot per VkDebugReportObjectTypeEXT value)
#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT + 1)

static uint64_t numObjs[NUM_OBJECT_TYPES] = {0}; // Live object count, per object type
static uint64_t numTotalObjs = 0;                // Live object count across all types
// Queue-family properties captured by setGpuQueueInfoState(); NULL until then
static VkQueueFamilyProperties *queueInfo = NULL;
static uint32_t queueCount = 0;

// Explicit instantiation of the layer-data lookup helper for this layer's map type
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);
Mark Lobodzinski2eeb3c62015-09-01 08:52:55 -0600112
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600113//
114// Internal Object Tracker Functions
115//
116
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700117static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700118 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
119 VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);
Jon Ashburn8acd2332015-09-16 18:08:32 -0600120 PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700121 pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
122 pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
123 pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
124 pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
125 pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
Ian Elliott1064fe32015-07-06 14:31:32 -0600126 my_device_data->wsi_enabled = false;
Jon Ashburnf19916e2016-01-11 13:12:43 -0700127 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700128 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
Ian Elliott1064fe32015-07-06 14:31:32 -0600129 my_device_data->wsi_enabled = true;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600130
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700131 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], "OBJTRACK_EXTENSIONS") == 0)
Courtney Goeltzenleuchterfce8cd22015-07-05 22:13:43 -0600132 my_device_data->objtrack_extensions_enabled = true;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600133 }
134}
135
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700136static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
Jon Ashburn3dc39382015-09-17 10:00:32 -0600137 uint32_t i;
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700138 VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(object_tracker_instance_table_map, instance);
Jon Ashburn3dc39382015-09-17 10:00:32 -0600139 PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;
Michael Lentine56512bb2016-03-02 17:28:55 -0600140
141 pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700142 pDisp->GetPhysicalDeviceSurfaceSupportKHR =
143 (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
144 pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
145 (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
146 pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
147 (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
148 pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
149 (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
Mark Lobodzinskie86e1382015-11-24 15:50:44 -0700150
151#if VK_USE_PLATFORM_WIN32_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700152 pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
153 pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
154 (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700155#endif // VK_USE_PLATFORM_WIN32_KHR
156#ifdef VK_USE_PLATFORM_XCB_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700157 pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
158 pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
159 (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700160#endif // VK_USE_PLATFORM_XCB_KHR
161#ifdef VK_USE_PLATFORM_XLIB_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700162 pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
163 pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
164 (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700165#endif // VK_USE_PLATFORM_XLIB_KHR
166#ifdef VK_USE_PLATFORM_MIR_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700167 pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
168 pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
169 (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700170#endif // VK_USE_PLATFORM_MIR_KHR
171#ifdef VK_USE_PLATFORM_WAYLAND_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700172 pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
173 pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
174 (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700175#endif // VK_USE_PLATFORM_WAYLAND_KHR
176#ifdef VK_USE_PLATFORM_ANDROID_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700177 pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700178#endif // VK_USE_PLATFORM_ANDROID_KHR
Mark Lobodzinskie86e1382015-11-24 15:50:44 -0700179
Jon Ashburn3dc39382015-09-17 10:00:32 -0600180 instanceExtMap[pDisp].wsi_enabled = false;
Jon Ashburnf19916e2016-01-11 13:12:43 -0700181 for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700182 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
Jon Ashburn3dc39382015-09-17 10:00:32 -0600183 instanceExtMap[pDisp].wsi_enabled = true;
Jon Ashburn3dc39382015-09-17 10:00:32 -0600184 }
185}
186
// Indicate device or instance dispatch table type
typedef enum _DispTableType {
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;
192
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700193debug_report_data *mdd(const void *object) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600194 dispatch_key key = get_dispatch_key(object);
195 layer_data *my_data = get_my_data_ptr(key, layer_data_map);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600196 return my_data->report_data;
197}
198
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700199debug_report_data *mid(VkInstance object) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600200 dispatch_key key = get_dispatch_key(object);
201 layer_data *my_data = get_my_data_ptr(key, layer_data_map);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600202 return my_data->report_data;
203}
204
// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory mem;          // Memory object referenced by the queue
    struct _OT_MEM_INFO *pNextMI; // Next node in the list
    struct _OT_MEM_INFO *pPrevMI; // Previous node in the list

} OT_MEM_INFO;
212
// Track Queue information -- one node per queue, linked into g_pQueueInfo
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO *pMemRefList;       // Head of this queue's mem-ref list
    struct _OT_QUEUE_INFO *pNextQI; // Next queue in the global list
    uint32_t queueNodeIndex;        // Queue-family index (indexes queueInfo[])
    VkQueue queue;                  // The queue handle this node describes
    uint32_t refCount;
} OT_QUEUE_INFO;

// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;
224
// Convert an object type enum to an object type array index.
// Currently an identity mapping: VkDebugReportObjectTypeEXT values are dense from 0.
static uint32_t objTypeToIndex(uint32_t objType) {
    return objType;
}
230
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600231// Add new queue to head of global queue list
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700232static void addQueueInfo(uint32_t queueNodeIndex, VkQueue queue) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600233 OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;
234
235 if (pQueueInfo != NULL) {
236 memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700237 pQueueInfo->queue = queue;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600238 pQueueInfo->queueNodeIndex = queueNodeIndex;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700239 pQueueInfo->pNextQI = g_pQueueInfo;
240 g_pQueueInfo = pQueueInfo;
241 } else {
242 log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, reinterpret_cast<uint64_t>(queue),
243 __LINE__, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
244 "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600245 }
246}
247
248// Destroy memRef lists and free all memory
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700249static void destroyQueueMemRefLists(void) {
250 OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600251 OT_QUEUE_INFO *pDelQueueInfo = NULL;
252 while (pQueueInfo != NULL) {
253 OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
254 while (pMemInfo != NULL) {
255 OT_MEM_INFO *pDelMemInfo = pMemInfo;
256 pMemInfo = pMemInfo->pNextMI;
257 delete pDelMemInfo;
258 }
259 pDelQueueInfo = pQueueInfo;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700260 pQueueInfo = pQueueInfo->pNextQI;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600261 delete pDelQueueInfo;
262 }
263 g_pQueueInfo = pQueueInfo;
264}
265
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700266static void setGpuQueueInfoState(uint32_t count, void *pData) {
Tony Barbour59a47322015-06-24 16:06:58 -0600267 queueCount = count;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700268 queueInfo = (VkQueueFamilyProperties *)realloc((void *)queueInfo, count * sizeof(VkQueueFamilyProperties));
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600269 if (queueInfo != NULL) {
Cody Northropd0802882015-08-03 17:04:53 -0600270 memcpy(queueInfo, pData, count * sizeof(VkQueueFamilyProperties));
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600271 }
272}
273
274// Check Queue type flags for selected queue operations
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700275static void validateQueueFlags(VkQueue queue, const char *function) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600276 OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
277 while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
278 pQueueInfo = pQueueInfo->pNextQI;
279 }
280 if (pQueueInfo != NULL) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700281 if ((queueInfo != NULL) && (queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) == 0) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700282 log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
283 reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
284 "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600285 }
286 }
287}
288
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600289#include "vk_dispatch_table_helper.h"
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600290
// Process this layer's debug-report settings and register any configured
// logging callbacks under the "lunarg_object_tracker" settings key.
static void init_object_tracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {

    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");
}
295
//
// Forward declarations
//

// create_*   : record a newly created object of the given type
// validate_* : return non-zero (skip-call) if the object is unknown/invalid;
//              null_allowed permits VK_NULL_HANDLE without complaint
// destroy_*  : remove an object from tracking
static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType);
static void create_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType);
static void create_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_device(VkPhysicalDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType);
static VkBool32 validate_image(VkQueue dispatchable_object, VkImage object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType,
                                  bool null_allowed);
static VkBool32 validate_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType,
                                bool null_allowed);
static VkBool32 validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object, VkDebugReportObjectTypeEXT objType,
                                         bool null_allowed);
static VkBool32 validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object,
                                               VkDebugReportObjectTypeEXT objType, bool null_allowed);
static VkBool32 validate_command_pool(VkDevice dispatchable_object, VkCommandPool object, VkDebugReportObjectTypeEXT objType,
                                      bool null_allowed);
static VkBool32 validate_buffer(VkQueue dispatchable_object, VkBuffer object, VkDebugReportObjectTypeEXT objType,
                                bool null_allowed);
static void create_pipeline(VkDevice dispatchable_object, VkPipeline vkObj, VkDebugReportObjectTypeEXT objType);
static VkBool32 validate_pipeline_cache(VkDevice dispatchable_object, VkPipelineCache object, VkDebugReportObjectTypeEXT objType,
                                        bool null_allowed);
static VkBool32 validate_render_pass(VkDevice dispatchable_object, VkRenderPass object, VkDebugReportObjectTypeEXT objType,
                                     bool null_allowed);
static VkBool32 validate_shader_module(VkDevice dispatchable_object, VkShaderModule object, VkDebugReportObjectTypeEXT objType,
                                       bool null_allowed);
static VkBool32 validate_pipeline_layout(VkDevice dispatchable_object, VkPipelineLayout object, VkDebugReportObjectTypeEXT objType,
                                         bool null_allowed);
static VkBool32 validate_pipeline(VkDevice dispatchable_object, VkPipeline object, VkDebugReportObjectTypeEXT objType,
                                  bool null_allowed);
static void destroy_command_pool(VkDevice dispatchable_object, VkCommandPool object);
static void destroy_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
static void destroy_descriptor_set(VkDevice dispatchable_object, VkDescriptorSet object);
static void destroy_device_memory(VkDevice dispatchable_object, VkDeviceMemory object);
static void destroy_swapchain_khr(VkDevice dispatchable_object, VkSwapchainKHR object);
static VkBool32 set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
                                         ObjectStatusFlags status_flag);
static VkBool32 reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
                                           ObjectStatusFlags status_flag);
// Per-type handle->node tracking maps (definitions generated elsewhere)
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkPhysicalDeviceMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkDeviceMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkImageMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkQueueMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkDescriptorSetMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkBufferMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkFenceMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSemaphoreMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandPoolMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandBufferMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSwapchainKHRMap;
extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSurfaceKHRMap;
Tony Barboura05dbaa2015-07-09 17:31:46 -0600350
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700351static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType) {
352 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
353 OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
354 string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
Tobin Ehlisec598302015-09-15 15:02:17 -0600355
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700356 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
Tobin Ehlisec598302015-09-15 15:02:17 -0600357 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700358 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700359 pNewObjNode->status = OBJSTATUS_NONE;
360 pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
Michael Lentine13803dc2015-11-04 14:35:12 -0800361 VkPhysicalDeviceMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
Tobin Ehlisec598302015-09-15 15:02:17 -0600362 uint32_t objIndex = objTypeToIndex(objType);
363 numObjs[objIndex]++;
364 numTotalObjs++;
365}
366
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700367static void create_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR vkObj, VkDebugReportObjectTypeEXT objType) {
Mark Lobodzinskib49b6e52015-11-26 10:59:58 -0700368 // TODO: Add tracking of surface objects
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700369 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
370 "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
371 string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));
Tobin Ehlis86684f92016-01-05 10:33:58 -0700372
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700373 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
Tobin Ehlis86684f92016-01-05 10:33:58 -0700374 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700375 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700376 pNewObjNode->status = OBJSTATUS_NONE;
377 pNewObjNode->vkObj = (uint64_t)(vkObj);
Tobin Ehlis86684f92016-01-05 10:33:58 -0700378 VkSurfaceKHRMap[(uint64_t)vkObj] = pNewObjNode;
379 uint32_t objIndex = objTypeToIndex(objType);
380 numObjs[objIndex]++;
381 numTotalObjs++;
Mark Lobodzinskib49b6e52015-11-26 10:59:58 -0700382}
383
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700384static void destroy_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR object) {
Mark Young93ecb1d2016-01-13 13:47:16 -0700385 uint64_t object_handle = (uint64_t)(object);
Tobin Ehlis86684f92016-01-05 10:33:58 -0700386 if (VkSurfaceKHRMap.find(object_handle) != VkSurfaceKHRMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700387 OBJTRACK_NODE *pNode = VkSurfaceKHRMap[(uint64_t)object];
Tobin Ehlis86684f92016-01-05 10:33:58 -0700388 uint32_t objIndex = objTypeToIndex(pNode->objType);
389 assert(numTotalObjs > 0);
390 numTotalObjs--;
391 assert(numObjs[objIndex] > 0);
392 numObjs[objIndex]--;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700393 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__,
394 OBJTRACK_NONE, "OBJTRACK",
395 "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
396 string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(object), numTotalObjs, numObjs[objIndex],
397 string_VkDebugReportObjectTypeEXT(pNode->objType));
Tobin Ehlis86684f92016-01-05 10:33:58 -0700398 delete pNode;
399 VkSurfaceKHRMap.erase(object_handle);
400 } else {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700401 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__,
402 OBJTRACK_NONE, "OBJTRACK",
403 "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
Tobin Ehlis86684f92016-01-05 10:33:58 -0700404 }
Mark Lobodzinskib49b6e52015-11-26 10:59:58 -0700405}
406
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700407static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj,
408 VkDebugReportObjectTypeEXT objType, VkCommandBufferLevel level) {
409 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE,
410 "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
411 string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
Tony Barboura05dbaa2015-07-09 17:31:46 -0600412
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700413 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
414 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700415 pNewObjNode->belongsTo = (uint64_t)device;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700416 pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
417 pNewObjNode->parentObj = (uint64_t)commandPool;
Mark Lobodzinski2fba0322016-01-23 18:31:23 -0700418 if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
419 pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
420 } else {
421 pNewObjNode->status = OBJSTATUS_NONE;
422 }
Michael Lentine13803dc2015-11-04 14:35:12 -0800423 VkCommandBufferMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
Tony Barboura05dbaa2015-07-09 17:31:46 -0600424 uint32_t objIndex = objTypeToIndex(objType);
425 numObjs[objIndex]++;
426 numTotalObjs++;
427}
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700428
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600429static bool validate_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer) {
430 bool skipCall = false;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700431 uint64_t object_handle = reinterpret_cast<uint64_t>(commandBuffer);
432 if (VkCommandBufferMap.find(object_handle) != VkCommandBufferMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700433 OBJTRACK_NODE *pNode = VkCommandBufferMap[(uint64_t)commandBuffer];
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700434
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700435 if (pNode->parentObj != (uint64_t)(commandPool)) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600436 skipCall |= log_msg(
437 mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_COMMAND_POOL_MISMATCH,
438 "OBJTRACK", "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
439 " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
440 reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, reinterpret_cast<uint64_t>(commandPool));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700441 }
442 } else {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600443 skipCall |= log_msg(
444 mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
445 "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700446 }
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600447 return skipCall;
448}
449
450static bool free_command_buffer(VkDevice device, VkCommandBuffer commandBuffer) {
451 bool skipCall = false;
452 auto cbItem = VkCommandBufferMap.find(reinterpret_cast<uint64_t>(commandBuffer));
453 if (cbItem != VkCommandBufferMap.end()) {
454 OBJTRACK_NODE *pNode = cbItem->second;
455 uint32_t objIndex = objTypeToIndex(pNode->objType);
456 assert(numTotalObjs > 0);
457 numTotalObjs--;
458 assert(numObjs[objIndex] > 0);
459 numObjs[objIndex]--;
460 skipCall |= log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType,
461 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, OBJTRACK_NONE, "OBJTRACK",
462 "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
463 string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer),
464 numTotalObjs, numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
465 delete pNode;
466 VkCommandBufferMap.erase(cbItem);
467 }
468 return skipCall;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700469}
470
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700471static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj,
472 VkDebugReportObjectTypeEXT objType) {
Mark Lobodzinski510e20d2016-02-11 09:26:16 -0700473 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700474 "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
475 (uint64_t)(vkObj));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700476
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700477 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
478 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700479 pNewObjNode->belongsTo = (uint64_t)device;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700480 pNewObjNode->status = OBJSTATUS_NONE;
481 pNewObjNode->vkObj = (uint64_t)(vkObj);
482 pNewObjNode->parentObj = (uint64_t)descriptorPool;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700483 VkDescriptorSetMap[(uint64_t)vkObj] = pNewObjNode;
484 uint32_t objIndex = objTypeToIndex(objType);
485 numObjs[objIndex]++;
486 numTotalObjs++;
487}
488
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600489static bool validate_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet) {
490 bool skipCall = false;
Mark Young93ecb1d2016-01-13 13:47:16 -0700491 uint64_t object_handle = (uint64_t)(descriptorSet);
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600492 auto dsItem = VkDescriptorSetMap.find(object_handle);
493 if (dsItem != VkDescriptorSetMap.end()) {
494 OBJTRACK_NODE *pNode = dsItem->second;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700495
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600496 if (pNode->parentObj != reinterpret_cast<uint64_t>(descriptorPool)) {
497 skipCall |=
498 log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
499 OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
500 "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
501 " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
502 reinterpret_cast<uint64_t>(descriptorSet), pNode->parentObj, reinterpret_cast<uint64_t>(descriptorPool));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700503 }
504 } else {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600505 skipCall |= log_msg(
506 mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
507 "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700508 }
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600509 return skipCall;
510}
511
512static bool free_descriptor_set(VkDevice device, VkDescriptorSet descriptorSet) {
513 bool skipCall = false;
514 auto dsItem = VkDescriptorSetMap.find(reinterpret_cast<uint64_t>(descriptorSet));
515 if (dsItem != VkDescriptorSetMap.end()) {
516 OBJTRACK_NODE *pNode = dsItem->second;
517 uint32_t objIndex = objTypeToIndex(pNode->objType);
518 assert(numTotalObjs > 0);
519 numTotalObjs--;
520 assert(numObjs[objIndex] > 0);
521 numObjs[objIndex]--;
522 skipCall |= log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, reinterpret_cast<uint64_t>(descriptorSet), __LINE__,
523 OBJTRACK_NONE, "OBJTRACK",
524 "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
525 string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(descriptorSet), numTotalObjs,
526 numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
527 delete pNode;
528 VkDescriptorSetMap.erase(dsItem);
529 }
530 return skipCall;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700531}
532
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700533static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType) {
534 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
535 OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
536 string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
Tobin Ehlisec598302015-09-15 15:02:17 -0600537
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700538 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
Tobin Ehlisec598302015-09-15 15:02:17 -0600539 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700540 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700541 pNewObjNode->status = OBJSTATUS_NONE;
542 pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
Michael Lentine13803dc2015-11-04 14:35:12 -0800543 VkQueueMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
Tobin Ehlisec598302015-09-15 15:02:17 -0600544 uint32_t objIndex = objTypeToIndex(objType);
545 numObjs[objIndex]++;
546 numTotalObjs++;
547}
// Begin tracking an image owned by a swapchain. Swapchain images are recorded
// in their own map (swapchainImageMap) with the owning swapchain as parentObj,
// separate from application-created images. Note: numObjs/numTotalObjs are
// NOT incremented here, unlike the other create_*/alloc_* helpers — swapchain
// images are driver-owned, and their cleanup path removes map entries without
// touching the counters.
static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain) {
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)vkObj,
            __LINE__, OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            "SwapchainImage", (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->objType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)vkObj;
    pNewObjNode->parentObj = (uint64_t)swapchain;
    swapchainImageMap[(uint64_t)(vkObj)] = pNewObjNode;
}
561
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700562static void create_device(VkInstance dispatchable_object, VkDevice vkObj, VkDebugReportObjectTypeEXT objType) {
563 log_msg(mid(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
564 "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
565 string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700566
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700567 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700568 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
569 pNewObjNode->objType = objType;
570 pNewObjNode->status = OBJSTATUS_NONE;
571 pNewObjNode->vkObj = (uint64_t)(vkObj);
572 VkDeviceMap[(uint64_t)vkObj] = pNewObjNode;
573 uint32_t objIndex = objTypeToIndex(objType);
574 numObjs[objIndex]++;
575 numTotalObjs++;
576}
577
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600578//
579// Non-auto-generated API functions called by generated code
580//
// Layer intercept for vkCreateInstance. Walks the layer chain to create the
// instance downstream, then initializes this layer's per-instance state:
// dispatch table, temporary debug-report callbacks, and object tracking.
// The chain_info manipulation must happen in exactly this order — the next
// layer's GetInstanceProcAddr must be fetched before advancing pLayerInfo.
VkResult explicit_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                 VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        // Next link in the chain cannot create an instance — abort.
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    initInstanceTable(*pInstance, fpGetInstanceProcAddr, object_tracker_instance_table_map);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(object_tracker_instance_table_map, *pInstance);

    // Look for one or more debug report create info structures, and copy the
    // callback(s) for each one found (for use by vkDestroyInstance)
    layer_copy_tmp_callbacks(pCreateInfo->pNext, &my_data->num_tmp_callbacks, &my_data->tmp_dbg_create_infos,
                             &my_data->tmp_callbacks);

    my_data->report_data = debug_report_create_instance(pInstanceTable, *pInstance, pCreateInfo->enabledExtensionCount,
                                                        pCreateInfo->ppEnabledExtensionNames);

    init_object_tracker(my_data, pAllocator);
    createInstanceRegisterExtensions(pCreateInfo, *pInstance);

    // Track the newly created instance handle itself.
    create_instance(*pInstance, *pInstance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT);

    return result;
}
619
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700620void explicit_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice gpu, uint32_t *pCount, VkQueueFamilyProperties *pProperties) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700621 get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);
Tony Barbour59a47322015-06-24 16:06:58 -0600622
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600623 std::lock_guard<std::mutex> lock(global_lock);
624 if (pProperties != NULL) {
Cody Northropd0802882015-08-03 17:04:53 -0600625 setGpuQueueInfoState(*pCount, pProperties);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600626 }
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600627}
628
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700629VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
630 VkDevice *pDevice) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600631 std::lock_guard<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700632 VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700633
634 assert(chain_info->u.pLayerInfo);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700635 PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
636 PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700637 PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700638 if (fpCreateDevice == NULL) {
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700639 return VK_ERROR_INITIALIZATION_FAILED;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600640 }
641
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700642 // Advance the link info for the next element on the chain
643 chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;
644
645 VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
646 if (result != VK_SUCCESS) {
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700647 return result;
648 }
649
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700650 layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
651 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
652 my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700653
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700654 initDeviceTable(*pDevice, fpGetDeviceProcAddr, object_tracker_device_table_map);
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700655
656 createDeviceRegisterExtensions(pCreateInfo, *pDevice);
657
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700658 if (VkPhysicalDeviceMap.find((uint64_t)gpu) != VkPhysicalDeviceMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700659 OBJTRACK_NODE *pNewObjNode = VkPhysicalDeviceMap[(uint64_t)gpu];
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700660 create_device((VkInstance)pNewObjNode->belongsTo, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
661 }
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -0700662
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600663 return result;
664}
665
// Layer intercept for vkEnumeratePhysicalDevices. Validates the instance
// handle, forwards the call (with the tracker lock released so it is not
// held across the driver), then registers each returned physical device.
VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                           VkPhysicalDevice *pPhysicalDevices) {
    VkBool32 skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)
                          ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    lock.lock();
    if (result == VK_SUCCESS) {
        // pPhysicalDevices is NULL on the count-only query; only track
        // handles when the caller actually retrieved them.
        if (pPhysicalDevices) {
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                create_physical_device(instance, pPhysicalDevices[i], VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT);
            }
        }
    }
    lock.unlock();
    return result;
}
687
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700688void explicit_GetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue *pQueue) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600689 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700690 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600691 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600692
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700693 get_dispatch_table(object_tracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600694
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600695 lock.lock();
Courtney Goeltzenleuchter06d89472015-10-20 16:40:38 -0600696 addQueueInfo(queueNodeIndex, *pQueue);
Courtney Goeltzenleuchter7415d5a2015-12-09 15:48:16 -0700697 create_queue(device, *pQueue, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600698}
699
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700700VkResult explicit_MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
701 void **ppData) {
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600702 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600703 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700704 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600705 lock.unlock();
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600706 if (skipCall == VK_TRUE)
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700707 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600708
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700709 VkResult result =
710 get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600711
712 return result;
713}
714
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700715void explicit_UnmapMemory(VkDevice device, VkDeviceMemory mem) {
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600716 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600717 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700718 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600719 lock.unlock();
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600720 if (skipCall == VK_TRUE)
721 return;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600722
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700723 get_dispatch_table(object_tracker_device_table_map, device)->UnmapMemory(device, mem);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600724}
725
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700726VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600727 std::unique_lock<std::mutex> lock(global_lock);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800728 validateQueueFlags(queue, "QueueBindSparse");
729
730 for (uint32_t i = 0; i < bindInfoCount; i++) {
731 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; j++)
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700732 validate_buffer(queue, pBindInfo[i].pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, false);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800733 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; j++)
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700734 validate_image(queue, pBindInfo[i].pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800735 for (uint32_t j = 0; j < pBindInfo[i].imageBindCount; j++)
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700736 validate_image(queue, pBindInfo[i].pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800737 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600738 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600739
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700740 VkResult result =
741 get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
Mark Lobodzinski16e8bef2015-07-03 15:58:09 -0600742 return result;
743}
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600744
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700745VkResult explicit_AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
746 VkCommandBuffer *pCommandBuffers) {
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700747 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600748 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700749 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
750 skipCall |= validate_command_pool(device, pAllocateInfo->commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600751 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700752
753 if (skipCall) {
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700754 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700755 }
756
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700757 VkResult result =
758 get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700759
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600760 lock.lock();
Jon Ashburnf19916e2016-01-11 13:12:43 -0700761 for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700762 alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
763 pAllocateInfo->level);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700764 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600765 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700766
767 return result;
768}
769
// Intercept for vkAllocateDescriptorSets: validates the device, the pool,
// and every set layout in the request, forwards the allocation, then tracks
// each new descriptor set under its pool.
VkResult explicit_AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                         VkDescriptorSet *pDescriptorSets) {
    VkBool32 skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |=
        validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
        skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i],
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
    }
    lock.unlock();
    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    // Lock released across the downstream call; reacquired only to record
    // the results.
    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    // Only track the new sets if the downstream allocation succeeded.
    if (VK_SUCCESS == result) {
        lock.lock();
        for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
            alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i],
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
        }
        lock.unlock();
    }

    return result;
}
800
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700801void explicit_FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
802 const VkCommandBuffer *pCommandBuffers) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600803 bool skipCall = false;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600804 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700805 validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
806 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600807 for (uint32_t i = 0; i < commandBufferCount; i++) {
808 skipCall |= validate_command_buffer(device, commandPool, pCommandBuffers[i]);
809 }
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700810
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600811 lock.unlock();
812 if (!skipCall) {
813 get_dispatch_table(object_tracker_device_table_map, device)
814 ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
815 }
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700816
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600817 lock.lock();
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700818 for (uint32_t i = 0; i < commandBufferCount; i++) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600819 free_command_buffer(device, pCommandBuffers[i]);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700820 }
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700821}
822
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700823void explicit_DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600824 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700825 // A swapchain's images are implicitly deleted when the swapchain is deleted.
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600826 // Remove this swapchain's images from our map of such images.
Mark Lobodzinskif93272b2016-05-02 12:08:24 -0600827 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = swapchainImageMap.begin();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600828 while (itr != swapchainImageMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700829 OBJTRACK_NODE *pNode = (*itr).second;
Mark Young93ecb1d2016-01-13 13:47:16 -0700830 if (pNode->parentObj == (uint64_t)(swapchain)) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700831 swapchainImageMap.erase(itr++);
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600832 } else {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700833 ++itr;
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600834 }
835 }
Tobin Ehlis86684f92016-01-05 10:33:58 -0700836 destroy_swapchain_khr(device, swapchain);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600837 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600838
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700839 get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600840}
841
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700842void explicit_FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600843 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700844 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600845 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600846
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700847 get_dispatch_table(object_tracker_device_table_map, device)->FreeMemory(device, mem, pAllocator);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600848
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600849 lock.lock();
Michael Lentine13803dc2015-11-04 14:35:12 -0800850 destroy_device_memory(device, mem);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600851}
Tony Barboura05dbaa2015-07-09 17:31:46 -0600852
// Intercept for vkFreeDescriptorSets: validates the pool, device, and each
// descriptor set (including pool membership), forwards the free only when all
// checks pass, then removes the sets from tracking.
VkResult explicit_FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
                                     const VkDescriptorSet *pDescriptorSets) {
    bool skipCall = false;
    // Default result covers the skipped-dispatch path.
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    for (uint32_t i = 0; i < count; i++) {
        skipCall |= validate_descriptor_set(device, descriptorPool, pDescriptorSets[i]);
    }

    lock.unlock();
    if (!skipCall) {
        result = get_dispatch_table(object_tracker_device_table_map, device)
                     ->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
    }

    lock.lock();
    // Sets are dropped from tracking even when the downstream free was
    // skipped due to a validation failure.
    for (uint32_t i = 0; i < count; i++) {
        free_descriptor_set(device, pDescriptorSets[i]);
    }
    return result;
}
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600876
// Intercept for vkDestroyDescriptorPool: validates the device and pool, then
// drops all tracked descriptor sets whose parent is this pool (they are
// implicitly freed when the pool dies) before destroying the pool's own
// tracker entry and forwarding the call.
void explicit_DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
    VkBool32 skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    lock.unlock();
    if (skipCall) {
        // On validation failure the downstream destroy is skipped entirely.
        return;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
    // Remove this pool's descriptor sets from our descriptorSet map.
    lock.lock();
    std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkDescriptorSetMap.begin();
    while (itr != VkDescriptorSetMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        // Advance the live iterator before a possible erase inside
        // destroy_descriptor_set, so it is never invalidated.
        auto del_itr = itr++;
        if (pNode->parentObj == (uint64_t)(descriptorPool)) {
            destroy_descriptor_set(device, (VkDescriptorSet)((*del_itr).first));
        }
    }
    destroy_descriptor_pool(device, descriptorPool);
    lock.unlock();
    get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
}
901
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700902void explicit_DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600903 VkBool32 skipCall = false;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600904 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700905 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
906 skipCall |= validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600907 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700908 if (skipCall) {
909 return;
910 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600911 lock.lock();
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700912 // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700913 // Remove this pool's cmdBuffers from our cmd buffer map.
Mark Lobodzinskif93272b2016-05-02 12:08:24 -0600914 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkCommandBufferMap.begin();
915 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator del_itr;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700916 while (itr != VkCommandBufferMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700917 OBJTRACK_NODE *pNode = (*itr).second;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700918 del_itr = itr++;
Mark Young93ecb1d2016-01-13 13:47:16 -0700919 if (pNode->parentObj == (uint64_t)(commandPool)) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600920 skipCall |= validate_command_buffer(device, commandPool, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
921 free_command_buffer(device, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700922 }
923 }
924 destroy_command_pool(device, commandPool);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600925 lock.unlock();
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700926 get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700927}
928
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700929VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600930 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600931 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700932 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600933 lock.unlock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600934 if (skipCall)
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700935 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600936
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700937 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
938 ->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600939
940 if (pSwapchainImages != NULL) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600941 lock.lock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600942 for (uint32_t i = 0; i < *pCount; i++) {
943 create_swapchain_image_obj(device, pSwapchainImages[i], swapchain);
944 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600945 lock.unlock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600946 }
947 return result;
948}
949
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700950// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700951VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
952 const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
953 VkPipeline *pPipelines) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700954 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600955 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700956 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700957 if (pCreateInfos) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700958 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700959 if (pCreateInfos[idx0].basePipelineHandle) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700960 skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
961 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700962 }
963 if (pCreateInfos[idx0].layout) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700964 skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
965 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700966 }
967 if (pCreateInfos[idx0].pStages) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700968 for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700969 if (pCreateInfos[idx0].pStages[idx1].module) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700970 skipCall |= validate_shader_module(device, pCreateInfos[idx0].pStages[idx1].module,
971 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700972 }
973 }
974 }
975 if (pCreateInfos[idx0].renderPass) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700976 skipCall |=
977 validate_render_pass(device, pCreateInfos[idx0].renderPass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700978 }
979 }
980 }
981 if (pipelineCache) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700982 skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700983 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600984 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700985 if (skipCall)
986 return VK_ERROR_VALIDATION_FAILED_EXT;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700987 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
988 ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600989 lock.lock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700990 if (result == VK_SUCCESS) {
991 for (uint32_t idx2 = 0; idx2 < createInfoCount; ++idx2) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700992 create_pipeline(device, pPipelines[idx2], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700993 }
994 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600995 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700996 return result;
997}
998
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700999// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001000VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
1001 const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
1002 VkPipeline *pPipelines) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001003 VkBool32 skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001004 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001005 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001006 if (pCreateInfos) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001007 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001008 if (pCreateInfos[idx0].basePipelineHandle) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001009 skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
1010 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001011 }
1012 if (pCreateInfos[idx0].layout) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001013 skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
1014 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001015 }
1016 if (pCreateInfos[idx0].stage.module) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001017 skipCall |= validate_shader_module(device, pCreateInfos[idx0].stage.module,
1018 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001019 }
1020 }
1021 }
1022 if (pipelineCache) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001023 skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001024 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001025 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001026 if (skipCall)
1027 return VK_ERROR_VALIDATION_FAILED_EXT;
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001028 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
1029 ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001030 lock.lock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001031 if (result == VK_SUCCESS) {
1032 for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001033 create_pipeline(device, pPipelines[idx1], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001034 }
1035 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001036 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001037 return result;
1038}