blob: 67361ed4a46017ac2637930d243f76e26ef2f3c3 [file] [log] [blame]
Mark Lobodzinski6eda00a2016-02-02 15:55:36 -07001/* Copyright (c) 2015-2016 The Khronos Group Inc.
2 * Copyright (c) 2015-2016 Valve Corporation
3 * Copyright (c) 2015-2016 LunarG, Inc.
4 * Copyright (C) 2015-2016 Google Inc.
Tobin Ehlis42586532014-11-14 13:01:02 -07005 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -06006 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
Tobin Ehlis42586532014-11-14 13:01:02 -07009 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -060010 * http://www.apache.org/licenses/LICENSE-2.0
Tobin Ehlis42586532014-11-14 13:01:02 -070011 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -060012 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
Courtney Goeltzenleuchter05559522015-10-30 11:14:30 -060017 *
18 * Author: Jon Ashburn <jon@lunarg.com>
19 * Author: Mark Lobodzinski <mark@lunarg.com>
20 * Author: Tobin Ehlis <tobin@lunarg.com>
Tobin Ehlis42586532014-11-14 13:01:02 -070021 */
22
Jeremy Hayes2f065b12016-04-13 10:54:17 -060023#include <mutex>
24
David Pinedo9316d3b2015-11-06 12:54:48 -070025#include "vulkan/vk_layer.h"
Courtney Goeltzenleuchterfce8cd22015-07-05 22:13:43 -060026#include "vk_layer_extension_utils.h"
Courtney Goeltzenleuchterf579fa62015-06-10 17:39:03 -060027#include "vk_enum_string_helper.h"
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -070028#include "vk_layer_table.h"
Mark Lobodzinski1079e1b2016-03-15 14:21:59 -060029#include "vk_layer_utils.h"
Mark Lobodzinskifae78852015-06-23 11:35:12 -060030
Chia-I Wucdb70962016-05-13 14:07:36 +080031namespace object_tracker {
32
// Object Tracker ERROR codes
// Reported as the msgCode argument of log_msg() calls throughout this layer.
enum OBJECT_TRACK_ERROR {
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_OBJECT_LEAK,              // OBJECT was not correctly freed/destroyed
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
    OBJTRACK_DESCRIPTOR_POOL_MISMATCH, // Descriptor Pools specified incorrectly
    OBJTRACK_COMMAND_POOL_MISMATCH,    // Command Pools specified incorrectly
};
Tobin Ehlisca915872014-11-18 11:28:33 -070043
// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
enum ObjectStatusFlagBits {
    OBJSTATUS_NONE = 0x00000000,                     // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED = 0x00000001,       // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND = 0x00000002,           // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND = 0x00000004,             // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND = 0x00000008,        // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010,      // Depth/stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED = 0x00000020,           // Memory object is currently mapped
    OBJSTATUS_COMMAND_BUFFER_SECONDARY = 0x00000040, // Command Buffer is of type SECONDARY
};
Chia-I Wuf8693382015-04-16 22:02:10 +080056
// Per-object tracking record; one node is allocated for every Vulkan object
// this layer sees created, and stored in the per-type Vk*Map tables below.
struct OBJTRACK_NODE {
    uint64_t vkObj;                     // Object handle
    VkDebugReportObjectTypeEXT objType; // Object type identifier
    ObjectStatusFlags status;           // Object state
    uint64_t parentObj;                 // Parent object
    uint64_t belongsTo;                 // Object Scope -- owning device/instance
};
Mark Lobodzinskiaae93e52015-02-09 10:20:53 -060064
Tobin Ehlis42586532014-11-14 13:01:02 -070065// prototype for extension functions
Mark Lobodzinskifae78852015-06-23 11:35:12 -060066uint64_t objTrackGetObjectCount(VkDevice device);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -070067uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkDebugReportObjectTypeEXT type);
Mark Lobodzinskiaae93e52015-02-09 10:20:53 -060068
Tobin Ehlisca915872014-11-18 11:28:33 -070069// Func ptr typedefs
Mark Lobodzinskifae78852015-06-23 11:35:12 -060070typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -070071typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkDebugReportObjectTypeEXT);
Mark Lobodzinskifae78852015-06-23 11:35:12 -060072
Cody Northrop55443ef2015-09-28 15:09:32 -060073struct layer_data {
Chia-I Wu1ad50f42016-05-17 07:57:15 +080074 VkInstance instance;
75
Mark Lobodzinskifae78852015-06-23 11:35:12 -060076 debug_report_data *report_data;
Cody Northrop9c93ec52016-04-28 09:55:08 -060077 // TODO: put instance data here
78 std::vector<VkDebugReportCallbackEXT> logging_callback;
79 bool wsi_enabled;
80 bool objtrack_extensions_enabled;
Ian Elliotted6b5ac2016-04-28 09:08:13 -060081 // The following are for keeping track of the temporary callbacks that can
82 // be used in vkCreateInstance and vkDestroyInstance:
83 uint32_t num_tmp_callbacks;
84 VkDebugReportCallbackCreateInfoEXT *tmp_dbg_create_infos;
85 VkDebugReportCallbackEXT *tmp_callbacks;
Cody Northrop55443ef2015-09-28 15:09:32 -060086
Ian Elliotted6b5ac2016-04-28 09:08:13 -060087 layer_data()
88 : report_data(nullptr), wsi_enabled(false), objtrack_extensions_enabled(false), num_tmp_callbacks(0),
89 tmp_dbg_create_infos(nullptr), tmp_callbacks(nullptr){};
Cody Northrop55443ef2015-09-28 15:09:32 -060090};
Mark Lobodzinskifae78852015-06-23 11:35:12 -060091
// Records which surface/WSI extensions were enabled at vkCreateInstance time,
// keyed per-instance in instanceExtMap.
// FIX: members now carry in-class `= false` initializers so a
// default-constructed entry is well-defined instead of holding indeterminate
// values (callers that value-initialize with `= {}` are unaffected).
struct instance_extension_enables {
    bool wsi_enabled = false;     // VK_KHR_surface
    bool xlib_enabled = false;    // VK_KHR_xlib_surface
    bool xcb_enabled = false;     // VK_KHR_xcb_surface
    bool wayland_enabled = false; // VK_KHR_wayland_surface
    bool mir_enabled = false;     // VK_KHR_mir_surface
    bool android_enabled = false; // VK_KHR_android_surface
    bool win32_enabled = false;   // VK_KHR_win32_surface
};
101
Mark Lobodzinskife1f0662016-06-24 09:57:32 -0600102static std::unordered_map<void *, struct instance_extension_enables> instanceExtMap;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700103static std::unordered_map<void *, layer_data *> layer_data_map;
104static device_table_map object_tracker_device_table_map;
105static instance_table_map object_tracker_instance_table_map;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600106
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600107// We need additionally validate image usage using a separate map
108// of swapchain-created images
Mark Lobodzinskif93272b2016-05-02 12:08:24 -0600109static std::unordered_map<uint64_t, OBJTRACK_NODE *> swapchainImageMap;
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600110
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600111static long long unsigned int object_track_index = 0;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600112static std::mutex global_lock;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600113
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700114#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT + 1)
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600115
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700116static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};
117static uint64_t numTotalObjs = 0;
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600118std::vector<VkQueueFamilyProperties> queue_family_properties;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600119
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600120//
121// Internal Object Tracker Functions
122//
123
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700124static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700125 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
126 VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);
Jon Ashburn8acd2332015-09-16 18:08:32 -0600127 PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700128 pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
129 pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
130 pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
131 pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
132 pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
Ian Elliott1064fe32015-07-06 14:31:32 -0600133 my_device_data->wsi_enabled = false;
Jon Ashburnf19916e2016-01-11 13:12:43 -0700134 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700135 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
Ian Elliott1064fe32015-07-06 14:31:32 -0600136 my_device_data->wsi_enabled = true;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600137
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700138 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], "OBJTRACK_EXTENSIONS") == 0)
Courtney Goeltzenleuchterfce8cd22015-07-05 22:13:43 -0600139 my_device_data->objtrack_extensions_enabled = true;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600140 }
141}
142
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700143static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700144 VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(object_tracker_instance_table_map, instance);
Jon Ashburn3dc39382015-09-17 10:00:32 -0600145 PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;
Michael Lentine56512bb2016-03-02 17:28:55 -0600146
147 pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700148 pDisp->GetPhysicalDeviceSurfaceSupportKHR =
149 (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
150 pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
151 (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
152 pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
153 (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
154 pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
155 (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
Jon Ashburnbd846452016-06-30 10:21:55 -0600156#ifndef __ANDROID__
Petros Bantolas2b40be72016-04-15 11:02:59 +0100157 pDisp->GetPhysicalDeviceDisplayPropertiesKHR =
158 (PFN_vkGetPhysicalDeviceDisplayPropertiesKHR)gpa(instance, "vkGetPhysicalDeviceDisplayPropertiesKHR");
159 pDisp->GetPhysicalDeviceDisplayPlanePropertiesKHR =
160 (PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR)gpa(instance, "vkGetPhysicalDeviceDisplayPlanePropertiesKHR");
161 pDisp->GetDisplayPlaneSupportedDisplaysKHR =
162 (PFN_vkGetDisplayPlaneSupportedDisplaysKHR)gpa(instance, "vkGetDisplayPlaneSupportedDisplaysKHR");
163 pDisp->GetDisplayModePropertiesKHR =
164 (PFN_vkGetDisplayModePropertiesKHR)gpa(instance, "vkGetDisplayModePropertiesKHR");
165 pDisp->CreateDisplayModeKHR =
166 (PFN_vkCreateDisplayModeKHR)gpa(instance, "vkCreateDisplayModeKHR");
167 pDisp->GetDisplayPlaneCapabilitiesKHR =
168 (PFN_vkGetDisplayPlaneCapabilitiesKHR)gpa(instance, "vkGetDisplayPlaneCapabilitiesKHR");
169 pDisp->CreateDisplayPlaneSurfaceKHR =
170 (PFN_vkCreateDisplayPlaneSurfaceKHR)gpa(instance, "vkCreateDisplayPlaneSurfaceKHR");
Jon Ashburnbd846452016-06-30 10:21:55 -0600171#endif
Mark Lobodzinskie86e1382015-11-24 15:50:44 -0700172
173#if VK_USE_PLATFORM_WIN32_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700174 pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
175 pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
176 (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700177#endif // VK_USE_PLATFORM_WIN32_KHR
178#ifdef VK_USE_PLATFORM_XCB_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700179 pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
180 pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
181 (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700182#endif // VK_USE_PLATFORM_XCB_KHR
183#ifdef VK_USE_PLATFORM_XLIB_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700184 pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
185 pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
186 (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700187#endif // VK_USE_PLATFORM_XLIB_KHR
188#ifdef VK_USE_PLATFORM_MIR_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700189 pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
190 pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
191 (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700192#endif // VK_USE_PLATFORM_MIR_KHR
193#ifdef VK_USE_PLATFORM_WAYLAND_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700194 pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
195 pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
196 (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700197#endif // VK_USE_PLATFORM_WAYLAND_KHR
198#ifdef VK_USE_PLATFORM_ANDROID_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700199 pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700200#endif // VK_USE_PLATFORM_ANDROID_KHR
Mark Lobodzinskie86e1382015-11-24 15:50:44 -0700201
Mark Lobodzinskife1f0662016-06-24 09:57:32 -0600202 instanceExtMap[pDisp] = {};
203
204 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
205 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0) {
Jon Ashburn3dc39382015-09-17 10:00:32 -0600206 instanceExtMap[pDisp].wsi_enabled = true;
Mark Lobodzinskife1f0662016-06-24 09:57:32 -0600207 }
208#ifdef VK_USE_PLATFORM_XLIB_KHR
209 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XLIB_SURFACE_EXTENSION_NAME) == 0) {
210 instanceExtMap[pDisp].xlib_enabled = true;
211 }
212#endif
213#ifdef VK_USE_PLATFORM_XCB_KHR
214 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_XCB_SURFACE_EXTENSION_NAME) == 0) {
215 instanceExtMap[pDisp].xcb_enabled = true;
216 }
217#endif
218#ifdef VK_USE_PLATFORM_WAYLAND_KHR
219 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME) == 0) {
220 instanceExtMap[pDisp].wayland_enabled = true;
221 }
222#endif
223#ifdef VK_USE_PLATFORM_MIR_KHR
224 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_MIR_SURFACE_EXTENSION_NAME) == 0) {
225 instanceExtMap[pDisp].mir_enabled = true;
226 }
227#endif
228#ifdef VK_USE_PLATFORM_ANDROID_KHR
229 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_ANDROID_SURFACE_EXTENSION_NAME) == 0) {
230 instanceExtMap[pDisp].android_enabled = true;
231 }
232#endif
233#ifdef VK_USE_PLATFORM_WIN32_KHR
234 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_WIN32_SURFACE_EXTENSION_NAME) == 0) {
235 instanceExtMap[pDisp].win32_enabled = true;
236 }
237#endif
Jon Ashburn3dc39382015-09-17 10:00:32 -0600238 }
Mark Lobodzinskife1f0662016-06-24 09:57:32 -0600239
Jon Ashburn3dc39382015-09-17 10:00:32 -0600240}
241
// Indicate device or instance dispatch table type
enum DispTableType {
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
};
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600247
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700248debug_report_data *mdd(const void *object) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600249 dispatch_key key = get_dispatch_key(object);
250 layer_data *my_data = get_my_data_ptr(key, layer_data_map);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600251 return my_data->report_data;
252}
253
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700254debug_report_data *mid(VkInstance object) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600255 dispatch_key key = get_dispatch_key(object);
256 layer_data *my_data = get_my_data_ptr(key, layer_data_map);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600257 return my_data->report_data;
258}
259
// For each Queue's doubly linked-list of mem refs
struct OT_MEM_INFO {
    VkDeviceMemory mem;    // Referenced memory object
    OT_MEM_INFO *pNextMI;  // Next node in the list (NULL at tail)
    OT_MEM_INFO *pPrevMI;  // Previous node in the list
};
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600266
// Track Queue information
struct OT_QUEUE_INFO {
    OT_MEM_INFO *pMemRefList; // Head of this queue's mem-ref list
    uint32_t queueNodeIndex;  // Queue family index this queue was created from
    VkQueue queue;            // The queue handle itself
    uint32_t refCount;        // Reference count for this record
};
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600274
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600275// Global map of structures, one per queue
276std::unordered_map<VkQueue, OT_QUEUE_INFO *> queue_info_map;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600277
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600278#include "vk_dispatch_table_helper.h"
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600279
// Register this layer's debug/logging actions (callbacks, log settings) under
// the "lunarg_object_tracker" settings prefix.
static void init_object_tracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {

    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");
}
284
Tony Barboura05dbaa2015-07-09 17:31:46 -0600285//
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700286// Forward declarations
Tony Barboura05dbaa2015-07-09 17:31:46 -0600287//
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600288
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700289static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType);
290static void create_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType);
291static void create_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700292static void create_device(VkPhysicalDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700293static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType);
Jon Ashburn5e026df2016-06-15 08:19:07 -0600294static void create_display_khr(VkPhysicalDevice dispatchable_object, VkDisplayKHR vkObj, VkDebugReportObjectTypeEXT objType);
Jon Ashburn665d1d52016-06-28 16:59:36 -0600295static void create_display_mode_khr(VkPhysicalDevice dispatchable_object, VkDisplayModeKHR vkObj, VkDebugReportObjectTypeEXT objType);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600296static bool validate_image(VkQueue dispatchable_object, VkImage object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
297static bool validate_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700298 bool null_allowed);
Jon Ashburn5e026df2016-06-15 08:19:07 -0600299static bool validate_physical_device(VkPhysicalDevice dispatchable_object, VkPhysicalDevice object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600300static bool validate_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700301 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600302static bool validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700303 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600304static bool validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700305 VkDebugReportObjectTypeEXT objType, bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600306static bool validate_command_pool(VkDevice dispatchable_object, VkCommandPool object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700307 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600308static bool validate_buffer(VkQueue dispatchable_object, VkBuffer object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700309 bool null_allowed);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700310static void create_pipeline(VkDevice dispatchable_object, VkPipeline vkObj, VkDebugReportObjectTypeEXT objType);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600311static bool validate_pipeline_cache(VkDevice dispatchable_object, VkPipelineCache object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700312 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600313static bool validate_render_pass(VkDevice dispatchable_object, VkRenderPass object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700314 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600315static bool validate_shader_module(VkDevice dispatchable_object, VkShaderModule object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700316 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600317static bool validate_pipeline_layout(VkDevice dispatchable_object, VkPipelineLayout object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700318 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600319static bool validate_pipeline(VkDevice dispatchable_object, VkPipeline object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700320 bool null_allowed);
Jon Ashburn665d1d52016-06-28 16:59:36 -0600321static bool validate_display_khr(VkPhysicalDevice dispatchable_object, VkDisplayKHR object, VkDebugReportObjectTypeEXT objType,
322 bool null_allowed);
323static bool validate_display_mode_khr(VkInstance dispatchable_object, VkDisplayModeKHR object, VkDebugReportObjectTypeEXT objType,
324 bool null_allowed);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700325static void destroy_command_pool(VkDevice dispatchable_object, VkCommandPool object);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700326static void destroy_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
327static void destroy_descriptor_set(VkDevice dispatchable_object, VkDescriptorSet object);
328static void destroy_device_memory(VkDevice dispatchable_object, VkDeviceMemory object);
329static void destroy_swapchain_khr(VkDevice dispatchable_object, VkSwapchainKHR object);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600330static bool set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700331 ObjectStatusFlags status_flag);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600332static bool reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700333 ObjectStatusFlags status_flag);
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600334static void destroy_queue(VkQueue dispatchable_object, VkQueue object);
335
Mark Lobodzinskif93272b2016-05-02 12:08:24 -0600336extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkPhysicalDeviceMap;
337extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkDeviceMap;
338extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkImageMap;
339extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkQueueMap;
340extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkDescriptorSetMap;
341extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkBufferMap;
342extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkFenceMap;
343extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSemaphoreMap;
344extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandPoolMap;
345extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandBufferMap;
346extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSwapchainKHRMap;
347extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSurfaceKHRMap;
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600348extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkQueueMap;
Tony Barboura05dbaa2015-07-09 17:31:46 -0600349
// Convert an object type enum to an index into the numObjs count array.
// The mapping is currently the identity, kept as a function so the scheme
// can change in one place.
static uint32_t objTypeToIndex(uint32_t objType) { return objType; }
355
356// Add new queue to head of global queue list
357static void addQueueInfo(uint32_t queueNodeIndex, VkQueue queue) {
358 auto queueItem = queue_info_map.find(queue);
359 if (queueItem == queue_info_map.end()) {
360 OT_QUEUE_INFO *p_queue_info = new OT_QUEUE_INFO;
361 if (p_queue_info != NULL) {
362 memset(p_queue_info, 0, sizeof(OT_QUEUE_INFO));
363 p_queue_info->queue = queue;
364 p_queue_info->queueNodeIndex = queueNodeIndex;
365 queue_info_map[queue] = p_queue_info;
366 } else {
367 log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
368 reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
369 "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
370 }
371 }
372}
373
// Destroy memRef lists and free all memory
// Walks every tracked queue: frees its chain of OT_MEM_INFO nodes, frees the
// OT_QUEUE_INFO records, then removes the queues themselves from VkQueueMap,
// updating the global object counters and logging each destruction.
static void destroyQueueMemRefLists() {
    for (auto queue_item : queue_info_map) {
        // Free the mem-ref list by walking pNextMI links.
        OT_MEM_INFO *p_mem_info = queue_item.second->pMemRefList;
        while (p_mem_info != NULL) {
            OT_MEM_INFO *p_del_mem_info = p_mem_info;
            p_mem_info = p_mem_info->pNextMI;
            delete p_del_mem_info;
        }
        delete queue_item.second;
    }
    queue_info_map.clear();

    // Destroy the items in the queue map
    auto queue = VkQueueMap.begin();
    while (queue != VkQueueMap.end()) {
        uint32_t obj_index = objTypeToIndex(queue->second->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[obj_index] > 0);
        numObjs[obj_index]--;
        log_msg(mdd(reinterpret_cast<VkQueue>(queue->second->vkObj)), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, queue->second->objType,
                queue->second->vkObj, __LINE__, OBJTRACK_NONE, "OBJTRACK",
                "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                string_VkDebugReportObjectTypeEXT(queue->second->objType), queue->second->vkObj, numTotalObjs, numObjs[obj_index],
                string_VkDebugReportObjectTypeEXT(queue->second->objType));
        delete queue->second;
        // erase() returns the next valid iterator, keeping the loop safe
        // while removing entries during iteration.
        queue = VkQueueMap.erase(queue);
    }
}
404
405// Check Queue type flags for selected queue operations
406static void validateQueueFlags(VkQueue queue, const char *function) {
407
408 auto queue_item = queue_info_map.find(queue);
409 if (queue_item != queue_info_map.end()) {
410 OT_QUEUE_INFO *pQueueInfo = queue_item->second;
411 if (pQueueInfo != NULL) {
412 if ((queue_family_properties[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) == 0) {
413 log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
414 reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
415 "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
416 }
417 }
418 }
419}
420
// Record a newly enumerated physical device in VkPhysicalDeviceMap (no-op if
// the handle is already tracked) and bump the global object counters.
static void create_physical_device(VkInstance instance, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType) {
    log_msg(mdd(instance), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
            OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    uint64_t physical_device_handle = reinterpret_cast<uint64_t>(vkObj);
    auto pd_item = VkPhysicalDeviceMap.find(physical_device_handle);
    if (pd_item == VkPhysicalDeviceMap.end()) {
        OBJTRACK_NODE *p_new_obj_node = new OBJTRACK_NODE;
        p_new_obj_node->objType = objType;
        p_new_obj_node->belongsTo = reinterpret_cast<uint64_t>(instance);
        p_new_obj_node->status = OBJSTATUS_NONE;
        p_new_obj_node->vkObj = physical_device_handle;
        // NOTE(review): parentObj is left uninitialized here -- confirm no
        // reader depends on it for physical-device nodes.
        VkPhysicalDeviceMap[physical_device_handle] = p_new_obj_node;
        uint32_t objIndex = objTypeToIndex(objType);
        numObjs[objIndex]++;
        numTotalObjs++;
    }
}
440
// Record a newly created VkSurfaceKHR in VkSurfaceKHRMap and bump the global
// object counters. Unlike create_physical_device there is no duplicate-handle
// check; C-style casts are used because non-dispatchable handles may already
// be integers on 32-bit builds.
static void create_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR vkObj, VkDebugReportObjectTypeEXT objType) {
    // TODO: Add tracking of surface objects
    log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
            "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));

    OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->objType = objType;
    pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->vkObj = (uint64_t)(vkObj);
    // NOTE(review): parentObj is left uninitialized here -- confirm no reader
    // depends on it for surface nodes.
    VkSurfaceKHRMap[(uint64_t)vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}
457
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700458static void destroy_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR object) {
Mark Young93ecb1d2016-01-13 13:47:16 -0700459 uint64_t object_handle = (uint64_t)(object);
Tobin Ehlis86684f92016-01-05 10:33:58 -0700460 if (VkSurfaceKHRMap.find(object_handle) != VkSurfaceKHRMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700461 OBJTRACK_NODE *pNode = VkSurfaceKHRMap[(uint64_t)object];
Tobin Ehlis86684f92016-01-05 10:33:58 -0700462 uint32_t objIndex = objTypeToIndex(pNode->objType);
463 assert(numTotalObjs > 0);
464 numTotalObjs--;
465 assert(numObjs[objIndex] > 0);
466 numObjs[objIndex]--;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700467 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__,
468 OBJTRACK_NONE, "OBJTRACK",
Mark Muelleraab36502016-05-03 13:17:29 -0600469 "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (0x%" PRIx64 " total objs remain & 0x%" PRIx64 " %s objs).",
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700470 string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(object), numTotalObjs, numObjs[objIndex],
471 string_VkDebugReportObjectTypeEXT(pNode->objType));
Tobin Ehlis86684f92016-01-05 10:33:58 -0700472 delete pNode;
473 VkSurfaceKHRMap.erase(object_handle);
474 } else {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700475 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__,
476 OBJTRACK_NONE, "OBJTRACK",
477 "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
Tobin Ehlis86684f92016-01-05 10:33:58 -0700478 }
Mark Lobodzinskib49b6e52015-11-26 10:59:58 -0700479}
480
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700481static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj,
482 VkDebugReportObjectTypeEXT objType, VkCommandBufferLevel level) {
483 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE,
484 "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
485 string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
Tony Barboura05dbaa2015-07-09 17:31:46 -0600486
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700487 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
488 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700489 pNewObjNode->belongsTo = (uint64_t)device;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700490 pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
491 pNewObjNode->parentObj = (uint64_t)commandPool;
Mark Lobodzinski2fba0322016-01-23 18:31:23 -0700492 if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
493 pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
494 } else {
495 pNewObjNode->status = OBJSTATUS_NONE;
496 }
Michael Lentine13803dc2015-11-04 14:35:12 -0800497 VkCommandBufferMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
Tony Barboura05dbaa2015-07-09 17:31:46 -0600498 uint32_t objIndex = objTypeToIndex(objType);
499 numObjs[objIndex]++;
500 numTotalObjs++;
501}
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700502
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600503static bool validate_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer) {
504 bool skipCall = false;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700505 uint64_t object_handle = reinterpret_cast<uint64_t>(commandBuffer);
506 if (VkCommandBufferMap.find(object_handle) != VkCommandBufferMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700507 OBJTRACK_NODE *pNode = VkCommandBufferMap[(uint64_t)commandBuffer];
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700508
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700509 if (pNode->parentObj != (uint64_t)(commandPool)) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600510 skipCall |= log_msg(
511 mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_COMMAND_POOL_MISMATCH,
512 "OBJTRACK", "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
513 " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600514 reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, reinterpret_cast<uint64_t &>(commandPool));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700515 }
516 } else {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600517 skipCall |= log_msg(
518 mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
519 "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700520 }
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600521 return skipCall;
522}
523
524static bool free_command_buffer(VkDevice device, VkCommandBuffer commandBuffer) {
525 bool skipCall = false;
526 auto cbItem = VkCommandBufferMap.find(reinterpret_cast<uint64_t>(commandBuffer));
527 if (cbItem != VkCommandBufferMap.end()) {
528 OBJTRACK_NODE *pNode = cbItem->second;
529 uint32_t objIndex = objTypeToIndex(pNode->objType);
530 assert(numTotalObjs > 0);
531 numTotalObjs--;
532 assert(numObjs[objIndex] > 0);
533 numObjs[objIndex]--;
534 skipCall |= log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType,
535 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, OBJTRACK_NONE, "OBJTRACK",
536 "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
537 string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer),
538 numTotalObjs, numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
539 delete pNode;
540 VkCommandBufferMap.erase(cbItem);
541 }
542 return skipCall;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700543}
544
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700545static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj,
546 VkDebugReportObjectTypeEXT objType) {
Mark Lobodzinski510e20d2016-02-11 09:26:16 -0700547 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700548 "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
549 (uint64_t)(vkObj));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700550
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700551 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
552 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700553 pNewObjNode->belongsTo = (uint64_t)device;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700554 pNewObjNode->status = OBJSTATUS_NONE;
555 pNewObjNode->vkObj = (uint64_t)(vkObj);
556 pNewObjNode->parentObj = (uint64_t)descriptorPool;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700557 VkDescriptorSetMap[(uint64_t)vkObj] = pNewObjNode;
558 uint32_t objIndex = objTypeToIndex(objType);
559 numObjs[objIndex]++;
560 numTotalObjs++;
561}
562
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600563static bool validate_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet) {
564 bool skipCall = false;
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600565 uint64_t object_handle = reinterpret_cast<uint64_t &>(descriptorSet);
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600566 auto dsItem = VkDescriptorSetMap.find(object_handle);
567 if (dsItem != VkDescriptorSetMap.end()) {
568 OBJTRACK_NODE *pNode = dsItem->second;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700569
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600570 if (pNode->parentObj != reinterpret_cast<uint64_t &>(descriptorPool)) {
571 skipCall |= log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
572 OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
573 "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
574 " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
575 reinterpret_cast<uint64_t &>(descriptorSet), pNode->parentObj,
576 reinterpret_cast<uint64_t &>(descriptorPool));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700577 }
578 } else {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600579 skipCall |= log_msg(
580 mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
581 "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700582 }
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600583 return skipCall;
584}
585
586static bool free_descriptor_set(VkDevice device, VkDescriptorSet descriptorSet) {
587 bool skipCall = false;
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600588 auto dsItem = VkDescriptorSetMap.find(reinterpret_cast<uint64_t &>(descriptorSet));
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600589 if (dsItem != VkDescriptorSetMap.end()) {
590 OBJTRACK_NODE *pNode = dsItem->second;
591 uint32_t objIndex = objTypeToIndex(pNode->objType);
592 assert(numTotalObjs > 0);
593 numTotalObjs--;
594 assert(numObjs[objIndex] > 0);
595 numObjs[objIndex]--;
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600596 skipCall |= log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType,
597 reinterpret_cast<uint64_t &>(descriptorSet), __LINE__, OBJTRACK_NONE, "OBJTRACK",
598 "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
599 string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t &>(descriptorSet),
600 numTotalObjs, numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600601 delete pNode;
602 VkDescriptorSetMap.erase(dsItem);
603 }
604 return skipCall;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700605}
606
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600607static void create_queue(VkDevice device, VkQueue vkObj, VkDebugReportObjectTypeEXT objType) {
608
609 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700610 OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
611 string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
Tobin Ehlisec598302015-09-15 15:02:17 -0600612
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600613 OBJTRACK_NODE *p_obj_node = NULL;
614 auto queue_item = VkQueueMap.find(reinterpret_cast<uint64_t>(vkObj));
615 if (queue_item == VkQueueMap.end()) {
616 p_obj_node = new OBJTRACK_NODE;
617 VkQueueMap[reinterpret_cast<uint64_t>(vkObj)] = p_obj_node;
618 uint32_t objIndex = objTypeToIndex(objType);
619 numObjs[objIndex]++;
620 numTotalObjs++;
621 } else {
622 p_obj_node = queue_item->second;
623 }
624 p_obj_node->objType = objType;
625 p_obj_node->belongsTo = reinterpret_cast<uint64_t>(device);
626 p_obj_node->status = OBJSTATUS_NONE;
627 p_obj_node->vkObj = reinterpret_cast<uint64_t>(vkObj);
Tobin Ehlisec598302015-09-15 15:02:17 -0600628}
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600629
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700630static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain) {
631 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)vkObj,
632 __LINE__, OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
633 "SwapchainImage", (uint64_t)(vkObj));
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600634
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700635 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
636 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
637 pNewObjNode->objType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
638 pNewObjNode->status = OBJSTATUS_NONE;
639 pNewObjNode->vkObj = (uint64_t)vkObj;
640 pNewObjNode->parentObj = (uint64_t)swapchain;
Mark Young93ecb1d2016-01-13 13:47:16 -0700641 swapchainImageMap[(uint64_t)(vkObj)] = pNewObjNode;
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600642}
643
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700644static void create_device(VkInstance dispatchable_object, VkDevice vkObj, VkDebugReportObjectTypeEXT objType) {
645 log_msg(mid(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
646 "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
647 string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700648
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700649 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700650 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
651 pNewObjNode->objType = objType;
652 pNewObjNode->status = OBJSTATUS_NONE;
653 pNewObjNode->vkObj = (uint64_t)(vkObj);
654 VkDeviceMap[(uint64_t)vkObj] = pNewObjNode;
655 uint32_t objIndex = objTypeToIndex(objType);
656 numObjs[objIndex]++;
657 numTotalObjs++;
658}
659
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600660//
661// Non-auto-generated API functions called by generated code
662//
// Layer intercept for vkCreateInstance. Walks the loader's layer chain to
// call the next vkCreateInstance, then initializes this layer's per-instance
// state (dispatch table, debug reporting, extension registration) and begins
// tracking the new VkInstance. The chain_info manipulation is strictly
// order-dependent: the link must be advanced BEFORE calling down the chain.
VkResult explicit_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                 VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    // Call down the chain; bail out on failure before touching layer state.
    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    my_data->instance = *pInstance;
    initInstanceTable(*pInstance, fpGetInstanceProcAddr, object_tracker_instance_table_map);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(object_tracker_instance_table_map, *pInstance);

    // Look for one or more debug report create info structures, and copy the
    // callback(s) for each one found (for use by vkDestroyInstance)
    layer_copy_tmp_callbacks(pCreateInfo->pNext, &my_data->num_tmp_callbacks, &my_data->tmp_dbg_create_infos,
                             &my_data->tmp_callbacks);

    my_data->report_data = debug_report_create_instance(pInstanceTable, *pInstance, pCreateInfo->enabledExtensionCount,
                                                        pCreateInfo->ppEnabledExtensionNames);

    init_object_tracker(my_data, pAllocator);
    createInstanceRegisterExtensions(pCreateInfo, *pInstance);

    // Begin tracking the instance itself (it is its own "owner").
    create_instance(*pInstance, *pInstance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT);

    return result;
}
702
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700703void explicit_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice gpu, uint32_t *pCount, VkQueueFamilyProperties *pProperties) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700704 get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);
Tony Barbour59a47322015-06-24 16:06:58 -0600705
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600706 std::lock_guard<std::mutex> lock(global_lock);
707 if (pProperties != NULL) {
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600708 for (uint32_t i = 0; i < *pCount; i++) {
709 queue_family_properties.emplace_back(pProperties[i]);
710 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600711 }
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600712}
713
// Layer intercept for vkCreateDevice. Walks the loader's layer chain to call
// the next vkCreateDevice, then sets up this layer's per-device state and
// tracks the new VkDevice under the instance that owns the physical device.
// The chain_info manipulation is strictly order-dependent: the link must be
// advanced BEFORE calling down the chain.
VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                               VkDevice *pDevice) {
    std::lock_guard<std::mutex> lock(global_lock);
    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(my_instance_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    // Call down the chain; bail out on failure before touching layer state.
    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);

    initDeviceTable(*pDevice, fpGetDeviceProcAddr, object_tracker_device_table_map);

    createDeviceRegisterExtensions(pCreateInfo, *pDevice);

    // Track the device under the instance that owns the physical device; if
    // the gpu handle was never tracked, the device is (silently) not tracked.
    if (VkPhysicalDeviceMap.find((uint64_t)gpu) != VkPhysicalDeviceMap.end()) {
        OBJTRACK_NODE *pNewObjNode = VkPhysicalDeviceMap[(uint64_t)gpu];
        create_device((VkInstance)pNewObjNode->belongsTo, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
    }

    return result;
}
750
// Layer intercept for vkEnumeratePhysicalDevices. Validates the instance,
// calls down the chain, then tracks each returned physical device. The lock
// is deliberately released around the down-chain call and re-acquired for
// the bookkeeping.
VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                           VkPhysicalDevice *pPhysicalDevices) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);
    lock.unlock();
    if (skipCall)
        return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)
                          ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    lock.lock();
    if (result == VK_SUCCESS) {
        // A NULL pPhysicalDevices is the count-query form; nothing to track.
        if (pPhysicalDevices) {
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                create_physical_device(instance, pPhysicalDevices[i], VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT);
            }
        }
    }
    lock.unlock();
    return result;
}
772
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700773void explicit_GetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue *pQueue) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600774 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700775 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600776 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600777
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700778 get_dispatch_table(object_tracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600779
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600780 lock.lock();
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600781
Courtney Goeltzenleuchter7415d5a2015-12-09 15:48:16 -0700782 create_queue(device, *pQueue, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600783 addQueueInfo(queueNodeIndex, *pQueue);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600784}
785
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700786VkResult explicit_MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
787 void **ppData) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600788 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600789 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700790 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600791 lock.unlock();
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600792 if (skipCall == VK_TRUE)
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700793 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600794
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700795 VkResult result =
796 get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600797
798 return result;
799}
800
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700801void explicit_UnmapMemory(VkDevice device, VkDeviceMemory mem) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600802 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600803 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700804 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600805 lock.unlock();
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600806 if (skipCall == VK_TRUE)
807 return;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600808
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700809 get_dispatch_table(object_tracker_device_table_map, device)->UnmapMemory(device, mem);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600810}
811
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700812VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600813 std::unique_lock<std::mutex> lock(global_lock);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800814 validateQueueFlags(queue, "QueueBindSparse");
815
816 for (uint32_t i = 0; i < bindInfoCount; i++) {
817 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; j++)
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700818 validate_buffer(queue, pBindInfo[i].pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, false);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800819 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; j++)
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700820 validate_image(queue, pBindInfo[i].pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800821 for (uint32_t j = 0; j < pBindInfo[i].imageBindCount; j++)
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700822 validate_image(queue, pBindInfo[i].pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
Chia-I Wu1ff4c3d2015-10-26 16:55:27 +0800823 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600824 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600825
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700826 VkResult result =
827 get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
Mark Lobodzinski16e8bef2015-07-03 15:58:09 -0600828 return result;
829}
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600830
// Layer intercept for vkAllocateCommandBuffers. Validates the device and the
// source command pool, forwards the call, then tracks each returned buffer.
// NOTE(review): the buffers are tracked without checking `result`; if the
// down-chain call failed the handles may be invalid — confirm intent.
VkResult explicit_AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
                                         VkCommandBuffer *pCommandBuffers) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |= validate_command_pool(device, pAllocateInfo->commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    lock.unlock();

    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);

    // Track each allocated buffer along with its pool and level.
    lock.lock();
    for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
        alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                             pAllocateInfo->level);
    }
    lock.unlock();

    return result;
}
855
// Layer intercept for vkAllocateDescriptorSets. Validates the device, the
// descriptor pool, and every set layout, forwards the call, then (only on
// success) tracks each returned descriptor set.
VkResult explicit_AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                         VkDescriptorSet *pDescriptorSets) {
    bool skipCall = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    skipCall |=
        validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
        skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i],
                                                   VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
    }
    lock.unlock();
    if (skipCall) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    // Unlike command buffers, descriptor sets are only tracked on success.
    if (VK_SUCCESS == result) {
        lock.lock();
        for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
            alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i],
                                 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
        }
        lock.unlock();
    }

    return result;
}
886
// Layer intercept for vkFreeCommandBuffers. Validates the pool, device, and
// each buffer's pool membership; the down-chain free is skipped entirely if
// any buffer failed validation, but the buffers are ALWAYS untracked so the
// layer's maps do not hold stale handles.
void explicit_FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                 const VkCommandBuffer *pCommandBuffers) {
    bool skipCall = false;
    std::unique_lock<std::mutex> lock(global_lock);
    validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
    validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        skipCall |= validate_command_buffer(device, commandPool, pCommandBuffers[i]);
    }

    lock.unlock();
    if (!skipCall) {
        get_dispatch_table(object_tracker_device_table_map, device)
            ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
    }

    // Untrack regardless of whether the driver call was made.
    lock.lock();
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        free_command_buffer(device, pCommandBuffers[i]);
    }
}
908
// Layer intercept for vkDestroySwapchainKHR. Untracks the swapchain and all
// images that were recorded as belonging to it, then forwards the destroy.
void explicit_DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    std::unique_lock<std::mutex> lock(global_lock);
    // A swapchain's images are implicitly deleted when the swapchain is deleted.
    // Remove this swapchain's images from our map of such images.
    std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = swapchainImageMap.begin();
    while (itr != swapchainImageMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        if (pNode->parentObj == reinterpret_cast<uint64_t &>(swapchain)) {
            delete pNode;
            // Post-increment keeps the iterator valid across the erase.
            swapchainImageMap.erase(itr++);
        } else {
            ++itr;
        }
    }
    destroy_swapchain_khr(device, swapchain);
    lock.unlock();

    get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
}
928
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700929void explicit_FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600930 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700931 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600932 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600933
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700934 get_dispatch_table(object_tracker_device_table_map, device)->FreeMemory(device, mem, pAllocator);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600935
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600936 lock.lock();
Michael Lentine13803dc2015-11-04 14:35:12 -0800937 destroy_device_memory(device, mem);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600938}
Tony Barboura05dbaa2015-07-09 17:31:46 -0600939
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700940VkResult explicit_FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
941 const VkDescriptorSet *pDescriptorSets) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600942 bool skipCall = false;
943 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600944 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600945 skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
946 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
947 for (uint32_t i = 0; i < count; i++) {
948 skipCall |= validate_descriptor_set(device, descriptorPool, pDescriptorSets[i]);
949 }
950
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600951 lock.unlock();
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600952 if (!skipCall) {
953 result = get_dispatch_table(object_tracker_device_table_map, device)
954 ->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
955 }
Tony Barbour770f80d2015-07-20 10:52:13 -0600956
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600957 lock.lock();
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700958 for (uint32_t i = 0; i < count; i++) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600959 free_descriptor_set(device, pDescriptorSets[i]);
Tony Barbour770f80d2015-07-20 10:52:13 -0600960 }
Tony Barbour770f80d2015-07-20 10:52:13 -0600961 return result;
962}
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600963
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700964void explicit_DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600965 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600966 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700967 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
968 skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600969 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700970 if (skipCall) {
971 return;
972 }
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700973 // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700974 // Remove this pool's descriptor sets from our descriptorSet map.
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600975 lock.lock();
Mark Lobodzinskif93272b2016-05-02 12:08:24 -0600976 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkDescriptorSetMap.begin();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700977 while (itr != VkDescriptorSetMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700978 OBJTRACK_NODE *pNode = (*itr).second;
Mark Lobodzinskib29731a2015-11-18 11:01:02 -0700979 auto del_itr = itr++;
Mark Young93ecb1d2016-01-13 13:47:16 -0700980 if (pNode->parentObj == (uint64_t)(descriptorPool)) {
981 destroy_descriptor_set(device, (VkDescriptorSet)((*del_itr).first));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700982 }
983 }
984 destroy_descriptor_pool(device, descriptorPool);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600985 lock.unlock();
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700986 get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700987}
988
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700989void explicit_DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600990 bool skipCall = false;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600991 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700992 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
993 skipCall |= validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600994 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700995 if (skipCall) {
996 return;
997 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600998 lock.lock();
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700999 // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -07001000 // Remove this pool's cmdBuffers from our cmd buffer map.
Mark Lobodzinskif93272b2016-05-02 12:08:24 -06001001 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkCommandBufferMap.begin();
1002 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator del_itr;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -07001003 while (itr != VkCommandBufferMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001004 OBJTRACK_NODE *pNode = (*itr).second;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -07001005 del_itr = itr++;
Mark Young93ecb1d2016-01-13 13:47:16 -07001006 if (pNode->parentObj == (uint64_t)(commandPool)) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -06001007 skipCall |= validate_command_buffer(device, commandPool, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
1008 free_command_buffer(device, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -07001009 }
1010 }
1011 destroy_command_pool(device, commandPool);
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001012 lock.unlock();
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001013 get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -07001014}
1015
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001016VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -06001017 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001018 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001019 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001020 lock.unlock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -06001021 if (skipCall)
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -07001022 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -06001023
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001024 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
1025 ->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -06001026
1027 if (pSwapchainImages != NULL) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001028 lock.lock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -06001029 for (uint32_t i = 0; i < *pCount; i++) {
1030 create_swapchain_image_obj(device, pSwapchainImages[i], swapchain);
1031 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001032 lock.unlock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -06001033 }
1034 return result;
1035}
1036
Jon Ashburnbd846452016-06-30 10:21:55 -06001037#ifndef __ANDROID__
Jon Ashburn665d1d52016-06-28 16:59:36 -06001038VkResult explicit_GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t* pPropertyCount, VkDisplayPropertiesKHR* pProperties)
Jon Ashburn5e026df2016-06-15 08:19:07 -06001039{
1040 bool skipCall = false;
1041 {
1042 std::lock_guard<std::mutex> lock(global_lock);
Jon Ashburn665d1d52016-06-28 16:59:36 -06001043 if (physicalDevice) {
1044 skipCall |= validate_physical_device(physicalDevice, physicalDevice, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, false);
1045 }
Jon Ashburn5e026df2016-06-15 08:19:07 -06001046 }
1047 if (skipCall)
1048 return VK_ERROR_VALIDATION_FAILED_EXT;
Jon Ashburn665d1d52016-06-28 16:59:36 -06001049 VkResult result = get_dispatch_table(object_tracker_instance_table_map, physicalDevice)->GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties);
1050 if (VK_SUCCESS == result && pProperties) {
1051 std::lock_guard<std::mutex> lock(global_lock);
1052 for (uint32_t idx0=0; idx0<*pPropertyCount; ++idx0) {
1053 if (pProperties[idx0].display) {
1054 create_display_khr(physicalDevice, pProperties[idx0].display, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT);
1055 }
1056 }
1057 }
1058 return result;
1059}
1060
1061VkResult explicit_GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display, uint32_t* pPropertyCount, VkDisplayModePropertiesKHR* pProperties)
1062{
1063 bool skipCall = false;
Jon Ashburn5e026df2016-06-15 08:19:07 -06001064 {
1065 std::lock_guard<std::mutex> lock(global_lock);
Jon Ashburn665d1d52016-06-28 16:59:36 -06001066 skipCall |= validate_display_khr(physicalDevice, display, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, false);
1067 if (physicalDevice) {
1068 skipCall |= validate_physical_device(physicalDevice, physicalDevice, VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT, false);
1069 }
1070 }
1071 if (skipCall)
1072 return VK_ERROR_VALIDATION_FAILED_EXT;
1073 VkResult result = get_dispatch_table(object_tracker_instance_table_map, physicalDevice)->GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties);
1074 if (VK_SUCCESS == result && pProperties) {
1075 std::lock_guard<std::mutex> lock(global_lock);
1076 for (uint32_t idx0=0; idx0<*pPropertyCount; ++idx0) {
1077 if (pProperties[idx0].displayMode) {
1078 create_display_mode_khr(physicalDevice, pProperties[idx0].displayMode, VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT);
Jon Ashburn5e026df2016-06-15 08:19:07 -06001079 }
1080 }
1081 }
1082 return result;
1083}
Jon Ashburnbd846452016-06-30 10:21:55 -06001084#endif
Jon Ashburn5e026df2016-06-15 08:19:07 -06001085
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001086// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001087VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
1088 const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
1089 VkPipeline *pPipelines) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -06001090 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001091 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001092 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001093 if (pCreateInfos) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001094 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001095 if (pCreateInfos[idx0].basePipelineHandle) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001096 skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
1097 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001098 }
1099 if (pCreateInfos[idx0].layout) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001100 skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
1101 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001102 }
1103 if (pCreateInfos[idx0].pStages) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001104 for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001105 if (pCreateInfos[idx0].pStages[idx1].module) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001106 skipCall |= validate_shader_module(device, pCreateInfos[idx0].pStages[idx1].module,
1107 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001108 }
1109 }
1110 }
1111 if (pCreateInfos[idx0].renderPass) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001112 skipCall |=
1113 validate_render_pass(device, pCreateInfos[idx0].renderPass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001114 }
1115 }
1116 }
1117 if (pipelineCache) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001118 skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001119 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001120 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001121 if (skipCall)
1122 return VK_ERROR_VALIDATION_FAILED_EXT;
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001123 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
1124 ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001125 lock.lock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001126 if (result == VK_SUCCESS) {
1127 for (uint32_t idx2 = 0; idx2 < createInfoCount; ++idx2) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001128 create_pipeline(device, pPipelines[idx2], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001129 }
1130 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001131 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001132 return result;
1133}
1134
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001135// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001136VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
1137 const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
1138 VkPipeline *pPipelines) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -06001139 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001140 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001141 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001142 if (pCreateInfos) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001143 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001144 if (pCreateInfos[idx0].basePipelineHandle) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001145 skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
1146 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001147 }
1148 if (pCreateInfos[idx0].layout) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001149 skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
1150 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001151 }
1152 if (pCreateInfos[idx0].stage.module) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001153 skipCall |= validate_shader_module(device, pCreateInfos[idx0].stage.module,
1154 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001155 }
1156 }
1157 }
1158 if (pipelineCache) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001159 skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001160 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001161 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001162 if (skipCall)
1163 return VK_ERROR_VALIDATION_FAILED_EXT;
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001164 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
1165 ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001166 lock.lock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001167 if (result == VK_SUCCESS) {
1168 for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001169 create_pipeline(device, pPipelines[idx1], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001170 }
1171 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001172 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001173 return result;
1174}
Chia-I Wucdb70962016-05-13 14:07:36 +08001175
1176} // namespace object_tracker