blob: c90b09913dedec80130b15902a2e81be5efb89db [file] [log] [blame]
Mark Lobodzinski6eda00a2016-02-02 15:55:36 -07001/* Copyright (c) 2015-2016 The Khronos Group Inc.
2 * Copyright (c) 2015-2016 Valve Corporation
3 * Copyright (c) 2015-2016 LunarG, Inc.
4 * Copyright (C) 2015-2016 Google Inc.
Tobin Ehlis42586532014-11-14 13:01:02 -07005 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -06006 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
Tobin Ehlis42586532014-11-14 13:01:02 -07009 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -060010 * http://www.apache.org/licenses/LICENSE-2.0
Tobin Ehlis42586532014-11-14 13:01:02 -070011 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -060012 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
Courtney Goeltzenleuchter05559522015-10-30 11:14:30 -060017 *
18 * Author: Jon Ashburn <jon@lunarg.com>
19 * Author: Mark Lobodzinski <mark@lunarg.com>
20 * Author: Tobin Ehlis <tobin@lunarg.com>
Tobin Ehlis42586532014-11-14 13:01:02 -070021 */
22
Jeremy Hayes2f065b12016-04-13 10:54:17 -060023#include <mutex>
24
David Pinedo9316d3b2015-11-06 12:54:48 -070025#include "vulkan/vk_layer.h"
Courtney Goeltzenleuchterfce8cd22015-07-05 22:13:43 -060026#include "vk_layer_extension_utils.h"
Courtney Goeltzenleuchterf579fa62015-06-10 17:39:03 -060027#include "vk_enum_string_helper.h"
Courtney Goeltzenleuchter00150eb2016-01-08 12:18:43 -070028#include "vk_layer_table.h"
Mark Lobodzinski1079e1b2016-03-15 14:21:59 -060029#include "vk_layer_utils.h"
Mark Lobodzinskifae78852015-06-23 11:35:12 -060030
// Object Tracker ERROR codes -- passed as the msgCode of log_msg() so that
// tests/users can filter or identify the specific validation failure.
typedef enum _OBJECT_TRACK_ERROR {
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_OBJECT_LEAK,              // OBJECT was not correctly freed/destroyed
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
    OBJTRACK_DESCRIPTOR_POOL_MISMATCH, // Descriptor Pools specified incorrectly
    OBJTRACK_COMMAND_POOL_MISMATCH,    // Command Pools specified incorrectly
} OBJECT_TRACK_ERROR;
41
// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits {
    OBJSTATUS_NONE = 0x00000000,                     // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED = 0x00000001,       // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND = 0x00000002,           // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND = 0x00000004,             // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND = 0x00000008,        // Color-blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010,      // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED = 0x00000020,           // Memory object is currently mapped
    OBJSTATUS_COMMAND_BUFFER_SECONDARY = 0x00000040, // Command Buffer is of type SECONDARY
} ObjectStatusFlagBits;
Chia-I Wuf8693382015-04-16 22:02:10 +080054
// Per-object bookkeeping record; one node is allocated per tracked Vulkan
// object and stored in the per-type Vk*Map unordered_maps keyed by handle.
typedef struct _OBJTRACK_NODE {
    uint64_t vkObj;                     // Object handle
    VkDebugReportObjectTypeEXT objType; // Object type identifier
    ObjectStatusFlags status;           // Object state (ObjectStatusFlagBits)
    uint64_t parentObj;                 // Parent object (e.g. pool a command buffer was allocated from)
    uint64_t belongsTo;                 // Object Scope -- owning device/instance
} OBJTRACK_NODE;
Mark Lobodzinskiaae93e52015-02-09 10:20:53 -060062
Tobin Ehlis42586532014-11-14 13:01:02 -070063// prototype for extension functions
Mark Lobodzinskifae78852015-06-23 11:35:12 -060064uint64_t objTrackGetObjectCount(VkDevice device);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -070065uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkDebugReportObjectTypeEXT type);
Mark Lobodzinskiaae93e52015-02-09 10:20:53 -060066
Tobin Ehlisca915872014-11-18 11:28:33 -070067// Func ptr typedefs
Mark Lobodzinskifae78852015-06-23 11:35:12 -060068typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -070069typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkDebugReportObjectTypeEXT);
Mark Lobodzinskifae78852015-06-23 11:35:12 -060070
// Per-dispatch-key layer state (one entry in layer_data_map per instance/device).
struct layer_data {
    debug_report_data *report_data; // Debug-report state used by log_msg()
    // TODO: put instance data here
    std::vector<VkDebugReportCallbackEXT> logging_callback; // Callbacks registered via layer settings
    bool wsi_enabled;                 // VK_KHR_swapchain enabled on this device
    bool objtrack_extensions_enabled; // Layer-private "OBJTRACK_EXTENSIONS" pseudo-extension enabled
    // The following are for keeping track of the temporary callbacks that can
    // be used in vkCreateInstance and vkDestroyInstance:
    uint32_t num_tmp_callbacks;
    VkDebugReportCallbackCreateInfoEXT *tmp_dbg_create_infos;
    VkDebugReportCallbackEXT *tmp_callbacks;

    layer_data()
        : report_data(nullptr), wsi_enabled(false), objtrack_extensions_enabled(false), num_tmp_callbacks(0),
          tmp_dbg_create_infos(nullptr), tmp_callbacks(nullptr){};
};
Mark Lobodzinskifae78852015-06-23 11:35:12 -060087
// Per-instance extension-enable flags, keyed by dispatch table pointer.
struct instExts {
    bool wsi_enabled; // VK_KHR_surface enabled on this instance
};

static std::unordered_map<void *, struct instExts> instanceExtMap;
// Layer state keyed by dispatch key; see get_my_data_ptr().
static std::unordered_map<void *, layer_data *> layer_data_map;
static device_table_map object_tracker_device_table_map;
static instance_table_map object_tracker_instance_table_map;
Mark Lobodzinskifae78852015-06-23 11:35:12 -060096
// We need additionally validate image usage using a separate map
// of swapchain-created images
static std::unordered_map<uint64_t, OBJTRACK_NODE *> swapchainImageMap;

// Monotonic counter used only to number CREATE log messages.
static long long unsigned int object_track_index = 0;
// Guards the object maps and counters below.
static std::mutex global_lock;

// One counter slot per VkDebugReportObjectTypeEXT value.
#define NUM_OBJECT_TYPES (VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT + 1)

static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};
static uint64_t numTotalObjs = 0;
// Queue family properties captured at device creation; indexed by queue family.
std::vector<VkQueueFamilyProperties> queue_family_properties;

// Explicit instantiation so other translation units can share the accessor.
template layer_data *get_my_data_ptr<layer_data>(void *data_key, std::unordered_map<void *, layer_data *> &data_map);
Mark Lobodzinski2eeb3c62015-09-01 08:52:55 -0600111
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600112//
113// Internal Object Tracker Functions
114//
115
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700116static void createDeviceRegisterExtensions(const VkDeviceCreateInfo *pCreateInfo, VkDevice device) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700117 layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
118 VkLayerDispatchTable *pDisp = get_dispatch_table(object_tracker_device_table_map, device);
Jon Ashburn8acd2332015-09-16 18:08:32 -0600119 PFN_vkGetDeviceProcAddr gpa = pDisp->GetDeviceProcAddr;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700120 pDisp->CreateSwapchainKHR = (PFN_vkCreateSwapchainKHR)gpa(device, "vkCreateSwapchainKHR");
121 pDisp->DestroySwapchainKHR = (PFN_vkDestroySwapchainKHR)gpa(device, "vkDestroySwapchainKHR");
122 pDisp->GetSwapchainImagesKHR = (PFN_vkGetSwapchainImagesKHR)gpa(device, "vkGetSwapchainImagesKHR");
123 pDisp->AcquireNextImageKHR = (PFN_vkAcquireNextImageKHR)gpa(device, "vkAcquireNextImageKHR");
124 pDisp->QueuePresentKHR = (PFN_vkQueuePresentKHR)gpa(device, "vkQueuePresentKHR");
Ian Elliott1064fe32015-07-06 14:31:32 -0600125 my_device_data->wsi_enabled = false;
Jon Ashburnf19916e2016-01-11 13:12:43 -0700126 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700127 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SWAPCHAIN_EXTENSION_NAME) == 0)
Ian Elliott1064fe32015-07-06 14:31:32 -0600128 my_device_data->wsi_enabled = true;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600129
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700130 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], "OBJTRACK_EXTENSIONS") == 0)
Courtney Goeltzenleuchterfce8cd22015-07-05 22:13:43 -0600131 my_device_data->objtrack_extensions_enabled = true;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600132 }
133}
134
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700135static void createInstanceRegisterExtensions(const VkInstanceCreateInfo *pCreateInfo, VkInstance instance) {
Jon Ashburn3dc39382015-09-17 10:00:32 -0600136 uint32_t i;
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700137 VkLayerInstanceDispatchTable *pDisp = get_dispatch_table(object_tracker_instance_table_map, instance);
Jon Ashburn3dc39382015-09-17 10:00:32 -0600138 PFN_vkGetInstanceProcAddr gpa = pDisp->GetInstanceProcAddr;
Michael Lentine56512bb2016-03-02 17:28:55 -0600139
140 pDisp->DestroySurfaceKHR = (PFN_vkDestroySurfaceKHR)gpa(instance, "vkDestroySurfaceKHR");
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700141 pDisp->GetPhysicalDeviceSurfaceSupportKHR =
142 (PFN_vkGetPhysicalDeviceSurfaceSupportKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceSupportKHR");
143 pDisp->GetPhysicalDeviceSurfaceCapabilitiesKHR =
144 (PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceCapabilitiesKHR");
145 pDisp->GetPhysicalDeviceSurfaceFormatsKHR =
146 (PFN_vkGetPhysicalDeviceSurfaceFormatsKHR)gpa(instance, "vkGetPhysicalDeviceSurfaceFormatsKHR");
147 pDisp->GetPhysicalDeviceSurfacePresentModesKHR =
148 (PFN_vkGetPhysicalDeviceSurfacePresentModesKHR)gpa(instance, "vkGetPhysicalDeviceSurfacePresentModesKHR");
Mark Lobodzinskie86e1382015-11-24 15:50:44 -0700149
150#if VK_USE_PLATFORM_WIN32_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700151 pDisp->CreateWin32SurfaceKHR = (PFN_vkCreateWin32SurfaceKHR)gpa(instance, "vkCreateWin32SurfaceKHR");
152 pDisp->GetPhysicalDeviceWin32PresentationSupportKHR =
153 (PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWin32PresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700154#endif // VK_USE_PLATFORM_WIN32_KHR
155#ifdef VK_USE_PLATFORM_XCB_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700156 pDisp->CreateXcbSurfaceKHR = (PFN_vkCreateXcbSurfaceKHR)gpa(instance, "vkCreateXcbSurfaceKHR");
157 pDisp->GetPhysicalDeviceXcbPresentationSupportKHR =
158 (PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXcbPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700159#endif // VK_USE_PLATFORM_XCB_KHR
160#ifdef VK_USE_PLATFORM_XLIB_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700161 pDisp->CreateXlibSurfaceKHR = (PFN_vkCreateXlibSurfaceKHR)gpa(instance, "vkCreateXlibSurfaceKHR");
162 pDisp->GetPhysicalDeviceXlibPresentationSupportKHR =
163 (PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceXlibPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700164#endif // VK_USE_PLATFORM_XLIB_KHR
165#ifdef VK_USE_PLATFORM_MIR_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700166 pDisp->CreateMirSurfaceKHR = (PFN_vkCreateMirSurfaceKHR)gpa(instance, "vkCreateMirSurfaceKHR");
167 pDisp->GetPhysicalDeviceMirPresentationSupportKHR =
168 (PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceMirPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700169#endif // VK_USE_PLATFORM_MIR_KHR
170#ifdef VK_USE_PLATFORM_WAYLAND_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700171 pDisp->CreateWaylandSurfaceKHR = (PFN_vkCreateWaylandSurfaceKHR)gpa(instance, "vkCreateWaylandSurfaceKHR");
172 pDisp->GetPhysicalDeviceWaylandPresentationSupportKHR =
173 (PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR)gpa(instance, "vkGetPhysicalDeviceWaylandPresentationSupportKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700174#endif // VK_USE_PLATFORM_WAYLAND_KHR
175#ifdef VK_USE_PLATFORM_ANDROID_KHR
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700176 pDisp->CreateAndroidSurfaceKHR = (PFN_vkCreateAndroidSurfaceKHR)gpa(instance, "vkCreateAndroidSurfaceKHR");
Mark Lobodzinskia8a5f852015-12-10 16:25:21 -0700177#endif // VK_USE_PLATFORM_ANDROID_KHR
Mark Lobodzinskie86e1382015-11-24 15:50:44 -0700178
Jon Ashburn3dc39382015-09-17 10:00:32 -0600179 instanceExtMap[pDisp].wsi_enabled = false;
Jon Ashburnf19916e2016-01-11 13:12:43 -0700180 for (i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700181 if (strcmp(pCreateInfo->ppEnabledExtensionNames[i], VK_KHR_SURFACE_EXTENSION_NAME) == 0)
Jon Ashburn3dc39382015-09-17 10:00:32 -0600182 instanceExtMap[pDisp].wsi_enabled = true;
Jon Ashburn3dc39382015-09-17 10:00:32 -0600183 }
184}
185
// Indicate device or instance dispatch table type
typedef enum _DispTableType {
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;
191
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700192debug_report_data *mdd(const void *object) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600193 dispatch_key key = get_dispatch_key(object);
194 layer_data *my_data = get_my_data_ptr(key, layer_data_map);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600195 return my_data->report_data;
196}
197
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700198debug_report_data *mid(VkInstance object) {
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600199 dispatch_key key = get_dispatch_key(object);
200 layer_data *my_data = get_my_data_ptr(key, layer_data_map);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600201 return my_data->report_data;
202}
203
// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory mem;
    struct _OT_MEM_INFO *pNextMI;
    struct _OT_MEM_INFO *pPrevMI;

} OT_MEM_INFO;

// Track Queue information
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO *pMemRefList; // Head of this queue's mem-ref list
    uint32_t queueNodeIndex;  // Queue family index this queue was created from
    VkQueue queue;
    uint32_t refCount;
} OT_QUEUE_INFO;

// Global map of structures, one per queue
std::unordered_map<VkQueue, OT_QUEUE_INFO *> queue_info_map;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600222
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600223#include "vk_dispatch_table_helper.h"
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600224
// Initialize this layer's debug reporting: registers any debug-report
// callbacks requested via the "lunarg_object_tracker" layer settings.
static void init_object_tracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {

    layer_debug_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");
}
229
Tony Barboura05dbaa2015-07-09 17:31:46 -0600230//
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700231// Forward declarations
Tony Barboura05dbaa2015-07-09 17:31:46 -0600232//
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600233
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700234static void create_physical_device(VkInstance dispatchable_object, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType);
235static void create_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType);
236static void create_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700237static void create_device(VkPhysicalDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700238static void create_queue(VkDevice dispatchable_object, VkQueue vkObj, VkDebugReportObjectTypeEXT objType);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600239static bool validate_image(VkQueue dispatchable_object, VkImage object, VkDebugReportObjectTypeEXT objType, bool null_allowed);
240static bool validate_instance(VkInstance dispatchable_object, VkInstance object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700241 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600242static bool validate_device(VkDevice dispatchable_object, VkDevice object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700243 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600244static bool validate_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700245 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600246static bool validate_descriptor_set_layout(VkDevice dispatchable_object, VkDescriptorSetLayout object,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700247 VkDebugReportObjectTypeEXT objType, bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600248static bool validate_command_pool(VkDevice dispatchable_object, VkCommandPool object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700249 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600250static bool validate_buffer(VkQueue dispatchable_object, VkBuffer object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700251 bool null_allowed);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700252static void create_pipeline(VkDevice dispatchable_object, VkPipeline vkObj, VkDebugReportObjectTypeEXT objType);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600253static bool validate_pipeline_cache(VkDevice dispatchable_object, VkPipelineCache object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700254 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600255static bool validate_render_pass(VkDevice dispatchable_object, VkRenderPass object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700256 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600257static bool validate_shader_module(VkDevice dispatchable_object, VkShaderModule object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700258 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600259static bool validate_pipeline_layout(VkDevice dispatchable_object, VkPipelineLayout object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700260 bool null_allowed);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600261static bool validate_pipeline(VkDevice dispatchable_object, VkPipeline object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700262 bool null_allowed);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700263static void destroy_command_pool(VkDevice dispatchable_object, VkCommandPool object);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700264static void destroy_descriptor_pool(VkDevice dispatchable_object, VkDescriptorPool object);
265static void destroy_descriptor_set(VkDevice dispatchable_object, VkDescriptorSet object);
266static void destroy_device_memory(VkDevice dispatchable_object, VkDeviceMemory object);
267static void destroy_swapchain_khr(VkDevice dispatchable_object, VkSwapchainKHR object);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600268static bool set_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700269 ObjectStatusFlags status_flag);
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600270static bool reset_device_memory_status(VkDevice dispatchable_object, VkDeviceMemory object, VkDebugReportObjectTypeEXT objType,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700271 ObjectStatusFlags status_flag);
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600272static void destroy_queue(VkQueue dispatchable_object, VkQueue object);
273
Mark Lobodzinskif93272b2016-05-02 12:08:24 -0600274extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkPhysicalDeviceMap;
275extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkDeviceMap;
276extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkImageMap;
277extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkQueueMap;
278extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkDescriptorSetMap;
279extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkBufferMap;
280extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkFenceMap;
281extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSemaphoreMap;
282extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandPoolMap;
283extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkCommandBufferMap;
284extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSwapchainKHRMap;
285extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkSurfaceKHRMap;
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600286extern std::unordered_map<uint64_t, OBJTRACK_NODE *> VkQueueMap;
Tony Barboura05dbaa2015-07-09 17:31:46 -0600287
// Convert an object type enum to an object type array index
static uint32_t objTypeToIndex(uint32_t objType) {
    // The debug-report object-type values are dense, so the mapping is
    // currently the identity; kept as a function in case that changes.
    return objType;
}
293
294// Add new queue to head of global queue list
295static void addQueueInfo(uint32_t queueNodeIndex, VkQueue queue) {
296 auto queueItem = queue_info_map.find(queue);
297 if (queueItem == queue_info_map.end()) {
298 OT_QUEUE_INFO *p_queue_info = new OT_QUEUE_INFO;
299 if (p_queue_info != NULL) {
300 memset(p_queue_info, 0, sizeof(OT_QUEUE_INFO));
301 p_queue_info->queue = queue;
302 p_queue_info->queueNodeIndex = queueNodeIndex;
303 queue_info_map[queue] = p_queue_info;
304 } else {
305 log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
306 reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
307 "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
308 }
309 }
310}
311
// Destroy memRef lists and free all memory
static void destroyQueueMemRefLists() {
    // First free each queue's linked list of memory references, then the
    // OT_QUEUE_INFO record itself.
    for (auto queue_item : queue_info_map) {
        OT_MEM_INFO *p_mem_info = queue_item.second->pMemRefList;
        while (p_mem_info != NULL) {
            OT_MEM_INFO *p_del_mem_info = p_mem_info;
            p_mem_info = p_mem_info->pNextMI;
            delete p_del_mem_info;
        }
        delete queue_item.second;
    }
    queue_info_map.clear();

    // Destroy the items in the queue map
    // (decrement the per-type and total object counters for each queue,
    // log the destruction, and erase the tracking node).
    auto queue = VkQueueMap.begin();
    while (queue != VkQueueMap.end()) {
        uint32_t obj_index = objTypeToIndex(queue->second->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[obj_index] > 0);
        numObjs[obj_index]--;
        log_msg(mdd(reinterpret_cast<VkQueue>(queue->second->vkObj)), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, queue->second->objType,
                queue->second->vkObj, __LINE__, OBJTRACK_NONE, "OBJTRACK",
                "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
                string_VkDebugReportObjectTypeEXT(queue->second->objType), queue->second->vkObj, numTotalObjs, numObjs[obj_index],
                string_VkDebugReportObjectTypeEXT(queue->second->objType));
        delete queue->second;
        // erase() returns the next valid iterator, keeping the loop safe.
        queue = VkQueueMap.erase(queue);
    }
}
342
343// Check Queue type flags for selected queue operations
344static void validateQueueFlags(VkQueue queue, const char *function) {
345
346 auto queue_item = queue_info_map.find(queue);
347 if (queue_item != queue_info_map.end()) {
348 OT_QUEUE_INFO *pQueueInfo = queue_item->second;
349 if (pQueueInfo != NULL) {
350 if ((queue_family_properties[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) == 0) {
351 log_msg(mdd(queue), VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
352 reinterpret_cast<uint64_t>(queue), __LINE__, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
353 "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set", function);
354 }
355 }
356 }
357}
358
359static void create_physical_device(VkInstance instance, VkPhysicalDevice vkObj, VkDebugReportObjectTypeEXT objType) {
360 log_msg(mdd(instance), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700361 OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
362 string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
Tobin Ehlisec598302015-09-15 15:02:17 -0600363
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600364 uint64_t physical_device_handle = reinterpret_cast<uint64_t>(vkObj);
365 auto pd_item = VkPhysicalDeviceMap.find(physical_device_handle);
366 if (pd_item == VkPhysicalDeviceMap.end()) {
367 OBJTRACK_NODE *p_new_obj_node = new OBJTRACK_NODE;
368 p_new_obj_node->objType = objType;
369 p_new_obj_node->belongsTo = reinterpret_cast<uint64_t>(instance);
370 p_new_obj_node->status = OBJSTATUS_NONE;
371 p_new_obj_node->vkObj = physical_device_handle;
372 VkPhysicalDeviceMap[physical_device_handle] = p_new_obj_node;
373 uint32_t objIndex = objTypeToIndex(objType);
374 numObjs[objIndex]++;
375 numTotalObjs++;
376 }
Tobin Ehlisec598302015-09-15 15:02:17 -0600377}
378
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700379static void create_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR vkObj, VkDebugReportObjectTypeEXT objType) {
Mark Lobodzinskib49b6e52015-11-26 10:59:58 -0700380 // TODO: Add tracking of surface objects
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700381 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
382 "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
383 string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));
Tobin Ehlis86684f92016-01-05 10:33:58 -0700384
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700385 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
Tobin Ehlis86684f92016-01-05 10:33:58 -0700386 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700387 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700388 pNewObjNode->status = OBJSTATUS_NONE;
389 pNewObjNode->vkObj = (uint64_t)(vkObj);
Tobin Ehlis86684f92016-01-05 10:33:58 -0700390 VkSurfaceKHRMap[(uint64_t)vkObj] = pNewObjNode;
391 uint32_t objIndex = objTypeToIndex(objType);
392 numObjs[objIndex]++;
393 numTotalObjs++;
Mark Lobodzinskib49b6e52015-11-26 10:59:58 -0700394}
395
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700396static void destroy_surface_khr(VkInstance dispatchable_object, VkSurfaceKHR object) {
Mark Young93ecb1d2016-01-13 13:47:16 -0700397 uint64_t object_handle = (uint64_t)(object);
Tobin Ehlis86684f92016-01-05 10:33:58 -0700398 if (VkSurfaceKHRMap.find(object_handle) != VkSurfaceKHRMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700399 OBJTRACK_NODE *pNode = VkSurfaceKHRMap[(uint64_t)object];
Tobin Ehlis86684f92016-01-05 10:33:58 -0700400 uint32_t objIndex = objTypeToIndex(pNode->objType);
401 assert(numTotalObjs > 0);
402 numTotalObjs--;
403 assert(numObjs[objIndex] > 0);
404 numObjs[objIndex]--;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700405 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType, object_handle, __LINE__,
406 OBJTRACK_NONE, "OBJTRACK",
Mark Muelleraab36502016-05-03 13:17:29 -0600407 "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (0x%" PRIx64 " total objs remain & 0x%" PRIx64 " %s objs).",
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700408 string_VkDebugReportObjectTypeEXT(pNode->objType), (uint64_t)(object), numTotalObjs, numObjs[objIndex],
409 string_VkDebugReportObjectTypeEXT(pNode->objType));
Tobin Ehlis86684f92016-01-05 10:33:58 -0700410 delete pNode;
411 VkSurfaceKHRMap.erase(object_handle);
412 } else {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700413 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__,
414 OBJTRACK_NONE, "OBJTRACK",
415 "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
Tobin Ehlis86684f92016-01-05 10:33:58 -0700416 }
Mark Lobodzinskib49b6e52015-11-26 10:59:58 -0700417}
418
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700419static void alloc_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer vkObj,
420 VkDebugReportObjectTypeEXT objType, VkCommandBufferLevel level) {
421 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__, OBJTRACK_NONE,
422 "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
423 string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));
Tony Barboura05dbaa2015-07-09 17:31:46 -0600424
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700425 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
426 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700427 pNewObjNode->belongsTo = (uint64_t)device;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700428 pNewObjNode->vkObj = reinterpret_cast<uint64_t>(vkObj);
429 pNewObjNode->parentObj = (uint64_t)commandPool;
Mark Lobodzinski2fba0322016-01-23 18:31:23 -0700430 if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
431 pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
432 } else {
433 pNewObjNode->status = OBJSTATUS_NONE;
434 }
Michael Lentine13803dc2015-11-04 14:35:12 -0800435 VkCommandBufferMap[reinterpret_cast<uint64_t>(vkObj)] = pNewObjNode;
Tony Barboura05dbaa2015-07-09 17:31:46 -0600436 uint32_t objIndex = objTypeToIndex(objType);
437 numObjs[objIndex]++;
438 numTotalObjs++;
439}
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700440
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600441static bool validate_command_buffer(VkDevice device, VkCommandPool commandPool, VkCommandBuffer commandBuffer) {
442 bool skipCall = false;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700443 uint64_t object_handle = reinterpret_cast<uint64_t>(commandBuffer);
444 if (VkCommandBufferMap.find(object_handle) != VkCommandBufferMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700445 OBJTRACK_NODE *pNode = VkCommandBufferMap[(uint64_t)commandBuffer];
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700446
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700447 if (pNode->parentObj != (uint64_t)(commandPool)) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600448 skipCall |= log_msg(
449 mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__, OBJTRACK_COMMAND_POOL_MISMATCH,
450 "OBJTRACK", "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
451 " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600452 reinterpret_cast<uint64_t>(commandBuffer), pNode->parentObj, reinterpret_cast<uint64_t &>(commandPool));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700453 }
454 } else {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600455 skipCall |= log_msg(
456 mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
457 "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700458 }
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600459 return skipCall;
460}
461
462static bool free_command_buffer(VkDevice device, VkCommandBuffer commandBuffer) {
463 bool skipCall = false;
464 auto cbItem = VkCommandBufferMap.find(reinterpret_cast<uint64_t>(commandBuffer));
465 if (cbItem != VkCommandBufferMap.end()) {
466 OBJTRACK_NODE *pNode = cbItem->second;
467 uint32_t objIndex = objTypeToIndex(pNode->objType);
468 assert(numTotalObjs > 0);
469 numTotalObjs--;
470 assert(numObjs[objIndex] > 0);
471 numObjs[objIndex]--;
472 skipCall |= log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType,
473 reinterpret_cast<uint64_t>(commandBuffer), __LINE__, OBJTRACK_NONE, "OBJTRACK",
474 "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
475 string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t>(commandBuffer),
476 numTotalObjs, numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
477 delete pNode;
478 VkCommandBufferMap.erase(cbItem);
479 }
480 return skipCall;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700481}
482
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700483static void alloc_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet vkObj,
484 VkDebugReportObjectTypeEXT objType) {
Mark Lobodzinski510e20d2016-02-11 09:26:16 -0700485 log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE, "OBJTRACK",
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700486 "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkDebugReportObjectTypeEXT(objType),
487 (uint64_t)(vkObj));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700488
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700489 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
490 pNewObjNode->objType = objType;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700491 pNewObjNode->belongsTo = (uint64_t)device;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700492 pNewObjNode->status = OBJSTATUS_NONE;
493 pNewObjNode->vkObj = (uint64_t)(vkObj);
494 pNewObjNode->parentObj = (uint64_t)descriptorPool;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700495 VkDescriptorSetMap[(uint64_t)vkObj] = pNewObjNode;
496 uint32_t objIndex = objTypeToIndex(objType);
497 numObjs[objIndex]++;
498 numTotalObjs++;
499}
500
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600501static bool validate_descriptor_set(VkDevice device, VkDescriptorPool descriptorPool, VkDescriptorSet descriptorSet) {
502 bool skipCall = false;
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600503 uint64_t object_handle = reinterpret_cast<uint64_t &>(descriptorSet);
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600504 auto dsItem = VkDescriptorSetMap.find(object_handle);
505 if (dsItem != VkDescriptorSetMap.end()) {
506 OBJTRACK_NODE *pNode = dsItem->second;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700507
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600508 if (pNode->parentObj != reinterpret_cast<uint64_t &>(descriptorPool)) {
509 skipCall |= log_msg(mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, pNode->objType, object_handle, __LINE__,
510 OBJTRACK_DESCRIPTOR_POOL_MISMATCH, "OBJTRACK",
511 "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
512 " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ").",
513 reinterpret_cast<uint64_t &>(descriptorSet), pNode->parentObj,
514 reinterpret_cast<uint64_t &>(descriptorPool));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700515 }
516 } else {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600517 skipCall |= log_msg(
518 mdd(device), VK_DEBUG_REPORT_ERROR_BIT_EXT, (VkDebugReportObjectTypeEXT)0, object_handle, __LINE__, OBJTRACK_NONE,
519 "OBJTRACK", "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?", object_handle);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700520 }
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600521 return skipCall;
522}
523
524static bool free_descriptor_set(VkDevice device, VkDescriptorSet descriptorSet) {
525 bool skipCall = false;
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600526 auto dsItem = VkDescriptorSetMap.find(reinterpret_cast<uint64_t &>(descriptorSet));
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600527 if (dsItem != VkDescriptorSetMap.end()) {
528 OBJTRACK_NODE *pNode = dsItem->second;
529 uint32_t objIndex = objTypeToIndex(pNode->objType);
530 assert(numTotalObjs > 0);
531 numTotalObjs--;
532 assert(numObjs[objIndex] > 0);
533 numObjs[objIndex]--;
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600534 skipCall |= log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, pNode->objType,
535 reinterpret_cast<uint64_t &>(descriptorSet), __LINE__, OBJTRACK_NONE, "OBJTRACK",
536 "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " %s objs).",
537 string_VkDebugReportObjectTypeEXT(pNode->objType), reinterpret_cast<uint64_t &>(descriptorSet),
538 numTotalObjs, numObjs[objIndex], string_VkDebugReportObjectTypeEXT(pNode->objType));
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600539 delete pNode;
540 VkDescriptorSetMap.erase(dsItem);
541 }
542 return skipCall;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700543}
544
// Begin tracking a VkQueue. Queues are retrieved (not created) by the app via
// vkGetDeviceQueue and the same queue may be returned multiple times, so an
// existing tracking node is reused -- and its fields refreshed -- rather than
// allocated and counted again.
static void create_queue(VkDevice device, VkQueue vkObj, VkDebugReportObjectTypeEXT objType) {

    // NOTE(review): this CREATE message is emitted (and object_track_index
    // advanced) even when the queue is already tracked below -- confirm that
    // re-logging on repeated vkGetDeviceQueue calls is intended.
    log_msg(mdd(device), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, reinterpret_cast<uint64_t>(vkObj), __LINE__,
            OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
            string_VkDebugReportObjectTypeEXT(objType), reinterpret_cast<uint64_t>(vkObj));

    OBJTRACK_NODE *p_obj_node = NULL;
    auto queue_item = VkQueueMap.find(reinterpret_cast<uint64_t>(vkObj));
    if (queue_item == VkQueueMap.end()) {
        // First time we see this queue: allocate a node and count it.
        p_obj_node = new OBJTRACK_NODE;
        VkQueueMap[reinterpret_cast<uint64_t>(vkObj)] = p_obj_node;
        uint32_t objIndex = objTypeToIndex(objType);
        numObjs[objIndex]++;
        numTotalObjs++;
    } else {
        // Already tracked: reuse the existing node (no double-counting).
        p_obj_node = queue_item->second;
    }
    // (Re)initialize the node's bookkeeping fields.
    p_obj_node->objType = objType;
    p_obj_node->belongsTo = reinterpret_cast<uint64_t>(device);
    p_obj_node->status = OBJSTATUS_NONE;
    p_obj_node->vkObj = reinterpret_cast<uint64_t>(vkObj);
}
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600567
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700568static void create_swapchain_image_obj(VkDevice dispatchable_object, VkImage vkObj, VkSwapchainKHR swapchain) {
569 log_msg(mdd(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, (uint64_t)vkObj,
570 __LINE__, OBJTRACK_NONE, "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
571 "SwapchainImage", (uint64_t)(vkObj));
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600572
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700573 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
574 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
575 pNewObjNode->objType = VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT;
576 pNewObjNode->status = OBJSTATUS_NONE;
577 pNewObjNode->vkObj = (uint64_t)vkObj;
578 pNewObjNode->parentObj = (uint64_t)swapchain;
Mark Young93ecb1d2016-01-13 13:47:16 -0700579 swapchainImageMap[(uint64_t)(vkObj)] = pNewObjNode;
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600580}
581
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700582static void create_device(VkInstance dispatchable_object, VkDevice vkObj, VkDebugReportObjectTypeEXT objType) {
583 log_msg(mid(dispatchable_object), VK_DEBUG_REPORT_INFORMATION_BIT_EXT, objType, (uint64_t)(vkObj), __LINE__, OBJTRACK_NONE,
584 "OBJTRACK", "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++,
585 string_VkDebugReportObjectTypeEXT(objType), (uint64_t)(vkObj));
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700586
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700587 OBJTRACK_NODE *pNewObjNode = new OBJTRACK_NODE;
Mark Lobodzinskic857fb32016-03-08 15:10:00 -0700588 pNewObjNode->belongsTo = (uint64_t)dispatchable_object;
589 pNewObjNode->objType = objType;
590 pNewObjNode->status = OBJSTATUS_NONE;
591 pNewObjNode->vkObj = (uint64_t)(vkObj);
592 VkDeviceMap[(uint64_t)vkObj] = pNewObjNode;
593 uint32_t objIndex = objTypeToIndex(objType);
594 numObjs[objIndex]++;
595 numTotalObjs++;
596}
597
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600598//
599// Non-auto-generated API functions called by generated code
600//
// Layer hook for vkCreateInstance. Walks the loader's layer-chain info to find
// the next layer's entry points, creates the instance down the chain, then
// initializes this layer's per-instance state (dispatch table, debug-report
// data, extension bookkeeping) and begins tracking the new VkInstance.
// The chain-manipulation order below is dictated by the loader contract; do not
// reorder these steps.
VkResult explicit_CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                 VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    // Resolve the next layer's vkCreateInstance through its GetInstanceProcAddr.
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    // Set up this layer's per-instance data and dispatch table.
    layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
    initInstanceTable(*pInstance, fpGetInstanceProcAddr, object_tracker_instance_table_map);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(object_tracker_instance_table_map, *pInstance);

    // Look for one or more debug report create info structures, and copy the
    // callback(s) for each one found (for use by vkDestroyInstance)
    layer_copy_tmp_callbacks(pCreateInfo->pNext, &my_data->num_tmp_callbacks, &my_data->tmp_dbg_create_infos,
                             &my_data->tmp_callbacks);

    my_data->report_data = debug_report_create_instance(pInstanceTable, *pInstance, pCreateInfo->enabledExtensionCount,
                                                        pCreateInfo->ppEnabledExtensionNames);

    init_object_tracker(my_data, pAllocator);
    createInstanceRegisterExtensions(pCreateInfo, *pInstance);

    // Start tracking the instance itself (it is its own owner).
    create_instance(*pInstance, *pInstance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT);

    return result;
}
639
// Layer hook for vkGetPhysicalDeviceQueueFamilyProperties. Forwards the query
// down the chain, then caches the returned properties in the layer-global
// queue_family_properties vector (used elsewhere for queue-flag validation).
void explicit_GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice gpu, uint32_t *pCount, VkQueueFamilyProperties *pProperties) {
    get_dispatch_table(object_tracker_instance_table_map, gpu)->GetPhysicalDeviceQueueFamilyProperties(gpu, pCount, pProperties);

    std::lock_guard<std::mutex> lock(global_lock);
    // pProperties is NULL on the count-query call of the two-call idiom; only
    // the fill call has data to cache.
    if (pProperties != NULL) {
        // NOTE(review): entries are appended unconditionally, so calling this
        // query more than once presumably duplicates entries in
        // queue_family_properties -- confirm whether that matters to consumers.
        for (uint32_t i = 0; i < *pCount; i++) {
            queue_family_properties.emplace_back(pProperties[i]);
        }
    }
}
650
// Layer hook for vkCreateDevice. Resolves the next layer's entry points from
// the loader's chain info, creates the device down the chain, then initializes
// this layer's per-device state (debug-report data, dispatch table, extension
// bookkeeping) and begins tracking the new VkDevice under its owning instance.
// The chain-manipulation order below is dictated by the loader contract.
VkResult explicit_CreateDevice(VkPhysicalDevice gpu, const VkDeviceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                               VkDevice *pDevice) {
    std::lock_guard<std::mutex> lock(global_lock); // held for the whole call
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(NULL, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(gpu, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    // Hook the new device's debug reporting up to the owning instance's data.
    layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
    layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
    my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);

    initDeviceTable(*pDevice, fpGetDeviceProcAddr, object_tracker_device_table_map);

    createDeviceRegisterExtensions(pCreateInfo, *pDevice);

    // Track the device under the instance that owns its physical device; if the
    // physical device is not tracked, the device cannot be attributed and is
    // not recorded.
    if (VkPhysicalDeviceMap.find((uint64_t)gpu) != VkPhysicalDeviceMap.end()) {
        OBJTRACK_NODE *pNewObjNode = VkPhysicalDeviceMap[(uint64_t)gpu];
        create_device((VkInstance)pNewObjNode->belongsTo, *pDevice, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT);
    }

    return result;
}
687
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700688VkResult explicit_EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
689 VkPhysicalDevice *pPhysicalDevices) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600690 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600691 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700692 skipCall |= validate_instance(instance, instance, VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600693 lock.unlock();
Tobin Ehlisec598302015-09-15 15:02:17 -0600694 if (skipCall)
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700695 return VK_ERROR_VALIDATION_FAILED_EXT;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700696 VkResult result = get_dispatch_table(object_tracker_instance_table_map, instance)
697 ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600698 lock.lock();
Tobin Ehlisec598302015-09-15 15:02:17 -0600699 if (result == VK_SUCCESS) {
700 if (pPhysicalDevices) {
701 for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700702 create_physical_device(instance, pPhysicalDevices[i], VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT);
Tobin Ehlisec598302015-09-15 15:02:17 -0600703 }
704 }
705 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600706 lock.unlock();
Tobin Ehlisec598302015-09-15 15:02:17 -0600707 return result;
708}
709
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700710void explicit_GetDeviceQueue(VkDevice device, uint32_t queueNodeIndex, uint32_t queueIndex, VkQueue *pQueue) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600711 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700712 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600713 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600714
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700715 get_dispatch_table(object_tracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600716
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600717 lock.lock();
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600718
Courtney Goeltzenleuchter7415d5a2015-12-09 15:48:16 -0700719 create_queue(device, *pQueue, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
Mark Lobodzinski7f537292016-05-12 15:14:07 -0600720 addQueueInfo(queueNodeIndex, *pQueue);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600721}
722
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700723VkResult explicit_MapMemory(VkDevice device, VkDeviceMemory mem, VkDeviceSize offset, VkDeviceSize size, VkFlags flags,
724 void **ppData) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600725 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600726 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700727 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600728 lock.unlock();
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600729 if (skipCall == VK_TRUE)
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700730 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600731
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700732 VkResult result =
733 get_dispatch_table(object_tracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600734
735 return result;
736}
737
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700738void explicit_UnmapMemory(VkDevice device, VkDeviceMemory mem) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600739 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600740 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700741 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600742 lock.unlock();
Tobin Ehlisc9ac2b62015-09-11 12:57:55 -0600743 if (skipCall == VK_TRUE)
744 return;
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600745
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700746 get_dispatch_table(object_tracker_device_table_map, device)->UnmapMemory(device, mem);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600747}
748
// Layer hook for vkQueueBindSparse. Checks that the queue supports sparse
// binding and that every buffer/image referenced by the bind infos is a tracked
// object, then forwards the call down the chain.
VkResult explicit_QueueBindSparse(VkQueue queue, uint32_t bindInfoCount, const VkBindSparseInfo *pBindInfo, VkFence fence) {
    std::unique_lock<std::mutex> lock(global_lock);
    validateQueueFlags(queue, "QueueBindSparse");

    // NOTE(review): unlike most hooks in this file, the validation results below
    // are discarded and the call is always forwarded -- errors are still logged
    // via log_msg inside the validators. Confirm this best-effort behavior is
    // intended before changing it.
    for (uint32_t i = 0; i < bindInfoCount; i++) {
        for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; j++)
            validate_buffer(queue, pBindInfo[i].pBufferBinds[j].buffer, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, false);
        for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; j++)
            validate_image(queue, pBindInfo[i].pImageOpaqueBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
        for (uint32_t j = 0; j < pBindInfo[i].imageBindCount; j++)
            validate_image(queue, pBindInfo[i].pImageBinds[j].image, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT, false);
    }
    lock.unlock();

    VkResult result =
        get_dispatch_table(object_tracker_device_table_map, queue)->QueueBindSparse(queue, bindInfoCount, pBindInfo, fence);
    return result;
}
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600767
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700768VkResult explicit_AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
769 VkCommandBuffer *pCommandBuffers) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600770 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600771 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700772 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
773 skipCall |= validate_command_pool(device, pAllocateInfo->commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600774 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700775
776 if (skipCall) {
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700777 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700778 }
779
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700780 VkResult result =
781 get_dispatch_table(object_tracker_device_table_map, device)->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700782
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600783 lock.lock();
Jon Ashburnf19916e2016-01-11 13:12:43 -0700784 for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700785 alloc_command_buffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
786 pAllocateInfo->level);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700787 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600788 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700789
790 return result;
791}
792
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700793VkResult explicit_AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
794 VkDescriptorSet *pDescriptorSets) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600795 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600796 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700797 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700798 skipCall |=
799 validate_descriptor_pool(device, pAllocateInfo->descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
Jon Ashburnf19916e2016-01-11 13:12:43 -0700800 for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700801 skipCall |= validate_descriptor_set_layout(device, pAllocateInfo->pSetLayouts[i],
802 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT, false);
Tobin Ehlisec598302015-09-15 15:02:17 -0600803 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600804 lock.unlock();
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600805 if (skipCall) {
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700806 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600807 }
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600808
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700809 VkResult result =
810 get_dispatch_table(object_tracker_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600811
Chris Forbes539a87c2016-01-22 15:44:40 +1300812 if (VK_SUCCESS == result) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600813 lock.lock();
Chris Forbes539a87c2016-01-22 15:44:40 +1300814 for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700815 alloc_descriptor_set(device, pAllocateInfo->descriptorPool, pDescriptorSets[i],
816 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT);
Chris Forbes539a87c2016-01-22 15:44:40 +1300817 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600818 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600819 }
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600820
821 return result;
822}
823
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700824void explicit_FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
825 const VkCommandBuffer *pCommandBuffers) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600826 bool skipCall = false;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600827 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700828 validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
829 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600830 for (uint32_t i = 0; i < commandBufferCount; i++) {
831 skipCall |= validate_command_buffer(device, commandPool, pCommandBuffers[i]);
832 }
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700833
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600834 lock.unlock();
835 if (!skipCall) {
836 get_dispatch_table(object_tracker_device_table_map, device)
837 ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
838 }
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700839
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600840 lock.lock();
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700841 for (uint32_t i = 0; i < commandBufferCount; i++) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600842 free_command_buffer(device, pCommandBuffers[i]);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700843 }
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700844}
845
// Layer hook for vkDestroySwapchainKHR. Drops tracking for the swapchain and
// all of its images before forwarding the destroy down the chain.
void explicit_DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    std::unique_lock<std::mutex> lock(global_lock);
    // A swapchain's images are implicitly deleted when the swapchain is deleted.
    // Remove this swapchain's images from our map of such images.
    std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = swapchainImageMap.begin();
    while (itr != swapchainImageMap.end()) {
        OBJTRACK_NODE *pNode = (*itr).second;
        if (pNode->parentObj == reinterpret_cast<uint64_t &>(swapchain)) {
            delete pNode;
            // Post-increment inside erase(): advance the iterator before the
            // element it points at is invalidated.
            swapchainImageMap.erase(itr++);
        } else {
            ++itr;
        }
    }
    destroy_swapchain_khr(device, swapchain);
    lock.unlock();

    // Forward the destroy with the lock released.
    get_dispatch_table(object_tracker_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
}
865
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700866void explicit_FreeMemory(VkDevice device, VkDeviceMemory mem, const VkAllocationCallbacks *pAllocator) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600867 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700868 validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600869 lock.unlock();
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600870
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700871 get_dispatch_table(object_tracker_device_table_map, device)->FreeMemory(device, mem, pAllocator);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600872
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600873 lock.lock();
Michael Lentine13803dc2015-11-04 14:35:12 -0800874 destroy_device_memory(device, mem);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600875}
Tony Barboura05dbaa2015-07-09 17:31:46 -0600876
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700877VkResult explicit_FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t count,
878 const VkDescriptorSet *pDescriptorSets) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600879 bool skipCall = false;
880 VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600881 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600882 skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
883 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
884 for (uint32_t i = 0; i < count; i++) {
885 skipCall |= validate_descriptor_set(device, descriptorPool, pDescriptorSets[i]);
886 }
887
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600888 lock.unlock();
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600889 if (!skipCall) {
890 result = get_dispatch_table(object_tracker_device_table_map, device)
891 ->FreeDescriptorSets(device, descriptorPool, count, pDescriptorSets);
892 }
Tony Barbour770f80d2015-07-20 10:52:13 -0600893
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600894 lock.lock();
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700895 for (uint32_t i = 0; i < count; i++) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600896 free_descriptor_set(device, pDescriptorSets[i]);
Tony Barbour770f80d2015-07-20 10:52:13 -0600897 }
Tony Barbour770f80d2015-07-20 10:52:13 -0600898 return result;
899}
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600900
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700901void explicit_DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool, const VkAllocationCallbacks *pAllocator) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600902 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600903 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700904 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
905 skipCall |= validate_descriptor_pool(device, descriptorPool, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600906 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700907 if (skipCall) {
908 return;
909 }
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700910 // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700911 // Remove this pool's descriptor sets from our descriptorSet map.
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600912 lock.lock();
Mark Lobodzinskif93272b2016-05-02 12:08:24 -0600913 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkDescriptorSetMap.begin();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700914 while (itr != VkDescriptorSetMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700915 OBJTRACK_NODE *pNode = (*itr).second;
Mark Lobodzinskib29731a2015-11-18 11:01:02 -0700916 auto del_itr = itr++;
Mark Young93ecb1d2016-01-13 13:47:16 -0700917 if (pNode->parentObj == (uint64_t)(descriptorPool)) {
918 destroy_descriptor_set(device, (VkDescriptorSet)((*del_itr).first));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700919 }
920 }
921 destroy_descriptor_pool(device, descriptorPool);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600922 lock.unlock();
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700923 get_dispatch_table(object_tracker_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700924}
925
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700926void explicit_DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600927 bool skipCall = false;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600928 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700929 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
930 skipCall |= validate_command_pool(device, commandPool, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600931 lock.unlock();
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700932 if (skipCall) {
933 return;
934 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600935 lock.lock();
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700936 // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700937 // Remove this pool's cmdBuffers from our cmd buffer map.
Mark Lobodzinskif93272b2016-05-02 12:08:24 -0600938 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator itr = VkCommandBufferMap.begin();
939 std::unordered_map<uint64_t, OBJTRACK_NODE *>::iterator del_itr;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700940 while (itr != VkCommandBufferMap.end()) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700941 OBJTRACK_NODE *pNode = (*itr).second;
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700942 del_itr = itr++;
Mark Young93ecb1d2016-01-13 13:47:16 -0700943 if (pNode->parentObj == (uint64_t)(commandPool)) {
Mark Lobodzinski772cd3d2016-05-03 08:39:24 -0600944 skipCall |= validate_command_buffer(device, commandPool, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
945 free_command_buffer(device, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700946 }
947 }
948 destroy_command_pool(device, commandPool);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600949 lock.unlock();
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700950 get_dispatch_table(object_tracker_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
Mark Lobodzinski5f5c0e12015-11-12 16:02:35 -0700951}
952
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700953VkResult explicit_GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pCount, VkImage *pSwapchainImages) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600954 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600955 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700956 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600957 lock.unlock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600958 if (skipCall)
Courtney Goeltzenleuchter52fee652015-12-10 16:41:22 -0700959 return VK_ERROR_VALIDATION_FAILED_EXT;
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600960
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700961 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
962 ->GetSwapchainImagesKHR(device, swapchain, pCount, pSwapchainImages);
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600963
964 if (pSwapchainImages != NULL) {
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600965 lock.lock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600966 for (uint32_t i = 0; i < *pCount; i++) {
967 create_swapchain_image_obj(device, pSwapchainImages[i], swapchain);
968 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600969 lock.unlock();
Mark Lobodzinskie6d3f2c2015-10-14 13:16:33 -0600970 }
971 return result;
972}
973
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700974// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700975VkResult explicit_CreateGraphicsPipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
976 const VkGraphicsPipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
977 VkPipeline *pPipelines) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -0600978 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -0600979 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -0700980 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700981 if (pCreateInfos) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700982 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700983 if (pCreateInfos[idx0].basePipelineHandle) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700984 skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
985 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700986 }
987 if (pCreateInfos[idx0].layout) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700988 skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
989 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700990 }
991 if (pCreateInfos[idx0].pStages) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700992 for (uint32_t idx1 = 0; idx1 < pCreateInfos[idx0].stageCount; ++idx1) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700993 if (pCreateInfos[idx0].pStages[idx1].module) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700994 skipCall |= validate_shader_module(device, pCreateInfos[idx0].pStages[idx1].module,
995 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -0700996 }
997 }
998 }
999 if (pCreateInfos[idx0].renderPass) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001000 skipCall |=
1001 validate_render_pass(device, pCreateInfos[idx0].renderPass, VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001002 }
1003 }
1004 }
1005 if (pipelineCache) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001006 skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001007 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001008 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001009 if (skipCall)
1010 return VK_ERROR_VALIDATION_FAILED_EXT;
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001011 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
1012 ->CreateGraphicsPipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001013 lock.lock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001014 if (result == VK_SUCCESS) {
1015 for (uint32_t idx2 = 0; idx2 < createInfoCount; ++idx2) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001016 create_pipeline(device, pPipelines[idx2], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001017 }
1018 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001019 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001020 return result;
1021}
1022
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001023// TODO: Add special case to codegen to cover validating all the pipelines instead of just the first
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001024VkResult explicit_CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
1025 const VkComputePipelineCreateInfo *pCreateInfos, const VkAllocationCallbacks *pAllocator,
1026 VkPipeline *pPipelines) {
Mark Lobodzinski2abefa92016-05-05 11:45:57 -06001027 bool skipCall = VK_FALSE;
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001028 std::unique_lock<std::mutex> lock(global_lock);
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001029 skipCall |= validate_device(device, device, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001030 if (pCreateInfos) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001031 for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001032 if (pCreateInfos[idx0].basePipelineHandle) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001033 skipCall |= validate_pipeline(device, pCreateInfos[idx0].basePipelineHandle,
1034 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT, true);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001035 }
1036 if (pCreateInfos[idx0].layout) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001037 skipCall |= validate_pipeline_layout(device, pCreateInfos[idx0].layout,
1038 VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001039 }
1040 if (pCreateInfos[idx0].stage.module) {
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001041 skipCall |= validate_shader_module(device, pCreateInfos[idx0].stage.module,
1042 VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001043 }
1044 }
1045 }
1046 if (pipelineCache) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001047 skipCall |= validate_pipeline_cache(device, pipelineCache, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT, false);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001048 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001049 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001050 if (skipCall)
1051 return VK_ERROR_VALIDATION_FAILED_EXT;
Jon Ashburn5484e0c2016-03-08 17:48:44 -07001052 VkResult result = get_dispatch_table(object_tracker_device_table_map, device)
1053 ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001054 lock.lock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001055 if (result == VK_SUCCESS) {
1056 for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
Mark Lobodzinski1ed594e2016-02-03 09:57:14 -07001057 create_pipeline(device, pPipelines[idx1], VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT);
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001058 }
1059 }
Jeremy Hayes2f065b12016-04-13 10:54:17 -06001060 lock.unlock();
Mark Lobodzinski154329b2016-01-26 09:55:28 -07001061 return result;
1062}