/*
 * Vulkan
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "vk_layer.h"
#include "vk_enum_string_helper.h"

// Object Tracker ERROR codes
typedef enum _OBJECT_TRACK_ERROR
{
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_DESTROY_OBJECT_FAILED,    // Couldn't find object to be destroyed
    OBJTRACK_OBJECT_TYPE_MISMATCH,     // Object did not match corresponding Object Type
    OBJTRACK_OBJECT_LEAK,              // OBJECT was not correctly freed/destroyed
    OBJTRACK_OBJCOUNT_MAX_EXCEEDED,    // Request for Object data in excess of max obj count
    OBJTRACK_INVALID_FENCE,            // Requested status of unsubmitted fence object
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
} OBJECT_TRACK_ERROR;

// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits
{
    OBJSTATUS_NONE                  = 0x00000000, // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED    = 0x00000001, // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND        = 0x00000002, // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND          = 0x00000004, // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND     = 0x00000008, // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND   = 0x00000010, // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED        = 0x00000020, // Memory object is currently mapped
} ObjectStatusFlagBits;
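
// Illustrative example (drawn from the functions later in this file, not a new API):
// status bits are ORed into an object's status word and checked later. For instance,
// explicit_QueueSubmit() marks a fence as submitted and explicit_GetFenceStatus()
// then verifies that bit before forwarding the call:
//
//     set_status(queue, fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED);
//     ...
//     validate_status(device, fence, VK_OBJECT_TYPE_FENCE,
//                     OBJSTATUS_FENCE_IS_SUBMITTED, OBJSTATUS_FENCE_IS_SUBMITTED,
//                     VK_DBG_REPORT_ERROR_BIT, OBJTRACK_INVALID_FENCE,
//                     "Status Requested for Unsubmitted Fence");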

typedef struct _OBJTRACK_NODE {
    VkObject          vkObj;
    VkObjectType      objType;
    ObjectStatusFlags status;
} OBJTRACK_NODE;

// Prototypes for extension functions
uint64_t objTrackGetObjectsCount(VkDevice device);
VkResult objTrackGetObjects(VkDevice device, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice device, VkObjectType type);
VkResult objTrackGetObjectsOfType(VkDevice device, VkObjectType type, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);

// Func ptr typedefs
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef VkResult (*OBJ_TRACK_GET_OBJECTS)(VkDevice, uint64_t, OBJTRACK_NODE*);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkObjectType);
typedef VkResult (*OBJ_TRACK_GET_OBJECTS_OF_TYPE)(VkDevice, VkObjectType, uint64_t, OBJTRACK_NODE*);
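
// Hypothetical caller sketch (not from this file): an application with this layer
// enabled could resolve the extension entry points above by name and walk the current
// object list. The exact query mechanism (e.g. vkGetDeviceProcAddr) is an assumption here.
//
//     OBJ_TRACK_GET_OBJECT_COUNT pfnGetCount =
//         (OBJ_TRACK_GET_OBJECT_COUNT) vkGetDeviceProcAddr(device, "objTrackGetObjectsCount");
//     OBJ_TRACK_GET_OBJECTS pfnGetObjects =
//         (OBJ_TRACK_GET_OBJECTS) vkGetDeviceProcAddr(device, "objTrackGetObjects");
//     uint64_t count = pfnGetCount(device);
//     OBJTRACK_NODE *pNodes = (OBJTRACK_NODE *) malloc(count * sizeof(OBJTRACK_NODE));
//     if (pfnGetObjects(device, count, pNodes) == VK_SUCCESS) {
//         /* inspect pNodes[0..count-1] */
//     }
//     free(pNodes);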

typedef struct _layer_data {
    debug_report_data *report_data;
    //TODO: put instance data here
    VkDbgMsgCallback logging_callback;
} layer_data;

static std::unordered_map<void*, layer_data *> layer_data_map;
static device_table_map ObjectTracker_device_table_map;
static instance_table_map ObjectTracker_instance_table_map;

static long long unsigned int object_track_index = 0;
static int objLockInitialized = 0;
static loader_platform_thread_mutex objLock;

// Objects stored in a global map w/ struct containing basic info
unordered_map<VkObject, OBJTRACK_NODE*> objMap;

#define NUM_OBJECT_TYPES (VK_NUM_OBJECT_TYPE + (VK_OBJECT_TYPE_SWAP_CHAIN_WSI - VK_OBJECT_TYPE_DISPLAY_WSI))

static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};
static uint64_t numTotalObjs = 0;
static VkPhysicalDeviceQueueProperties *queueInfo = NULL;
static uint32_t queueCount = 0;

template layer_data *get_my_data_ptr<layer_data>(
        void *data_key, std::unordered_map<void *, layer_data *> &data_map);

//
// Internal Object Tracker Functions
//

struct devExts {
    bool wsi_lunarg_enabled;
};

static std::unordered_map<void *, struct devExts> deviceExtMap;

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
{
    uint32_t i;
    VkLayerDispatchTable *pDisp = device_dispatch_table(device);
    deviceExtMap[pDisp].wsi_lunarg_enabled = false;
    for (i = 0; i < pCreateInfo->extensionCount; i++) {
        if (strcmp(pCreateInfo->pEnabledExtensions[i].name, VK_WSI_LUNARG_EXTENSION_NAME) == 0)
            deviceExtMap[pDisp].wsi_lunarg_enabled = true;
    }
}

// Indicate device or instance dispatch table type
typedef enum _DispTableType
{
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;

debug_report_data *mdd(VkObject object)
{
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

debug_report_data *mid(VkInstance object)
{
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory       mem;
    struct _OT_MEM_INFO *pNextMI;
    struct _OT_MEM_INFO *pPrevMI;
} OT_MEM_INFO;

// Track Queue information
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO           *pMemRefList;
    struct _OT_QUEUE_INFO *pNextQI;
    uint32_t               queueNodeIndex;
    VkQueue                queue;
    uint32_t               refCount;
} OT_QUEUE_INFO;

// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;

// Convert an object type enum to an object type array index
static uint32_t
objTypeToIndex(
    uint32_t objType)
{
    uint32_t index = objType;
    if (objType > VK_OBJECT_TYPE_END_RANGE) {
        // These are extension enums from vk_wsi_lunarg.h, so rebase them onto the end of the core range
        index = (index - (VK_WSI_LUNARG_EXTENSION_NUMBER * -1000)) + VK_OBJECT_TYPE_END_RANGE;
    }
    return index;
}

// Validate that object is in the object map
static void
validate_object(
    const VkObject dispatchable_object,
    const VkObject object)
{
    if (objMap.find(object) == objMap.end()) {
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, object, 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
            "Invalid Object %p", object);
    }
}

// Validate that object parameter matches designated object type
static void
validateObjectType(
    VkObject dispatchable_object,
    const char *apiName,
    VkObjectType objType,
    VkObject object)
{
    if (objMap.find(object) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[object];
        // Found our object, check type
        if (strcmp(string_VkObjectType(pNode->objType), string_VkObjectType(objType)) != 0) {
            log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, objType, object, 0, OBJTRACK_OBJECT_TYPE_MISMATCH, "OBJTRACK",
                "ERROR: Object Parameter Type %s does not match designated type %s", string_VkObjectType(pNode->objType), string_VkObjectType(objType));
        }
    }
}

// Add new queue to head of global queue list
static void
addQueueInfo(
    uint32_t queueNodeIndex,
    VkQueue queue)
{
    OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;

    if (pQueueInfo != NULL) {
        memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
        pQueueInfo->queue = queue;
        pQueueInfo->queueNodeIndex = queueNodeIndex;
        pQueueInfo->pNextQI = g_pQueueInfo;
        g_pQueueInfo = pQueueInfo;
    }
    else {
        log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
            "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
    }
}

// Destroy memRef lists and free all memory
static void
destroyQueueMemRefLists(void)
{
    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
    OT_QUEUE_INFO *pDelQueueInfo = NULL;
    while (pQueueInfo != NULL) {
        OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
        while (pMemInfo != NULL) {
            OT_MEM_INFO *pDelMemInfo = pMemInfo;
            pMemInfo = pMemInfo->pNextMI;
            delete pDelMemInfo;
        }
        pDelQueueInfo = pQueueInfo;
        pQueueInfo = pQueueInfo->pNextQI;
        delete pDelQueueInfo;
    }
    g_pQueueInfo = pQueueInfo;
}


static void
create_obj(
    VkObject dispatchable_object,
    VkObject vkObj,
    VkObjectType objType)
{
    log_msg(mdd(dispatchable_object), VK_DBG_REPORT_INFO_BIT, objType, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkObjectType(objType),
        reinterpret_cast<VkUintPtrLeast64>(vkObj));

    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->vkObj = vkObj;
    pNewObjNode->objType = objType;
    pNewObjNode->status = OBJSTATUS_NONE;
    objMap[vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

// Parse global list to find obj type, then remove obj from obj type list, finally
// remove obj from global list
static void
destroy_obj(
    VkObject dispatchable_object,
    VkObject vkObj)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        uint32_t objIndex = objTypeToIndex(pNode->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[objIndex] > 0);
        numObjs[objIndex]--;
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_INFO_BIT, pNode->objType, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
            "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%lu total objs remain & %lu %s objs).",
            string_VkObjectType(pNode->objType), reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj), numTotalObjs, numObjs[objIndex],
            string_VkObjectType(pNode->objType));

        delete pNode;
        objMap.erase(vkObj);
    } else {
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, vkObj, 0, OBJTRACK_DESTROY_OBJECT_FAILED, "OBJTRACK",
            "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
            reinterpret_cast<VkUintPtrLeast64>(vkObj));
    }
}

// Set selected flag state for an object node
static void
set_status(
    VkObject dispatchable_object,
    VkObject vkObj,
    VkObjectType objType,
    ObjectStatusFlags status_flag)
{
    if (vkObj != VK_NULL_HANDLE) {
        if (objMap.find(vkObj) != objMap.end()) {
            OBJTRACK_NODE* pNode = objMap[vkObj];
            pNode->status |= status_flag;
            return;
        }
        else {
            // If we do not find it print an error
            log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
313 "Unable to set status for non-existent object 0x%" PRIxLEAST64 " of %s type",
314 reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
315 }
316 }
317}
318
319// Reset selected flag state for an object node
320static void
321reset_status(
322 VkObject dispatchable_object,
323 VkObject vkObj,
324 VkObjectType objType,
325 ObjectStatusFlags status_flag)
326{
327 if (objMap.find(vkObj) != objMap.end()) {
328 OBJTRACK_NODE* pNode = objMap[vkObj];
329 pNode->status &= ~status_flag;
330 return;
331 }
332 else {
333 // If we do not find it print an error
334 log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, objType, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
335 "Unable to reset status for non-existent object 0x%" PRIxLEAST64 " of %s type",
336 reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
337 }
338}
339
340static void
341setGpuQueueInfoState(
Tony Barbour59a47322015-06-24 16:06:58 -0600342 uint32_t count,
343 void *pData)
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600344{
Tony Barbour59a47322015-06-24 16:06:58 -0600345 queueCount = count;
346 queueInfo = (VkPhysicalDeviceQueueProperties*)realloc((void*)queueInfo, count * sizeof(VkPhysicalDeviceQueueProperties));
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600347 if (queueInfo != NULL) {
Tony Barbour59a47322015-06-24 16:06:58 -0600348 memcpy(queueInfo, pData, count * sizeof(VkPhysicalDeviceQueueProperties));
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600349 }
350}
351
352// Check Queue type flags for selected queue operations
353static void
354validateQueueFlags(
355 VkQueue queue,
356 const char *function)
357{
358 OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
359 while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
360 pQueueInfo = pQueueInfo->pNextQI;
361 }
    if (pQueueInfo != NULL) {
        if (queueInfo == NULL) {
            // Queue family properties were never captured, so capability is unknown
            log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Attempting %s on a possibly non-memory-management capable queue -- VK_QUEUE_SPARSE_MEMMGR_BIT not known", function);
        } else if ((queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_MEMMGR_BIT) == 0) {
            log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_MEMMGR_BIT not set", function);
        }
    }
}

// Check object status for selected flag state
static bool32_t
validate_status(
    VkObject dispatchable_object,
    VkObject vkObj,
    VkObjectType objType,
    ObjectStatusFlags status_mask,
    ObjectStatusFlags status_flag,
    VkFlags msg_flags,
    OBJECT_TRACK_ERROR error_code,
    const char *fail_msg)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        if ((pNode->status & status_mask) != status_flag) {
            log_msg(mdd(dispatchable_object), msg_flags, pNode->objType, vkObj, 0, error_code, "OBJTRACK",
390 "OBJECT VALIDATION WARNING: %s object 0x%" PRIxLEAST64 ": %s", string_VkObjectType(objType),
391 reinterpret_cast<VkUintPtrLeast64>(vkObj), fail_msg);
392 return VK_FALSE;
393 }
394 return VK_TRUE;
395 }
396 else {
397 // If we do not find it print an error
398 log_msg(mdd(dispatchable_object), msg_flags, (VkObjectType) 0, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
399 "Unable to obtain status for non-existent object 0x%" PRIxLEAST64 " of %s type",
400 reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
401 return VK_FALSE;
402 }
403}
404
405#include "vk_dispatch_table_helper.h"
406static void
407initObjectTracker(
408 layer_data *my_data)
409{
410 uint32_t report_flags = 0;
411 uint32_t debug_action = 0;
412 FILE *log_output = NULL;
413 const char *option_str;
414 // initialize ObjectTracker options
415 report_flags = getLayerOptionFlags("ObjectTrackerReportFlags", 0);
416 getLayerOptionEnum("ObjectTrackerDebugAction", (uint32_t *) &debug_action);
417
418 if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
419 {
420 option_str = getLayerOption("ObjectTrackerLogFilename");
421 if (option_str) {
422 log_output = fopen(option_str, "w");
423 }
424 if (log_output == NULL) {
425 log_output = stdout;
426 }
427
428 layer_create_msg_callback(my_data->report_data, report_flags, log_callback, (void *) log_output, &my_data->logging_callback);
429 }
430
431 if (!objLockInitialized)
432 {
433 // TODO/TBD: Need to delete this mutex sometime. How??? One
434 // suggestion is to call this during vkCreateInstance(), and then we
435 // can clean it up during vkDestroyInstance(). However, that requires
436 // that the layer have per-instance locks. We need to come back and
437 // address this soon.
438 loader_platform_thread_create_mutex(&objLock);
439 objLockInitialized = 1;
440 }
441}
442
443
444//
445// Non-auto-generated API functions called by generated code
446//
447
448VkResult
449explicit_CreateInstance(
450 const VkInstanceCreateInfo *pCreateInfo,
451 VkInstance * pInstance)
452{
453 loader_platform_thread_lock_mutex(&objLock);
454 VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, *pInstance);
455 VkResult result = pInstanceTable->CreateInstance(pCreateInfo, pInstance);
456
457 if (result == VK_SUCCESS) {
458 layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
459 my_data->report_data = debug_report_create_instance(
460 pInstanceTable,
461 *pInstance,
462 pCreateInfo->extensionCount,
463 pCreateInfo->pEnabledExtensions);
464
465 initObjectTracker(my_data);
Tobin Ehlis3e46c802015-06-30 14:31:50 -0600466 create_obj(*pInstance, *pInstance, VK_OBJECT_TYPE_INSTANCE);
Mark Lobodzinskifae78852015-06-23 11:35:12 -0600467 }
468 loader_platform_thread_unlock_mutex(&objLock);
469 return result;
470}
471
472VkResult
473explicit_DestroyInstance(
474 VkInstance instance)
475{
476 loader_platform_thread_lock_mutex(&objLock);
477 validate_object(instance, instance);
478
479 destroy_obj(instance, instance);
480 // Report any remaining objects in LL
481 for (auto it = objMap.begin(); it != objMap.end(); ++it) {
        OBJTRACK_NODE* pNode = it->second;
        if ((pNode->objType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) || (pNode->objType == VK_OBJECT_TYPE_QUEUE)) {
            // Cannot destroy physical device so ignore
        } else {
            log_msg(mid(instance), VK_DBG_REPORT_ERROR_BIT, pNode->objType, pNode->vkObj, 0, OBJTRACK_OBJECT_LEAK, "OBJTRACK",
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.", string_VkObjectType(pNode->objType),
                reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj));
        }
    }

    dispatch_key key = get_dispatch_key(instance);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, instance);
    VkResult result = pInstanceTable->DestroyInstance(instance);

    // Clean up logging callback, if any
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    if (my_data->logging_callback) {
        layer_destroy_msg_callback(my_data->report_data, my_data->logging_callback);
    }

    layer_debug_report_destroy_instance(mid(instance));
    layer_data_map.erase(key);

    ObjectTracker_instance_table_map.erase(key);
    assert(ObjectTracker_instance_table_map.size() == 0 && "Should not have any instance mappings hanging around");

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_GetPhysicalDeviceQueueProperties(
    VkPhysicalDevice gpu,
    uint32_t count,
    VkPhysicalDeviceQueueProperties* pProperties)
{
    VkResult result = get_dispatch_table(ObjectTracker_instance_table_map, gpu)->GetPhysicalDeviceQueueProperties(gpu, count, pProperties);

    loader_platform_thread_lock_mutex(&objLock);
    setGpuQueueInfoState(count, pProperties);
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_CreateDevice(
    VkPhysicalDevice gpu,
    const VkDeviceCreateInfo *pCreateInfo,
    VkDevice *pDevice)
{
    loader_platform_thread_lock_mutex(&objLock);
//  VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, gpu);
    VkLayerDispatchTable *pDeviceTable = get_dispatch_table(ObjectTracker_device_table_map, *pDevice);
    VkResult result = pDeviceTable->CreateDevice(gpu, pCreateInfo, pDevice);
    if (result == VK_SUCCESS) {
        layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
        //// VkLayerDispatchTable *pTable = get_dispatch_table(ObjectTracker_device_table_map, *pDevice);
        layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
        my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
        create_obj(*pDevice, *pDevice, VK_OBJECT_TYPE_DEVICE);
    }

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_DestroyDevice(
    VkDevice device)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    destroy_obj(device, device);
    // Report any remaining objects
    for (auto it = objMap.begin(); it != objMap.end(); ++it) {
        OBJTRACK_NODE* pNode = it->second;
        if ((pNode->objType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) || (pNode->objType == VK_OBJECT_TYPE_QUEUE)) {
            // Cannot destroy physical device so ignore
        } else {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_PHYSICAL_DEVICE, device, 0, OBJTRACK_OBJECT_LEAK, "OBJTRACK",
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.", string_VkObjectType(pNode->objType),
                reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj));
        }
    }
    // Clean up Queue's MemRef Linked Lists
    destroyQueueMemRefLists();

    loader_platform_thread_unlock_mutex(&objLock);

    dispatch_key key = get_dispatch_key(device);
    VkLayerDispatchTable *pDisp = get_dispatch_table(ObjectTracker_device_table_map, device);
    VkResult result = pDisp->DestroyDevice(device);
    deviceExtMap.erase(pDisp);
    ObjectTracker_device_table_map.erase(key);
    assert(ObjectTracker_device_table_map.size() == 0 && "Should not have any device mappings hanging around");

    return result;
}

VkResult
explicit_GetDeviceQueue(
    VkDevice device,
    uint32_t queueNodeIndex,
    uint32_t queueIndex,
    VkQueue *pQueue)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);

    loader_platform_thread_lock_mutex(&objLock);
    addQueueInfo(queueNodeIndex, *pQueue);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_QueueSubmit(
    VkQueue queue,
    uint32_t cmdBufferCount,
    const VkCmdBuffer *pCmdBuffers,
    VkFence fence)
{
    loader_platform_thread_lock_mutex(&objLock);
    set_status(queue, fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED);
    // TODO: Fix for updated memory reference mechanism
    // validate_memory_mapping_status(pMemRefs, memRefCount);
    // validate_mem_ref_count(memRefCount);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);

    return result;
}

VkResult
explicit_MapMemory(
    VkDevice device,
    VkDeviceMemory mem,
    VkDeviceSize offset,
    VkDeviceSize size,
    VkFlags flags,
    void **ppData)
{
    loader_platform_thread_lock_mutex(&objLock);
    set_status(device, mem, VK_OBJECT_TYPE_DEVICE_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);

    return result;
}

VkResult
explicit_UnmapMemory(
    VkDevice device,
    VkDeviceMemory mem)
{
    loader_platform_thread_lock_mutex(&objLock);
    reset_status(device, mem, VK_OBJECT_TYPE_DEVICE_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->UnmapMemory(device, mem);

    return result;
}

VkResult
explicit_DestroyObject(
    VkDevice device,
    VkObjectType objType,
    VkObject object)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    validate_object(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->DestroyObject(device, objType, object);

    loader_platform_thread_lock_mutex(&objLock);
    validateObjectType(device, "vkDestroyObject", objType, object);
    destroy_obj(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_GetObjectMemoryRequirements(
    VkDevice device,
    VkObjectType objType,
    VkObject object,
    VkMemoryRequirements* pMemoryRequirements)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateObjectType(device, "vkGetObjectMemoryRequirements", objType, object);
    validate_object(device, device);
    validate_object(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetObjectMemoryRequirements(device, objType, object, pMemoryRequirements);

    return result;
}

VkResult
explicit_QueueBindSparseBufferMemory(
    VkQueue queue,
    VkBuffer buffer,
    VkDeviceSize rangeOffset,
    VkDeviceSize rangeSize,
    VkDeviceMemory mem,
    VkDeviceSize memOffset)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparseBufferMemory");
    validate_object(queue, buffer);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueBindSparseBufferMemory(queue, buffer, rangeOffset, rangeSize, mem, memOffset);
    return result;
}

VkResult
explicit_QueueBindSparseImageMemory(
    VkQueue queue,
    VkImage image,
    const VkImageMemoryBindInfo *pBindInfo,
    VkDeviceMemory mem,
    VkDeviceSize memOffset)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparseImageMemory");
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueBindSparseImageMemory(queue, image, pBindInfo, mem, memOffset);
    return result;
}


VkResult
explicit_GetFenceStatus(
    VkDevice device,
    VkFence fence)
{
    loader_platform_thread_lock_mutex(&objLock);
    // Warn if submitted_flag is not set
    validate_status(device, fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED, OBJSTATUS_FENCE_IS_SUBMITTED,
        VK_DBG_REPORT_ERROR_BIT, OBJTRACK_INVALID_FENCE, "Status Requested for Unsubmitted Fence");
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetFenceStatus(device, fence);

    return result;
}

VkResult
explicit_WaitForFences(
    VkDevice device,
    uint32_t fenceCount,
    const VkFence *pFences,
    bool32_t waitAll,
    uint64_t timeout)
{
    loader_platform_thread_lock_mutex(&objLock);
    // Warn if waiting on unsubmitted fence
    for (uint32_t i = 0; i < fenceCount; i++) {
        validate_status(device, pFences[i], VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED, OBJSTATUS_FENCE_IS_SUBMITTED,
            VK_DBG_REPORT_ERROR_BIT, OBJTRACK_INVALID_FENCE, "Waiting for Unsubmitted Fence");
    }
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    return result;
}

VkResult
explicit_AllocDescriptorSets(
    VkDevice device,
    VkDescriptorPool descriptorPool,
    VkDescriptorSetUsage setUsage,
    uint32_t count,
    const VkDescriptorSetLayout *pSetLayouts,
    VkDescriptorSet *pDescriptorSets,
    uint32_t *pCount)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    validate_object(device, descriptorPool);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->AllocDescriptorSets(
        device, descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < *pCount; i++) {
        create_obj(device, pDescriptorSets[i], VK_OBJECT_TYPE_DESCRIPTOR_SET);
    }
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_DestroySwapChainWSI(
    VkSwapChainWSI swapChain)
{

    loader_platform_thread_lock_mutex(&objLock);
    destroy_obj(swapChain, swapChain);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, swapChain)->DestroySwapChainWSI(swapChain);

    return result;
}

VkResult
explicit_FreeMemory(
    VkDevice device,
    VkDeviceMemory mem)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->FreeMemory(device, mem);

    loader_platform_thread_lock_mutex(&objLock);
    destroy_obj(device, mem);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}


// ObjectTracker Extensions

uint64_t
objTrackGetObjectsCount(
    VkDevice device)
{
    return numTotalObjs;
}

VkResult
objTrackGetObjects(
    VkDevice device,
    uint64_t objCount,
    OBJTRACK_NODE *pObjNodeArray)
{
    // Check the requested count first thing
    uint64_t maxObjCount = numTotalObjs;
    if (objCount > maxObjCount) {
        log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_OBJCOUNT_MAX_EXCEEDED, "OBJTRACK",
            "OBJ ERROR : Received objTrackGetObjects() request for %lu objs, but there are only %lu total objs", objCount, maxObjCount);
        return VK_ERROR_INVALID_VALUE;
    }
    auto it = objMap.begin();
    for (uint64_t i = 0; i < objCount; i++) {
        if (objMap.end() == it) {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "OBJ INTERNAL ERROR : Ran out of objs! Should have %lu, but only copied %lu and not the requested %lu.", maxObjCount, i, objCount);
            return VK_ERROR_UNKNOWN;
        }
        memcpy(&pObjNodeArray[i], it->second, sizeof(OBJTRACK_NODE));
        ++it;
    }
    return VK_SUCCESS;
}

uint64_t
objTrackGetObjectsOfTypeCount(
    VkDevice device,
    VkObjectType type)
{
    return numObjs[type];
}

VkResult
objTrackGetObjectsOfType(
    VkDevice device,
    VkObjectType type,
    uint64_t objCount,
    OBJTRACK_NODE *pObjNodeArray)
{
    // Check the count first thing
    uint64_t maxObjCount = numObjs[type];
    if (objCount > maxObjCount) {
        log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_OBJCOUNT_MAX_EXCEEDED, "OBJTRACK",
            "OBJ ERROR : Received objTrackGetObjects() request for %lu objs, but there are only %lu objs of type %s",
            objCount, maxObjCount, string_VkObjectType(type));
        return VK_ERROR_INVALID_VALUE;
    }
    auto it = objMap.begin();
    for (uint64_t i = 0; i < objCount; i++) {
        // Get next object of correct type
        while ((objMap.end() != it) && (it->second->objType != type))
            ++it;
        if (objMap.end() == it) {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "OBJ INTERNAL ERROR : Ran out of %s objs! Should have %lu, but only copied %lu and not the requested %lu.",
                string_VkObjectType(type), maxObjCount, i, objCount);
            return VK_ERROR_UNKNOWN;
        }
        memcpy(&pObjNodeArray[i], it->second, sizeof(OBJTRACK_NODE));
        ++it;
    }
    return VK_SUCCESS;
}