/*
 * Vulkan
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "vkLayer.h"
#include "vk_enum_string_helper.h"

// Object Tracker ERROR codes
typedef enum _OBJECT_TRACK_ERROR
{
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_DESTROY_OBJECT_FAILED,    // Couldn't find object to be destroyed
    OBJTRACK_OBJECT_TYPE_MISMATCH,     // Object did not match corresponding Object Type
    OBJTRACK_OBJECT_LEAK,              // OBJECT was not correctly freed/destroyed
    OBJTRACK_OBJCOUNT_MAX_EXCEEDED,    // Request for Object data in excess of max obj count
    OBJTRACK_INVALID_FENCE,            // Requested status of unsubmitted fence object
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
} OBJECT_TRACK_ERROR;

// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits
{
    OBJSTATUS_NONE                = 0x00000000,    // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED  = 0x00000001,    // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND      = 0x00000002,    // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND        = 0x00000004,    // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND   = 0x00000008,    // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010,    // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED      = 0x00000020,    // Memory object is currently mapped
} ObjectStatusFlagBits;

typedef struct _OBJTRACK_NODE {
    VkObject          vkObj;
    VkObjectType      objType;
    ObjectStatusFlags status;
} OBJTRACK_NODE;

// Prototypes for extension functions
uint64_t objTrackGetObjectsCount(VkDevice device);
VkResult objTrackGetObjects(VkDevice device, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice device, VkObjectType type);
VkResult objTrackGetObjectsOfType(VkDevice device, VkObjectType type, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);

// Func ptr typedefs
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef VkResult (*OBJ_TRACK_GET_OBJECTS)(VkDevice, uint64_t, OBJTRACK_NODE*);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkObjectType);
typedef VkResult (*OBJ_TRACK_GET_OBJECTS_OF_TYPE)(VkDevice, VkObjectType, uint64_t, OBJTRACK_NODE*);
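
// Illustrative usage sketch (an assumption, not part of this layer): an application
// could resolve the entrypoints above by name and call them through these typedefs.
// The lookup via vkGetDeviceProcAddr and the use of std::vector are assumed here
// purely for illustration.
//
//     OBJ_TRACK_GET_OBJECT_COUNT pfnGetObjectsCount =
//         (OBJ_TRACK_GET_OBJECT_COUNT) vkGetDeviceProcAddr(device, "objTrackGetObjectsCount");
//     OBJ_TRACK_GET_OBJECTS pfnGetObjects =
//         (OBJ_TRACK_GET_OBJECTS) vkGetDeviceProcAddr(device, "objTrackGetObjects");
//     if (pfnGetObjectsCount && pfnGetObjects) {
//         uint64_t count = pfnGetObjectsCount(device);
//         std::vector<OBJTRACK_NODE> nodes(count);
//         if (count > 0) {
//             pfnGetObjects(device, count, nodes.data());
//         }
//     }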

typedef struct _layer_data {
    debug_report_data *report_data;
    //TODO: put instance data here
    VkDbgMsgCallback logging_callback;
} layer_data;

static std::unordered_map<void*, layer_data *> layer_data_map;
static device_table_map ObjectTracker_device_table_map;
static instance_table_map ObjectTracker_instance_table_map;

static long long unsigned int object_track_index = 0;
static int objLockInitialized = 0;
static loader_platform_thread_mutex objLock;

// Objects stored in a global map w/ struct containing basic info
unordered_map<VkObject, OBJTRACK_NODE*> objMap;

#define NUM_OBJECT_TYPES (VK_NUM_OBJECT_TYPE + (VK_OBJECT_TYPE_SWAP_CHAIN_WSI - VK_OBJECT_TYPE_DISPLAY_WSI))

static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};
static uint64_t numTotalObjs = 0;
static VkPhysicalDeviceQueueProperties *queueInfo = NULL;
static uint32_t queueCount = 0;

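// Explicitly instantiate the data-map lookup helper for this layer's layer_data type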
template layer_data *get_my_data_ptr<layer_data>(
        void *data_key, std::unordered_map<void *, layer_data *> &data_map);

//
// Internal Object Tracker Functions
//

struct devExts {
    bool wsi_lunarg_enabled;
};

static std::unordered_map<void *, struct devExts> deviceExtMap;

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
{
    uint32_t i, ext_idx;
    VkLayerDispatchTable *pDisp = device_dispatch_table(device);
    deviceExtMap[pDisp].wsi_lunarg_enabled = false;
    for (i = 0; i < pCreateInfo->extensionCount; i++) {
        if (strcmp(pCreateInfo->pEnabledExtensions[i].name, VK_WSI_LUNARG_EXTENSION_NAME) == 0)
            deviceExtMap[pDisp].wsi_lunarg_enabled = true;
    }
}

// Indicate device or instance dispatch table type
typedef enum _DispTableType
{
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;

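// Look up the debug_report_data associated with a dispatchable object (device-side path)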
debug_report_data *mdd(VkObject object)
{
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    assert(my_data->report_data != NULL);
    return my_data->report_data;
}

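// Look up the debug_report_data associated with a VkInstance (instance-side path)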
debug_report_data *mid(VkInstance object)
{
//  return mdd((VkObject) object);
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    assert(my_data->report_data != NULL);
    return my_data->report_data;
}

// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory mem;
    struct _OT_MEM_INFO *pNextMI;
    struct _OT_MEM_INFO *pPrevMI;

} OT_MEM_INFO;

// Track Queue information
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO *pMemRefList;
    struct _OT_QUEUE_INFO *pNextQI;
    uint32_t queueNodeIndex;
    VkQueue queue;
    uint32_t refCount;
} OT_QUEUE_INFO;

// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;

// Convert an object type enum to an object type array index
static uint32_t
objTypeToIndex(
    uint32_t objType)
{
    uint32_t index = objType;
    if (objType > VK_OBJECT_TYPE_END_RANGE) {
        // These come from vk_wsi_lunarg.h; rebase them onto the end of the core range
        index = (index - (VK_WSI_LUNARG_EXTENSION_NUMBER * -1000)) + VK_OBJECT_TYPE_END_RANGE;
    }
    return index;
}

// Validate that object is in the object map
static void
validate_object(
    const VkObject dispatchable_object,
    const VkObject object)
{
    if (objMap.find(object) == objMap.end()) {
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, object, 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
            "Invalid Object %p", object);
    }
}

// Validate that object parameter matches designated object type
static void
validateObjectType(
    VkObject      dispatchable_object,
    const char   *apiName,
    VkObjectType  objType,
    VkObject      object)
{
    if (objMap.find(object) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[object];
        // Found our object, check type
        if (strcmp(string_VkObjectType(pNode->objType), string_VkObjectType(objType)) != 0) {
            log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, objType, object, 0, OBJTRACK_OBJECT_TYPE_MISMATCH, "OBJTRACK",
                "ERROR: Object Parameter Type %s does not match designated type %s", string_VkObjectType(pNode->objType), string_VkObjectType(objType));
        }
    }
}

// Add new queue to head of global queue list
static void
addQueueInfo(
    uint32_t queueNodeIndex,
    VkQueue  queue)
{
    OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;

    if (pQueueInfo != NULL) {
        memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
        pQueueInfo->queue = queue;
        pQueueInfo->queueNodeIndex = queueNodeIndex;
        pQueueInfo->pNextQI = g_pQueueInfo;
        g_pQueueInfo = pQueueInfo;
    }
    else {
        log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
            "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
    }
}

// Destroy memRef lists and free all memory
static void
destroyQueueMemRefLists(void)
{
    OT_QUEUE_INFO *pQueueInfo    = g_pQueueInfo;
    OT_QUEUE_INFO *pDelQueueInfo = NULL;
    while (pQueueInfo != NULL) {
        OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
        while (pMemInfo != NULL) {
            OT_MEM_INFO *pDelMemInfo = pMemInfo;
            pMemInfo = pMemInfo->pNextMI;
            delete pDelMemInfo;
        }
        pDelQueueInfo = pQueueInfo;
        pQueueInfo = pQueueInfo->pNextQI;
        delete pDelQueueInfo;
    }
    g_pQueueInfo = pQueueInfo;
}

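// Record a newly created object in the global map and bump the per-type and total counts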
static void
create_obj(
    VkObject     dispatchable_object,
    VkObject     vkObj,
    VkObjectType objType)
{
    log_msg(mdd(dispatchable_object), VK_DBG_REPORT_INFO_BIT, objType, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkObjectType(objType),
        reinterpret_cast<VkUintPtrLeast64>(vkObj));

    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->vkObj   = vkObj;
    pNewObjNode->objType = objType;
    pNewObjNode->status  = OBJSTATUS_NONE;
    objMap[vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

// Parse global list to find obj type, then remove obj from obj type list, finally
// remove obj from global list
static void
destroy_obj(
    VkObject dispatchable_object,
    VkObject vkObj)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        uint32_t objIndex = objTypeToIndex(pNode->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[objIndex] > 0);
        numObjs[objIndex]--;
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_INFO_BIT, pNode->objType, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
            "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%lu total objs remain & %lu %s objs).",
            string_VkObjectType(pNode->objType), reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj), numTotalObjs, numObjs[objIndex],
            string_VkObjectType(pNode->objType));

        delete pNode;
        objMap.erase(vkObj);
    } else {
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
            "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
            reinterpret_cast<VkUintPtrLeast64>(vkObj));
    }
}

// Set selected flag state for an object node
static void
set_status(
    VkObject          dispatchable_object,
    VkObject          vkObj,
    VkObjectType      objType,
    ObjectStatusFlags status_flag)
{
    if (vkObj != VK_NULL_HANDLE) {
        if (objMap.find(vkObj) != objMap.end()) {
            OBJTRACK_NODE* pNode = objMap[vkObj];
            pNode->status |= status_flag;
            return;
        }
        else {
            // If we do not find it print an error
            log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
                "Unable to set status for non-existent object 0x%" PRIxLEAST64 " of %s type",
                reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
        }
    }
}

// Reset selected flag state for an object node
static void
reset_status(
    VkObject          dispatchable_object,
    VkObject          vkObj,
    VkObjectType      objType,
    ObjectStatusFlags status_flag)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        pNode->status &= ~status_flag;
        return;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, objType, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
            "Unable to reset status for non-existent object 0x%" PRIxLEAST64 " of %s type",
            reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
    }
}

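// Cache the physical device's queue properties so queue capability flags can be validated later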
static void
setGpuQueueInfoState(
    uint32_t  count,
    void     *pData)
{
    queueCount = count;
    queueInfo = (VkPhysicalDeviceQueueProperties*)realloc((void*)queueInfo, count * sizeof(VkPhysicalDeviceQueueProperties));
    if (queueInfo != NULL) {
        memcpy(queueInfo, pData, count * sizeof(VkPhysicalDeviceQueueProperties));
    }
}

// Check Queue type flags for selected queue operations
static void
validateQueueFlags(
    VkQueue     queue,
    const char *function)
{
    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
    while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
        pQueueInfo = pQueueInfo->pNextQI;
    }
    if (pQueueInfo != NULL) {
        if (queueInfo == NULL) {
            log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Attempting %s on a possibly non-memory-management capable queue -- VK_QUEUE_SPARSE_MEMMGR_BIT not known", function);
        } else if ((queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_MEMMGR_BIT) == 0) {
            log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_MEMMGR_BIT not set", function);
        }
    }
}

// Check object status for selected flag state
static bool32_t
validate_status(
    VkObject           dispatchable_object,
    VkObject           vkObj,
    VkObjectType       objType,
    ObjectStatusFlags  status_mask,
    ObjectStatusFlags  status_flag,
    VkFlags            msg_flags,
    OBJECT_TRACK_ERROR error_code,
    const char        *fail_msg)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        if ((pNode->status & status_mask) != status_flag) {
            log_msg(mdd(dispatchable_object), msg_flags, pNode->objType, vkObj, 0, error_code, "OBJTRACK",
                "OBJECT VALIDATION WARNING: %s object 0x%" PRIxLEAST64 ": %s", string_VkObjectType(objType),
                reinterpret_cast<VkUintPtrLeast64>(vkObj), fail_msg);
            return VK_FALSE;
        }
        return VK_TRUE;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), msg_flags, (VkObjectType) 0, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
            "Unable to obtain status for non-existent object 0x%" PRIxLEAST64 " of %s type",
            reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
        return VK_FALSE;
    }
}

#include "vk_dispatch_table_helper.h"
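// Read layer settings (report flags, debug action, optional log file), register the logging callback, and set up the global lock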
static void
initObjectTracker(
    layer_data *my_data)
{
    uint32_t report_flags = 0;
    uint32_t debug_action = 0;
    FILE *log_output = NULL;
    const char *option_str;
    // initialize ObjectTracker options
    report_flags = getLayerOptionFlags("ObjectTrackerReportFlags", 0);
    getLayerOptionEnum("ObjectTrackerDebugAction", (uint32_t *) &debug_action);

    if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
    {
        option_str = getLayerOption("ObjectTrackerLogFilename");
        if (option_str) {
            log_output = fopen(option_str, "w");
        }
        if (log_output == NULL) {
            log_output = stdout;
        }

        layer_create_msg_callback(my_data->report_data, report_flags, log_callback, (void *) log_output, &my_data->logging_callback);
    }

    if (!objLockInitialized)
    {
        // TODO/TBD: Need to delete this mutex sometime. How??? One
        // suggestion is to call this during vkCreateInstance(), and then we
        // can clean it up during vkDestroyInstance(). However, that requires
        // that the layer have per-instance locks. We need to come back and
        // address this soon.
        loader_platform_thread_create_mutex(&objLock);
        objLockInitialized = 1;
    }
}


//
// Non-auto-generated API functions called by generated code
//

VkResult
explicit_CreateInstance(
    const VkInstanceCreateInfo *pCreateInfo,
    VkInstance                 *pInstance)
{
    loader_platform_thread_lock_mutex(&objLock);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, *pInstance);
    VkResult result = pInstanceTable->CreateInstance(pCreateInfo, pInstance);

    if (result == VK_SUCCESS) {
        layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
        my_data->report_data = debug_report_create_instance(
                                   pInstanceTable,
                                   *pInstance,
                                   pCreateInfo->extensionCount,
                                   pCreateInfo->pEnabledExtensions);

        initObjectTracker(my_data);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_DestroyInstance(
    VkInstance instance)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(instance, instance);

    destroy_obj(instance, instance);
    // Report any remaining objects in LL
    for (auto it = objMap.begin(); it != objMap.end(); ++it) {
        OBJTRACK_NODE* pNode = it->second;
        if ((pNode->objType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) || (pNode->objType == VK_OBJECT_TYPE_QUEUE)) {
            // Cannot destroy physical device so ignore
        } else {
            log_msg(mid(instance), VK_DBG_REPORT_ERROR_BIT, pNode->objType, pNode->vkObj, 0, OBJTRACK_OBJECT_LEAK, "OBJTRACK",
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.", string_VkObjectType(pNode->objType),
                reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj));
        }
    }

    dispatch_key key = get_dispatch_key(instance);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, instance);
    VkResult result = pInstanceTable->DestroyInstance(instance);

    // Clean up logging callback, if any
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    if (my_data->logging_callback) {
        layer_destroy_msg_callback(my_data->report_data, my_data->logging_callback);
    }

    layer_debug_report_destroy_instance(mid(instance));
    layer_data_map.erase(pInstanceTable);

    ObjectTracker_instance_table_map.erase(key);
    assert(ObjectTracker_instance_table_map.size() == 0 && "Should not have any instance mappings hanging around");

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_GetPhysicalDeviceQueueProperties(
    VkPhysicalDevice                 gpu,
    uint32_t                         count,
    VkPhysicalDeviceQueueProperties *pProperties)
{
    VkResult result = get_dispatch_table(ObjectTracker_instance_table_map, gpu)->GetPhysicalDeviceQueueProperties(gpu, count, pProperties);

    loader_platform_thread_lock_mutex(&objLock);
    setGpuQueueInfoState(count, pProperties);
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_CreateDevice(
    VkPhysicalDevice          gpu,
    const VkDeviceCreateInfo *pCreateInfo,
    VkDevice                 *pDevice)
{
    loader_platform_thread_lock_mutex(&objLock);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, gpu);
    VkResult result = pInstanceTable->CreateDevice(gpu, pCreateInfo, pDevice);
    if (result == VK_SUCCESS) {
        layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
        //// VkLayerDispatchTable *pTable = get_dispatch_table(ObjectTracker_device_table_map, *pDevice);
        layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
        my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
        create_obj(gpu, *pDevice, VK_OBJECT_TYPE_DEVICE);
    }

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_DestroyDevice(
    VkDevice device)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    destroy_obj(device, device);
    // Report any remaining objects
    for (auto it = objMap.begin(); it != objMap.end(); ++it) {
        OBJTRACK_NODE* pNode = it->second;
        if ((pNode->objType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) || (pNode->objType == VK_OBJECT_TYPE_QUEUE)) {
            // Cannot destroy physical device so ignore
        } else {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_PHYSICAL_DEVICE, device, 0, OBJTRACK_OBJECT_LEAK, "OBJTRACK",
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.", string_VkObjectType(pNode->objType),
                reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj));
        }
    }
    // Clean up Queue's MemRef Linked Lists
    destroyQueueMemRefLists();

    loader_platform_thread_unlock_mutex(&objLock);

    dispatch_key key = get_dispatch_key(device);
    VkLayerDispatchTable *pDisp = get_dispatch_table(ObjectTracker_device_table_map, device);
    VkResult result = pDisp->DestroyDevice(device);
    deviceExtMap.erase(pDisp);
    ObjectTracker_device_table_map.erase(key);
    assert(ObjectTracker_device_table_map.size() == 0 && "Should not have any instance mappings hanging around");

    return result;
}

VkResult
explicit_GetDeviceQueue(
    VkDevice device,
    uint32_t queueNodeIndex,
    uint32_t queueIndex,
    VkQueue *pQueue)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);

    loader_platform_thread_lock_mutex(&objLock);
    addQueueInfo(queueNodeIndex, *pQueue);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_QueueSubmit(
    VkQueue            queue,
    uint32_t           cmdBufferCount,
    const VkCmdBuffer *pCmdBuffers,
    VkFence            fence)
{
    loader_platform_thread_lock_mutex(&objLock);
    set_status(queue, fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED);
    // TODO: Fix for updated memory reference mechanism
    // validate_memory_mapping_status(pMemRefs, memRefCount);
    // validate_mem_ref_count(memRefCount);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);

    return result;
}

VkResult
explicit_MapMemory(
    VkDevice       device,
    VkDeviceMemory mem,
    VkDeviceSize   offset,
    VkDeviceSize   size,
    VkFlags        flags,
    void         **ppData)
{
    loader_platform_thread_lock_mutex(&objLock);
    set_status(device, mem, VK_OBJECT_TYPE_DEVICE_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);

    return result;
}

VkResult
explicit_UnmapMemory(
    VkDevice       device,
    VkDeviceMemory mem)
{
    loader_platform_thread_lock_mutex(&objLock);
    reset_status(device, mem, VK_OBJECT_TYPE_DEVICE_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->UnmapMemory(device, mem);

    return result;
}

VkResult
explicit_DestroyObject(
    VkDevice     device,
    VkObjectType objType,
    VkObject     object)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    validate_object(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->DestroyObject(device, objType, object);

    loader_platform_thread_lock_mutex(&objLock);
    validateObjectType(device, "vkDestroyObject", objType, object);
    destroy_obj(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_GetObjectMemoryRequirements(
    VkDevice              device,
    VkObjectType          objType,
    VkObject              object,
    VkMemoryRequirements *pMemoryRequirements)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateObjectType(device, "vkGetObjectMemoryRequirements", objType, object);
    validate_object(device, device);
    validate_object(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetObjectMemoryRequirements(device, objType, object, pMemoryRequirements);

    return result;
}

VkResult
explicit_QueueBindSparseBufferMemory(
    VkQueue        queue,
    VkBuffer       buffer,
    VkDeviceSize   rangeOffset,
    VkDeviceSize   rangeSize,
    VkDeviceMemory mem,
    VkDeviceSize   memOffset)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparseBufferMemory");
    validate_object(queue, buffer);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueBindSparseBufferMemory(queue, buffer, rangeOffset, rangeSize, mem, memOffset);
    return result;
}

VkResult
explicit_QueueBindSparseImageMemory(
    VkQueue                      queue,
    VkImage                      image,
    const VkImageMemoryBindInfo *pBindInfo,
    VkDeviceMemory               mem,
    VkDeviceSize                 memOffset)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparseImageMemory");
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueBindSparseImageMemory(queue, image, pBindInfo, mem, memOffset);
    return result;
}

VkResult
explicit_GetFenceStatus(
    VkDevice device,
    VkFence  fence)
{
    loader_platform_thread_lock_mutex(&objLock);
    // Warn if submitted_flag is not set
    validate_status(device, fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED, OBJSTATUS_FENCE_IS_SUBMITTED,
        VK_DBG_REPORT_ERROR_BIT, OBJTRACK_INVALID_FENCE, "Status Requested for Unsubmitted Fence");
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetFenceStatus(device, fence);

    return result;
}

VkResult
explicit_WaitForFences(
    VkDevice       device,
    uint32_t       fenceCount,
    const VkFence *pFences,
    bool32_t       waitAll,
    uint64_t       timeout)
{
    loader_platform_thread_lock_mutex(&objLock);
    // Warn if waiting on unsubmitted fence
    for (uint32_t i = 0; i < fenceCount; i++) {
        validate_status(device, pFences[i], VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED, OBJSTATUS_FENCE_IS_SUBMITTED,
            VK_DBG_REPORT_ERROR_BIT, OBJTRACK_INVALID_FENCE, "Waiting for Unsubmitted Fence");
    }
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    return result;
}

VkResult
explicit_AllocDescriptorSets(
    VkDevice                     device,
    VkDescriptorPool             descriptorPool,
    VkDescriptorSetUsage         setUsage,
    uint32_t                     count,
    const VkDescriptorSetLayout *pSetLayouts,
    VkDescriptorSet             *pDescriptorSets,
    uint32_t                    *pCount)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    validate_object(device, descriptorPool);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->AllocDescriptorSets(
        device, descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < *pCount; i++) {
        create_obj(device, pDescriptorSets[i], VK_OBJECT_TYPE_DESCRIPTOR_SET);
    }
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_DestroySwapChainWSI(
    VkSwapChainWSI swapChain)
{
    loader_platform_thread_lock_mutex(&objLock);
    destroy_obj(swapChain, swapChain);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, swapChain)->DestroySwapChainWSI(swapChain);

    return result;
}

VkResult
explicit_FreeMemory(
    VkDevice       device,
    VkDeviceMemory mem)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->FreeMemory(device, mem);

    loader_platform_thread_lock_mutex(&objLock);
    destroy_obj(device, mem);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}


// ObjectTracker Extensions

uint64_t
objTrackGetObjectsCount(
    VkDevice device)
{
    return numTotalObjs;
}

VkResult
objTrackGetObjects(
    VkDevice       device,
    uint64_t       objCount,
    OBJTRACK_NODE *pObjNodeArray)
{
    // Check the count first thing
    uint64_t maxObjCount = numTotalObjs;
    if (objCount > maxObjCount) {
        log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_OBJCOUNT_MAX_EXCEEDED, "OBJTRACK",
            "OBJ ERROR : Received objTrackGetObjects() request for %lu objs, but there are only %lu total objs", objCount, maxObjCount);
        return VK_ERROR_INVALID_VALUE;
    }
    auto it = objMap.begin();
    for (uint64_t i = 0; i < objCount; i++) {
        if (objMap.end() == it) {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "OBJ INTERNAL ERROR : Ran out of objs! Should have %lu, but only copied %lu and not the requested %lu.", maxObjCount, i, objCount);
            return VK_ERROR_UNKNOWN;
        }
        memcpy(&pObjNodeArray[i], it->second, sizeof(OBJTRACK_NODE));
        ++it;
    }
    return VK_SUCCESS;
}

uint64_t
objTrackGetObjectsOfTypeCount(
    VkDevice     device,
    VkObjectType type)
{
    return numObjs[objTypeToIndex(type)];
}

VkResult
objTrackGetObjectsOfType(
    VkDevice       device,
    VkObjectType   type,
    uint64_t       objCount,
    OBJTRACK_NODE *pObjNodeArray)
{
    // Check the count first thing
    uint64_t maxObjCount = numObjs[objTypeToIndex(type)];
    if (objCount > maxObjCount) {
        log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_OBJCOUNT_MAX_EXCEEDED, "OBJTRACK",
            "OBJ ERROR : Received objTrackGetObjectsOfType() request for %lu objs, but there are only %lu objs of type %s",
            objCount, maxObjCount, string_VkObjectType(type));
        return VK_ERROR_INVALID_VALUE;
    }
    auto it = objMap.begin();
    for (uint64_t i = 0; i < objCount; i++) {
        // Get next object of correct type
        while ((objMap.end() != it) && (it->second->objType != type))
            ++it;
        if (objMap.end() == it) {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "OBJ INTERNAL ERROR : Ran out of %s objs! Should have %lu, but only copied %lu and not the requested %lu.",
                string_VkObjectType(type), maxObjCount, i, objCount);
            return VK_ERROR_UNKNOWN;
        }
        memcpy(&pObjNodeArray[i], it->second, sizeof(OBJTRACK_NODE));
        ++it;
    }
    return VK_SUCCESS;
}