/*
 * Vulkan
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "vkLayer.h"
#include "vk_enum_string_helper.h"

// Object Tracker ERROR codes
typedef enum _OBJECT_TRACK_ERROR
{
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_DESTROY_OBJECT_FAILED,    // Couldn't find object to be destroyed
    OBJTRACK_OBJECT_TYPE_MISMATCH,     // Object did not match corresponding Object Type
    OBJTRACK_OBJECT_LEAK,              // OBJECT was not correctly freed/destroyed
    OBJTRACK_OBJCOUNT_MAX_EXCEEDED,    // Request for Object data in excess of max obj count
    OBJTRACK_INVALID_FENCE,            // Requested status of unsubmitted fence object
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
} OBJECT_TRACK_ERROR;

// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits
{
    OBJSTATUS_NONE                     = 0x00000000, // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED       = 0x00000001, // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND           = 0x00000002, // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND             = 0x00000004, // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND        = 0x00000008, // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND      = 0x00000010, // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED           = 0x00000020, // Memory object is currently mapped
} ObjectStatusFlagBits;
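
// Usage sketch (illustrative only): these bits are OR'd into OBJTRACK_NODE::status by
// set_status() and cleared by reset_status() further below, e.g.
//     pNode->status |= OBJSTATUS_FENCE_IS_SUBMITTED;    // marked when the fence is submitted
//     pNode->status &= ~OBJSTATUS_GPU_MEM_MAPPED;       // cleared when the memory is unmapped
// validate_status() then tests (status & status_mask) against the expected flag value.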

typedef struct _OBJTRACK_NODE {
    VkObject          vkObj;
    VkObjectType      objType;
    ObjectStatusFlags status;
} OBJTRACK_NODE;

// prototype for extension functions
uint64_t objTrackGetObjectCount(VkDevice device);
VkResult objTrackGetObjects(VkDevice device, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkObjectType type);
VkResult objTrackGetObjectsOfType(VkDevice, VkObjectType type, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);
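//
// Usage sketch for the prototypes above (illustrative only; assumes the caller has already
// retrieved these entry points from the layer -- retrieval is not shown here):
//     uint64_t count = objTrackGetObjectCount(device);
//     OBJTRACK_NODE *pNodes = new OBJTRACK_NODE[count];
//     VkResult res = objTrackGetObjects(device, count, pNodes);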

// Func ptr typedefs
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef VkResult (*OBJ_TRACK_GET_OBJECTS)(VkDevice, VkObjectType, uint64_t, OBJTRACK_NODE*);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkObjectType);
typedef VkResult (*OBJ_TRACK_GET_OBJECTS_OF_TYPE)(VkDevice, VkObjectType, uint64_t, OBJTRACK_NODE*);

typedef struct _layer_data {
    debug_report_data *report_data;
    //TODO: put instance data here
    VkDbgMsgCallback   logging_callback;
} layer_data;

static std::unordered_map<void*, layer_data *> layer_data_map;
static device_table_map   ObjectTracker_device_table_map;
static instance_table_map ObjectTracker_instance_table_map;

static long long unsigned int object_track_index = 0;
static int objLockInitialized = 0;
static loader_platform_thread_mutex objLock;

// Objects stored in a global map w/ struct containing basic info
unordered_map<VkObject, OBJTRACK_NODE*> objMap;

#define NUM_OBJECT_TYPES (VK_NUM_OBJECT_TYPE + (VK_OBJECT_TYPE_SWAP_CHAIN_WSI - VK_OBJECT_TYPE_DISPLAY_WSI))

static uint64_t numObjs[NUM_OBJECT_TYPES] = {0};
static uint64_t numTotalObjs = 0;
static VkPhysicalDeviceQueueProperties *queueInfo = NULL;
static uint32_t queueCount = 0;

template layer_data *get_my_data_ptr<layer_data>(
        void *data_key, std::unordered_map<void *, layer_data *> &data_map);

//
// Internal Object Tracker Functions
//

struct devExts {
    bool wsi_lunarg_enabled;
};

static std::unordered_map<void *, struct devExts> deviceExtMap;

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
{
    uint32_t i, ext_idx;
    VkLayerDispatchTable *pDisp = device_dispatch_table(device);
    deviceExtMap[pDisp].wsi_lunarg_enabled = false;
    for (i = 0; i < pCreateInfo->extensionCount; i++) {
        if (strcmp(pCreateInfo->pEnabledExtensions[i].name, VK_WSI_LUNARG_EXTENSION_NAME) == 0)
            deviceExtMap[pDisp].wsi_lunarg_enabled = true;
    }
}

// Indicate device or instance dispatch table type
typedef enum _DispTableType
{
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;

debug_report_data *mdd(VkObject object)
{
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

debug_report_data *mid(VkInstance object)
{
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory       mem;
    struct _OT_MEM_INFO *pNextMI;
    struct _OT_MEM_INFO *pPrevMI;
} OT_MEM_INFO;

// Track Queue information
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO           *pMemRefList;
    struct _OT_QUEUE_INFO *pNextQI;
    uint32_t               queueNodeIndex;
    VkQueue                queue;
    uint32_t               refCount;
} OT_QUEUE_INFO;

// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;

// Convert an object type enum to an object type array index
static uint32_t
objTypeToIndex(
    uint32_t objType)
{
    uint32_t index = objType;
    if (objType > VK_OBJECT_TYPE_END_RANGE) {
        // These come from vk_wsi_lunarg.h, rebase
        index = (index - (VK_WSI_LUNARG_EXTENSION_NUMBER * -1000)) + VK_OBJECT_TYPE_END_RANGE;
    }
    return index;
}

// Validate that object is in the object map
static void
validate_object(
    const VkObject dispatchable_object,
    const VkObject object)
{
    if (objMap.find(object) == objMap.end()) {
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, object, 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
            "Invalid Object %p", object);
    }
}

// Validate that object parameter matches designated object type
static void
validateObjectType(
    VkObject      dispatchable_object,
    const char   *apiName,
    VkObjectType  objType,
    VkObject      object)
{
    if (objMap.find(object) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[object];
        // Found our object, check type
        if (strcmp(string_VkObjectType(pNode->objType), string_VkObjectType(objType)) != 0) {
            log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, objType, object, 0, OBJTRACK_OBJECT_TYPE_MISMATCH, "OBJTRACK",
                "ERROR: Object Parameter Type %s does not match designated type %s", string_VkObjectType(pNode->objType), string_VkObjectType(objType));
        }
    }
}

// Add new queue to head of global queue list
static void
addQueueInfo(
    uint32_t queueNodeIndex,
    VkQueue  queue)
{
    OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;

    if (pQueueInfo != NULL) {
        memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
        pQueueInfo->queue = queue;
        pQueueInfo->queueNodeIndex = queueNodeIndex;
        pQueueInfo->pNextQI = g_pQueueInfo;
        g_pQueueInfo = pQueueInfo;
    }
    else {
        log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
            "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
    }
}

// Destroy memRef lists and free all memory
static void
destroyQueueMemRefLists(void)
{
    OT_QUEUE_INFO *pQueueInfo    = g_pQueueInfo;
    OT_QUEUE_INFO *pDelQueueInfo = NULL;
    while (pQueueInfo != NULL) {
        OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
        while (pMemInfo != NULL) {
            OT_MEM_INFO *pDelMemInfo = pMemInfo;
            pMemInfo = pMemInfo->pNextMI;
            delete pDelMemInfo;
        }
        pDelQueueInfo = pQueueInfo;
        pQueueInfo = pQueueInfo->pNextQI;
        delete pDelQueueInfo;
    }
    g_pQueueInfo = pQueueInfo;
}

static void
create_obj(
    VkObject     dispatchable_object,
    VkObject     vkObj,
    VkObjectType objType)
{
    log_msg(mdd(dispatchable_object), VK_DBG_REPORT_INFO_BIT, objType, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkObjectType(objType),
        reinterpret_cast<VkUintPtrLeast64>(vkObj));

    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->vkObj   = vkObj;
    pNewObjNode->objType = objType;
    pNewObjNode->status  = OBJSTATUS_NONE;
    objMap[vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

// Parse global list to find obj type, then remove obj from obj type list, finally
// remove obj from global list
static void
destroy_obj(
    VkObject dispatchable_object,
    VkObject vkObj)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        uint32_t objIndex = objTypeToIndex(pNode->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[objIndex] > 0);
        numObjs[objIndex]--;
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_INFO_BIT, pNode->objType, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
            "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%lu total objs remain & %lu %s objs).",
            string_VkObjectType(pNode->objType), reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj), numTotalObjs, numObjs[objIndex],
            string_VkObjectType(pNode->objType));

        delete pNode;
        objMap.erase(vkObj);
    } else {
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
            "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
            reinterpret_cast<VkUintPtrLeast64>(vkObj));
    }
}

// Set selected flag state for an object node
static void
set_status(
    VkObject          dispatchable_object,
    VkObject          vkObj,
    VkObjectType      objType,
    ObjectStatusFlags status_flag)
{
    if (vkObj != VK_NULL_HANDLE) {
        if (objMap.find(vkObj) != objMap.end()) {
            OBJTRACK_NODE* pNode = objMap[vkObj];
            pNode->status |= status_flag;
            return;
        }
        else {
            // If we do not find it print an error
            log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
                "Unable to set status for non-existent object 0x%" PRIxLEAST64 " of %s type",
                reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
        }
    }
}

// Reset selected flag state for an object node
static void
reset_status(
    VkObject          dispatchable_object,
    VkObject          vkObj,
    VkObjectType      objType,
    ObjectStatusFlags status_flag)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        pNode->status &= ~status_flag;
        return;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, objType, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
            "Unable to reset status for non-existent object 0x%" PRIxLEAST64 " of %s type",
            reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
    }
}

static void
setGpuQueueInfoState(
    uint32_t  count,
    void     *pData)
{
    queueCount = count;
    queueInfo  = (VkPhysicalDeviceQueueProperties*)realloc((void*)queueInfo, count * sizeof(VkPhysicalDeviceQueueProperties));
    if (queueInfo != NULL) {
        memcpy(queueInfo, pData, count * sizeof(VkPhysicalDeviceQueueProperties));
    }
}

// Check Queue type flags for selected queue operations
static void
validateQueueFlags(
    VkQueue     queue,
    const char *function)
{
    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
    while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
        pQueueInfo = pQueueInfo->pNextQI;
    }
    if (pQueueInfo != NULL) {
        if (queueInfo == NULL) {
            log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Attempting %s on a possibly non-memory-management capable queue -- VK_QUEUE_SPARSE_MEMMGR_BIT not known", function);
        } else if ((queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_MEMMGR_BIT) == 0) {
            log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_MEMMGR_BIT not set", function);
        }
    }
}

// Check object status for selected flag state
static bool32_t
validate_status(
    VkObject           dispatchable_object,
    VkObject           vkObj,
    VkObjectType       objType,
    ObjectStatusFlags  status_mask,
    ObjectStatusFlags  status_flag,
    VkFlags            msg_flags,
    OBJECT_TRACK_ERROR error_code,
    const char        *fail_msg)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        if ((pNode->status & status_mask) != status_flag) {
            char str[1024];
            log_msg(mdd(dispatchable_object), msg_flags, pNode->objType, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "OBJECT VALIDATION WARNING: %s object 0x%" PRIxLEAST64 ": %s", string_VkObjectType(objType),
                reinterpret_cast<VkUintPtrLeast64>(vkObj), fail_msg);
            return VK_FALSE;
        }
        return VK_TRUE;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), msg_flags, (VkObjectType) 0, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
            "Unable to obtain status for non-existent object 0x%" PRIxLEAST64 " of %s type",
            reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
        return VK_FALSE;
    }
}

#include "vk_dispatch_table_helper.h"
static void
initObjectTracker(
    layer_data *my_data)
{
    uint32_t report_flags = 0;
    uint32_t debug_action = 0;
    FILE *log_output = NULL;
    const char *option_str;
    // initialize ObjectTracker options
    report_flags = getLayerOptionFlags("ObjectTrackerReportFlags", 0);
    getLayerOptionEnum("ObjectTrackerDebugAction", (uint32_t *) &debug_action);

    if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
    {
        option_str = getLayerOption("ObjectTrackerLogFilename");
        if (option_str) {
            log_output = fopen(option_str, "w");
        }
        if (log_output == NULL) {
            log_output = stdout;
        }

        layer_create_msg_callback(my_data->report_data, report_flags, log_callback, (void *) log_output, &my_data->logging_callback);
    }

    if (!objLockInitialized)
    {
        // TODO/TBD: Need to delete this mutex sometime. How??? One
        // suggestion is to call this during vkCreateInstance(), and then we
        // can clean it up during vkDestroyInstance(). However, that requires
        // that the layer have per-instance locks. We need to come back and
        // address this soon.
        loader_platform_thread_create_mutex(&objLock);
        objLockInitialized = 1;
    }
}

//
// Non-auto-generated API functions called by generated code
//

VkResult
explicit_CreateInstance(
    const VkInstanceCreateInfo *pCreateInfo,
    VkInstance                 *pInstance)
{
    loader_platform_thread_lock_mutex(&objLock);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, *pInstance);
    VkResult result = pInstanceTable->CreateInstance(pCreateInfo, pInstance);

    if (result == VK_SUCCESS) {
        layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
        my_data->report_data = debug_report_create_instance(
            pInstanceTable,
            *pInstance,
            pCreateInfo->extensionCount,
            pCreateInfo->pEnabledExtensions);

        initObjectTracker(my_data);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_DestroyInstance(
    VkInstance instance)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(instance, instance);

    destroy_obj(instance, instance);
    // Report any remaining objects in LL
    for (auto it = objMap.begin(); it != objMap.end(); ++it) {
        OBJTRACK_NODE* pNode = it->second;
        if ((pNode->objType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) || (pNode->objType == VK_OBJECT_TYPE_QUEUE)) {
            // Cannot destroy physical device so ignore
        } else {
            log_msg(mid(instance), VK_DBG_REPORT_ERROR_BIT, pNode->objType, pNode->vkObj, 0, OBJTRACK_OBJECT_LEAK, "OBJTRACK",
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.", string_VkObjectType(pNode->objType),
                reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj));
        }
    }

    dispatch_key key = get_dispatch_key(instance);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, instance);
    VkResult result = pInstanceTable->DestroyInstance(instance);

    // Clean up logging callback, if any
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    if (my_data->logging_callback) {
        layer_destroy_msg_callback(my_data->report_data, my_data->logging_callback);
    }

    layer_debug_report_destroy_instance(mid(instance));
    layer_data_map.erase(pInstanceTable);

    ObjectTracker_instance_table_map.erase(key);
    assert(ObjectTracker_instance_table_map.size() == 0 && "Should not have any instance mappings hanging around");

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}
VkResult
explicit_GetPhysicalDeviceQueueProperties(
    VkPhysicalDevice                 gpu,
    uint32_t                         count,
    VkPhysicalDeviceQueueProperties *pProperties)
{
    VkResult result = get_dispatch_table(ObjectTracker_instance_table_map, gpu)->GetPhysicalDeviceQueueProperties(gpu, count, pProperties);

    loader_platform_thread_lock_mutex(&objLock);
    setGpuQueueInfoState(count, pProperties);
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_CreateDevice(
    VkPhysicalDevice          gpu,
    const VkDeviceCreateInfo *pCreateInfo,
    VkDevice                 *pDevice)
{
    loader_platform_thread_lock_mutex(&objLock);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, gpu);
    VkResult result = pInstanceTable->CreateDevice(gpu, pCreateInfo, pDevice);
    if (result == VK_SUCCESS) {
        layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
        //// VkLayerDispatchTable *pTable = get_dispatch_table(ObjectTracker_device_table_map, *pDevice);
        layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
        my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
        create_obj(gpu, *pDevice, VK_OBJECT_TYPE_DEVICE);
    }

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_DestroyDevice(
    VkDevice device)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    destroy_obj(device, device);
    // Report any remaining objects
    for (auto it = objMap.begin(); it != objMap.end(); ++it) {
        OBJTRACK_NODE* pNode = it->second;
        if ((pNode->objType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) || (pNode->objType == VK_OBJECT_TYPE_QUEUE)) {
            // Cannot destroy physical device so ignore
        } else {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_PHYSICAL_DEVICE, device, 0, OBJTRACK_OBJECT_LEAK, "OBJTRACK",
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.", string_VkObjectType(pNode->objType),
                reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj));
        }
    }
    // Clean up Queue's MemRef Linked Lists
    destroyQueueMemRefLists();

    loader_platform_thread_unlock_mutex(&objLock);

    dispatch_key key = get_dispatch_key(device);
    VkLayerDispatchTable *pDisp = get_dispatch_table(ObjectTracker_device_table_map, device);
    VkResult result = pDisp->DestroyDevice(device);
    deviceExtMap.erase(pDisp);
    ObjectTracker_device_table_map.erase(key);
    assert(ObjectTracker_device_table_map.size() == 0 && "Should not have any device mappings hanging around");

    return result;
}

VkResult
explicit_GetDeviceQueue(
    VkDevice device,
    uint32_t queueNodeIndex,
    uint32_t queueIndex,
    VkQueue *pQueue)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);

    loader_platform_thread_lock_mutex(&objLock);
    addQueueInfo(queueNodeIndex, *pQueue);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_QueueSubmit(
    VkQueue            queue,
    uint32_t           cmdBufferCount,
    const VkCmdBuffer *pCmdBuffers,
    VkFence            fence)
{
    loader_platform_thread_lock_mutex(&objLock);
    set_status(queue, fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED);
    // TODO: Fix for updated memory reference mechanism
    // validate_memory_mapping_status(pMemRefs, memRefCount);
    // validate_mem_ref_count(memRefCount);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);

    return result;
}

VkResult
explicit_MapMemory(
    VkDevice       device,
    VkDeviceMemory mem,
    VkDeviceSize   offset,
    VkDeviceSize   size,
    VkFlags        flags,
    void         **ppData)
{
    loader_platform_thread_lock_mutex(&objLock);
    set_status(device, mem, VK_OBJECT_TYPE_DEVICE_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);

    return result;
}

VkResult
explicit_UnmapMemory(
    VkDevice       device,
    VkDeviceMemory mem)
{
    loader_platform_thread_lock_mutex(&objLock);
    reset_status(device, mem, VK_OBJECT_TYPE_DEVICE_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->UnmapMemory(device, mem);

    return result;
}

VkResult
explicit_DestroyObject(
    VkDevice     device,
    VkObjectType objType,
    VkObject     object)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    validate_object(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->DestroyObject(device, objType, object);

    loader_platform_thread_lock_mutex(&objLock);
    validateObjectType(device, "vkDestroyObject", objType, object);
    destroy_obj(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_GetObjectMemoryRequirements(
    VkDevice              device,
    VkObjectType          objType,
    VkObject              object,
    VkMemoryRequirements *pMemoryRequirements)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateObjectType(device, "vkGetObjectMemoryRequirements", objType, object);
    validate_object(device, device);
    validate_object(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetObjectMemoryRequirements(device, objType, object, pMemoryRequirements);

    return result;
}

VkResult
explicit_QueueBindSparseBufferMemory(
    VkQueue        queue,
    VkBuffer       buffer,
    VkDeviceSize   rangeOffset,
    VkDeviceSize   rangeSize,
    VkDeviceMemory mem,
    VkDeviceSize   memOffset)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparseBufferMemory");
    validate_object(queue, buffer);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueBindSparseBufferMemory(queue, buffer, rangeOffset, rangeSize, mem, memOffset);
    return result;
}

VkResult
explicit_QueueBindSparseImageMemory(
    VkQueue                      queue,
    VkImage                      image,
    const VkImageMemoryBindInfo *pBindInfo,
    VkDeviceMemory               mem,
    VkDeviceSize                 memOffset)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparseImageMemory");
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueBindSparseImageMemory(queue, image, pBindInfo, mem, memOffset);
    return result;
}

VkResult
explicit_GetFenceStatus(
    VkDevice device,
    VkFence  fence)
{
    loader_platform_thread_lock_mutex(&objLock);
    // Warn if submitted_flag is not set
    validate_status(device, fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED, OBJSTATUS_FENCE_IS_SUBMITTED,
        VK_DBG_REPORT_ERROR_BIT, OBJTRACK_INVALID_FENCE, "Status Requested for Unsubmitted Fence");
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetFenceStatus(device, fence);

    return result;
}

VkResult
explicit_WaitForFences(
    VkDevice       device,
    uint32_t       fenceCount,
    const VkFence *pFences,
    bool32_t       waitAll,
    uint64_t       timeout)
{
    loader_platform_thread_lock_mutex(&objLock);
    // Warn if waiting on unsubmitted fence
    for (uint32_t i = 0; i < fenceCount; i++) {
        validate_status(device, pFences[i], VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED, OBJSTATUS_FENCE_IS_SUBMITTED,
            VK_DBG_REPORT_ERROR_BIT, OBJTRACK_INVALID_FENCE, "Waiting for Unsubmitted Fence");
    }
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    return result;
}

VkResult
explicit_AllocDescriptorSets(
    VkDevice                     device,
    VkDescriptorPool             descriptorPool,
    VkDescriptorSetUsage         setUsage,
    uint32_t                     count,
    const VkDescriptorSetLayout *pSetLayouts,
    VkDescriptorSet             *pDescriptorSets,
    uint32_t                    *pCount)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    validate_object(device, descriptorPool);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->AllocDescriptorSets(
        device, descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < *pCount; i++) {
        create_obj(device, pDescriptorSets[i], VK_OBJECT_TYPE_DESCRIPTOR_SET);
    }
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_DestroySwapChainWSI(
    VkSwapChainWSI swapChain)
{
    loader_platform_thread_lock_mutex(&objLock);
    destroy_obj(swapChain, swapChain);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, swapChain)->DestroySwapChainWSI(swapChain);

    return result;
}

VkResult
explicit_FreeMemory(
    VkDevice       device,
    VkDeviceMemory mem)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->FreeMemory(device, mem);

    loader_platform_thread_lock_mutex(&objLock);
    destroy_obj(device, mem);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

// ObjectTracker Extensions

uint64_t
objTrackGetObjectsCount(
    VkDevice device)
{
    return numTotalObjs;
}

VkResult
objTrackGetObjects(
    VkDevice       device,
    uint64_t       objCount,
    OBJTRACK_NODE *pObjNodeArray)
{
    // Check the count first thing
    uint64_t maxObjCount = numTotalObjs;
    if (objCount > maxObjCount) {
        log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_OBJCOUNT_MAX_EXCEEDED, "OBJTRACK",
            "OBJ ERROR : Received objTrackGetObjects() request for %lu objs, but there are only %lu total objs", objCount, maxObjCount);
        return VK_ERROR_INVALID_VALUE;
    }
    auto it = objMap.begin();
    for (uint64_t i = 0; i < objCount; i++) {
        if (objMap.end() == it) {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "OBJ INTERNAL ERROR : Ran out of objs! Should have %lu, but only copied %lu and not the requested %lu.", maxObjCount, i, objCount);
            return VK_ERROR_UNKNOWN;
        }
        memcpy(&pObjNodeArray[i], it->second, sizeof(OBJTRACK_NODE));
        ++it;
    }
    return VK_SUCCESS;
}

uint64_t
objTrackGetObjectsOfTypeCount(
    VkDevice     device,
    VkObjectType type)
{
    return numObjs[type];
}

VkResult
objTrackGetObjectsOfType(
    VkDevice       device,
    VkObjectType   type,
    uint64_t       objCount,
    OBJTRACK_NODE *pObjNodeArray)
{
    // Check the count first thing
    uint64_t maxObjCount = numObjs[type];
    if (objCount > maxObjCount) {
        log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_OBJCOUNT_MAX_EXCEEDED, "OBJTRACK",
            "OBJ ERROR : Received objTrackGetObjects() request for %lu objs, but there are only %lu objs of type %s",
            objCount, maxObjCount, string_VkObjectType(type));
        return VK_ERROR_INVALID_VALUE;
    }
    auto it = objMap.begin();
    for (uint64_t i = 0; i < objCount; i++) {
        // Get next object of correct type
        while ((objMap.end() != it) && (it->second->objType != type))
            ++it;
        if (objMap.end() == it) {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "OBJ INTERNAL ERROR : Ran out of %s objs! Should have %lu, but only copied %lu and not the requested %lu.",
                string_VkObjectType(type), maxObjCount, i, objCount);
            return VK_ERROR_UNKNOWN;
        }
        memcpy(&pObjNodeArray[i], it->second, sizeof(OBJTRACK_NODE));
        // Advance past the object just copied so the next iteration finds the next match
        ++it;
    }
    return VK_SUCCESS;
}