/*
 * Vulkan
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "vkLayer.h"
#include "vk_enum_string_helper.h"

// Object Tracker ERROR codes
typedef enum _OBJECT_TRACK_ERROR
{
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_DESTROY_OBJECT_FAILED,    // Couldn't find object to be destroyed
    OBJTRACK_OBJECT_TYPE_MISMATCH,     // Object did not match corresponding Object Type
    OBJTRACK_OBJECT_LEAK,              // Object was not correctly freed/destroyed
    OBJTRACK_OBJCOUNT_MAX_EXCEEDED,    // Request for Object data in excess of max obj count
    OBJTRACK_INVALID_FENCE,            // Requested status of unsubmitted fence object
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
} OBJECT_TRACK_ERROR;

// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits
{
    OBJSTATUS_NONE                 = 0x00000000, // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED   = 0x00000001, // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND       = 0x00000002, // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND         = 0x00000004, // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND    = 0x00000008, // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND  = 0x00000010, // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED       = 0x00000020, // Memory object is currently mapped
} ObjectStatusFlagBits;

typedef struct _OBJTRACK_NODE {
    VkObject          vkObj;
    VkObjectType      objType;
    ObjectStatusFlags status;
} OBJTRACK_NODE;

// Prototypes for extension functions
uint64_t objTrackGetObjectsCount(VkDevice device);
VkResult objTrackGetObjects(VkDevice device, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice device, VkObjectType type);
VkResult objTrackGetObjectsOfType(VkDevice device, VkObjectType type, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);

// Function pointer typedefs
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef VkResult (*OBJ_TRACK_GET_OBJECTS)(VkDevice, uint64_t, OBJTRACK_NODE*);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkObjectType);
typedef VkResult (*OBJ_TRACK_GET_OBJECTS_OF_TYPE)(VkDevice, VkObjectType, uint64_t, OBJTRACK_NODE*);
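//
// Usage sketch (illustrative only): an application could resolve these entry
// points by name through the loader's GetProcAddr mechanism and snapshot the
// tracker's live objects. The exact query call below is an assumption, not
// something this layer defines:
//
//     OBJ_TRACK_GET_OBJECT_COUNT pfnGetCount =
//         (OBJ_TRACK_GET_OBJECT_COUNT) vkGetDeviceProcAddr(device, "objTrackGetObjectsCount");
//     OBJ_TRACK_GET_OBJECTS pfnGetObjects =
//         (OBJ_TRACK_GET_OBJECTS) vkGetDeviceProcAddr(device, "objTrackGetObjects");
//     uint64_t count = pfnGetCount(device);
//     OBJTRACK_NODE *pNodes = (OBJTRACK_NODE *) malloc(count * sizeof(OBJTRACK_NODE));
//     if ((pNodes != NULL) && (pfnGetObjects(device, count, pNodes) == VK_SUCCESS)) {
//         // pNodes[0..count-1] now hold {vkObj, objType, status} for each live object
//     }
//     free(pNodes);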

typedef struct _layer_data {
    debug_report_data *report_data;
    //TODO: put instance data here
    VkDbgMsgCallback   logging_callback;
} layer_data;

static std::unordered_map<void*, layer_data *> layer_data_map;
static device_table_map                        ObjectTracker_device_table_map;
static instance_table_map                      ObjectTracker_instance_table_map;

static long long unsigned int object_track_index = 0;
static int objLockInitialized = 0;
static loader_platform_thread_mutex objLock;

// Objects stored in a global map w/ struct containing basic info
unordered_map<VkObject, OBJTRACK_NODE*> objMap;

#define NUM_OBJECT_TYPES (VK_NUM_OBJECT_TYPE + (VK_OBJECT_TYPE_SWAP_CHAIN_WSI - VK_OBJECT_TYPE_DISPLAY_WSI))

static uint64_t                         numObjs[NUM_OBJECT_TYPES] = {0};
static uint64_t                         numTotalObjs              = 0;
static VkPhysicalDeviceQueueProperties *queueInfo                 = NULL;
static uint32_t                         queueCount                = 0;

template layer_data *get_my_data_ptr<layer_data>(
        void *data_key, std::unordered_map<void *, layer_data *> &data_map);

//
// Internal Object Tracker Functions
//

struct devExts {
    bool wsi_lunarg_enabled;
};

static std::unordered_map<void *, struct devExts> deviceExtMap;

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
{
    uint32_t i;
    VkLayerDispatchTable *pDisp = device_dispatch_table(device);
    deviceExtMap[pDisp].wsi_lunarg_enabled = false;
    for (i = 0; i < pCreateInfo->extensionCount; i++) {
        if (strcmp(pCreateInfo->pEnabledExtensions[i].name, VK_WSI_LUNARG_EXTENSION_NAME) == 0)
            deviceExtMap[pDisp].wsi_lunarg_enabled = true;
    }
}

// Indicate device or instance dispatch table type
typedef enum _DispTableType
{
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;

debug_report_data *mdd(VkObject object)
{
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

debug_report_data *mid(VkInstance object)
{
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    return my_data->report_data;
}

// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory       mem;
    struct _OT_MEM_INFO *pNextMI;
    struct _OT_MEM_INFO *pPrevMI;
} OT_MEM_INFO;

// Track Queue information
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO           *pMemRefList;
    struct _OT_QUEUE_INFO *pNextQI;
    uint32_t               queueNodeIndex;
    VkQueue                queue;
    uint32_t               refCount;
} OT_QUEUE_INFO;

// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;

// Convert an object type enum to an object type array index
static uint32_t
objTypeToIndex(
    uint32_t objType)
{
    uint32_t index = objType;
    if (objType > VK_OBJECT_TYPE_END_RANGE) {
        // These come from vk_wsi_lunarg.h; rebase them onto the end of the core object type range
        index = (index - (VK_WSI_LUNARG_EXTENSION_NUMBER * -1000)) + VK_OBJECT_TYPE_END_RANGE;
    }
    return index;
}

// Validate that object is in the object map
static void
validate_object(
    const VkObject dispatchable_object,
    const VkObject object)
{
    if (objMap.find(object) == objMap.end()) {
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, object, 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
            "Invalid Object %p", object);
    }
}

// Validate that object parameter matches designated object type
static void
validateObjectType(
    VkObject     dispatchable_object,
    const char  *apiName,
    VkObjectType objType,
    VkObject     object)
{
    if (objMap.find(object) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[object];
        // Found our object, check type
        if (strcmp(string_VkObjectType(pNode->objType), string_VkObjectType(objType)) != 0) {
            log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, objType, object, 0, OBJTRACK_OBJECT_TYPE_MISMATCH, "OBJTRACK",
                "ERROR: Object Parameter Type %s does not match designated type %s", string_VkObjectType(pNode->objType), string_VkObjectType(objType));
        }
    }
}

// Add new queue to head of global queue list
static void
addQueueInfo(
    uint32_t queueNodeIndex,
    VkQueue  queue)
{
    OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;

    if (pQueueInfo != NULL) {
        memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
        pQueueInfo->queue          = queue;
        pQueueInfo->queueNodeIndex = queueNodeIndex;
        pQueueInfo->pNextQI        = g_pQueueInfo;
        g_pQueueInfo               = pQueueInfo;
    }
    else {
        log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
            "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
    }
}

// Destroy memRef lists and free all memory
static void
destroyQueueMemRefLists(void)
{
    OT_QUEUE_INFO *pQueueInfo    = g_pQueueInfo;
    OT_QUEUE_INFO *pDelQueueInfo = NULL;
    while (pQueueInfo != NULL) {
        OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
        while (pMemInfo != NULL) {
            OT_MEM_INFO *pDelMemInfo = pMemInfo;
            pMemInfo = pMemInfo->pNextMI;
            delete pDelMemInfo;
        }
        pDelQueueInfo = pQueueInfo;
        pQueueInfo    = pQueueInfo->pNextQI;
        delete pDelQueueInfo;
    }
    g_pQueueInfo = pQueueInfo;
}

static void
create_obj(
    VkObject     dispatchable_object,
    VkObject     vkObj,
    VkObjectType objType)
{
    log_msg(mdd(dispatchable_object), VK_DBG_REPORT_INFO_BIT, objType, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64, object_track_index++, string_VkObjectType(objType),
        reinterpret_cast<VkUintPtrLeast64>(vkObj));

    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->vkObj   = vkObj;
    pNewObjNode->objType = objType;
    pNewObjNode->status  = OBJSTATUS_NONE;
    objMap[vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

// Parse global list to find obj type, then remove obj from obj type list, finally
// remove obj from global list
static void
destroy_obj(
    VkObject dispatchable_object,
    VkObject vkObj)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        uint32_t objIndex = objTypeToIndex(pNode->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[objIndex] > 0);
        numObjs[objIndex]--;
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_INFO_BIT, pNode->objType, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
            "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%lu total objs remain & %lu %s objs).",
            string_VkObjectType(pNode->objType), reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj), numTotalObjs, numObjs[objIndex],
            string_VkObjectType(pNode->objType));

        delete pNode;
        objMap.erase(vkObj);
    } else {
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
            "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
            reinterpret_cast<VkUintPtrLeast64>(vkObj));
    }
}

// Set selected flag state for an object node
static void
set_status(
    VkObject          dispatchable_object,
    VkObject          vkObj,
    VkObjectType      objType,
    ObjectStatusFlags status_flag)
{
    if (vkObj != VK_NULL_HANDLE) {
        if (objMap.find(vkObj) != objMap.end()) {
            OBJTRACK_NODE* pNode = objMap[vkObj];
            pNode->status |= status_flag;
            return;
        }
        else {
            // If we do not find it print an error
            log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
                "Unable to set status for non-existent object 0x%" PRIxLEAST64 " of %s type",
                reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
        }
    }
}

// Reset selected flag state for an object node
static void
reset_status(
    VkObject          dispatchable_object,
    VkObject          vkObj,
    VkObjectType      objType,
    ObjectStatusFlags status_flag)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        pNode->status &= ~status_flag;
        return;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, objType, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
            "Unable to reset status for non-existent object 0x%" PRIxLEAST64 " of %s type",
            reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
    }
}

static void
setGpuQueueInfoState(
    uint32_t  count,
    void     *pData)
{
    queueCount = count;
    queueInfo  = (VkPhysicalDeviceQueueProperties*)realloc((void*)queueInfo, count * sizeof(VkPhysicalDeviceQueueProperties));
    if (queueInfo != NULL) {
        memcpy(queueInfo, pData, count * sizeof(VkPhysicalDeviceQueueProperties));
    }
}

// Check Queue type flags for selected queue operations
static void
validateQueueFlags(
    VkQueue     queue,
    const char *function)
{
    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
    while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
        pQueueInfo = pQueueInfo->pNextQI;
    }
    if (pQueueInfo != NULL) {
        if (queueInfo == NULL) {
            // Queue family properties were never captured, so capability is unknown
            log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Attempting %s on a possibly non-memory-management capable queue -- VK_QUEUE_SPARSE_MEMMGR_BIT not known", function);
        } else if ((queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_MEMMGR_BIT) == 0) {
            log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_MEMMGR_BIT not set", function);
        }
    }
}

// Check object status for selected flag state
static bool32_t
validate_status(
    VkObject           dispatchable_object,
    VkObject           vkObj,
    VkObjectType       objType,
    ObjectStatusFlags  status_mask,
    ObjectStatusFlags  status_flag,
    VkFlags            msg_flags,
    OBJECT_TRACK_ERROR error_code,
    const char        *fail_msg)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        if ((pNode->status & status_mask) != status_flag) {
            log_msg(mdd(dispatchable_object), msg_flags, pNode->objType, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "OBJECT VALIDATION WARNING: %s object 0x%" PRIxLEAST64 ": %s", string_VkObjectType(objType),
                reinterpret_cast<VkUintPtrLeast64>(vkObj), fail_msg);
            return VK_FALSE;
        }
        return VK_TRUE;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), msg_flags, (VkObjectType) 0, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
            "Unable to obtain status for non-existent object 0x%" PRIxLEAST64 " of %s type",
            reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
        return VK_FALSE;
    }
}

#include "vk_dispatch_table_helper.h"
static void
initObjectTracker(
    layer_data *my_data)
{
    uint32_t report_flags = 0;
    uint32_t debug_action = 0;
    FILE *log_output = NULL;
    const char *option_str;
    // Initialize ObjectTracker options
    report_flags = getLayerOptionFlags("ObjectTrackerReportFlags", 0);
    getLayerOptionEnum("ObjectTrackerDebugAction", (uint32_t *) &debug_action);

    if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
    {
        option_str = getLayerOption("ObjectTrackerLogFilename");
        if (option_str) {
            log_output = fopen(option_str, "w");
        }
        if (log_output == NULL) {
            log_output = stdout;
        }

        layer_create_msg_callback(my_data->report_data, report_flags, log_callback, (void *) log_output, &my_data->logging_callback);
    }

    if (!objLockInitialized)
    {
        // TODO/TBD: Need to delete this mutex sometime. How??? One
        // suggestion is to call this during vkCreateInstance(), and then we
        // can clean it up during vkDestroyInstance(). However, that requires
        // that the layer have per-instance locks. We need to come back and
        // address this soon.
        loader_platform_thread_create_mutex(&objLock);
        objLockInitialized = 1;
    }
}
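
// Example configuration (a sketch only -- the option names come from the
// getLayerOption*() calls above, but the exact settings-file name and value
// syntax depend on the layer option utilities in use):
//
//     ObjectTrackerReportFlags = error,warn
//     ObjectTrackerDebugAction = VK_DBG_LAYER_ACTION_LOG_MSG
//     ObjectTrackerLogFilename = object_tracker.log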

//
// Non-auto-generated API functions called by generated code
//

VkResult
explicit_CreateInstance(
    const VkInstanceCreateInfo *pCreateInfo,
    VkInstance                 *pInstance)
{
    loader_platform_thread_lock_mutex(&objLock);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, *pInstance);
    VkResult result = pInstanceTable->CreateInstance(pCreateInfo, pInstance);

    if (result == VK_SUCCESS) {
        layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
        my_data->report_data = debug_report_create_instance(
                                   pInstanceTable,
                                   *pInstance,
                                   pCreateInfo->extensionCount,
                                   pCreateInfo->pEnabledExtensions);

        initObjectTracker(my_data);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_DestroyInstance(
    VkInstance instance)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(instance, instance);

    destroy_obj(instance, instance);
    // Report any remaining objects in LL
    for (auto it = objMap.begin(); it != objMap.end(); ++it) {
        OBJTRACK_NODE* pNode = it->second;
        if ((pNode->objType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) || (pNode->objType == VK_OBJECT_TYPE_QUEUE)) {
            // Cannot destroy physical device so ignore
        } else {
            log_msg(mid(instance), VK_DBG_REPORT_ERROR_BIT, pNode->objType, pNode->vkObj, 0, OBJTRACK_OBJECT_LEAK, "OBJTRACK",
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.", string_VkObjectType(pNode->objType),
                reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj));
        }
    }

    dispatch_key key = get_dispatch_key(instance);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, instance);
    VkResult result = pInstanceTable->DestroyInstance(instance);

    // Clean up logging callback, if any
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    if (my_data->logging_callback) {
        layer_destroy_msg_callback(my_data->report_data, my_data->logging_callback);
    }

    layer_debug_report_destroy_instance(mid(instance));
    layer_data_map.erase(pInstanceTable);

    ObjectTracker_instance_table_map.erase(key);
    assert(ObjectTracker_instance_table_map.size() == 0 && "Should not have any instance mappings hanging around");

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_GetPhysicalDeviceQueueProperties(
    VkPhysicalDevice                 gpu,
    uint32_t                         count,
    VkPhysicalDeviceQueueProperties *pProperties)
{
    VkResult result = get_dispatch_table(ObjectTracker_instance_table_map, gpu)->GetPhysicalDeviceQueueProperties(gpu, count, pProperties);

    loader_platform_thread_lock_mutex(&objLock);
    setGpuQueueInfoState(count, pProperties);
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_CreateDevice(
    VkPhysicalDevice          gpu,
    const VkDeviceCreateInfo *pCreateInfo,
    VkDevice                 *pDevice)
{
    loader_platform_thread_lock_mutex(&objLock);
//  VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, gpu);
    VkLayerDispatchTable *pDeviceTable = get_dispatch_table(ObjectTracker_device_table_map, *pDevice);
    VkResult result = pDeviceTable->CreateDevice(gpu, pCreateInfo, pDevice);
    if (result == VK_SUCCESS) {
        layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
        //// VkLayerDispatchTable *pTable = get_dispatch_table(ObjectTracker_device_table_map, *pDevice);
        layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
        my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
        create_obj(*pDevice, *pDevice, VK_OBJECT_TYPE_DEVICE);
    }

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_DestroyDevice(
    VkDevice device)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    destroy_obj(device, device);
    // Report any remaining objects
    for (auto it = objMap.begin(); it != objMap.end(); ++it) {
        OBJTRACK_NODE* pNode = it->second;
        if ((pNode->objType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) || (pNode->objType == VK_OBJECT_TYPE_QUEUE)) {
            // Cannot destroy physical device so ignore
        } else {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_PHYSICAL_DEVICE, device, 0, OBJTRACK_OBJECT_LEAK, "OBJTRACK",
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.", string_VkObjectType(pNode->objType),
                reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj));
        }
    }
    // Clean up Queue's MemRef Linked Lists
    destroyQueueMemRefLists();

    loader_platform_thread_unlock_mutex(&objLock);

    dispatch_key key = get_dispatch_key(device);
    VkLayerDispatchTable *pDisp = get_dispatch_table(ObjectTracker_device_table_map, device);
    VkResult result = pDisp->DestroyDevice(device);
    deviceExtMap.erase(pDisp);
    ObjectTracker_device_table_map.erase(key);
    assert(ObjectTracker_device_table_map.size() == 0 && "Should not have any device mappings hanging around");

    return result;
}

VkResult
explicit_GetDeviceQueue(
    VkDevice  device,
    uint32_t  queueNodeIndex,
    uint32_t  queueIndex,
    VkQueue  *pQueue)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);

    loader_platform_thread_lock_mutex(&objLock);
    addQueueInfo(queueNodeIndex, *pQueue);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_QueueSubmit(
    VkQueue            queue,
    uint32_t           cmdBufferCount,
    const VkCmdBuffer *pCmdBuffers,
    VkFence            fence)
{
    loader_platform_thread_lock_mutex(&objLock);
    set_status(queue, fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED);
    // TODO: Fix for updated memory reference mechanism
    // validate_memory_mapping_status(pMemRefs, memRefCount);
    // validate_mem_ref_count(memRefCount);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);

    return result;
}

VkResult
explicit_MapMemory(
    VkDevice        device,
    VkDeviceMemory  mem,
    VkDeviceSize    offset,
    VkDeviceSize    size,
    VkFlags         flags,
    void          **ppData)
{
    loader_platform_thread_lock_mutex(&objLock);
    set_status(device, mem, VK_OBJECT_TYPE_DEVICE_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);

    return result;
}

VkResult
explicit_UnmapMemory(
    VkDevice       device,
    VkDeviceMemory mem)
{
    loader_platform_thread_lock_mutex(&objLock);
    reset_status(device, mem, VK_OBJECT_TYPE_DEVICE_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->UnmapMemory(device, mem);

    return result;
}

VkResult
explicit_DestroyObject(
    VkDevice     device,
    VkObjectType objType,
    VkObject     object)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    validate_object(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->DestroyObject(device, objType, object);

    loader_platform_thread_lock_mutex(&objLock);
    validateObjectType(device, "vkDestroyObject", objType, object);
    destroy_obj(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_GetObjectMemoryRequirements(
    VkDevice              device,
    VkObjectType          objType,
    VkObject              object,
    VkMemoryRequirements *pMemoryRequirements)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateObjectType(device, "vkGetObjectMemoryRequirements", objType, object);
    validate_object(device, device);
    validate_object(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetObjectMemoryRequirements(device, objType, object, pMemoryRequirements);

    return result;
}

VkResult
explicit_QueueBindSparseBufferMemory(
    VkQueue        queue,
    VkBuffer       buffer,
    VkDeviceSize   rangeOffset,
    VkDeviceSize   rangeSize,
    VkDeviceMemory mem,
    VkDeviceSize   memOffset)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparseBufferMemory");
    validate_object(queue, buffer);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueBindSparseBufferMemory(queue, buffer, rangeOffset, rangeSize, mem, memOffset);
    return result;
}

VkResult
explicit_QueueBindSparseImageMemory(
    VkQueue                      queue,
    VkImage                      image,
    const VkImageMemoryBindInfo *pBindInfo,
    VkDeviceMemory               mem,
    VkDeviceSize                 memOffset)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparseImageMemory");
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueBindSparseImageMemory(queue, image, pBindInfo, mem, memOffset);
    return result;
}

VkResult
explicit_GetFenceStatus(
    VkDevice device,
    VkFence  fence)
{
    loader_platform_thread_lock_mutex(&objLock);
    // Warn if submitted_flag is not set
    validate_status(device, fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED, OBJSTATUS_FENCE_IS_SUBMITTED,
        VK_DBG_REPORT_ERROR_BIT, OBJTRACK_INVALID_FENCE, "Status Requested for Unsubmitted Fence");
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetFenceStatus(device, fence);

    return result;
}

VkResult
explicit_WaitForFences(
    VkDevice       device,
    uint32_t       fenceCount,
    const VkFence *pFences,
    bool32_t       waitAll,
    uint64_t       timeout)
{
    loader_platform_thread_lock_mutex(&objLock);
    // Warn if waiting on unsubmitted fence
    for (uint32_t i = 0; i < fenceCount; i++) {
        validate_status(device, pFences[i], VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED, OBJSTATUS_FENCE_IS_SUBMITTED,
            VK_DBG_REPORT_ERROR_BIT, OBJTRACK_INVALID_FENCE, "Waiting for Unsubmitted Fence");
    }
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    return result;
}

VkResult
explicit_AllocDescriptorSets(
    VkDevice                     device,
    VkDescriptorPool             descriptorPool,
    VkDescriptorSetUsage         setUsage,
    uint32_t                     count,
    const VkDescriptorSetLayout *pSetLayouts,
    VkDescriptorSet             *pDescriptorSets,
    uint32_t                    *pCount)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    validate_object(device, descriptorPool);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->AllocDescriptorSets(
        device, descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < *pCount; i++) {
        create_obj(device, pDescriptorSets[i], VK_OBJECT_TYPE_DESCRIPTOR_SET);
    }
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_DestroySwapChainWSI(
    VkSwapChainWSI swapChain)
{
    loader_platform_thread_lock_mutex(&objLock);
    destroy_obj(swapChain, swapChain);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, swapChain)->DestroySwapChainWSI(swapChain);

    return result;
}

VkResult
explicit_FreeMemory(
    VkDevice       device,
    VkDeviceMemory mem)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->FreeMemory(device, mem);

    loader_platform_thread_lock_mutex(&objLock);
    destroy_obj(device, mem);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

// ObjectTracker Extensions

uint64_t
objTrackGetObjectsCount(
    VkDevice device)
{
    return numTotalObjs;
}

VkResult
objTrackGetObjects(
    VkDevice       device,
    uint64_t       objCount,
    OBJTRACK_NODE *pObjNodeArray)
{
    // Sanity-check the requested count against the number of live objects
    uint64_t maxObjCount = numTotalObjs;
    if (objCount > maxObjCount) {
        log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_OBJCOUNT_MAX_EXCEEDED, "OBJTRACK",
            "OBJ ERROR : Received objTrackGetObjects() request for %lu objs, but there are only %lu total objs", objCount, maxObjCount);
        return VK_ERROR_INVALID_VALUE;
    }
    auto it = objMap.begin();
    for (uint64_t i = 0; i < objCount; i++) {
        if (objMap.end() == it) {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "OBJ INTERNAL ERROR : Ran out of objs! Should have %lu, but only copied %lu and not the requested %lu.", maxObjCount, i, objCount);
            return VK_ERROR_UNKNOWN;
        }
        memcpy(&pObjNodeArray[i], it->second, sizeof(OBJTRACK_NODE));
        ++it;
    }
    return VK_SUCCESS;
}

uint64_t
objTrackGetObjectsOfTypeCount(
    VkDevice     device,
    VkObjectType type)
{
    return numObjs[objTypeToIndex(type)];
}

VkResult
objTrackGetObjectsOfType(
    VkDevice       device,
    VkObjectType   type,
    uint64_t       objCount,
    OBJTRACK_NODE *pObjNodeArray)
{
    // Sanity-check the requested count against the number of live objects of this type
    uint64_t maxObjCount = numObjs[objTypeToIndex(type)];
    if (objCount > maxObjCount) {
        log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_OBJCOUNT_MAX_EXCEEDED, "OBJTRACK",
            "OBJ ERROR : Received objTrackGetObjects() request for %lu objs, but there are only %lu objs of type %s",
            objCount, maxObjCount, string_VkObjectType(type));
        return VK_ERROR_INVALID_VALUE;
    }
    auto it = objMap.begin();
    for (uint64_t i = 0; i < objCount; i++) {
        // Get next object of correct type
        while ((objMap.end() != it) && (it->second->objType != type))
            ++it;
        if (objMap.end() == it) {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "OBJ INTERNAL ERROR : Ran out of %s objs! Should have %lu, but only copied %lu and not the requested %lu.",
                string_VkObjectType(type), maxObjCount, i, objCount);
            return VK_ERROR_UNKNOWN;
        }
        memcpy(&pObjNodeArray[i], it->second, sizeof(OBJTRACK_NODE));
        // Advance past the object just copied so the next iteration finds a new one
        ++it;
    }
    return VK_SUCCESS;
}
899