/*
 * Vulkan
 *
 * Copyright (C) 2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "vkLayer.h"
#include "vk_enum_string_helper.h"

// Object Tracker ERROR codes
typedef enum _OBJECT_TRACK_ERROR
{
    OBJTRACK_NONE,                     // Used for INFO & other non-error messages
    OBJTRACK_UNKNOWN_OBJECT,           // Updating uses of object that's not in global object list
    OBJTRACK_INTERNAL_ERROR,           // Bug with data tracking within the layer
    OBJTRACK_DESTROY_OBJECT_FAILED,    // Couldn't find object to be destroyed
    OBJTRACK_MISSING_OBJECT,           // Attempted look-up on object that isn't in global object list
    OBJTRACK_OBJECT_TYPE_MISMATCH,     // Object did not match corresponding Object Type
    OBJTRACK_OBJECT_LEAK,              // OBJECT was not correctly freed/destroyed
    OBJTRACK_OBJCOUNT_MAX_EXCEEDED,    // Request for Object data in excess of max obj count
    OBJTRACK_INVALID_FENCE,            // Requested status of unsubmitted fence object
    OBJTRACK_VIEWPORT_NOT_BOUND,       // Draw submitted with no viewport state object bound
    OBJTRACK_RASTER_NOT_BOUND,         // Draw submitted with no raster state object bound
    OBJTRACK_COLOR_BLEND_NOT_BOUND,    // Draw submitted with no color blend state object bound
    OBJTRACK_DEPTH_STENCIL_NOT_BOUND,  // Draw submitted with no depth-stencil state object bound
    OBJTRACK_GPU_MEM_MAPPED,           // Mem object ref'd in cmd buff is still mapped
    OBJTRACK_GETGPUINFO_NOT_CALLED,    // Gpu Information has not been requested before drawing
    OBJTRACK_MEMREFCOUNT_MAX_EXCEEDED, // Number of QueueSubmit memory references exceeds GPU maximum
    OBJTRACK_INVALID_OBJECT,           // Object used that has never been created
} OBJECT_TRACK_ERROR;

// Object Status -- used to track state of individual objects
typedef VkFlags ObjectStatusFlags;
typedef enum _ObjectStatusFlagBits
{
    OBJSTATUS_NONE                = 0x00000000, // No status is set
    OBJSTATUS_FENCE_IS_SUBMITTED  = 0x00000001, // Fence has been submitted
    OBJSTATUS_VIEWPORT_BOUND      = 0x00000002, // Viewport state object has been bound
    OBJSTATUS_RASTER_BOUND        = 0x00000004, // Raster state object has been bound
    OBJSTATUS_COLOR_BLEND_BOUND   = 0x00000008, // Color blend state object has been bound
    OBJSTATUS_DEPTH_STENCIL_BOUND = 0x00000010, // Depth-stencil state object has been bound
    OBJSTATUS_GPU_MEM_MAPPED      = 0x00000020, // Memory object is currently mapped
} ObjectStatusFlagBits;

typedef struct _OBJTRACK_NODE {
    VkObject          vkObj;
    VkObjectType      objType;
    ObjectStatusFlags status;
} OBJTRACK_NODE;

// prototype for extension functions
uint64_t objTrackGetObjectCount(VkDevice device);
VkResult objTrackGetObjects(VkDevice device, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);
uint64_t objTrackGetObjectsOfTypeCount(VkDevice, VkObjectType type);
VkResult objTrackGetObjectsOfType(VkDevice, VkObjectType type, uint64_t objCount, OBJTRACK_NODE* pObjNodeArray);

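// Usage sketch (illustrative only, excluded from the build): an application that
// has obtained these entry points -- the retrieval mechanism, e.g. a
// GetProcAddr-style query, is outside the scope of this file -- could dump the
// outstanding objects of one type. Assumes the count does not change between
// the two calls; the helper name below is hypothetical.
#if 0
static void dumpFenceObjects(VkDevice device)
{
    uint64_t count = objTrackGetObjectsOfTypeCount(device, VK_OBJECT_TYPE_FENCE);
    if (count == 0)
        return;
    OBJTRACK_NODE *pNodes = new OBJTRACK_NODE[count];
    if (objTrackGetObjectsOfType(device, VK_OBJECT_TYPE_FENCE, count, pNodes) == VK_SUCCESS) {
        for (uint64_t i = 0; i < count; i++) {
            // Each node carries the handle, its type, and the tracked status bits
            printf("fence 0x%" PRIxLEAST64 " status 0x%x\n",
                   reinterpret_cast<VkUintPtrLeast64>(pNodes[i].vkObj),
                   (unsigned int) pNodes[i].status);
        }
    }
    delete[] pNodes;
}
#endif
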
// Func ptr typedefs
typedef uint64_t (*OBJ_TRACK_GET_OBJECT_COUNT)(VkDevice);
typedef VkResult (*OBJ_TRACK_GET_OBJECTS)(VkDevice, uint64_t, OBJTRACK_NODE*);
typedef uint64_t (*OBJ_TRACK_GET_OBJECTS_OF_TYPE_COUNT)(VkDevice, VkObjectType);
typedef VkResult (*OBJ_TRACK_GET_OBJECTS_OF_TYPE)(VkDevice, VkObjectType, uint64_t, OBJTRACK_NODE*);

typedef struct _layer_data {
    debug_report_data *report_data;
    //TODO: put instance data here
    VkDbgMsgCallback   logging_callback;
} layer_data;

static std::unordered_map<void*, layer_data *> layer_data_map;
static device_table_map                        ObjectTracker_device_table_map;
static instance_table_map                      ObjectTracker_instance_table_map;

static long long unsigned int object_track_index = 0;
static int objLockInitialized = 0;
static loader_platform_thread_mutex objLock;

// Objects stored in a global map w/ struct containing basic info
unordered_map<VkObject, OBJTRACK_NODE*> objMap;

#define NUM_OBJECT_TYPES (VK_NUM_OBJECT_TYPE + (VK_OBJECT_TYPE_SWAP_CHAIN_WSI - VK_OBJECT_TYPE_DISPLAY_WSI))

static uint64_t                         numObjs[NUM_OBJECT_TYPES] = {0};
static uint64_t                         numTotalObjs = 0;
static VkPhysicalDeviceQueueProperties *queueInfo = NULL;
static uint32_t                         queueCount = 0;

template layer_data *get_my_data_ptr<layer_data>(
        void *data_key, std::unordered_map<void *, layer_data *> &data_map);

//
// Internal Object Tracker Functions
//

struct devExts {
    bool wsi_lunarg_enabled;
};

static std::unordered_map<void *, struct devExts> deviceExtMap;

static void createDeviceRegisterExtensions(const VkDeviceCreateInfo* pCreateInfo, VkDevice device)
{
    uint32_t i;
    VkLayerDispatchTable *pDisp = device_dispatch_table(device);
    deviceExtMap[pDisp].wsi_lunarg_enabled = false;
    for (i = 0; i < pCreateInfo->extensionCount; i++) {
        if (strcmp(pCreateInfo->pEnabledExtensions[i].name, VK_WSI_LUNARG_EXTENSION_NAME) == 0)
            deviceExtMap[pDisp].wsi_lunarg_enabled = true;
    }
}

// Indicate device or instance dispatch table type
typedef enum _DispTableType
{
    DISP_TBL_TYPE_INSTANCE,
    DISP_TBL_TYPE_DEVICE,
} DispTableType;

debug_report_data *mdd(VkObject object)
{
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    assert(my_data->report_data != NULL);
    return my_data->report_data;
}

debug_report_data *mid(VkInstance object)
{
//  return mdd((VkObject) object);
    dispatch_key key = get_dispatch_key(object);
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    assert(my_data->report_data != NULL);
    return my_data->report_data;
}

// For each Queue's doubly linked-list of mem refs
typedef struct _OT_MEM_INFO {
    VkDeviceMemory       mem;
    struct _OT_MEM_INFO *pNextMI;
    struct _OT_MEM_INFO *pPrevMI;
} OT_MEM_INFO;

// Track Queue information
typedef struct _OT_QUEUE_INFO {
    OT_MEM_INFO           *pMemRefList;
    struct _OT_QUEUE_INFO *pNextQI;
    uint32_t               queueNodeIndex;
    VkQueue                queue;
    uint32_t               refCount;
} OT_QUEUE_INFO;

// Global list of QueueInfo structures, one per queue
static OT_QUEUE_INFO *g_pQueueInfo = NULL;

// Convert an object type enum to an object type array index
static uint32_t
objTypeToIndex(
    uint32_t objType)
{
    uint32_t index = objType;
    if (objType > VK_OBJECT_TYPE_END_RANGE) {
        // These come from vk_wsi_lunarg.h, rebase
        index = (index - (VK_WSI_LUNARG_EXTENSION_NUMBER * -1000)) + VK_OBJECT_TYPE_END_RANGE;
    }
    return index;
}
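
// Note on the rebasing above: core object types index numObjs[] directly by
// their enum value, while the WSI object types from vk_wsi_lunarg.h (whose
// values are expressed relative to VK_WSI_LUNARG_EXTENSION_NUMBER * -1000)
// are shifted into the slots just past VK_OBJECT_TYPE_END_RANGE. NUM_OBJECT_TYPES
// is defined above to reserve exactly those extra WSI slots, so every tracked
// type gets its own per-type counter.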

// Validate that object is in the object map
static void
validate_object(
    const VkObject dispatchable_object,
    const VkObject object)
{
    if (objMap.find(object) == objMap.end()) {
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, object, 0, OBJTRACK_INVALID_OBJECT, "OBJTRACK",
            "Invalid Object %p", object);
    }
}

// Validate that object parameter matches designated object type
static void
validateObjectType(
    VkObject      dispatchable_object,
    const char   *apiName,
    VkObjectType  objType,
    VkObject      object)
{
    if (objMap.find(object) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[object];
        // Found our object, check type
        if (strcmp(string_VkObjectType(pNode->objType), string_VkObjectType(objType)) != 0) {
            log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, objType, object, 0, OBJTRACK_OBJECT_TYPE_MISMATCH, "OBJTRACK",
                "ERROR: Object Parameter Type %s does not match designated type %s", string_VkObjectType(pNode->objType), string_VkObjectType(objType));
        }
    }
}

// Add new queue to head of global queue list
static void
addQueueInfo(
    uint32_t queueNodeIndex,
    VkQueue  queue)
{
    OT_QUEUE_INFO *pQueueInfo = new OT_QUEUE_INFO;

    if (pQueueInfo != NULL) {
        memset(pQueueInfo, 0, sizeof(OT_QUEUE_INFO));
        pQueueInfo->queue          = queue;
        pQueueInfo->queueNodeIndex = queueNodeIndex;
        pQueueInfo->pNextQI        = g_pQueueInfo;
        g_pQueueInfo               = pQueueInfo;
    }
    else {
        log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
            "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
    }
}

// Destroy memRef lists and free all memory
static void
destroyQueueMemRefLists(void)
{
    OT_QUEUE_INFO *pQueueInfo    = g_pQueueInfo;
    OT_QUEUE_INFO *pDelQueueInfo = NULL;
    while (pQueueInfo != NULL) {
        OT_MEM_INFO *pMemInfo = pQueueInfo->pMemRefList;
        while (pMemInfo != NULL) {
            OT_MEM_INFO *pDelMemInfo = pMemInfo;
            pMemInfo = pMemInfo->pNextMI;
            delete pDelMemInfo;
        }
        pDelQueueInfo = pQueueInfo;
        pQueueInfo    = pQueueInfo->pNextQI;
        delete pDelQueueInfo;
    }
    g_pQueueInfo = pQueueInfo;
}


static void
create_obj(
    VkObject     dispatchable_object,
    VkObject     vkObj,
    VkObjectType objType)
{
    log_msg(mdd(dispatchable_object), VK_DBG_REPORT_INFO_BIT, objType, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
        "OBJ[%llu] : CREATE %s object 0x%" PRIxLEAST64 , object_track_index++, string_VkObjectType(objType),
        reinterpret_cast<VkUintPtrLeast64>(vkObj));

    OBJTRACK_NODE* pNewObjNode = new OBJTRACK_NODE;
    pNewObjNode->vkObj   = vkObj;
    pNewObjNode->objType = objType;
    pNewObjNode->status  = OBJSTATUS_NONE;
    objMap[vkObj] = pNewObjNode;
    uint32_t objIndex = objTypeToIndex(objType);
    numObjs[objIndex]++;
    numTotalObjs++;
}

// Parse global list to find obj type, then remove obj from obj type list, finally
// remove obj from global list
static void
destroy_obj(
    VkObject dispatchable_object,
    VkObject vkObj)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        uint32_t objIndex = objTypeToIndex(pNode->objType);
        assert(numTotalObjs > 0);
        numTotalObjs--;
        assert(numObjs[objIndex] > 0);
        numObjs[objIndex]--;
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_INFO_BIT, pNode->objType, vkObj, 0, OBJTRACK_NONE, "OBJTRACK",
            "OBJ_STAT Destroy %s obj 0x%" PRIxLEAST64 " (%lu total objs remain & %lu %s objs).",
            string_VkObjectType(pNode->objType), reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj), numTotalObjs, numObjs[objIndex],
            string_VkObjectType(pNode->objType));

        delete pNode;
        objMap.erase(vkObj);
    } else {
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, vkObj, 0, OBJTRACK_DESTROY_OBJECT_FAILED, "OBJTRACK",
            "Unable to remove obj 0x%" PRIxLEAST64 ". Was it created? Has it already been destroyed?",
            reinterpret_cast<VkUintPtrLeast64>(vkObj));
    }
}

// Set selected flag state for an object node
static void
set_status(
    VkObject          dispatchable_object,
    VkObject          vkObj,
    VkObjectType      objType,
    ObjectStatusFlags status_flag)
{
    if (vkObj != VK_NULL_HANDLE) {
        if (objMap.find(vkObj) != objMap.end()) {
            OBJTRACK_NODE* pNode = objMap[vkObj];
            pNode->status |= status_flag;
            return;
        }
        else {
            // If we do not find it print an error
            log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Unable to set status for non-existent object 0x%" PRIxLEAST64 " of %s type",
                reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
        }
    }
}

// Reset selected flag state for an object node
static void
reset_status(
    VkObject          dispatchable_object,
    VkObject          vkObj,
    VkObjectType      objType,
    ObjectStatusFlags status_flag)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        pNode->status &= ~status_flag;
        return;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), VK_DBG_REPORT_ERROR_BIT, objType, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
            "Unable to reset status for non-existent object 0x%" PRIxLEAST64 " of %s type",
            reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
    }
}

static void
setGpuQueueInfoState(
    uint32_t  count,
    void     *pData)
{
    queueCount = count;
    queueInfo  = (VkPhysicalDeviceQueueProperties*)realloc((void*)queueInfo, count * sizeof(VkPhysicalDeviceQueueProperties));
    if (queueInfo != NULL) {
        memcpy(queueInfo, pData, count * sizeof(VkPhysicalDeviceQueueProperties));
    }
}

// Check Queue type flags for selected queue operations
static void
validateQueueFlags(
    VkQueue     queue,
    const char *function)
{
    OT_QUEUE_INFO *pQueueInfo = g_pQueueInfo;
    while ((pQueueInfo != NULL) && (pQueueInfo->queue != queue)) {
        pQueueInfo = pQueueInfo->pNextQI;
    }
    if (pQueueInfo != NULL) {
        if (queueInfo == NULL) {
            // Queue properties were never captured, so the capability is unknown
            log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Attempting %s on a possibly non-memory-management capable queue -- VK_QUEUE_SPARSE_MEMMGR_BIT not known", function);
        } else if ((queueInfo[pQueueInfo->queueNodeIndex].queueFlags & VK_QUEUE_SPARSE_MEMMGR_BIT) == 0) {
            log_msg(mdd(queue), VK_DBG_REPORT_ERROR_BIT, VK_OBJECT_TYPE_QUEUE, queue, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
                "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_MEMMGR_BIT not set", function);
        }
    }
}

// Check object status for selected flag state
static bool32_t
validate_status(
    VkObject           dispatchable_object,
    VkObject           vkObj,
    VkObjectType       objType,
    ObjectStatusFlags  status_mask,
    ObjectStatusFlags  status_flag,
    VkFlags            msg_flags,
    OBJECT_TRACK_ERROR error_code,
    const char        *fail_msg)
{
    if (objMap.find(vkObj) != objMap.end()) {
        OBJTRACK_NODE* pNode = objMap[vkObj];
        if ((pNode->status & status_mask) != status_flag) {
            log_msg(mdd(dispatchable_object), msg_flags, pNode->objType, vkObj, 0, error_code, "OBJTRACK",
                "OBJECT VALIDATION WARNING: %s object 0x%" PRIxLEAST64 ": %s", string_VkObjectType(objType),
                reinterpret_cast<VkUintPtrLeast64>(vkObj), fail_msg);
            return VK_FALSE;
        }
        return VK_TRUE;
    }
    else {
        // If we do not find it print an error
        log_msg(mdd(dispatchable_object), msg_flags, (VkObjectType) 0, vkObj, 0, OBJTRACK_UNKNOWN_OBJECT, "OBJTRACK",
            "Unable to obtain status for non-existent object 0x%" PRIxLEAST64 " of %s type",
            reinterpret_cast<VkUintPtrLeast64>(vkObj), string_VkObjectType(objType));
        return VK_FALSE;
    }
}

#include "vk_dispatch_table_helper.h"
static void
initObjectTracker(
    layer_data *my_data)
{
    uint32_t report_flags = 0;
    uint32_t debug_action = 0;
    FILE *log_output = NULL;
    const char *option_str;
    // initialize ObjectTracker options
    report_flags = getLayerOptionFlags("ObjectTrackerReportFlags", 0);
    getLayerOptionEnum("ObjectTrackerDebugAction", (uint32_t *) &debug_action);

    if (debug_action & VK_DBG_LAYER_ACTION_LOG_MSG)
    {
        option_str = getLayerOption("ObjectTrackerLogFilename");
        if (option_str) {
            log_output = fopen(option_str, "w");
        }
        if (log_output == NULL) {
            log_output = stdout;
        }

        layer_create_msg_callback(my_data->report_data, report_flags, log_callback, (void *) log_output, &my_data->logging_callback);
    }

    if (!objLockInitialized)
    {
        // TODO/TBD: Need to delete this mutex sometime. How??? One
        // suggestion is to call this during vkCreateInstance(), and then we
        // can clean it up during vkDestroyInstance(). However, that requires
        // that the layer have per-instance locks. We need to come back and
        // address this soon.
        loader_platform_thread_create_mutex(&objLock);
        objLockInitialized = 1;
    }
}
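
// Example of the corresponding layer settings (sketch -- the option names are
// the keys queried above, but the settings file location and the exact value
// syntax are determined by the shared layer-option utilities, not by this file):
//
//     ObjectTrackerReportFlags  = error,warn
//     ObjectTrackerDebugAction  = VK_DBG_LAYER_ACTION_LOG_MSG
//     ObjectTrackerLogFilename  = object_tracker.log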

//
// Non-auto-generated API functions called by generated code
//

VkResult
explicit_CreateInstance(
    const VkInstanceCreateInfo *pCreateInfo,
    VkInstance                 *pInstance)
{
    loader_platform_thread_lock_mutex(&objLock);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, *pInstance);
    VkResult result = pInstanceTable->CreateInstance(pCreateInfo, pInstance);

    if (result == VK_SUCCESS) {
        layer_data *my_data = get_my_data_ptr(get_dispatch_key(*pInstance), layer_data_map);
        my_data->report_data = debug_report_create_instance(
                                   pInstanceTable,
                                   *pInstance,
                                   pCreateInfo->extensionCount,
                                   pCreateInfo->pEnabledExtensions);

        initObjectTracker(my_data);
    }
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_DestroyInstance(
    VkInstance instance)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(instance, instance);

    destroy_obj(instance, instance);
    // Report any remaining objects in LL
    for (auto it = objMap.begin(); it != objMap.end(); ++it) {
        OBJTRACK_NODE* pNode = it->second;
        if ((pNode->objType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) || (pNode->objType == VK_OBJECT_TYPE_QUEUE)) {
            // Cannot destroy physical device so ignore
        } else {
            log_msg(mid(instance), VK_DBG_REPORT_ERROR_BIT, pNode->objType, pNode->vkObj, 0, OBJTRACK_OBJECT_LEAK, "OBJTRACK",
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.", string_VkObjectType(pNode->objType),
                reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj));
        }
    }

    dispatch_key key = get_dispatch_key(instance);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, instance);
    VkResult result = pInstanceTable->DestroyInstance(instance);

    // Clean up logging callback, if any
    layer_data *my_data = get_my_data_ptr(key, layer_data_map);
    if (my_data->logging_callback) {
        layer_destroy_msg_callback(my_data->report_data, my_data->logging_callback);
    }

    layer_debug_report_destroy_instance(mid(instance));
    layer_data_map.erase(pInstanceTable);

    ObjectTracker_instance_table_map.erase(key);
    assert(ObjectTracker_instance_table_map.size() == 0 && "Should not have any instance mappings hanging around");

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_GetPhysicalDeviceQueueProperties(
    VkPhysicalDevice                 gpu,
    uint32_t                         count,
    VkPhysicalDeviceQueueProperties* pProperties)
{
    VkResult result = get_dispatch_table(ObjectTracker_instance_table_map, gpu)->GetPhysicalDeviceQueueProperties(gpu, count, pProperties);

    loader_platform_thread_lock_mutex(&objLock);
    setGpuQueueInfoState(count, pProperties);
    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_CreateDevice(
    VkPhysicalDevice          gpu,
    const VkDeviceCreateInfo *pCreateInfo,
    VkDevice                 *pDevice)
{
    loader_platform_thread_lock_mutex(&objLock);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ObjectTracker_instance_table_map, gpu);
    VkResult result = pInstanceTable->CreateDevice(gpu, pCreateInfo, pDevice);
    if (result == VK_SUCCESS) {
        layer_data *my_instance_data = get_my_data_ptr(get_dispatch_key(gpu), layer_data_map);
        //// VkLayerDispatchTable *pTable = get_dispatch_table(ObjectTracker_device_table_map, *pDevice);
        layer_data *my_device_data = get_my_data_ptr(get_dispatch_key(*pDevice), layer_data_map);
        my_device_data->report_data = layer_debug_report_create_device(my_instance_data->report_data, *pDevice);
        create_obj(gpu, *pDevice, VK_OBJECT_TYPE_DEVICE);
    }

    loader_platform_thread_unlock_mutex(&objLock);
    return result;
}

VkResult
explicit_DestroyDevice(
    VkDevice device)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    destroy_obj(device, device);
    // Report any remaining objects
    for (auto it = objMap.begin(); it != objMap.end(); ++it) {
        OBJTRACK_NODE* pNode = it->second;
        if ((pNode->objType == VK_OBJECT_TYPE_PHYSICAL_DEVICE) || (pNode->objType == VK_OBJECT_TYPE_QUEUE)) {
            // Cannot destroy physical device so ignore
        } else {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, pNode->objType, pNode->vkObj, 0, OBJTRACK_OBJECT_LEAK, "OBJTRACK",
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.", string_VkObjectType(pNode->objType),
                reinterpret_cast<VkUintPtrLeast64>(pNode->vkObj));
        }
    }
    // Clean up Queue's MemRef Linked Lists
    destroyQueueMemRefLists();

    loader_platform_thread_unlock_mutex(&objLock);

    dispatch_key key = get_dispatch_key(device);
    VkLayerDispatchTable *pDisp = get_dispatch_table(ObjectTracker_device_table_map, device);
    VkResult result = pDisp->DestroyDevice(device);
    deviceExtMap.erase(pDisp);
    ObjectTracker_device_table_map.erase(key);
    assert(ObjectTracker_device_table_map.size() == 0 && "Should not have any device mappings hanging around");

    return result;
}

VkResult
explicit_GetDeviceQueue(
    VkDevice device,
    uint32_t queueNodeIndex,
    uint32_t queueIndex,
    VkQueue *pQueue)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetDeviceQueue(device, queueNodeIndex, queueIndex, pQueue);

    loader_platform_thread_lock_mutex(&objLock);
    addQueueInfo(queueNodeIndex, *pQueue);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_QueueSubmit(
    VkQueue            queue,
    uint32_t           cmdBufferCount,
    const VkCmdBuffer *pCmdBuffers,
    VkFence            fence)
{
    loader_platform_thread_lock_mutex(&objLock);
    set_status(queue, fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED);
    // TODO: Fix for updated memory reference mechanism
    // validate_memory_mapping_status(pMemRefs, memRefCount);
    // validate_mem_ref_count(memRefCount);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueSubmit(queue, cmdBufferCount, pCmdBuffers, fence);

    return result;
}

VkResult
explicit_MapMemory(
    VkDevice       device,
    VkDeviceMemory mem,
    VkDeviceSize   offset,
    VkDeviceSize   size,
    VkFlags        flags,
    void         **ppData)
{
    loader_platform_thread_lock_mutex(&objLock);
    set_status(device, mem, VK_OBJECT_TYPE_DEVICE_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->MapMemory(device, mem, offset, size, flags, ppData);

    return result;
}

VkResult
explicit_UnmapMemory(
    VkDevice       device,
    VkDeviceMemory mem)
{
    loader_platform_thread_lock_mutex(&objLock);
    reset_status(device, mem, VK_OBJECT_TYPE_DEVICE_MEMORY, OBJSTATUS_GPU_MEM_MAPPED);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->UnmapMemory(device, mem);

    return result;
}

VkResult
explicit_DestroyObject(
    VkDevice     device,
    VkObjectType objType,
    VkObject     object)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    validate_object(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->DestroyObject(device, objType, object);

    loader_platform_thread_lock_mutex(&objLock);
    validateObjectType(device, "vkDestroyObject", objType, object);
    destroy_obj(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_GetObjectMemoryRequirements(
    VkDevice              device,
    VkObjectType          objType,
    VkObject              object,
    VkMemoryRequirements* pMemoryRequirements)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateObjectType(device, "vkGetObjectMemoryRequirements", objType, object);
    validate_object(device, device);
    validate_object(device, object);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetObjectMemoryRequirements(device, objType, object, pMemoryRequirements);

    return result;
}

VkResult
explicit_QueueBindSparseBufferMemory(
    VkQueue        queue,
    VkBuffer       buffer,
    VkDeviceSize   rangeOffset,
    VkDeviceSize   rangeSize,
    VkDeviceMemory mem,
    VkDeviceSize   memOffset)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparseBufferMemory");
    validate_object(queue, buffer);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueBindSparseBufferMemory(queue, buffer, rangeOffset, rangeSize, mem, memOffset);
    return result;
}

VkResult
explicit_QueueBindSparseImageMemory(
    VkQueue                       queue,
    VkImage                       image,
    const VkImageMemoryBindInfo  *pBindInfo,
    VkDeviceMemory                mem,
    VkDeviceSize                  memOffset)
{
    loader_platform_thread_lock_mutex(&objLock);
    validateQueueFlags(queue, "QueueBindSparseImageMemory");
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, queue)->QueueBindSparseImageMemory(queue, image, pBindInfo, mem, memOffset);
    return result;
}


VkResult
explicit_GetFenceStatus(
    VkDevice device,
    VkFence  fence)
{
    loader_platform_thread_lock_mutex(&objLock);
    // Warn if submitted_flag is not set
    validate_status(device, fence, VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED, OBJSTATUS_FENCE_IS_SUBMITTED,
        VK_DBG_REPORT_ERROR_BIT, OBJTRACK_INVALID_FENCE, "Status Requested for Unsubmitted Fence");
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->GetFenceStatus(device, fence);

    return result;
}

VkResult
explicit_WaitForFences(
    VkDevice       device,
    uint32_t       fenceCount,
    const VkFence *pFences,
    bool32_t       waitAll,
    uint64_t       timeout)
{
    loader_platform_thread_lock_mutex(&objLock);
    // Warn if waiting on unsubmitted fence
    for (uint32_t i = 0; i < fenceCount; i++) {
        validate_status(device, pFences[i], VK_OBJECT_TYPE_FENCE, OBJSTATUS_FENCE_IS_SUBMITTED, OBJSTATUS_FENCE_IS_SUBMITTED,
            VK_DBG_REPORT_ERROR_BIT, OBJTRACK_INVALID_FENCE, "Waiting for Unsubmitted Fence");
    }
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->WaitForFences(device, fenceCount, pFences, waitAll, timeout);

    return result;
}

VkResult
explicit_AllocDescriptorSets(
    VkDevice                     device,
    VkDescriptorPool             descriptorPool,
    VkDescriptorSetUsage         setUsage,
    uint32_t                     count,
    const VkDescriptorSetLayout *pSetLayouts,
    VkDescriptorSet             *pDescriptorSets,
    uint32_t                    *pCount)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    validate_object(device, descriptorPool);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->AllocDescriptorSets(
        device, descriptorPool, setUsage, count, pSetLayouts, pDescriptorSets, pCount);

    loader_platform_thread_lock_mutex(&objLock);
    for (uint32_t i = 0; i < *pCount; i++) {
        create_obj(device, pDescriptorSets[i], VK_OBJECT_TYPE_DESCRIPTOR_SET);
    }
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}

VkResult
explicit_DestroySwapChainWSI(
    VkSwapChainWSI swapChain)
{
    loader_platform_thread_lock_mutex(&objLock);
    destroy_obj(swapChain, swapChain);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, swapChain)->DestroySwapChainWSI(swapChain);

    return result;
}

VkResult
explicit_FreeMemory(
    VkDevice       device,
    VkDeviceMemory mem)
{
    loader_platform_thread_lock_mutex(&objLock);
    validate_object(device, device);
    loader_platform_thread_unlock_mutex(&objLock);

    VkResult result = get_dispatch_table(ObjectTracker_device_table_map, device)->FreeMemory(device, mem);

    loader_platform_thread_lock_mutex(&objLock);
    destroy_obj(device, mem);
    loader_platform_thread_unlock_mutex(&objLock);

    return result;
}


// ObjectTracker Extensions

uint64_t
objTrackGetObjectsCount(
    VkDevice device)
{
    return numTotalObjs;
}

VkResult
objTrackGetObjects(
    VkDevice       device,
    uint64_t       objCount,
    OBJTRACK_NODE *pObjNodeArray)
{
    // Check the count first thing
    uint64_t maxObjCount = numTotalObjs;
    if (objCount > maxObjCount) {
        log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_OBJCOUNT_MAX_EXCEEDED, "OBJTRACK",
            "OBJ ERROR : Received objTrackGetObjects() request for %lu objs, but there are only %lu total objs", objCount, maxObjCount);
        return VK_ERROR_INVALID_VALUE;
    }
    auto it = objMap.begin();
    for (uint64_t i = 0; i < objCount; i++) {
        if (objMap.end() == it) {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "OBJ INTERNAL ERROR : Ran out of objs! Should have %lu, but only copied %lu and not the requested %lu.", maxObjCount, i, objCount);
            return VK_ERROR_UNKNOWN;
        }
        memcpy(&pObjNodeArray[i], it->second, sizeof(OBJTRACK_NODE));
        ++it;
    }
    return VK_SUCCESS;
}

uint64_t
objTrackGetObjectsOfTypeCount(
    VkDevice     device,
    VkObjectType type)
{
    return numObjs[objTypeToIndex(type)];
}

VkResult
objTrackGetObjectsOfType(
    VkDevice       device,
    VkObjectType   type,
    uint64_t       objCount,
    OBJTRACK_NODE *pObjNodeArray)
{
    // Check the count first thing
    uint64_t maxObjCount = numObjs[objTypeToIndex(type)];
    if (objCount > maxObjCount) {
        log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_OBJCOUNT_MAX_EXCEEDED, "OBJTRACK",
            "OBJ ERROR : Received objTrackGetObjects() request for %lu objs, but there are only %lu objs of type %s",
            objCount, maxObjCount, string_VkObjectType(type));
        return VK_ERROR_INVALID_VALUE;
    }
    auto it = objMap.begin();
    for (uint64_t i = 0; i < objCount; i++) {
        // Get next object of correct type
        while ((objMap.end() != it) && (it->second->objType != type))
            ++it;
        if (objMap.end() == it) {
            log_msg(mdd(device), VK_DBG_REPORT_ERROR_BIT, (VkObjectType) 0, device, 0, OBJTRACK_INTERNAL_ERROR, "OBJTRACK",
                "OBJ INTERNAL ERROR : Ran out of %s objs! Should have %lu, but only copied %lu and not the requested %lu.",
                string_VkObjectType(type), maxObjCount, i, objCount);
            return VK_ERROR_UNKNOWN;
        }
        memcpy(&pObjNodeArray[i], it->second, sizeof(OBJTRACK_NODE));
        // Advance past the object just copied so the next iteration finds a new one
        ++it;
    }
    return VK_SUCCESS;
}