/* Copyright (c) 2015-2017 The Khronos Group Inc.
 * Copyright (c) 2015-2017 Valve Corporation
 * Copyright (c) 2015-2017 LunarG, Inc.
 * Copyright (C) 2015-2017 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Jon Ashburn <jon@lunarg.com>
 * Author: Tobin Ehlis <tobin@lunarg.com>
 */

#define VALIDATION_ERROR_MAP_IMPL

#include "object_tracker.h"

namespace object_tracker {

std::unordered_map<void *, layer_data *> layer_data_map;
device_table_map ot_device_table_map;
instance_table_map ot_instance_table_map;
std::mutex global_lock;
uint64_t object_track_index = 0;
uint32_t loader_layer_if_version = CURRENT_LOADER_LAYER_INTERFACE_VERSION;

void InitObjectTracker(layer_data *my_data, const VkAllocationCallbacks *pAllocator) {
    layer_debug_report_actions(my_data->report_data, my_data->logging_callback, pAllocator, "lunarg_object_tracker");
    layer_debug_messenger_actions(my_data->report_data, my_data->logging_messenger, pAllocator, "lunarg_object_tracker");
}

// Add a new queue to the global queue info map
void AddQueueInfo(VkDevice device, uint32_t queue_node_index, VkQueue queue) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    auto queueItem = device_data->queue_info_map.find(queue);
    if (queueItem == device_data->queue_info_map.end()) {
        ObjTrackQueueInfo *p_queue_info = new ObjTrackQueueInfo;
        if (p_queue_info != NULL) {
            memset(p_queue_info, 0, sizeof(ObjTrackQueueInfo));
            p_queue_info->queue = queue;
            p_queue_info->queue_node_index = queue_node_index;
            device_data->queue_info_map[queue] = p_queue_info;
        } else {
            log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                    HandleToUint64(queue), OBJTRACK_INTERNAL_ERROR,
                    "ERROR: VK_ERROR_OUT_OF_HOST_MEMORY -- could not allocate memory for Queue Information");
        }
    }
}

// Destroy the queue info map and queue tracking objects, freeing all associated memory
void DestroyQueueDataStructures(VkDevice device) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    for (auto queue_item : device_data->queue_info_map) {
        delete queue_item.second;
    }
    device_data->queue_info_map.clear();

    // Destroy the items in the queue map
    auto queue = device_data->object_map[kVulkanObjectTypeQueue].begin();
    while (queue != device_data->object_map[kVulkanObjectTypeQueue].end()) {
        uint32_t obj_index = queue->second->object_type;
        assert(device_data->num_total_objects > 0);
        device_data->num_total_objects--;
        assert(device_data->num_objects[obj_index] > 0);
        device_data->num_objects[obj_index]--;
        log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                queue->second->handle, OBJTRACK_NONE,
                "OBJ_STAT Destroy Queue obj 0x%" PRIxLEAST64 " (%" PRIu64 " total objs remain & %" PRIu64 " Queue objs).",
                queue->second->handle, device_data->num_total_objects, device_data->num_objects[obj_index]);
        delete queue->second;
        queue = device_data->object_map[kVulkanObjectTypeQueue].erase(queue);
    }
}

// Check Queue type flags for selected queue operations
void ValidateQueueFlags(VkQueue queue, const char *function) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    auto queue_item = device_data->queue_info_map.find(queue);
    if (queue_item != device_data->queue_info_map.end()) {
        ObjTrackQueueInfo *pQueueInfo = queue_item->second;
        if (pQueueInfo != NULL) {
            layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(device_data->physical_device), layer_data_map);
            if ((instance_data->queue_family_properties[pQueueInfo->queue_node_index].queueFlags & VK_QUEUE_SPARSE_BINDING_BIT) ==
                0) {
                log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
                        HandleToUint64(queue), "VUID-vkQueueBindSparse-queuetype",
                        "Attempting %s on a non-memory-management capable queue -- VK_QUEUE_SPARSE_BINDING_BIT not set.", function);
            }
        }
    }
}

// Look for this device object in any of the instance child devices lists.
// NOTE: This is of dubious value. In most circumstances Vulkan will die a flaming death if a dispatchable object is invalid.
// However, if this layer is loaded first and GetProcAddress is used to make API calls, it will detect bad DOs.
bool ValidateDeviceObject(uint64_t device_handle, const std::string &invalid_handle_code, const std::string &wrong_device_code) {
    VkInstance last_instance = nullptr;
    for (auto layer_data : layer_data_map) {
        for (auto object : layer_data.second->object_map[kVulkanObjectTypeDevice]) {
            // Grab last instance to use for possible error message
            last_instance = layer_data.second->instance;
            if (object.second->handle == device_handle) return false;
        }
    }

    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(last_instance), layer_data_map);
    return log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, device_handle,
                   invalid_handle_code, "Invalid Device Object 0x%" PRIxLEAST64 ".", device_handle);
}

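// Track a newly allocated command buffer: log its creation, record its parent command pool, and flag secondary-level buffers.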
void AllocateCommandBuffer(VkDevice device, const VkCommandPool command_pool, const VkCommandBuffer command_buffer,
                           VkCommandBufferLevel level) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
            HandleToUint64(command_buffer), OBJTRACK_NONE, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT", HandleToUint64(command_buffer));

    ObjTrackState *pNewObjNode = new ObjTrackState;
    pNewObjNode->object_type = kVulkanObjectTypeCommandBuffer;
    pNewObjNode->handle = HandleToUint64(command_buffer);
    pNewObjNode->parent_object = HandleToUint64(command_pool);
    if (level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
        pNewObjNode->status = OBJSTATUS_COMMAND_BUFFER_SECONDARY;
    } else {
        pNewObjNode->status = OBJSTATUS_NONE;
    }
    device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)] = pNewObjNode;
    device_data->num_objects[kVulkanObjectTypeCommandBuffer]++;
    device_data->num_total_objects++;
}

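// Verify that a command buffer being freed is tracked and that it belongs to the command pool it is being freed from.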
bool ValidateCommandBuffer(VkDevice device, VkCommandPool command_pool, VkCommandBuffer command_buffer) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    uint64_t object_handle = HandleToUint64(command_buffer);
    if (device_data->object_map[kVulkanObjectTypeCommandBuffer].find(object_handle) !=
        device_data->object_map[kVulkanObjectTypeCommandBuffer].end()) {
        ObjTrackState *pNode = device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)];

        if (pNode->parent_object != HandleToUint64(command_pool)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                            object_handle, "VUID-vkFreeCommandBuffers-pCommandBuffers-parent",
                            "FreeCommandBuffers is attempting to free Command Buffer 0x%" PRIxLEAST64
                            " belonging to Command Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ".",
                            HandleToUint64(command_buffer), pNode->parent_object, HandleToUint64(command_pool));
        }
    } else {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
                        object_handle, "VUID-vkFreeCommandBuffers-pCommandBuffers-00048", "Invalid %s Object 0x%" PRIxLEAST64 ".",
                        object_string[kVulkanObjectTypeCommandBuffer], object_handle);
    }
    return skip;
}

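// Track a newly allocated descriptor set and record the descriptor pool it came from.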
void AllocateDescriptorSet(VkDevice device, VkDescriptorPool descriptor_pool, VkDescriptorSet descriptor_set) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
            HandleToUint64(descriptor_set), OBJTRACK_NONE, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT", HandleToUint64(descriptor_set));

    ObjTrackState *pNewObjNode = new ObjTrackState;
    pNewObjNode->object_type = kVulkanObjectTypeDescriptorSet;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->handle = HandleToUint64(descriptor_set);
    pNewObjNode->parent_object = HandleToUint64(descriptor_pool);
    device_data->object_map[kVulkanObjectTypeDescriptorSet][HandleToUint64(descriptor_set)] = pNewObjNode;
    device_data->num_objects[kVulkanObjectTypeDescriptorSet]++;
    device_data->num_total_objects++;
}

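// Verify that a descriptor set being freed is tracked and that it belongs to the descriptor pool it is being freed from.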
bool ValidateDescriptorSet(VkDevice device, VkDescriptorPool descriptor_pool, VkDescriptorSet descriptor_set) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    uint64_t object_handle = HandleToUint64(descriptor_set);
    auto dsItem = device_data->object_map[kVulkanObjectTypeDescriptorSet].find(object_handle);
    if (dsItem != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
        ObjTrackState *pNode = dsItem->second;

        if (pNode->parent_object != HandleToUint64(descriptor_pool)) {
            skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                            object_handle, "VUID-vkFreeDescriptorSets-pDescriptorSets-parent",
                            "FreeDescriptorSets is attempting to free descriptorSet 0x%" PRIxLEAST64
                            " belonging to Descriptor Pool 0x%" PRIxLEAST64 " from pool 0x%" PRIxLEAST64 ".",
                            HandleToUint64(descriptor_set), pNode->parent_object, HandleToUint64(descriptor_pool));
        }
    } else {
        skip |= log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT,
                        object_handle, "VUID-vkFreeDescriptorSets-pDescriptorSets-00310", "Invalid %s Object 0x%" PRIxLEAST64 ".",
                        object_string[kVulkanObjectTypeDescriptorSet], object_handle);
    }
    return skip;
}

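// Validate every handle referenced by a VkWriteDescriptorSet. Which members are checked depends on descriptorType;
// for push descriptors (isPush == true) the dstSet member is not used and therefore not validated.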
template <typename DispObj>
static bool ValidateDescriptorWrite(DispObj disp, VkWriteDescriptorSet const *desc, bool isPush) {
    bool skip = false;

    if (!isPush && desc->dstSet) {
        skip |= ValidateObject(disp, desc->dstSet, kVulkanObjectTypeDescriptorSet, false, "VUID-VkWriteDescriptorSet-dstSet-00320",
                               "VUID-VkWriteDescriptorSet-commonparent");
    }

    if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)) {
        for (uint32_t idx2 = 0; idx2 < desc->descriptorCount; ++idx2) {
            skip |= ValidateObject(disp, desc->pTexelBufferView[idx2], kVulkanObjectTypeBufferView, false,
                                   "VUID-VkWriteDescriptorSet-descriptorType-00323", "VUID-VkWriteDescriptorSet-commonparent");
        }
    }

    if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE) || (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT)) {
        for (uint32_t idx3 = 0; idx3 < desc->descriptorCount; ++idx3) {
            skip |= ValidateObject(disp, desc->pImageInfo[idx3].imageView, kVulkanObjectTypeImageView, false,
                                   "VUID-VkWriteDescriptorSet-descriptorType-00326", "VUID-VkDescriptorImageInfo-commonparent");
        }
    }

    if ((desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC) ||
        (desc->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)) {
        for (uint32_t idx4 = 0; idx4 < desc->descriptorCount; ++idx4) {
            if (desc->pBufferInfo[idx4].buffer) {
                skip |= ValidateObject(disp, desc->pBufferInfo[idx4].buffer, kVulkanObjectTypeBuffer, false,
                                       "VUID-VkDescriptorBufferInfo-buffer-parameter", kVUIDUndefined);
            }
        }
    }

    return skip;
}

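// Push descriptor writes are validated with isPush = true, so only the resources referenced by each write are checked.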
VKAPI_ATTR void VKAPI_CALL CmdPushDescriptorSetKHR(VkCommandBuffer commandBuffer, VkPipelineBindPoint pipelineBindPoint,
                                                   VkPipelineLayout layout, uint32_t set, uint32_t descriptorWriteCount,
                                                   const VkWriteDescriptorSet *pDescriptorWrites) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false,
                           "VUID-vkCmdPushDescriptorSetKHR-commandBuffer-parameter", "VUID-vkCmdPushDescriptorSetKHR-commonparent");
        skip |= ValidateObject(commandBuffer, layout, kVulkanObjectTypePipelineLayout, false,
                               "VUID-vkCmdPushDescriptorSetKHR-layout-parameter", "VUID-vkCmdPushDescriptorSetKHR-commonparent");
        if (pDescriptorWrites) {
            for (uint32_t index0 = 0; index0 < descriptorWriteCount; ++index0) {
                skip |= ValidateDescriptorWrite(commandBuffer, &pDescriptorWrites[index0], true);
            }
        }
    }
    if (skip) return;
    get_dispatch_table(ot_device_table_map, commandBuffer)
        ->CmdPushDescriptorSetKHR(commandBuffer, pipelineBindPoint, layout, set, descriptorWriteCount, pDescriptorWrites);
}

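// Queues are retrieved from the device rather than created by the application, so they are added to the tracker on first retrieval.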
void CreateQueue(VkDevice device, VkQueue vkObj) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);

    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT,
            HandleToUint64(vkObj), OBJTRACK_NONE, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT", HandleToUint64(vkObj));

    ObjTrackState *p_obj_node = NULL;
    auto queue_item = device_data->object_map[kVulkanObjectTypeQueue].find(HandleToUint64(vkObj));
    if (queue_item == device_data->object_map[kVulkanObjectTypeQueue].end()) {
        p_obj_node = new ObjTrackState;
        device_data->object_map[kVulkanObjectTypeQueue][HandleToUint64(vkObj)] = p_obj_node;
        device_data->num_objects[kVulkanObjectTypeQueue]++;
        device_data->num_total_objects++;
    } else {
        p_obj_node = queue_item->second;
    }
    p_obj_node->object_type = kVulkanObjectTypeQueue;
    p_obj_node->status = OBJSTATUS_NONE;
    p_obj_node->handle = HandleToUint64(vkObj);
}

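// Swapchain images are tracked in their own map, keyed by image handle, with the owning swapchain recorded as the parent.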
void CreateSwapchainImageObject(VkDevice dispatchable_object, VkImage swapchain_image, VkSwapchainKHR swapchain) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(dispatchable_object), layer_data_map);
    log_msg(device_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
            HandleToUint64(swapchain_image), OBJTRACK_NONE, "OBJ[0x%" PRIxLEAST64 "] : CREATE %s object 0x%" PRIxLEAST64,
            object_track_index++, "SwapchainImage", HandleToUint64(swapchain_image));

    ObjTrackState *pNewObjNode = new ObjTrackState;
    pNewObjNode->object_type = kVulkanObjectTypeImage;
    pNewObjNode->status = OBJSTATUS_NONE;
    pNewObjNode->handle = HandleToUint64(swapchain_image);
    pNewObjNode->parent_object = HandleToUint64(swapchain);
    device_data->swapchainImageMap[HandleToUint64(swapchain_image)] = pNewObjNode;
}

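// Report an error for every object of the given type that is still alive when its parent device is torn down.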
void DeviceReportUndestroyedObjects(VkDevice device, VulkanObjectType object_type, const std::string &error_code) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    for (const auto &item : device_data->object_map[object_type]) {
        const ObjTrackState *object_info = item.second;
        log_msg(device_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, get_debug_report_enum[object_type], object_info->handle,
                error_code, "OBJ ERROR : For device 0x%" PRIxLEAST64 ", %s object 0x%" PRIxLEAST64 " has not been destroyed.",
                HandleToUint64(device), object_string[object_type], object_info->handle);
    }
}

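// Remove all remaining tracked objects of the given type without emitting further messages (leaks are reported separately).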
void DeviceDestroyUndestroyedObjects(VkDevice device, VulkanObjectType object_type) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    while (!device_data->object_map[object_type].empty()) {
        auto item = device_data->object_map[object_type].begin();

        ObjTrackState *object_info = item->second;
        DestroyObjectSilently(device, object_info->handle, object_type);
    }
}

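// Instance teardown: re-enable temporary callbacks to catch cleanup issues, untrack physical devices, report and purge
// any leaked child devices and their objects, call down the chain, then release this layer's own bookkeeping.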
VKAPI_ATTR void VKAPI_CALL DestroyInstance(VkInstance instance, const VkAllocationCallbacks *pAllocator) {
    std::unique_lock<std::mutex> lock(global_lock);

    dispatch_key key = get_dispatch_key(instance);
    layer_data *instance_data = GetLayerDataPtr(key, layer_data_map);

    // Enable the temporary callback(s) here to catch cleanup issues:
    if (instance_data->num_tmp_debug_messengers > 0) {
        layer_enable_tmp_debug_messengers(instance_data->report_data, instance_data->num_tmp_debug_messengers,
                                          instance_data->tmp_messenger_create_infos, instance_data->tmp_debug_messengers);
    }
    if (instance_data->num_tmp_report_callbacks > 0) {
        layer_enable_tmp_report_callbacks(instance_data->report_data, instance_data->num_tmp_report_callbacks,
                                          instance_data->tmp_report_create_infos, instance_data->tmp_report_callbacks);
    }

    // TODO: The instance handle can not be validated here. The loader will likely have to validate it.
    ValidateObject(instance, instance, kVulkanObjectTypeInstance, true, "VUID-vkDestroyInstance-instance-parameter",
                   kVUIDUndefined);

    // Destroy physical devices
    for (auto iit = instance_data->object_map[kVulkanObjectTypePhysicalDevice].begin();
         iit != instance_data->object_map[kVulkanObjectTypePhysicalDevice].end();) {
        ObjTrackState *pNode = iit->second;
        VkPhysicalDevice physical_device = reinterpret_cast<VkPhysicalDevice>(pNode->handle);

        DestroyObject(instance, physical_device, kVulkanObjectTypePhysicalDevice, nullptr, kVUIDUndefined, kVUIDUndefined);
        iit = instance_data->object_map[kVulkanObjectTypePhysicalDevice].begin();
    }

    // Destroy child devices
    for (auto iit = instance_data->object_map[kVulkanObjectTypeDevice].begin();
         iit != instance_data->object_map[kVulkanObjectTypeDevice].end();) {
        ObjTrackState *pNode = iit->second;

        VkDevice device = reinterpret_cast<VkDevice>(pNode->handle);
        VkDebugReportObjectTypeEXT debug_object_type = get_debug_report_enum[pNode->object_type];

        log_msg(instance_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, debug_object_type, pNode->handle, OBJTRACK_OBJECT_LEAK,
                "OBJ ERROR : %s object 0x%" PRIxLEAST64 " has not been destroyed.",
                string_VkDebugReportObjectTypeEXT(debug_object_type), pNode->handle);

        // Report any remaining objects still tracked for this child device
        ReportUndestroyedObjects(device, "VUID-vkDestroyInstance-instance-00629");
        DestroyUndestroyedObjects(device);

        DestroyObject(instance, device, kVulkanObjectTypeDevice, pAllocator, "VUID-vkDestroyInstance-instance-00630",
                      "VUID-vkDestroyInstance-instance-00631");
        iit = instance_data->object_map[kVulkanObjectTypeDevice].begin();
    }

    instance_data->object_map[kVulkanObjectTypeDevice].clear();

    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ot_instance_table_map, instance);
    pInstanceTable->DestroyInstance(instance, pAllocator);

    // Disable and cleanup the temporary callback(s):
    layer_disable_tmp_debug_messengers(instance_data->report_data, instance_data->num_tmp_debug_messengers,
                                       instance_data->tmp_debug_messengers);
    layer_disable_tmp_report_callbacks(instance_data->report_data, instance_data->num_tmp_report_callbacks,
                                       instance_data->tmp_report_callbacks);
    if (instance_data->num_tmp_debug_messengers > 0) {
        layer_free_tmp_debug_messengers(instance_data->tmp_messenger_create_infos, instance_data->tmp_debug_messengers);
        instance_data->num_tmp_debug_messengers = 0;
    }
    if (instance_data->num_tmp_report_callbacks > 0) {
        layer_free_tmp_report_callbacks(instance_data->tmp_report_create_infos, instance_data->tmp_report_callbacks);
        instance_data->num_tmp_report_callbacks = 0;
    }

    // Clean up logging callback, if any
    while (instance_data->logging_messenger.size() > 0) {
        VkDebugUtilsMessengerEXT messenger = instance_data->logging_messenger.back();
        layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
        instance_data->logging_messenger.pop_back();
    }
    while (instance_data->logging_callback.size() > 0) {
        VkDebugReportCallbackEXT callback = instance_data->logging_callback.back();
        layer_destroy_report_callback(instance_data->report_data, callback, pAllocator);
        instance_data->logging_callback.pop_back();
    }

    DestroyObject(instance, instance, kVulkanObjectTypeInstance, pAllocator, "VUID-vkDestroyInstance-instance-00630",
                  "VUID-vkDestroyInstance-instance-00631");

    layer_debug_utils_destroy_instance(instance_data->report_data);
    FreeLayerDataPtr(key, layer_data_map);

    lock.unlock();
    ot_instance_table_map.erase(key);
    delete pInstanceTable;
}

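// Device teardown: untrack the device, report and purge any objects it still owns, free the queue bookkeeping, then
// call down the chain and release the device dispatch table.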
VKAPI_ATTR void VKAPI_CALL DestroyDevice(VkDevice device, const VkAllocationCallbacks *pAllocator) {
    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    ValidateObject(device, device, kVulkanObjectTypeDevice, true, "VUID-vkDestroyDevice-device-parameter", kVUIDUndefined);
    DestroyObject(device_data->instance, device, kVulkanObjectTypeDevice, pAllocator, "VUID-vkDestroyDevice-device-00379",
                  "VUID-vkDestroyDevice-device-00380");

    // Report any remaining objects still associated with this VkDevice
    ReportUndestroyedObjects(device, "VUID-vkDestroyDevice-device-00378");
    DestroyUndestroyedObjects(device);

    // Clean up the device's queue tracking data structures
    DestroyQueueDataStructures(device);

    lock.unlock();

    dispatch_key key = get_dispatch_key(device);
    VkLayerDispatchTable *pDisp = get_dispatch_table(ot_device_table_map, device);
    pDisp->DestroyDevice(device, pAllocator);
    ot_device_table_map.erase(key);
    delete pDisp;

    FreeLayerDataPtr(key, layer_data_map);
}

VKAPI_ATTR void VKAPI_CALL GetDeviceQueue(VkDevice device, uint32_t queueFamilyIndex, uint32_t queueIndex, VkQueue *pQueue) {
    std::unique_lock<std::mutex> lock(global_lock);
    ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetDeviceQueue-device-parameter", kVUIDUndefined);
    lock.unlock();

    get_dispatch_table(ot_device_table_map, device)->GetDeviceQueue(device, queueFamilyIndex, queueIndex, pQueue);

    lock.lock();
    CreateQueue(device, *pQueue);
    AddQueueInfo(device, queueFamilyIndex, *pQueue);
}

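// vkGetDeviceQueue2 may return VK_NULL_HANDLE (for example, when VkDeviceQueueInfo2::flags does not match any queue
// created on the device), so the queue is only tracked when a valid handle is returned.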
VKAPI_ATTR void VKAPI_CALL GetDeviceQueue2(VkDevice device, const VkDeviceQueueInfo2 *pQueueInfo, VkQueue *pQueue) {
    std::unique_lock<std::mutex> lock(global_lock);
    ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetDeviceQueue2-device-parameter", kVUIDUndefined);
    lock.unlock();

    get_dispatch_table(ot_device_table_map, device)->GetDeviceQueue2(device, pQueueInfo, pQueue);

    lock.lock();
    if (*pQueue != VK_NULL_HANDLE) {
        CreateQueue(device, *pQueue);
        AddQueueInfo(device, pQueueInfo->queueFamilyIndex, *pQueue);
    }
}

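// Validate the device, the source and destination sets of each copy, and every write via ValidateDescriptorWrite before dispatching the call.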
VKAPI_ATTR void VKAPI_CALL UpdateDescriptorSets(VkDevice device, uint32_t descriptorWriteCount,
                                                const VkWriteDescriptorSet *pDescriptorWrites, uint32_t descriptorCopyCount,
                                                const VkCopyDescriptorSet *pDescriptorCopies) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkUpdateDescriptorSets-device-parameter",
                               kVUIDUndefined);
        if (pDescriptorCopies) {
            for (uint32_t idx0 = 0; idx0 < descriptorCopyCount; ++idx0) {
                if (pDescriptorCopies[idx0].dstSet) {
                    skip |= ValidateObject(device, pDescriptorCopies[idx0].dstSet, kVulkanObjectTypeDescriptorSet, false,
                                           "VUID-VkCopyDescriptorSet-dstSet-parameter", "VUID-VkCopyDescriptorSet-commonparent");
                }
                if (pDescriptorCopies[idx0].srcSet) {
                    skip |= ValidateObject(device, pDescriptorCopies[idx0].srcSet, kVulkanObjectTypeDescriptorSet, false,
                                           "VUID-VkCopyDescriptorSet-srcSet-parameter", "VUID-VkCopyDescriptorSet-commonparent");
                }
            }
        }
        if (pDescriptorWrites) {
            for (uint32_t idx1 = 0; idx1 < descriptorWriteCount; ++idx1) {
                skip |= ValidateDescriptorWrite(device, &pDescriptorWrites[idx1], false);
            }
        }
    }
    if (skip) {
        return;
    }
    get_dispatch_table(ot_device_table_map, device)
        ->UpdateDescriptorSets(device, descriptorWriteCount, pDescriptorWrites, descriptorCopyCount, pDescriptorCopies);
}

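// On validation failure the output array is cleared to VK_NULL_HANDLE and the call is not dispatched; on success only
// the pipelines the driver actually created are added to the tracker.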
VKAPI_ATTR VkResult VKAPI_CALL CreateComputePipelines(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount,
                                                      const VkComputePipelineCreateInfo *pCreateInfos,
                                                      const VkAllocationCallbacks *pAllocator, VkPipeline *pPipelines) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkCreateComputePipelines-device-parameter",
                           kVUIDUndefined);
    if (pCreateInfos) {
        for (uint32_t idx0 = 0; idx0 < createInfoCount; ++idx0) {
            if (pCreateInfos[idx0].basePipelineHandle) {
                skip |=
                    ValidateObject(device, pCreateInfos[idx0].basePipelineHandle, kVulkanObjectTypePipeline, true,
                                   "VUID-VkComputePipelineCreateInfo-flags-00697", "VUID-VkComputePipelineCreateInfo-commonparent");
            }
            if (pCreateInfos[idx0].layout) {
                skip |= ValidateObject(device, pCreateInfos[idx0].layout, kVulkanObjectTypePipelineLayout, false,
                                       "VUID-VkComputePipelineCreateInfo-layout-parameter",
                                       "VUID-VkComputePipelineCreateInfo-commonparent");
            }
            if (pCreateInfos[idx0].stage.module) {
                skip |= ValidateObject(device, pCreateInfos[idx0].stage.module, kVulkanObjectTypeShaderModule, false,
                                       "VUID-VkPipelineShaderStageCreateInfo-module-parameter", kVUIDUndefined);
            }
        }
    }
    if (pipelineCache) {
        skip |= ValidateObject(device, pipelineCache, kVulkanObjectTypePipelineCache, true,
                               "VUID-vkCreateComputePipelines-pipelineCache-parameter",
                               "VUID-vkCreateComputePipelines-pipelineCache-parent");
    }
    lock.unlock();
    if (skip) {
        for (uint32_t i = 0; i < createInfoCount; i++) {
            pPipelines[i] = VK_NULL_HANDLE;
        }
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = get_dispatch_table(ot_device_table_map, device)
                          ->CreateComputePipelines(device, pipelineCache, createInfoCount, pCreateInfos, pAllocator, pPipelines);
    lock.lock();
    for (uint32_t idx1 = 0; idx1 < createInfoCount; ++idx1) {
        if (pPipelines[idx1] != VK_NULL_HANDLE) {
            CreateObject(device, pPipelines[idx1], kVulkanObjectTypePipeline, pAllocator);
        }
    }
    lock.unlock();
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL ResetDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                   VkDescriptorPoolResetFlags flags) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkResetDescriptorPool-device-parameter",
                           kVUIDUndefined);
    skip |=
        ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, false,
                       "VUID-vkResetDescriptorPool-descriptorPool-parameter", "VUID-vkResetDescriptorPool-descriptorPool-parent");
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is reset.
    // Remove this pool's descriptor sets from our descriptorSet map.
    auto itr = device_data->object_map[kVulkanObjectTypeDescriptorSet].begin();
    while (itr != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
        ObjTrackState *pNode = (*itr).second;
        auto del_itr = itr++;
        if (pNode->parent_object == HandleToUint64(descriptorPool)) {
            DestroyObject(device, (VkDescriptorSet)((*del_itr).first), kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined,
                          kVUIDUndefined);
        }
    }
    lock.unlock();
    VkResult result = get_dispatch_table(ot_device_table_map, device)->ResetDescriptorPool(device, descriptorPool, flags);
    return result;
}

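// For a secondary command buffer that continues a render pass, the inheritance info's framebuffer and renderPass are validated as well.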
VKAPI_ATTR VkResult VKAPI_CALL BeginCommandBuffer(VkCommandBuffer command_buffer, const VkCommandBufferBeginInfo *begin_info) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(command_buffer), layer_data_map);
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(command_buffer, command_buffer, kVulkanObjectTypeCommandBuffer, false,
                               "VUID-vkBeginCommandBuffer-commandBuffer-parameter", kVUIDUndefined);
        if (begin_info) {
            ObjTrackState *pNode = device_data->object_map[kVulkanObjectTypeCommandBuffer][HandleToUint64(command_buffer)];
            if ((begin_info->pInheritanceInfo) && (pNode->status & OBJSTATUS_COMMAND_BUFFER_SECONDARY) &&
                (begin_info->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
                skip |=
                    ValidateObject(command_buffer, begin_info->pInheritanceInfo->framebuffer, kVulkanObjectTypeFramebuffer, true,
                                   "VUID-VkCommandBufferBeginInfo-flags-00055", "VUID-VkCommandBufferInheritanceInfo-commonparent");
                skip |=
                    ValidateObject(command_buffer, begin_info->pInheritanceInfo->renderPass, kVulkanObjectTypeRenderPass, false,
                                   "VUID-VkCommandBufferBeginInfo-flags-00053", "VUID-VkCommandBufferInheritanceInfo-commonparent");
            }
        }
    }
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = get_dispatch_table(ot_device_table_map, command_buffer)->BeginCommandBuffer(command_buffer, begin_info);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDebugReportCallbackEXT(VkInstance instance,
                                                            const VkDebugReportCallbackCreateInfoEXT *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkDebugReportCallbackEXT *pCallback) {
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ot_instance_table_map, instance);
    VkResult result = pInstanceTable->CreateDebugReportCallbackEXT(instance, pCreateInfo, pAllocator, pCallback);
    if (VK_SUCCESS == result) {
        layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
        result = layer_create_report_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pCallback);
        CreateObject(instance, *pCallback, kVulkanObjectTypeDebugReportCallbackEXT, pAllocator);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugReportCallbackEXT(VkInstance instance, VkDebugReportCallbackEXT msgCallback,
                                                         const VkAllocationCallbacks *pAllocator) {
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ot_instance_table_map, instance);
    pInstanceTable->DestroyDebugReportCallbackEXT(instance, msgCallback, pAllocator);
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    layer_destroy_report_callback(instance_data->report_data, msgCallback, pAllocator);
    DestroyObject(instance, msgCallback, kVulkanObjectTypeDebugReportCallbackEXT, pAllocator,
                  "VUID-vkDestroyDebugReportCallbackEXT-instance-01242", "VUID-vkDestroyDebugReportCallbackEXT-instance-01243");
}

VKAPI_ATTR void VKAPI_CALL DebugReportMessageEXT(VkInstance instance, VkDebugReportFlagsEXT flags,
                                                 VkDebugReportObjectTypeEXT objType, uint64_t object, size_t location,
                                                 int32_t msgCode, const char *pLayerPrefix, const char *pMsg) {
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ot_instance_table_map, instance);
    pInstanceTable->DebugReportMessageEXT(instance, flags, objType, object, location, msgCode, pLayerPrefix, pMsg);
}

// VK_EXT_debug_utils commands
VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectNameEXT(VkDevice device, const VkDebugUtilsObjectNameInfoEXT *pNameInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (pNameInfo->pObjectName) {
        lock.lock();
        dev_data->report_data->debugUtilsObjectNameMap->insert(
            std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->objectHandle, pNameInfo->pObjectName));
        lock.unlock();
    } else {
        lock.lock();
        dev_data->report_data->debugUtilsObjectNameMap->erase(pNameInfo->objectHandle);
        lock.unlock();
    }
    VkResult result = dev_data->dispatch_table.SetDebugUtilsObjectNameEXT(device, pNameInfo);
    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL SetDebugUtilsObjectTagEXT(VkDevice device, const VkDebugUtilsObjectTagInfoEXT *pTagInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    VkResult result = dev_data->dispatch_table.SetDebugUtilsObjectTagEXT(device, pTagInfo);
    return result;
}

VKAPI_ATTR void VKAPI_CALL QueueBeginDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (!skip) {
        lock.lock();
        BeginQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
        lock.unlock();
        if (dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT) {
            dev_data->dispatch_table.QueueBeginDebugUtilsLabelEXT(queue, pLabelInfo);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL QueueEndDebugUtilsLabelEXT(VkQueue queue) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (!skip) {
        if (dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT) {
            dev_data->dispatch_table.QueueEndDebugUtilsLabelEXT(queue);
        }
        lock.lock();
        EndQueueDebugUtilsLabel(dev_data->report_data, queue);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL QueueInsertDebugUtilsLabelEXT(VkQueue queue, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(queue, queue, kVulkanObjectTypeQueue, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(queue), layer_data_map);
    if (!skip) {
        lock.lock();
        InsertQueueDebugUtilsLabel(dev_data->report_data, queue, pLabelInfo);
        lock.unlock();
        if (dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT) {
            dev_data->dispatch_table.QueueInsertDebugUtilsLabelEXT(queue, pLabelInfo);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL CmdBeginDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    if (!skip) {
        lock.lock();
        BeginCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
        lock.unlock();
        if (dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT) {
            dev_data->dispatch_table.CmdBeginDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
        }
    }
}

VKAPI_ATTR void VKAPI_CALL CmdEndDebugUtilsLabelEXT(VkCommandBuffer commandBuffer) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    if (!skip) {
        if (dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT) {
            dev_data->dispatch_table.CmdEndDebugUtilsLabelEXT(commandBuffer);
        }
        lock.lock();
        EndCmdDebugUtilsLabel(dev_data->report_data, commandBuffer);
        lock.unlock();
    }
}

VKAPI_ATTR void VKAPI_CALL CmdInsertDebugUtilsLabelEXT(VkCommandBuffer commandBuffer, const VkDebugUtilsLabelEXT *pLabelInfo) {
    bool skip = VK_FALSE;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(commandBuffer, commandBuffer, kVulkanObjectTypeCommandBuffer, false, kVUIDUndefined, kVUIDUndefined);
    lock.unlock();
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(commandBuffer), layer_data_map);
    if (!skip) {
        lock.lock();
        InsertCmdDebugUtilsLabel(dev_data->report_data, commandBuffer, pLabelInfo);
        lock.unlock();
        if (dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT) {
            dev_data->dispatch_table.CmdInsertDebugUtilsLabelEXT(commandBuffer, pLabelInfo);
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL CreateDebugUtilsMessengerEXT(VkInstance instance,
                                                            const VkDebugUtilsMessengerCreateInfoEXT *pCreateInfo,
                                                            const VkAllocationCallbacks *pAllocator,
                                                            VkDebugUtilsMessengerEXT *pMessenger) {
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ot_instance_table_map, instance);
    VkResult result = pInstanceTable->CreateDebugUtilsMessengerEXT(instance, pCreateInfo, pAllocator, pMessenger);
    if (VK_SUCCESS == result) {
        layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
        result = layer_create_messenger_callback(instance_data->report_data, false, pCreateInfo, pAllocator, pMessenger);
        CreateObject(instance, *pMessenger, kVulkanObjectTypeDebugUtilsMessengerEXT, pAllocator);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDebugUtilsMessengerEXT(VkInstance instance, VkDebugUtilsMessengerEXT messenger,
                                                         const VkAllocationCallbacks *pAllocator) {
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ot_instance_table_map, instance);
    pInstanceTable->DestroyDebugUtilsMessengerEXT(instance, messenger, pAllocator);
    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(instance), layer_data_map);
    layer_destroy_messenger_callback(instance_data->report_data, messenger, pAllocator);
    DestroyObject(instance, messenger, kVulkanObjectTypeDebugUtilsMessengerEXT, pAllocator, kVUIDUndefined, kVUIDUndefined);
}

VKAPI_ATTR void VKAPI_CALL SubmitDebugUtilsMessageEXT(VkInstance instance, VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity,
                                                      VkDebugUtilsMessageTypeFlagsEXT messageTypes,
                                                      const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData) {
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ot_instance_table_map, instance);
    pInstanceTable->SubmitDebugUtilsMessageEXT(instance, messageSeverity, messageTypes, pCallbackData);
}

static const VkExtensionProperties instance_extensions[] = {{VK_EXT_DEBUG_REPORT_EXTENSION_NAME, VK_EXT_DEBUG_REPORT_SPEC_VERSION},
                                                            {VK_EXT_DEBUG_UTILS_EXTENSION_NAME, VK_EXT_DEBUG_UTILS_SPEC_VERSION}};

static const VkLayerProperties globalLayerProps = {"VK_LAYER_LUNARG_object_tracker",
                                                   VK_LAYER_API_VERSION,  // specVersion
                                                   1,                     // implementationVersion
                                                   "LunarG Validation Layer"};

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceLayerProperties(uint32_t *pCount, VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                              VkLayerProperties *pProperties) {
    return util_GetLayerProperties(1, &globalLayerProps, pCount, pProperties);
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                    VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(1, instance_extensions, pCount, pProperties);

    return VK_ERROR_LAYER_NOT_PRESENT;
}

VKAPI_ATTR VkResult VKAPI_CALL EnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice, const char *pLayerName,
                                                                  uint32_t *pCount, VkExtensionProperties *pProperties) {
    if (pLayerName && !strcmp(pLayerName, globalLayerProps.layerName))
        return util_GetExtensionProperties(0, nullptr, pCount, pProperties);

    assert(physicalDevice);
    VkLayerInstanceDispatchTable *pTable = get_dispatch_table(ot_instance_table_map, physicalDevice);
    return pTable->EnumerateDeviceExtensionProperties(physicalDevice, NULL, pCount, pProperties);
}

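// Standard layer device-creation pattern: fetch the next layer's vkCreateDevice from the layer chain, advance the
// chain, create the device, then initialize this layer's per-device state and dispatch table.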
VKAPI_ATTR VkResult VKAPI_CALL CreateDevice(VkPhysicalDevice physicalDevice, const VkDeviceCreateInfo *pCreateInfo,
                                            const VkAllocationCallbacks *pAllocator, VkDevice *pDevice) {
    std::lock_guard<std::mutex> lock(global_lock);
    bool skip = ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                               "VUID-vkCreateDevice-physicalDevice-parameter", kVUIDUndefined);
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    layer_data *phy_dev_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
    VkLayerDeviceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkGetDeviceProcAddr fpGetDeviceProcAddr = chain_info->u.pLayerInfo->pfnNextGetDeviceProcAddr;
    PFN_vkCreateDevice fpCreateDevice = (PFN_vkCreateDevice)fpGetInstanceProcAddr(phy_dev_data->instance, "vkCreateDevice");
    if (fpCreateDevice == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateDevice(physicalDevice, pCreateInfo, pAllocator, pDevice);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(*pDevice), layer_data_map);
    device_data->report_data = layer_debug_utils_create_device(phy_dev_data->report_data, *pDevice);
    layer_init_device_dispatch_table(*pDevice, &device_data->dispatch_table, fpGetDeviceProcAddr);

    // Add link back to physDev
    device_data->physical_device = physicalDevice;
    device_data->instance = phy_dev_data->instance;

    initDeviceTable(*pDevice, fpGetDeviceProcAddr, ot_device_table_map);

    CreateObject(phy_dev_data->instance, *pDevice, kVulkanObjectTypeDevice, pAllocator);

    return result;
}

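// Swapchain images are returned on the second call (non-NULL pSwapchainImages); each one is added to the swapchain image map.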
VKAPI_ATTR VkResult VKAPI_CALL GetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                     VkImage *pSwapchainImages) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkGetSwapchainImagesKHR-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, swapchain, kVulkanObjectTypeSwapchainKHR, false,
                           "VUID-vkGetSwapchainImagesKHR-swapchain-parameter", kVUIDUndefined);
    lock.unlock();
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;

    VkResult result = get_dispatch_table(ot_device_table_map, device)
                          ->GetSwapchainImagesKHR(device, swapchain, pSwapchainImageCount, pSwapchainImages);
    if (pSwapchainImages != NULL) {
        lock.lock();
        for (uint32_t i = 0; i < *pSwapchainImageCount; i++) {
            CreateSwapchainImageObject(device, pSwapchainImages[i], swapchain);
        }
        lock.unlock();
    }
    return result;
}

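// vkCreateDescriptorSetLayout hook. Besides the device handle, any immutable sampler handles
// embedded in sampler-type bindings are validated, since those handles are consumed at layout
// creation time. The application-side structure being walked looks roughly like this
// (hypothetical values, shown only to illustrate which field is checked):
//
//   VkDescriptorSetLayoutBinding binding = {};
//   binding.binding = 0;
//   binding.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
//   binding.descriptorCount = 1;
//   binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
//   binding.pImmutableSamplers = &sampler;  // each handle here is run through ValidateObject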
VKAPI_ATTR VkResult VKAPI_CALL CreateDescriptorSetLayout(VkDevice device, const VkDescriptorSetLayoutCreateInfo *pCreateInfo,
                                                         const VkAllocationCallbacks *pAllocator,
                                                         VkDescriptorSetLayout *pSetLayout) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkCreateDescriptorSetLayout-device-parameter",
                               kVUIDUndefined);
        if (pCreateInfo) {
            if (pCreateInfo->pBindings) {
                for (uint32_t binding_index = 0; binding_index < pCreateInfo->bindingCount; ++binding_index) {
                    const VkDescriptorSetLayoutBinding &binding = pCreateInfo->pBindings[binding_index];
                    const bool is_sampler_type = binding.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
                                                 binding.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
                    if (binding.pImmutableSamplers && is_sampler_type) {
                        for (uint32_t index2 = 0; index2 < binding.descriptorCount; ++index2) {
                            const VkSampler sampler = binding.pImmutableSamplers[index2];
                            skip |= ValidateObject(device, sampler, kVulkanObjectTypeSampler, false,
                                                   "VUID-VkDescriptorSetLayoutBinding-descriptorType-00282", kVUIDUndefined);
                        }
                    }
                }
            }
        }
    }
    if (skip) return VK_ERROR_VALIDATION_FAILED_EXT;
    VkResult result =
        get_dispatch_table(ot_device_table_map, device)->CreateDescriptorSetLayout(device, pCreateInfo, pAllocator, pSetLayout);
    if (VK_SUCCESS == result) {
        std::lock_guard<std::mutex> lock(global_lock);
        CreateObject(device, *pSetLayout, kVulkanObjectTypeDescriptorSetLayout, pAllocator);
    }
    return result;
}

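// vkGetPhysicalDeviceQueueFamilyProperties hook. Beyond validating the physical device, the
// returned properties are cached in the layer's instance data for use by later validation.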
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties(VkPhysicalDevice physicalDevice,
                                                                  uint32_t *pQueueFamilyPropertyCount,
                                                                  VkQueueFamilyProperties *pQueueFamilyProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                               "VUID-vkGetPhysicalDeviceQueueFamilyProperties-physicalDevice-parameter", kVUIDUndefined);
    }
    if (skip) {
        return;
    }
    get_dispatch_table(ot_instance_table_map, physicalDevice)
        ->GetPhysicalDeviceQueueFamilyProperties(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
    std::lock_guard<std::mutex> lock(global_lock);
    if (pQueueFamilyProperties != NULL) {
        layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
        if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
            instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
        }
        for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
            instance_data->queue_family_properties[i] = pQueueFamilyProperties[i];
        }
    }
}

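// vkCreateInstance hook. Instance creation follows the same loader layer-chain pattern as
// CreateDevice above: fetch the next layer's entry point through the create info the loader
// threads through pCreateInfo->pNext, advance the chain link, call down, and only then set up
// this layer's dispatch table and tracking state. A minimal sketch of the recurring hand-off
// (illustrative only, not literal code from this file):
//
//   PFN_vkCreateInstance next_create =
//       (PFN_vkCreateInstance)chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr(NULL, "vkCreateInstance");
//   chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;       // unhook this layer
//   VkResult res = next_create(pCreateInfo, pAllocator, pInstance);   // next layer, or the loader terminator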
VKAPI_ATTR VkResult VKAPI_CALL CreateInstance(const VkInstanceCreateInfo *pCreateInfo, const VkAllocationCallbacks *pAllocator,
                                              VkInstance *pInstance) {
    VkLayerInstanceCreateInfo *chain_info = get_chain_info(pCreateInfo, VK_LAYER_LINK_INFO);

    assert(chain_info->u.pLayerInfo);
    PFN_vkGetInstanceProcAddr fpGetInstanceProcAddr = chain_info->u.pLayerInfo->pfnNextGetInstanceProcAddr;
    PFN_vkCreateInstance fpCreateInstance = (PFN_vkCreateInstance)fpGetInstanceProcAddr(NULL, "vkCreateInstance");
    if (fpCreateInstance == NULL) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    // Advance the link info for the next element on the chain
    chain_info->u.pLayerInfo = chain_info->u.pLayerInfo->pNext;

    VkResult result = fpCreateInstance(pCreateInfo, pAllocator, pInstance);
    if (result != VK_SUCCESS) {
        return result;
    }

    layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(*pInstance), layer_data_map);
    instance_data->instance = *pInstance;
    initInstanceTable(*pInstance, fpGetInstanceProcAddr, ot_instance_table_map);
    VkLayerInstanceDispatchTable *pInstanceTable = get_dispatch_table(ot_instance_table_map, *pInstance);

    // Look for one or more debug report or debug utils messenger create info structures, and
    // copy the callback(s) for each one found (for use by vkDestroyInstance)
    layer_copy_tmp_debug_messengers(pCreateInfo->pNext, &instance_data->num_tmp_debug_messengers,
                                    &instance_data->tmp_messenger_create_infos, &instance_data->tmp_debug_messengers);
    layer_copy_tmp_report_callbacks(pCreateInfo->pNext, &instance_data->num_tmp_report_callbacks,
                                    &instance_data->tmp_report_create_infos, &instance_data->tmp_report_callbacks);

    instance_data->report_data = debug_utils_create_instance(pInstanceTable, *pInstance, pCreateInfo->enabledExtensionCount,
                                                             pCreateInfo->ppEnabledExtensionNames);

    InitObjectTracker(instance_data, pAllocator);

    CreateObject(*pInstance, *pInstance, kVulkanObjectTypeInstance, pAllocator);

    return result;
}

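// vkEnumeratePhysicalDevices hook. Each VkPhysicalDevice handle returned by the down-chain
// call is registered as an object owned by the instance; a count-only query records nothing.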
VKAPI_ATTR VkResult VKAPI_CALL EnumeratePhysicalDevices(VkInstance instance, uint32_t *pPhysicalDeviceCount,
                                                        VkPhysicalDevice *pPhysicalDevices) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(instance, instance, kVulkanObjectTypeInstance, false,
                           "VUID-vkEnumeratePhysicalDevices-instance-parameter", kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = get_dispatch_table(ot_instance_table_map, instance)
                          ->EnumeratePhysicalDevices(instance, pPhysicalDeviceCount, pPhysicalDevices);
    lock.lock();
    if (result == VK_SUCCESS) {
        if (pPhysicalDevices) {
            for (uint32_t i = 0; i < *pPhysicalDeviceCount; i++) {
                CreateObject(instance, pPhysicalDevices[i], kVulkanObjectTypePhysicalDevice, nullptr);
            }
        }
    }
    lock.unlock();
    return result;
}

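// vkAllocateCommandBuffers hook. Each command buffer is tracked as a child of the pool it was
// allocated from, so later frees can verify the buffer/pool pairing.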
VKAPI_ATTR VkResult VKAPI_CALL AllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
                                                      VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkAllocateCommandBuffers-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, pAllocateInfo->commandPool, kVulkanObjectTypeCommandPool, false,
                           "VUID-VkCommandBufferAllocateInfo-commandPool-parameter", kVUIDUndefined);
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        get_dispatch_table(ot_device_table_map, device)->AllocateCommandBuffers(device, pAllocateInfo, pCommandBuffers);

    // Only track the command buffers if the allocation actually succeeded
    if (result == VK_SUCCESS) {
        lock.lock();
        for (uint32_t i = 0; i < pAllocateInfo->commandBufferCount; i++) {
            AllocateCommandBuffer(device, pAllocateInfo->commandPool, pCommandBuffers[i], pAllocateInfo->level);
        }
        lock.unlock();
    }

    return result;
}

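// vkAllocateDescriptorSets hook. The descriptor pool and every set layout are checked against
// the same parent device (the "commonparent" VUIDs), and successfully allocated sets are
// tracked as children of the pool.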
VKAPI_ATTR VkResult VKAPI_CALL AllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                      VkDescriptorSet *pDescriptorSets) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkAllocateDescriptorSets-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, pAllocateInfo->descriptorPool, kVulkanObjectTypeDescriptorPool, false,
                           "VUID-VkDescriptorSetAllocateInfo-descriptorPool-parameter",
                           "VUID-VkDescriptorSetAllocateInfo-commonparent");
    for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
        skip |= ValidateObject(device, pAllocateInfo->pSetLayouts[i], kVulkanObjectTypeDescriptorSetLayout, false,
                               "VUID-VkDescriptorSetAllocateInfo-pSetLayouts-parameter",
                               "VUID-VkDescriptorSetAllocateInfo-commonparent");
    }
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result =
        get_dispatch_table(ot_device_table_map, device)->AllocateDescriptorSets(device, pAllocateInfo, pDescriptorSets);

    if (VK_SUCCESS == result) {
        lock.lock();
        for (uint32_t i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
            AllocateDescriptorSet(device, pAllocateInfo->descriptorPool, pDescriptorSets[i]);
        }
        lock.unlock();
    }

    return result;
}

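// vkFreeCommandBuffers hook. Each non-null command buffer is checked to actually belong to the
// given pool before its tracking entry is removed; the down-chain free is only issued when
// validation passed.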
VKAPI_ATTR void VKAPI_CALL FreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                              const VkCommandBuffer *pCommandBuffers) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkFreeCommandBuffers-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, commandPool, kVulkanObjectTypeCommandPool, false,
                           "VUID-vkFreeCommandBuffers-commandPool-parameter", "VUID-vkFreeCommandBuffers-commandPool-parent");
    for (uint32_t i = 0; i < commandBufferCount; i++) {
        if (pCommandBuffers[i] != VK_NULL_HANDLE) {
            skip |= ValidateCommandBuffer(device, commandPool, pCommandBuffers[i]);
        }
    }

    for (uint32_t i = 0; i < commandBufferCount; i++) {
        DestroyObject(device, pCommandBuffers[i], kVulkanObjectTypeCommandBuffer, nullptr, kVUIDUndefined, kVUIDUndefined);
    }

    lock.unlock();
    if (!skip) {
        get_dispatch_table(ot_device_table_map, device)
            ->FreeCommandBuffers(device, commandPool, commandBufferCount, pCommandBuffers);
    }
}

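// vkDestroySwapchainKHR hook. Swapchain images are owned by the swapchain, so their tracking
// entries are removed here rather than through vkDestroyImage.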
VKAPI_ATTR void VKAPI_CALL DestroySwapchainKHR(VkDevice device, VkSwapchainKHR swapchain, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    // A swapchain's images are implicitly deleted when the swapchain is deleted.
    // Remove this swapchain's images from our map of such images.
    std::unordered_map<uint64_t, ObjTrackState *>::iterator itr = device_data->swapchainImageMap.begin();
    while (itr != device_data->swapchainImageMap.end()) {
        ObjTrackState *pNode = (*itr).second;
        if (pNode->parent_object == HandleToUint64(swapchain)) {
            delete pNode;
            auto delete_item = itr++;
            device_data->swapchainImageMap.erase(delete_item);
        } else {
            ++itr;
        }
    }
    DestroyObject(device, swapchain, kVulkanObjectTypeSwapchainKHR, pAllocator, "VUID-vkDestroySwapchainKHR-swapchain-01283",
                  "VUID-vkDestroySwapchainKHR-swapchain-01284");
    lock.unlock();

    get_dispatch_table(ot_device_table_map, device)->DestroySwapchainKHR(device, swapchain, pAllocator);
}

VKAPI_ATTR VkResult VKAPI_CALL FreeDescriptorSets(VkDevice device, VkDescriptorPool descriptorPool, uint32_t descriptorSetCount,
                                                  const VkDescriptorSet *pDescriptorSets) {
    bool skip = false;
    VkResult result = VK_ERROR_VALIDATION_FAILED_EXT;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkFreeDescriptorSets-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, false,
                           "VUID-vkFreeDescriptorSets-descriptorPool-parameter", "VUID-vkFreeDescriptorSets-descriptorPool-parent");
    for (uint32_t i = 0; i < descriptorSetCount; i++) {
        if (pDescriptorSets[i] != VK_NULL_HANDLE) {
            skip |= ValidateDescriptorSet(device, descriptorPool, pDescriptorSets[i]);
        }
    }

    for (uint32_t i = 0; i < descriptorSetCount; i++) {
        DestroyObject(device, pDescriptorSets[i], kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined, kVUIDUndefined);
    }

    lock.unlock();
    if (!skip) {
        result = get_dispatch_table(ot_device_table_map, device)
                     ->FreeDescriptorSets(device, descriptorPool, descriptorSetCount, pDescriptorSets);
    }
    return result;
}

VKAPI_ATTR void VKAPI_CALL DestroyDescriptorPool(VkDevice device, VkDescriptorPool descriptorPool,
                                                 const VkAllocationCallbacks *pAllocator) {
    bool skip = false;
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDestroyDescriptorPool-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, true,
                           "VUID-vkDestroyDescriptorPool-descriptorPool-parameter",
                           "VUID-vkDestroyDescriptorPool-descriptorPool-parent");
    lock.unlock();
    if (skip) {
        return;
    }
    // A DescriptorPool's descriptor sets are implicitly deleted when the pool is deleted.
    // Remove this pool's descriptor sets from our descriptorSet map.
    lock.lock();
    std::unordered_map<uint64_t, ObjTrackState *>::iterator itr = device_data->object_map[kVulkanObjectTypeDescriptorSet].begin();
    while (itr != device_data->object_map[kVulkanObjectTypeDescriptorSet].end()) {
        ObjTrackState *pNode = (*itr).second;
        auto del_itr = itr++;
        if (pNode->parent_object == HandleToUint64(descriptorPool)) {
            DestroyObject(device, (VkDescriptorSet)((*del_itr).first), kVulkanObjectTypeDescriptorSet, nullptr, kVUIDUndefined,
                          kVUIDUndefined);
        }
    }
    DestroyObject(device, descriptorPool, kVulkanObjectTypeDescriptorPool, pAllocator,
                  "VUID-vkDestroyDescriptorPool-descriptorPool-00304", "VUID-vkDestroyDescriptorPool-descriptorPool-00305");
    lock.unlock();
    get_dispatch_table(ot_device_table_map, device)->DestroyDescriptorPool(device, descriptorPool, pAllocator);
}

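// vkDestroyCommandPool hook. Destroying a pool implicitly frees its command buffers, so their
// tracking entries are validated and removed before the pool's own entry is destroyed.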
VKAPI_ATTR void VKAPI_CALL DestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    layer_data *device_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDestroyCommandPool-device-parameter",
                           kVUIDUndefined);
    skip |= ValidateObject(device, commandPool, kVulkanObjectTypeCommandPool, true,
                           "VUID-vkDestroyCommandPool-commandPool-parameter", "VUID-vkDestroyCommandPool-commandPool-parent");
    lock.unlock();
    if (skip) {
        return;
    }
    lock.lock();
    // A CommandPool's command buffers are implicitly deleted when the pool is deleted.
    // Remove this pool's cmdBuffers from our cmd buffer map.
    auto itr = device_data->object_map[kVulkanObjectTypeCommandBuffer].begin();
    auto del_itr = itr;
    while (itr != device_data->object_map[kVulkanObjectTypeCommandBuffer].end()) {
        ObjTrackState *pNode = (*itr).second;
        del_itr = itr++;
        if (pNode->parent_object == HandleToUint64(commandPool)) {
            skip |= ValidateCommandBuffer(device, commandPool, reinterpret_cast<VkCommandBuffer>((*del_itr).first));
            DestroyObject(device, reinterpret_cast<VkCommandBuffer>((*del_itr).first), kVulkanObjectTypeCommandBuffer, nullptr,
                          kVUIDUndefined, kVUIDUndefined);
        }
    }
    DestroyObject(device, commandPool, kVulkanObjectTypeCommandPool, pAllocator, "VUID-vkDestroyCommandPool-commandPool-00042",
                  "VUID-vkDestroyCommandPool-commandPool-00043");
    lock.unlock();
    get_dispatch_table(ot_device_table_map, device)->DestroyCommandPool(device, commandPool, pAllocator);
}

// Note: This is the core version of this routine. The extension version is below.
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2(VkPhysicalDevice physicalDevice,
                                                                   uint32_t *pQueueFamilyPropertyCount,
                                                                   VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
    }
    if (skip) {
        return;
    }
    get_dispatch_table(ot_instance_table_map, physicalDevice)
        ->GetPhysicalDeviceQueueFamilyProperties2(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
    std::lock_guard<std::mutex> lock(global_lock);
    if (pQueueFamilyProperties != NULL) {
        layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
        if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
            instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
        }
        for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
            instance_data->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
        }
    }
}

// Note: This is the extension version of this routine. The core version is above.
VKAPI_ATTR void VKAPI_CALL GetPhysicalDeviceQueueFamilyProperties2KHR(VkPhysicalDevice physicalDevice,
                                                                      uint32_t *pQueueFamilyPropertyCount,
                                                                      VkQueueFamilyProperties2KHR *pQueueFamilyProperties) {
    bool skip = false;
    {
        std::lock_guard<std::mutex> lock(global_lock);
        skip |=
            ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false, kVUIDUndefined, kVUIDUndefined);
    }
    if (skip) {
        return;
    }
    get_dispatch_table(ot_instance_table_map, physicalDevice)
        ->GetPhysicalDeviceQueueFamilyProperties2KHR(physicalDevice, pQueueFamilyPropertyCount, pQueueFamilyProperties);
    std::lock_guard<std::mutex> lock(global_lock);
    if (pQueueFamilyProperties != NULL) {
        layer_data *instance_data = GetLayerDataPtr(get_dispatch_key(physicalDevice), layer_data_map);
        if (instance_data->queue_family_properties.size() < *pQueueFamilyPropertyCount) {
            instance_data->queue_family_properties.resize(*pQueueFamilyPropertyCount);
        }
        for (uint32_t i = 0; i < *pQueueFamilyPropertyCount; i++) {
            instance_data->queue_family_properties[i] = pQueueFamilyProperties[i].queueFamilyProperties;
        }
    }
}

VKAPI_ATTR VkResult VKAPI_CALL GetPhysicalDeviceDisplayPropertiesKHR(VkPhysicalDevice physicalDevice, uint32_t *pPropertyCount,
                                                                     VkDisplayPropertiesKHR *pProperties) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                           "VUID-vkGetPhysicalDeviceDisplayPropertiesKHR-physicalDevice-parameter", kVUIDUndefined);
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = get_dispatch_table(ot_instance_table_map, physicalDevice)
                          ->GetPhysicalDeviceDisplayPropertiesKHR(physicalDevice, pPropertyCount, pProperties);

    lock.lock();
    if (result == VK_SUCCESS) {
        if (pProperties) {
            for (uint32_t i = 0; i < *pPropertyCount; ++i) {
                CreateObject(physicalDevice, pProperties[i].display, kVulkanObjectTypeDisplayKHR, nullptr);
            }
        }
    }
    lock.unlock();

    return result;
}

VKAPI_ATTR VkResult VKAPI_CALL GetDisplayModePropertiesKHR(VkPhysicalDevice physicalDevice, VkDisplayKHR display,
                                                           uint32_t *pPropertyCount, VkDisplayModePropertiesKHR *pProperties) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    skip |= ValidateObject(physicalDevice, physicalDevice, kVulkanObjectTypePhysicalDevice, false,
                           "VUID-vkGetDisplayModePropertiesKHR-physicalDevice-parameter", kVUIDUndefined);
    skip |= ValidateObject(physicalDevice, display, kVulkanObjectTypeDisplayKHR, false,
                           "VUID-vkGetDisplayModePropertiesKHR-display-parameter", kVUIDUndefined);
    lock.unlock();

    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = get_dispatch_table(ot_instance_table_map, physicalDevice)
                          ->GetDisplayModePropertiesKHR(physicalDevice, display, pPropertyCount, pProperties);

    lock.lock();
    if (result == VK_SUCCESS) {
        if (pProperties) {
            for (uint32_t i = 0; i < *pPropertyCount; ++i) {
                CreateObject(physicalDevice, pProperties[i].displayMode, kVulkanObjectTypeDisplayModeKHR, nullptr);
            }
        }
    }
    lock.unlock();

    return result;
}

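// vkDebugMarkerSetObjectNameEXT hook. The object-name map maintained here is what allows later
// validation messages to refer to a handle by its application-supplied debug name.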
VKAPI_ATTR VkResult VKAPI_CALL DebugMarkerSetObjectNameEXT(VkDevice device, const VkDebugMarkerObjectNameInfoEXT *pNameInfo) {
    bool skip = false;
    std::unique_lock<std::mutex> lock(global_lock);
    layer_data *dev_data = GetLayerDataPtr(get_dispatch_key(device), layer_data_map);
    if (pNameInfo->pObjectName) {
        dev_data->report_data->debugObjectNameMap->insert(
            std::make_pair<uint64_t, std::string>((uint64_t &&) pNameInfo->object, pNameInfo->pObjectName));
    } else {
        dev_data->report_data->debugObjectNameMap->erase(pNameInfo->object);
    }
    skip |= ValidateObject(device, device, kVulkanObjectTypeDevice, false, "VUID-vkDebugMarkerSetObjectNameEXT-device-parameter",
                           kVUIDUndefined);
    lock.unlock();
    if (skip) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }
    VkResult result = dev_data->dispatch_table.DebugMarkerSetObjectNameEXT(device, pNameInfo);
    return result;
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetPhysicalDeviceProcAddr(VkInstance instance, const char *funcName) {
    assert(instance);

    if (get_dispatch_table(ot_instance_table_map, instance)->GetPhysicalDeviceProcAddr == NULL) {
        return NULL;
    }
    return get_dispatch_table(ot_instance_table_map, instance)->GetPhysicalDeviceProcAddr(instance, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetDeviceProcAddr(VkDevice device, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    auto table = get_dispatch_table(ot_device_table_map, device);
    if (!table->GetDeviceProcAddr) return NULL;
    return table->GetDeviceProcAddr(device, funcName);
}

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL GetInstanceProcAddr(VkInstance instance, const char *funcName) {
    const auto item = name_to_funcptr_map.find(funcName);
    if (item != name_to_funcptr_map.end()) {
        return reinterpret_cast<PFN_vkVoidFunction>(item->second);
    }

    auto table = get_dispatch_table(ot_instance_table_map, instance);
    if (!table->GetInstanceProcAddr) return nullptr;
    return table->GetInstanceProcAddr(instance, funcName);
}

}  // namespace object_tracker

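// Loader-visible entry points. These exported C symbols simply trampoline into the
// object_tracker namespace implementations above; the loader resolves them by name when the
// layer library is loaded.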
VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceExtensionProperties(const char *pLayerName, uint32_t *pCount,
                                                                                      VkExtensionProperties *pProperties) {
    return object_tracker::EnumerateInstanceExtensionProperties(pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateInstanceLayerProperties(uint32_t *pCount,
                                                                                  VkLayerProperties *pProperties) {
    return object_tracker::EnumerateInstanceLayerProperties(pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceLayerProperties(VkPhysicalDevice physicalDevice, uint32_t *pCount,
                                                                                VkLayerProperties *pProperties) {
    // The layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return object_tracker::EnumerateDeviceLayerProperties(VK_NULL_HANDLE, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetDeviceProcAddr(VkDevice dev, const char *funcName) {
    return object_tracker::GetDeviceProcAddr(dev, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vkGetInstanceProcAddr(VkInstance instance, const char *funcName) {
    return object_tracker::GetInstanceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkEnumerateDeviceExtensionProperties(VkPhysicalDevice physicalDevice,
                                                                                    const char *pLayerName, uint32_t *pCount,
                                                                                    VkExtensionProperties *pProperties) {
    // The layer command handles VK_NULL_HANDLE just fine internally
    assert(physicalDevice == VK_NULL_HANDLE);
    return object_tracker::EnumerateDeviceExtensionProperties(VK_NULL_HANDLE, pLayerName, pCount, pProperties);
}

VK_LAYER_EXPORT VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_layerGetPhysicalDeviceProcAddr(VkInstance instance,
                                                                                           const char *funcName) {
    return object_tracker::GetPhysicalDeviceProcAddr(instance, funcName);
}

VK_LAYER_EXPORT VKAPI_ATTR VkResult VKAPI_CALL vkNegotiateLoaderLayerInterfaceVersion(VkNegotiateLayerInterface *pVersionStruct) {
    assert(pVersionStruct != NULL);
    assert(pVersionStruct->sType == LAYER_NEGOTIATE_INTERFACE_STRUCT);

    // Fill in the function pointers if our version is at least capable of having the structure contain them.
    if (pVersionStruct->loaderLayerInterfaceVersion >= 2) {
        pVersionStruct->pfnGetInstanceProcAddr = vkGetInstanceProcAddr;
        pVersionStruct->pfnGetDeviceProcAddr = vkGetDeviceProcAddr;
        pVersionStruct->pfnGetPhysicalDeviceProcAddr = vk_layerGetPhysicalDeviceProcAddr;
    }

    if (pVersionStruct->loaderLayerInterfaceVersion < CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        object_tracker::loader_layer_if_version = pVersionStruct->loaderLayerInterfaceVersion;
    } else if (pVersionStruct->loaderLayerInterfaceVersion > CURRENT_LOADER_LAYER_INTERFACE_VERSION) {
        pVersionStruct->loaderLayerInterfaceVersion = CURRENT_LOADER_LAYER_INTERFACE_VERSION;
    }

    return VK_SUCCESS;
}
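
// Interface-version negotiation, in short: the agreed version is the lower of what the loader
// proposes and what this layer was built against (CURRENT_LOADER_LAYER_INTERFACE_VERSION); the
// two branches above implement exactly that. A condensed, illustrative equivalent:
//
//   uint32_t agreed = std::min(pVersionStruct->loaderLayerInterfaceVersion,
//                              (uint32_t)CURRENT_LOADER_LAYER_INTERFACE_VERSION);
//   pVersionStruct->loaderLayerInterfaceVersion = agreed;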