blob: 70814a53bb1f8ab2db755dfae573c91168973489 [file] [log] [blame]
Mark Lobodzinski6eda00a2016-02-02 15:55:36 -07001/* Copyright (c) 2015-2016 The Khronos Group Inc.
2 * Copyright (c) 2015-2016 Valve Corporation
3 * Copyright (c) 2015-2016 LunarG, Inc.
Mike Stroyan3712d5c2015-04-02 11:59:05 -06004 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -06005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
Mike Stroyan3712d5c2015-04-02 11:59:05 -06008 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -06009 * http://www.apache.org/licenses/LICENSE-2.0
Mike Stroyan3712d5c2015-04-02 11:59:05 -060010 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -060011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
Courtney Goeltzenleuchter05559522015-10-30 11:14:30 -060016 *
17 * Author: Cody Northrop <cody@lunarg.com>
18 * Author: Mike Stroyan <mike@LunarG.com>
Mike Stroyan3712d5c2015-04-02 11:59:05 -060019 */
Mark Lobodzinski6eda00a2016-02-02 15:55:36 -070020
Mike Stroyan313f7e62015-08-10 16:42:53 -060021#ifndef THREADING_H
22#define THREADING_H
#include <atomic>
#include <cinttypes>
#include <condition_variable>
#include <mutex>
#include <unordered_map>
#include <vector>
#include "vk_layer_config.h"
#include "vk_layer_logging.h"
Mike Stroyan3712d5c2015-04-02 11:59:05 -060028
Mark Lobodzinski64318ba2017-01-26 13:34:13 -070029#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || \
Jon Ashburn5484e0c2016-03-08 17:48:44 -070030 defined(__aarch64__) || defined(__powerpc64__)
Mike Stroyan31c50c82016-01-29 15:09:04 -070031// If pointers are 64-bit, then there can be separate counters for each
32// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedef uint64_t.
33#define DISTINCT_NONDISPATCHABLE_HANDLES
34#endif
35
// Threading checker error codes, passed as the msgCode argument to log_msg.
// Kept as a plain enum (not enum class) because callers pass the values as
// integral message codes.
enum THREADING_CHECKER_ERROR {
    THREADING_CHECKER_NONE,                 // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS,     // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE,  // Object used simultaneously by recursion in single thread
};
Mike Stroyan3712d5c2015-04-02 11:59:05 -060042
// Per-object bookkeeping for counter<T>: which thread is using the object and
// how many concurrent read/write uses are outstanding. Left as a plain
// aggregate (no in-class initializers); counter<T> always fills in all fields
// when it creates an entry.
struct object_use_data {
    loader_platform_thread_id thread;  // Thread currently using the object
    int reader_count;                  // Number of outstanding read uses
    int writer_count;                  // Number of outstanding write uses (>1 only for recursive/same-call use)
};
48
49struct layer_data;
Mike Stroyan845bdc42015-11-02 15:30:20 -070050
namespace threading {
// Flags tracking whether the application has ever used Vulkan from more than
// one thread at a time. std::atomic is used instead of volatile: volatile
// provides no inter-thread ordering or atomicity guarantees, so concurrent
// updates to these flags were a data race.
std::atomic<bool> vulkan_in_use(false);
std::atomic<bool> vulkan_multi_threaded(false);

// Starting check if an application is using vulkan from multiple threads.
// Returns false for a single-threaded (non-overlapping) call sequence.
// Once overlapping use is observed the result becomes sticky: all later
// calls return true, even after finishMultiThread().
// Note: the check-then-set sequence is intentionally tolerant of races; a
// lost update can only delay detection by one call, never crash.
inline bool startMultiThread() {
    if (vulkan_multi_threaded) {
        return true;
    }
    if (vulkan_in_use) {
        vulkan_multi_threaded = true;
        return true;
    }
    vulkan_in_use = true;
    return false;
}

// Finishing check if an application is using vulkan from multiple threads.
// Marks the current (non-overlapping) use as complete.
inline void finishMultiThread() { vulkan_in_use = false; }
}  // namespace threading
Mike Stroyan0b64aee2016-07-13 10:10:25 -060070
Mark Lobodzinski64318ba2017-01-26 13:34:13 -070071template <typename T>
72class counter {
73 public:
Mike Stroyan845bdc42015-11-02 15:30:20 -070074 const char *typeName;
75 VkDebugReportObjectTypeEXT objectType;
Mike Stroyan1a080012016-01-29 15:33:21 -070076 std::unordered_map<T, object_use_data> uses;
Mike Stroyanb3dd7902016-06-30 13:21:37 -060077 std::mutex counter_lock;
78 std::condition_variable counter_condition;
Jon Ashburn5484e0c2016-03-08 17:48:44 -070079 void startWrite(debug_report_data *report_data, T object) {
Mike Stroyan9e2c72a2017-05-01 11:10:07 -060080 if (object == VK_NULL_HANDLE) {
81 return;
82 }
Dustin Graves080069b2016-04-05 13:48:15 -060083 bool skipCall = false;
Mike Stroyan845bdc42015-11-02 15:30:20 -070084 loader_platform_thread_id tid = loader_platform_get_thread_id();
Mike Stroyanb3dd7902016-06-30 13:21:37 -060085 std::unique_lock<std::mutex> lock(counter_lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -070086 if (uses.find(object) == uses.end()) {
87 // There is no current use of the object. Record writer thread.
88 struct object_use_data *use_data = &uses[object];
89 use_data->reader_count = 0;
90 use_data->writer_count = 1;
91 use_data->thread = tid;
92 } else {
93 struct object_use_data *use_data = &uses[object];
94 if (use_data->reader_count == 0) {
95 // There are no readers. Two writers just collided.
96 if (use_data->thread != tid) {
Mark Lobodzinski729a8d32017-01-26 12:16:30 -070097 skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), 0,
98 THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
Karl Schultz2e5ed332017-12-12 10:33:01 -050099 "THREADING ERROR : object of type %s is simultaneously used in "
100 "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
101 typeName, (uint64_t)use_data->thread, (uint64_t)tid);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700102 if (skipCall) {
103 // Wait for thread-safe access to object instead of skipping call.
104 while (uses.find(object) != uses.end()) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600105 counter_condition.wait(lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700106 }
107 // There is now no current use of the object. Record writer thread.
Karl Schultz47dd59d2017-01-20 13:19:20 -0700108 struct object_use_data *new_use_data = &uses[object];
109 new_use_data->thread = tid;
110 new_use_data->reader_count = 0;
111 new_use_data->writer_count = 1;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700112 } else {
113 // Continue with an unsafe use of the object.
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700114 use_data->thread = tid;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700115 use_data->writer_count += 1;
116 }
117 } else {
Mike Stroyanc8774502016-02-05 09:11:32 -0700118 // This is either safe multiple use in one call, or recursive use.
Mike Stroyan845bdc42015-11-02 15:30:20 -0700119 // There is no way to make recursion safe. Just forge ahead.
120 use_data->writer_count += 1;
121 }
122 } else {
123 // There are readers. This writer collided with them.
124 if (use_data->thread != tid) {
Mark Lobodzinski729a8d32017-01-26 12:16:30 -0700125 skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), 0,
126 THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
Karl Schultz2e5ed332017-12-12 10:33:01 -0500127 "THREADING ERROR : object of type %s is simultaneously used in "
128 "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
129 typeName, (uint64_t)use_data->thread, (uint64_t)tid);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700130 if (skipCall) {
131 // Wait for thread-safe access to object instead of skipping call.
132 while (uses.find(object) != uses.end()) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600133 counter_condition.wait(lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700134 }
135 // There is now no current use of the object. Record writer thread.
Karl Schultz47dd59d2017-01-20 13:19:20 -0700136 struct object_use_data *new_use_data = &uses[object];
137 new_use_data->thread = tid;
138 new_use_data->reader_count = 0;
139 new_use_data->writer_count = 1;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700140 } else {
141 // Continue with an unsafe use of the object.
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700142 use_data->thread = tid;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700143 use_data->writer_count += 1;
144 }
145 } else {
Mike Stroyanc8774502016-02-05 09:11:32 -0700146 // This is either safe multiple use in one call, or recursive use.
Mike Stroyan845bdc42015-11-02 15:30:20 -0700147 // There is no way to make recursion safe. Just forge ahead.
148 use_data->writer_count += 1;
149 }
150 }
151 }
Mike Stroyan845bdc42015-11-02 15:30:20 -0700152 }
153
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700154 void finishWrite(T object) {
Mike Stroyan9e2c72a2017-05-01 11:10:07 -0600155 if (object == VK_NULL_HANDLE) {
156 return;
157 }
Mike Stroyan845bdc42015-11-02 15:30:20 -0700158 // Object is no longer in use
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600159 std::unique_lock<std::mutex> lock(counter_lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700160 uses[object].writer_count -= 1;
161 if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
162 uses.erase(object);
163 }
164 // Notify any waiting threads that this object may be safe to use
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600165 lock.unlock();
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600166 counter_condition.notify_all();
Mike Stroyan845bdc42015-11-02 15:30:20 -0700167 }
168
169 void startRead(debug_report_data *report_data, T object) {
Mike Stroyan9e2c72a2017-05-01 11:10:07 -0600170 if (object == VK_NULL_HANDLE) {
171 return;
172 }
Dustin Graves080069b2016-04-05 13:48:15 -0600173 bool skipCall = false;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700174 loader_platform_thread_id tid = loader_platform_get_thread_id();
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600175 std::unique_lock<std::mutex> lock(counter_lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700176 if (uses.find(object) == uses.end()) {
177 // There is no current use of the object. Record reader count
178 struct object_use_data *use_data = &uses[object];
179 use_data->reader_count = 1;
180 use_data->writer_count = 0;
181 use_data->thread = tid;
Mike Stroyanc8774502016-02-05 09:11:32 -0700182 } else if (uses[object].writer_count > 0 && uses[object].thread != tid) {
Mike Stroyan845bdc42015-11-02 15:30:20 -0700183 // There is a writer of the object.
Mark Lobodzinski729a8d32017-01-26 12:16:30 -0700184 skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), 0,
185 THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
Karl Schultz2e5ed332017-12-12 10:33:01 -0500186 "THREADING ERROR : object of type %s is simultaneously used in "
187 "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
188 typeName, (uint64_t)uses[object].thread, (uint64_t)tid);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700189 if (skipCall) {
190 // Wait for thread-safe access to object instead of skipping call.
191 while (uses.find(object) != uses.end()) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600192 counter_condition.wait(lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700193 }
194 // There is no current use of the object. Record reader count
195 struct object_use_data *use_data = &uses[object];
196 use_data->reader_count = 1;
197 use_data->writer_count = 0;
198 use_data->thread = tid;
199 } else {
200 uses[object].reader_count += 1;
201 }
202 } else {
203 // There are other readers of the object. Increase reader count
204 uses[object].reader_count += 1;
205 }
Mike Stroyan845bdc42015-11-02 15:30:20 -0700206 }
207 void finishRead(T object) {
Mike Stroyan9e2c72a2017-05-01 11:10:07 -0600208 if (object == VK_NULL_HANDLE) {
209 return;
210 }
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600211 std::unique_lock<std::mutex> lock(counter_lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700212 uses[object].reader_count -= 1;
213 if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
214 uses.erase(object);
215 }
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600216 // Notify any waiting threads that this object may be safe to use
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600217 lock.unlock();
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600218 counter_condition.notify_all();
Mike Stroyan845bdc42015-11-02 15:30:20 -0700219 }
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700220 counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
Mike Stroyan845bdc42015-11-02 15:30:20 -0700221 typeName = name;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700222 objectType = type;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700223 }
224};
225
Cody Northrop55443ef2015-09-28 15:09:32 -0600226struct layer_data {
Chia-I Wu59d0a332016-05-16 11:21:03 +0800227 VkInstance instance;
228
Mike Stroyan313f7e62015-08-10 16:42:53 -0600229 debug_report_data *report_data;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700230 std::vector<VkDebugReportCallbackEXT> logging_callback;
Mark Young6ba8abe2017-11-09 10:37:04 -0700231 std::vector<VkDebugUtilsMessengerEXT> logging_messenger;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700232 VkLayerDispatchTable *device_dispatch_table;
233 VkLayerInstanceDispatchTable *instance_dispatch_table;
Mark Young6ba8abe2017-11-09 10:37:04 -0700234
Ian Elliotted6b5ac2016-04-28 09:08:13 -0600235 // The following are for keeping track of the temporary callbacks that can
236 // be used in vkCreateInstance and vkDestroyInstance:
Mark Young6ba8abe2017-11-09 10:37:04 -0700237 uint32_t num_tmp_report_callbacks;
238 VkDebugReportCallbackCreateInfoEXT *tmp_report_create_infos;
239 VkDebugReportCallbackEXT *tmp_report_callbacks;
240 uint32_t num_tmp_debug_messengers;
241 VkDebugUtilsMessengerCreateInfoEXT *tmp_messenger_create_infos;
242 VkDebugUtilsMessengerEXT *tmp_debug_messengers;
243
Mike Stroyan845bdc42015-11-02 15:30:20 -0700244 counter<VkCommandBuffer> c_VkCommandBuffer;
245 counter<VkDevice> c_VkDevice;
246 counter<VkInstance> c_VkInstance;
247 counter<VkQueue> c_VkQueue;
Mike Stroyan31c50c82016-01-29 15:09:04 -0700248#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
Mike Stroyan845bdc42015-11-02 15:30:20 -0700249 counter<VkBuffer> c_VkBuffer;
250 counter<VkBufferView> c_VkBufferView;
251 counter<VkCommandPool> c_VkCommandPool;
252 counter<VkDescriptorPool> c_VkDescriptorPool;
253 counter<VkDescriptorSet> c_VkDescriptorSet;
254 counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
255 counter<VkDeviceMemory> c_VkDeviceMemory;
256 counter<VkEvent> c_VkEvent;
257 counter<VkFence> c_VkFence;
258 counter<VkFramebuffer> c_VkFramebuffer;
259 counter<VkImage> c_VkImage;
260 counter<VkImageView> c_VkImageView;
261 counter<VkPipeline> c_VkPipeline;
262 counter<VkPipelineCache> c_VkPipelineCache;
263 counter<VkPipelineLayout> c_VkPipelineLayout;
264 counter<VkQueryPool> c_VkQueryPool;
265 counter<VkRenderPass> c_VkRenderPass;
266 counter<VkSampler> c_VkSampler;
267 counter<VkSemaphore> c_VkSemaphore;
268 counter<VkShaderModule> c_VkShaderModule;
269 counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
Mark Lobodzinski2d589822016-12-12 09:44:34 -0700270 counter<VkObjectTableNVX> c_VkObjectTableNVX;
Mark Lobodzinski729a8d32017-01-26 12:16:30 -0700271 counter<VkIndirectCommandsLayoutNVX> c_VkIndirectCommandsLayoutNVX;
Mark Lobodzinski9c147802017-02-10 08:34:54 -0700272 counter<VkDisplayKHR> c_VkDisplayKHR;
273 counter<VkDisplayModeKHR> c_VkDisplayModeKHR;
274 counter<VkSurfaceKHR> c_VkSurfaceKHR;
275 counter<VkSwapchainKHR> c_VkSwapchainKHR;
Mark Young0f183a82017-02-28 09:58:04 -0700276 counter<VkDescriptorUpdateTemplateKHR> c_VkDescriptorUpdateTemplateKHR;
Mike Schuchardta6b8bdb2017-09-05 16:10:20 -0600277 counter<VkValidationCacheEXT> c_VkValidationCacheEXT;
Lenny Komowb79f04a2017-09-18 17:07:00 -0600278 counter<VkSamplerYcbcrConversionKHR> c_VkSamplerYcbcrConversionKHR;
Mark Young6ba8abe2017-11-09 10:37:04 -0700279 counter<VkDebugUtilsMessengerEXT> c_VkDebugUtilsMessengerEXT;
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700280#else // DISTINCT_NONDISPATCHABLE_HANDLES
Mike Stroyan31c50c82016-01-29 15:09:04 -0700281 counter<uint64_t> c_uint64_t;
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700282#endif // DISTINCT_NONDISPATCHABLE_HANDLES
Mark Lobodzinski2d589822016-12-12 09:44:34 -0700283
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700284 layer_data()
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700285 : report_data(nullptr),
Mark Young6ba8abe2017-11-09 10:37:04 -0700286 num_tmp_report_callbacks(0),
287 tmp_report_create_infos(nullptr),
288 tmp_report_callbacks(nullptr),
289 num_tmp_debug_messengers(0),
290 tmp_messenger_create_infos(nullptr),
291 tmp_debug_messengers(nullptr),
Ian Elliotted6b5ac2016-04-28 09:08:13 -0600292 c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700293 c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
294 c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
295 c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
Mike Stroyan31c50c82016-01-29 15:09:04 -0700296#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700297 c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
298 c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
299 c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
300 c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
301 c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
302 c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
303 c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700304 c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT),
305 c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700306 c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
307 c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
308 c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
309 c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
310 c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
311 c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
312 c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
313 c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
314 c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
315 c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
316 c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
Mark Lobodzinski2d589822016-12-12 09:44:34 -0700317 c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT),
318 c_VkObjectTableNVX("VkObjectTableNVX", VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT),
Dave Houltona9df0ce2018-02-07 10:51:23 -0700319 c_VkIndirectCommandsLayoutNVX("VkIndirectCommandsLayoutNVX",
320 VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT),
Mark Lobodzinski9c147802017-02-10 08:34:54 -0700321 c_VkDisplayKHR("VkDisplayKHR", VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT),
322 c_VkDisplayModeKHR("VkDisplayModeKHR", VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT),
323 c_VkSurfaceKHR("VkSurfaceKHR", VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT),
Mark Young0f183a82017-02-28 09:58:04 -0700324 c_VkSwapchainKHR("VkSwapchainKHR", VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT),
Dave Houltona9df0ce2018-02-07 10:51:23 -0700325 c_VkDescriptorUpdateTemplateKHR("VkDescriptorUpdateTemplateKHR",
326 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT),
Mark Young6ba8abe2017-11-09 10:37:04 -0700327 c_VkSamplerYcbcrConversionKHR("VkSamplerYcbcrConversionKHR",
328 VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT),
329 c_VkDebugUtilsMessengerEXT("VkDebugUtilsMessengerEXT", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700330#else // DISTINCT_NONDISPATCHABLE_HANDLES
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700331 c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700332#endif // DISTINCT_NONDISPATCHABLE_HANDLES
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700333 {};
Cody Northrop55443ef2015-09-28 15:09:32 -0600334};
Mike Stroyan313f7e62015-08-10 16:42:53 -0600335
// Declare the four start/finish read/write accessors for a handle type,
// each forwarding to the matching counter<type> member of layer_data.
#define WRAPPER(type) \
    static void startWriteObject(struct layer_data *my_data, type object) { \
        my_data->c_##type.startWrite(my_data->report_data, object); \
    } \
    static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); } \
    static void startReadObject(struct layer_data *my_data, type object) { \
        my_data->c_##type.startRead(my_data->report_data, object); \
    } \
    static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }
Mike Stroyan313f7e62015-08-10 16:42:53 -0600345
// Instantiate the accessors for every tracked handle type. VkCommandBuffer is
// intentionally absent: it gets hand-written overloads below that also track
// implicit use of the owning command pool.
WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
WRAPPER(VkObjectTableNVX)
WRAPPER(VkIndirectCommandsLayoutNVX)
WRAPPER(VkDisplayKHR)
WRAPPER(VkDisplayModeKHR)
WRAPPER(VkSurfaceKHR)
WRAPPER(VkSwapchainKHR)
WRAPPER(VkDescriptorUpdateTemplateKHR)
WRAPPER(VkValidationCacheEXT)
WRAPPER(VkSamplerYcbcrConversionKHR)
WRAPPER(VkDebugUtilsMessengerEXT)
#else   // DISTINCT_NONDISPATCHABLE_HANDLES
// All non-dispatchable handles share one uint64_t counter on 32-bit builds.
WRAPPER(uint64_t)
#endif  // DISTINCT_NONDISPATCHABLE_HANDLES
Mike Stroyan845bdc42015-11-02 15:30:20 -0700384
// NOTE(review): file-scope statics in a header give each including translation
// unit its own copy; assumed this header is included by exactly one layer
// source file — TODO confirm.
// Dispatch-key -> per-instance/per-device layer state.
static std::unordered_map<void *, layer_data *> layer_data_map;
// Guards command_pool_map.
static std::mutex command_pool_lock;
// Maps each command buffer to its allocating pool, so command-buffer use can
// also be counted against the pool.
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;
388
389// VkCommandBuffer needs check for implicit use of command pool
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700390static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
Mike Stroyan845bdc42015-11-02 15:30:20 -0700391 if (lockPool) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600392 std::unique_lock<std::mutex> lock(command_pool_lock);
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700393 VkCommandPool pool = command_pool_map[object];
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600394 lock.unlock();
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700395 startWriteObject(my_data, pool);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700396 }
397 my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
398}
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700399static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
Mike Stroyan845bdc42015-11-02 15:30:20 -0700400 my_data->c_VkCommandBuffer.finishWrite(object);
401 if (lockPool) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600402 std::unique_lock<std::mutex> lock(command_pool_lock);
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700403 VkCommandPool pool = command_pool_map[object];
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600404 lock.unlock();
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700405 finishWriteObject(my_data, pool);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700406 }
407}
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700408static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600409 std::unique_lock<std::mutex> lock(command_pool_lock);
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700410 VkCommandPool pool = command_pool_map[object];
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600411 lock.unlock();
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700412 startReadObject(my_data, pool);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700413 my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
414}
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700415static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) {
Mike Stroyan845bdc42015-11-02 15:30:20 -0700416 my_data->c_VkCommandBuffer.finishRead(object);
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600417 std::unique_lock<std::mutex> lock(command_pool_lock);
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700418 VkCommandPool pool = command_pool_map[object];
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600419 lock.unlock();
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700420 finishReadObject(my_data, pool);
Mike Stroyan313f7e62015-08-10 16:42:53 -0600421}
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700422#endif // THREADING_H