blob: 529ac83ccdf1d14fad722da7078aa18f8bd9cd21 [file] [log] [blame]
Mark Lobodzinski6eda00a2016-02-02 15:55:36 -07001/* Copyright (c) 2015-2016 The Khronos Group Inc.
2 * Copyright (c) 2015-2016 Valve Corporation
3 * Copyright (c) 2015-2016 LunarG, Inc.
Mike Stroyan3712d5c2015-04-02 11:59:05 -06004 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -06005 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at
Mike Stroyan3712d5c2015-04-02 11:59:05 -06008 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -06009 * http://www.apache.org/licenses/LICENSE-2.0
Mike Stroyan3712d5c2015-04-02 11:59:05 -060010 *
Jon Ashburn3ebf1252016-04-19 11:30:31 -060011 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
Courtney Goeltzenleuchter05559522015-10-30 11:14:30 -060016 *
17 * Author: Cody Northrop <cody@lunarg.com>
18 * Author: Mike Stroyan <mike@LunarG.com>
Mike Stroyan3712d5c2015-04-02 11:59:05 -060019 */
Mark Lobodzinski6eda00a2016-02-02 15:55:36 -070020
Mike Stroyan313f7e62015-08-10 16:42:53 -060021#ifndef THREADING_H
22#define THREADING_H
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <type_traits>
#include <unordered_map>
#include <vector>

#include "vk_layer_config.h"
#include "vk_layer_logging.h"
Mike Stroyan3712d5c2015-04-02 11:59:05 -060028
// Declare a probe handle type so we can inspect how the installed Vulkan
// headers define non-dispatchable handles (pointer vs. uint64_t).
VK_DEFINE_NON_DISPATCHABLE_HANDLE(DISTINCT_NONDISPATCHABLE_PHONY_HANDLE)
// The following line must match the vulkan_core.h condition guarding VK_DEFINE_NON_DISPATCHABLE_HANDLE
#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || \
    defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
// If pointers are 64-bit, then there can be separate counters for each
// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedef uint64_t.
#define DISTINCT_NONDISPATCHABLE_HANDLES
// Make sure we catch any disagreement between us and the vulkan definition
static_assert(std::is_pointer<DISTINCT_NONDISPATCHABLE_PHONY_HANDLE>::value,
              "Mismatched non-dispatchable handle handle, expected pointer type.");
#else
// Make sure we catch any disagreement between us and the vulkan definition
static_assert(std::is_same<uint64_t, DISTINCT_NONDISPATCHABLE_PHONY_HANDLE>::value,
              "Mismatched non-dispatchable handle handle, expected uint64_t.");
#endif
44
// Draw State ERROR codes
// Error identifiers emitted by this layer through the debug-report callback.
// Values are spelled out explicitly so the numbering is stable and obvious.
enum THREADING_CHECKER_ERROR {
    THREADING_CHECKER_NONE = 0,                 // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS = 1,     // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE = 2,  // Object used simultaneously by recursion in single thread
};
Mike Stroyan3712d5c2015-04-02 11:59:05 -060051
// Per-handle bookkeeping for the thread-safety checker: which thread most
// recently touched the handle and how many read/write uses are outstanding.
// Guarded by the owning counter's counter_lock.
struct object_use_data {
    loader_platform_thread_id thread;  // last thread observed using this object
    int reader_count;                  // number of outstanding read (shared) uses
    int writer_count;                  // number of outstanding write (exclusive) uses
};
57
58struct layer_data;
Mike Stroyan845bdc42015-11-02 15:30:20 -070059
namespace threading {
// Heuristic detection of multi-threaded Vulkan use.  std::atomic<bool> is
// used instead of the former 'volatile bool': unsynchronized concurrent
// access to a plain bool is a data race (volatile provides no thread
// synchronization).  'static' gives each translation unit its own copy,
// matching the other file-scope state declared in this header.
static std::atomic<bool> vulkan_in_use(false);
static std::atomic<bool> vulkan_multi_threaded(false);

// starting check if an application is using vulkan from multiple threads.
// Returns true once overlapping API calls have ever been observed.
inline bool startMultiThread() {
    if (vulkan_multi_threaded) {
        return true;
    }
    // exchange() atomically claims the in-use flag; if it was already set,
    // another thread is currently inside the API, so we are multi-threaded.
    if (vulkan_in_use.exchange(true)) {
        vulkan_multi_threaded = true;
        return true;
    }
    return false;
}

// finishing check if an application is using vulkan from multiple threads.
inline void finishMultiThread() { vulkan_in_use = false; }
}  // namespace threading
Mike Stroyan0b64aee2016-07-13 10:10:25 -060079
Mark Lobodzinski64318ba2017-01-26 13:34:13 -070080template <typename T>
81class counter {
82 public:
Mike Stroyan845bdc42015-11-02 15:30:20 -070083 const char *typeName;
84 VkDebugReportObjectTypeEXT objectType;
Mike Stroyan1a080012016-01-29 15:33:21 -070085 std::unordered_map<T, object_use_data> uses;
Mike Stroyanb3dd7902016-06-30 13:21:37 -060086 std::mutex counter_lock;
87 std::condition_variable counter_condition;
Jon Ashburn5484e0c2016-03-08 17:48:44 -070088 void startWrite(debug_report_data *report_data, T object) {
Mike Stroyan9e2c72a2017-05-01 11:10:07 -060089 if (object == VK_NULL_HANDLE) {
90 return;
91 }
Dustin Graves080069b2016-04-05 13:48:15 -060092 bool skipCall = false;
Mike Stroyan845bdc42015-11-02 15:30:20 -070093 loader_platform_thread_id tid = loader_platform_get_thread_id();
Mike Stroyanb3dd7902016-06-30 13:21:37 -060094 std::unique_lock<std::mutex> lock(counter_lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -070095 if (uses.find(object) == uses.end()) {
96 // There is no current use of the object. Record writer thread.
97 struct object_use_data *use_data = &uses[object];
98 use_data->reader_count = 0;
99 use_data->writer_count = 1;
100 use_data->thread = tid;
101 } else {
102 struct object_use_data *use_data = &uses[object];
103 if (use_data->reader_count == 0) {
104 // There are no readers. Two writers just collided.
105 if (use_data->thread != tid) {
Mark Lobodzinskib1fd9d12018-03-30 14:26:00 -0600106 skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
Mark Lobodzinski729a8d32017-01-26 12:16:30 -0700107 THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
Karl Schultz2e5ed332017-12-12 10:33:01 -0500108 "THREADING ERROR : object of type %s is simultaneously used in "
109 "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
110 typeName, (uint64_t)use_data->thread, (uint64_t)tid);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700111 if (skipCall) {
112 // Wait for thread-safe access to object instead of skipping call.
113 while (uses.find(object) != uses.end()) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600114 counter_condition.wait(lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700115 }
116 // There is now no current use of the object. Record writer thread.
Karl Schultz47dd59d2017-01-20 13:19:20 -0700117 struct object_use_data *new_use_data = &uses[object];
118 new_use_data->thread = tid;
119 new_use_data->reader_count = 0;
120 new_use_data->writer_count = 1;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700121 } else {
122 // Continue with an unsafe use of the object.
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700123 use_data->thread = tid;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700124 use_data->writer_count += 1;
125 }
126 } else {
Mike Stroyanc8774502016-02-05 09:11:32 -0700127 // This is either safe multiple use in one call, or recursive use.
Mike Stroyan845bdc42015-11-02 15:30:20 -0700128 // There is no way to make recursion safe. Just forge ahead.
129 use_data->writer_count += 1;
130 }
131 } else {
132 // There are readers. This writer collided with them.
133 if (use_data->thread != tid) {
Mark Lobodzinskib1fd9d12018-03-30 14:26:00 -0600134 skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
Mark Lobodzinski729a8d32017-01-26 12:16:30 -0700135 THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
Karl Schultz2e5ed332017-12-12 10:33:01 -0500136 "THREADING ERROR : object of type %s is simultaneously used in "
137 "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
138 typeName, (uint64_t)use_data->thread, (uint64_t)tid);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700139 if (skipCall) {
140 // Wait for thread-safe access to object instead of skipping call.
141 while (uses.find(object) != uses.end()) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600142 counter_condition.wait(lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700143 }
144 // There is now no current use of the object. Record writer thread.
Karl Schultz47dd59d2017-01-20 13:19:20 -0700145 struct object_use_data *new_use_data = &uses[object];
146 new_use_data->thread = tid;
147 new_use_data->reader_count = 0;
148 new_use_data->writer_count = 1;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700149 } else {
150 // Continue with an unsafe use of the object.
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700151 use_data->thread = tid;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700152 use_data->writer_count += 1;
153 }
154 } else {
Mike Stroyanc8774502016-02-05 09:11:32 -0700155 // This is either safe multiple use in one call, or recursive use.
Mike Stroyan845bdc42015-11-02 15:30:20 -0700156 // There is no way to make recursion safe. Just forge ahead.
157 use_data->writer_count += 1;
158 }
159 }
160 }
Mike Stroyan845bdc42015-11-02 15:30:20 -0700161 }
162
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700163 void finishWrite(T object) {
Mike Stroyan9e2c72a2017-05-01 11:10:07 -0600164 if (object == VK_NULL_HANDLE) {
165 return;
166 }
Mike Stroyan845bdc42015-11-02 15:30:20 -0700167 // Object is no longer in use
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600168 std::unique_lock<std::mutex> lock(counter_lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700169 uses[object].writer_count -= 1;
170 if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
171 uses.erase(object);
172 }
173 // Notify any waiting threads that this object may be safe to use
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600174 lock.unlock();
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600175 counter_condition.notify_all();
Mike Stroyan845bdc42015-11-02 15:30:20 -0700176 }
177
178 void startRead(debug_report_data *report_data, T object) {
Mike Stroyan9e2c72a2017-05-01 11:10:07 -0600179 if (object == VK_NULL_HANDLE) {
180 return;
181 }
Dustin Graves080069b2016-04-05 13:48:15 -0600182 bool skipCall = false;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700183 loader_platform_thread_id tid = loader_platform_get_thread_id();
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600184 std::unique_lock<std::mutex> lock(counter_lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700185 if (uses.find(object) == uses.end()) {
186 // There is no current use of the object. Record reader count
187 struct object_use_data *use_data = &uses[object];
188 use_data->reader_count = 1;
189 use_data->writer_count = 0;
190 use_data->thread = tid;
Mike Stroyanc8774502016-02-05 09:11:32 -0700191 } else if (uses[object].writer_count > 0 && uses[object].thread != tid) {
Mike Stroyan845bdc42015-11-02 15:30:20 -0700192 // There is a writer of the object.
Mark Lobodzinskib1fd9d12018-03-30 14:26:00 -0600193 skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
Mark Lobodzinski729a8d32017-01-26 12:16:30 -0700194 THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
Karl Schultz2e5ed332017-12-12 10:33:01 -0500195 "THREADING ERROR : object of type %s is simultaneously used in "
196 "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
197 typeName, (uint64_t)uses[object].thread, (uint64_t)tid);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700198 if (skipCall) {
199 // Wait for thread-safe access to object instead of skipping call.
200 while (uses.find(object) != uses.end()) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600201 counter_condition.wait(lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700202 }
203 // There is no current use of the object. Record reader count
204 struct object_use_data *use_data = &uses[object];
205 use_data->reader_count = 1;
206 use_data->writer_count = 0;
207 use_data->thread = tid;
208 } else {
209 uses[object].reader_count += 1;
210 }
211 } else {
212 // There are other readers of the object. Increase reader count
213 uses[object].reader_count += 1;
214 }
Mike Stroyan845bdc42015-11-02 15:30:20 -0700215 }
216 void finishRead(T object) {
Mike Stroyan9e2c72a2017-05-01 11:10:07 -0600217 if (object == VK_NULL_HANDLE) {
218 return;
219 }
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600220 std::unique_lock<std::mutex> lock(counter_lock);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700221 uses[object].reader_count -= 1;
222 if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
223 uses.erase(object);
224 }
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600225 // Notify any waiting threads that this object may be safe to use
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600226 lock.unlock();
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600227 counter_condition.notify_all();
Mike Stroyan845bdc42015-11-02 15:30:20 -0700228 }
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700229 counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
Mike Stroyan845bdc42015-11-02 15:30:20 -0700230 typeName = name;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700231 objectType = type;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700232 }
233};
234
Cody Northrop55443ef2015-09-28 15:09:32 -0600235struct layer_data {
Chia-I Wu59d0a332016-05-16 11:21:03 +0800236 VkInstance instance;
237
Mike Stroyan313f7e62015-08-10 16:42:53 -0600238 debug_report_data *report_data;
Mike Stroyan845bdc42015-11-02 15:30:20 -0700239 std::vector<VkDebugReportCallbackEXT> logging_callback;
Mark Young6ba8abe2017-11-09 10:37:04 -0700240 std::vector<VkDebugUtilsMessengerEXT> logging_messenger;
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700241 VkLayerDispatchTable *device_dispatch_table;
242 VkLayerInstanceDispatchTable *instance_dispatch_table;
Mark Young6ba8abe2017-11-09 10:37:04 -0700243
Ian Elliotted6b5ac2016-04-28 09:08:13 -0600244 // The following are for keeping track of the temporary callbacks that can
245 // be used in vkCreateInstance and vkDestroyInstance:
Mark Young6ba8abe2017-11-09 10:37:04 -0700246 uint32_t num_tmp_report_callbacks;
247 VkDebugReportCallbackCreateInfoEXT *tmp_report_create_infos;
248 VkDebugReportCallbackEXT *tmp_report_callbacks;
249 uint32_t num_tmp_debug_messengers;
250 VkDebugUtilsMessengerCreateInfoEXT *tmp_messenger_create_infos;
251 VkDebugUtilsMessengerEXT *tmp_debug_messengers;
252
Mike Stroyan845bdc42015-11-02 15:30:20 -0700253 counter<VkCommandBuffer> c_VkCommandBuffer;
254 counter<VkDevice> c_VkDevice;
255 counter<VkInstance> c_VkInstance;
256 counter<VkQueue> c_VkQueue;
Mike Stroyan31c50c82016-01-29 15:09:04 -0700257#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
Mike Stroyan845bdc42015-11-02 15:30:20 -0700258 counter<VkBuffer> c_VkBuffer;
259 counter<VkBufferView> c_VkBufferView;
260 counter<VkCommandPool> c_VkCommandPool;
261 counter<VkDescriptorPool> c_VkDescriptorPool;
262 counter<VkDescriptorSet> c_VkDescriptorSet;
263 counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
264 counter<VkDeviceMemory> c_VkDeviceMemory;
265 counter<VkEvent> c_VkEvent;
266 counter<VkFence> c_VkFence;
267 counter<VkFramebuffer> c_VkFramebuffer;
268 counter<VkImage> c_VkImage;
269 counter<VkImageView> c_VkImageView;
270 counter<VkPipeline> c_VkPipeline;
271 counter<VkPipelineCache> c_VkPipelineCache;
272 counter<VkPipelineLayout> c_VkPipelineLayout;
273 counter<VkQueryPool> c_VkQueryPool;
274 counter<VkRenderPass> c_VkRenderPass;
275 counter<VkSampler> c_VkSampler;
276 counter<VkSemaphore> c_VkSemaphore;
277 counter<VkShaderModule> c_VkShaderModule;
278 counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
Mark Lobodzinski2d589822016-12-12 09:44:34 -0700279 counter<VkObjectTableNVX> c_VkObjectTableNVX;
Mark Lobodzinski729a8d32017-01-26 12:16:30 -0700280 counter<VkIndirectCommandsLayoutNVX> c_VkIndirectCommandsLayoutNVX;
Mark Lobodzinski9c147802017-02-10 08:34:54 -0700281 counter<VkDisplayKHR> c_VkDisplayKHR;
282 counter<VkDisplayModeKHR> c_VkDisplayModeKHR;
283 counter<VkSurfaceKHR> c_VkSurfaceKHR;
284 counter<VkSwapchainKHR> c_VkSwapchainKHR;
Mark Young0f183a82017-02-28 09:58:04 -0700285 counter<VkDescriptorUpdateTemplateKHR> c_VkDescriptorUpdateTemplateKHR;
Mike Schuchardta6b8bdb2017-09-05 16:10:20 -0600286 counter<VkValidationCacheEXT> c_VkValidationCacheEXT;
Lenny Komowb79f04a2017-09-18 17:07:00 -0600287 counter<VkSamplerYcbcrConversionKHR> c_VkSamplerYcbcrConversionKHR;
Mark Young6ba8abe2017-11-09 10:37:04 -0700288 counter<VkDebugUtilsMessengerEXT> c_VkDebugUtilsMessengerEXT;
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700289#else // DISTINCT_NONDISPATCHABLE_HANDLES
Mike Stroyan31c50c82016-01-29 15:09:04 -0700290 counter<uint64_t> c_uint64_t;
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700291#endif // DISTINCT_NONDISPATCHABLE_HANDLES
Mark Lobodzinski2d589822016-12-12 09:44:34 -0700292
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700293 layer_data()
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700294 : report_data(nullptr),
Mark Young6ba8abe2017-11-09 10:37:04 -0700295 num_tmp_report_callbacks(0),
296 tmp_report_create_infos(nullptr),
297 tmp_report_callbacks(nullptr),
298 num_tmp_debug_messengers(0),
299 tmp_messenger_create_infos(nullptr),
300 tmp_debug_messengers(nullptr),
Ian Elliotted6b5ac2016-04-28 09:08:13 -0600301 c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700302 c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
303 c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
304 c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
Mike Stroyan31c50c82016-01-29 15:09:04 -0700305#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700306 c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
307 c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
308 c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
309 c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
310 c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
311 c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
312 c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700313 c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT),
314 c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700315 c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
316 c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
317 c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
318 c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
319 c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
320 c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
321 c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
322 c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
323 c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
324 c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
325 c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
Mark Lobodzinski2d589822016-12-12 09:44:34 -0700326 c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT),
327 c_VkObjectTableNVX("VkObjectTableNVX", VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT),
Dave Houltona9df0ce2018-02-07 10:51:23 -0700328 c_VkIndirectCommandsLayoutNVX("VkIndirectCommandsLayoutNVX",
329 VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT),
Mark Lobodzinski9c147802017-02-10 08:34:54 -0700330 c_VkDisplayKHR("VkDisplayKHR", VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT),
331 c_VkDisplayModeKHR("VkDisplayModeKHR", VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT),
332 c_VkSurfaceKHR("VkSurfaceKHR", VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT),
Mark Young0f183a82017-02-28 09:58:04 -0700333 c_VkSwapchainKHR("VkSwapchainKHR", VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT),
Dave Houltona9df0ce2018-02-07 10:51:23 -0700334 c_VkDescriptorUpdateTemplateKHR("VkDescriptorUpdateTemplateKHR",
335 VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT),
Mark Young6ba8abe2017-11-09 10:37:04 -0700336 c_VkSamplerYcbcrConversionKHR("VkSamplerYcbcrConversionKHR",
337 VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT),
338 c_VkDebugUtilsMessengerEXT("VkDebugUtilsMessengerEXT", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700339#else // DISTINCT_NONDISPATCHABLE_HANDLES
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700340 c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700341#endif // DISTINCT_NONDISPATCHABLE_HANDLES
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700342 {};
Cody Northrop55443ef2015-09-28 15:09:32 -0600343};
Mike Stroyan313f7e62015-08-10 16:42:53 -0600344
// Generates the four thread-safety entry points (startWriteObject /
// finishWriteObject / startReadObject / finishReadObject) for a handle type
// by forwarding to the corresponding counter<type> member (c_<type>) of
// layer_data via token pasting.
#define WRAPPER(type)                                                                                                  \
    static void startWriteObject(struct layer_data *my_data, type object) {                                            \
        my_data->c_##type.startWrite(my_data->report_data, object);                                                    \
    }                                                                                                                  \
    static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); }  \
    static void startReadObject(struct layer_data *my_data, type object) {                                             \
        my_data->c_##type.startRead(my_data->report_data, object);                                                     \
    }                                                                                                                  \
    static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }
Mike Stroyan313f7e62015-08-10 16:42:53 -0600354
// Instantiate the start/finish read/write tracking functions for every
// tracked handle type.  VkCommandBuffer is intentionally absent: it gets
// hand-written overloads below that also lock its parent command pool.
WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
WRAPPER(VkObjectTableNVX)
WRAPPER(VkIndirectCommandsLayoutNVX)
WRAPPER(VkDisplayKHR)
WRAPPER(VkDisplayModeKHR)
WRAPPER(VkSurfaceKHR)
WRAPPER(VkSwapchainKHR)
WRAPPER(VkDescriptorUpdateTemplateKHR)
WRAPPER(VkValidationCacheEXT)
WRAPPER(VkSamplerYcbcrConversionKHR)
WRAPPER(VkDebugUtilsMessengerEXT)
#else   // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif  // DISTINCT_NONDISPATCHABLE_HANDLES
Mike Stroyan845bdc42015-11-02 15:30:20 -0700393
// Per-dispatch-key layer state (one layer_data per instance/device).
static std::unordered_map<void *, layer_data *> layer_data_map;
// Guards command_pool_map below.
static std::mutex command_pool_lock;
// Maps each command buffer to the pool it was allocated from, so command
// buffer accesses can also be checked against implicit pool use.
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;
397
398// VkCommandBuffer needs check for implicit use of command pool
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700399static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
Mike Stroyan845bdc42015-11-02 15:30:20 -0700400 if (lockPool) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600401 std::unique_lock<std::mutex> lock(command_pool_lock);
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700402 VkCommandPool pool = command_pool_map[object];
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600403 lock.unlock();
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700404 startWriteObject(my_data, pool);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700405 }
406 my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
407}
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700408static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
Mike Stroyan845bdc42015-11-02 15:30:20 -0700409 my_data->c_VkCommandBuffer.finishWrite(object);
410 if (lockPool) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600411 std::unique_lock<std::mutex> lock(command_pool_lock);
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700412 VkCommandPool pool = command_pool_map[object];
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600413 lock.unlock();
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700414 finishWriteObject(my_data, pool);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700415 }
416}
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700417static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) {
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600418 std::unique_lock<std::mutex> lock(command_pool_lock);
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700419 VkCommandPool pool = command_pool_map[object];
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600420 lock.unlock();
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700421 startReadObject(my_data, pool);
Mike Stroyan845bdc42015-11-02 15:30:20 -0700422 my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
423}
Jon Ashburn5484e0c2016-03-08 17:48:44 -0700424static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) {
Mike Stroyan845bdc42015-11-02 15:30:20 -0700425 my_data->c_VkCommandBuffer.finishRead(object);
Mike Stroyanb3dd7902016-06-30 13:21:37 -0600426 std::unique_lock<std::mutex> lock(command_pool_lock);
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700427 VkCommandPool pool = command_pool_map[object];
Jeremy Hayesb350beb2016-04-12 13:48:52 -0600428 lock.unlock();
Mike Stroyanae8e8a72016-02-08 10:27:55 -0700429 finishReadObject(my_data, pool);
Mike Stroyan313f7e62015-08-10 16:42:53 -0600430}
Mark Lobodzinski64318ba2017-01-26 13:34:13 -0700431#endif // THREADING_H