/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cody@lunarg.com>
 * Author: Mike Stroyan <mike@LunarG.com>
 */

#ifndef THREADING_H
#define THREADING_H
#include <condition_variable>
#include <mutex>
#include <unordered_map> // used directly below for counter<T>::uses and layer_data_map
#include <vector>
#include "vk_layer_config.h"
#include "vk_layer_logging.h"

#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || \
    defined(__aarch64__) || defined(__powerpc64__)
// If pointers are 64-bit, then there can be separate counters for each
// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedef'd to uint64_t.
#define DISTINCT_NONDISPATCHABLE_HANDLES
#endif

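// For reference, vulkan.h defines non-dispatchable handles roughly like this
// (a paraphrase of VK_DEFINE_NON_DISPATCHABLE_HANDLE; see that header for the
// exact platform test):
//
//   #if /* 64-bit platform */
//   #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef struct object##_T *object;
//   #else
//   #define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) typedef uint64_t object;
//   #endif
//
// With 64-bit pointers each handle is a distinct pointer type, so the
// counter<T> template below can be instantiated once per handle type; with
// 32-bit pointers every non-dispatchable handle is the same uint64_t typedef
// and a single shared counter must be used.
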
// Threading checker error codes
enum THREADING_CHECKER_ERROR {
    THREADING_CHECKER_NONE,                // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS,    // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE, // Object used simultaneously by recursion in single thread
};

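// Per-object bookkeeping for the counter<T> template below. "thread" records
// the most recent thread to use the object; reader_count and writer_count
// track how many calls are currently reading or writing it.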
struct object_use_data {
    loader_platform_thread_id thread;
    int reader_count;
    int writer_count;
};

struct layer_data;

namespace threading {
volatile bool vulkan_in_use = false;
volatile bool vulkan_multi_threaded = false;
// Called at the start of a Vulkan call: returns true once the application has
// been seen using Vulkan from multiple threads at the same time.
inline bool startMultiThread() {
    if (vulkan_multi_threaded) {
        return true;
    }
    if (vulkan_in_use) {
        vulkan_multi_threaded = true;
        return true;
    }
    vulkan_in_use = true;
    return false;
}

// Called at the end of a Vulkan call to mark it finished for the
// multithreading check above.
inline void finishMultiThread() { vulkan_in_use = false; }
} // namespace threading

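// A minimal usage sketch (hypothetical entry point; in this layer the actual
// calls are emitted by the code generator): each intercepted Vulkan function
// is bracketed so per-object checking can stay off until a second thread is
// observed.
//
//   VKAPI_ATTR void VKAPI_CALL vkSomeCommand(VkDevice device) {
//       if (threading::startMultiThread()) {
//           // more than one thread has been seen: object checks are meaningful
//       }
//       // ... per-object startWrite/startRead checks, dispatch down the chain ...
//       threading::finishMultiThread();
//   }
//
// Note the flags are plain volatile bools rather than atomics: the detection
// is deliberately cheap and best-effort, and once vulkan_multi_threaded is
// set it is never cleared.
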
template <typename T> class counter {
  public:
    const char *typeName;
    VkDebugReportObjectTypeEXT objectType;
    std::unordered_map<T, object_use_data> uses;
    std::mutex counter_lock;
    std::condition_variable counter_condition;
    void startWrite(debug_report_data *report_data, T object) {
        bool skipCall = false;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        std::unique_lock<std::mutex> lock(counter_lock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record writer thread.
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 0;
            use_data->writer_count = 1;
            use_data->thread = tid;
        } else {
            struct object_use_data *use_data = &uses[object];
            if (use_data->reader_count == 0) {
                // There are no readers. Two writers just collided.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses.find(object) != uses.end()) {
                            counter_condition.wait(lock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            } else {
                // There are readers. This writer collided with them.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses.find(object) != uses.end()) {
                            counter_condition.wait(lock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            }
        }
    }

    void finishWrite(T object) {
        // Object is no longer in use
        std::unique_lock<std::mutex> lock(counter_lock);
        uses[object].writer_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        lock.unlock();
        counter_condition.notify_all();
    }

    void startRead(debug_report_data *report_data, T object) {
        bool skipCall = false;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        std::unique_lock<std::mutex> lock(counter_lock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record reader count
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 1;
            use_data->writer_count = 0;
            use_data->thread = tid;
        } else if (uses[object].writer_count > 0 && uses[object].thread != tid) {
            // There is a writer of the object.
            skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld", typeName,
                                uses[object].thread, tid);
            if (skipCall) {
                // Wait for thread-safe access to object instead of skipping call.
                while (uses.find(object) != uses.end()) {
                    counter_condition.wait(lock);
                }
                // There is no current use of the object. Record reader count
                struct object_use_data *use_data = &uses[object];
                use_data->reader_count = 1;
                use_data->writer_count = 0;
                use_data->thread = tid;
            } else {
                uses[object].reader_count += 1;
            }
        } else {
            // There are other readers of the object. Increase reader count
            uses[object].reader_count += 1;
        }
    }
    void finishRead(T object) {
        std::unique_lock<std::mutex> lock(counter_lock);
        uses[object].reader_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        lock.unlock();
        counter_condition.notify_all();
    }
    counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
        typeName = name;
        objectType = type;
    }
};

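// A minimal usage sketch for counter<T> (hypothetical handle, with a
// layer_data pointer "my_data" as declared below): a write bracket flags any
// overlapping use of the same object by another thread.
//
//   my_data->c_VkQueue.startWrite(my_data->report_data, queue); // reports THREADING_CHECKER_MULTIPLE_THREADS on collision
//   // ... call down the dispatch chain ...
//   my_data->c_VkQueue.finishWrite(queue);                      // wakes threads waiting in startWrite/startRead
//
// If the log callback requests that the offending call be skipped, startWrite
// and startRead instead block on counter_condition until the object is free,
// serializing the unsafe overlap rather than dropping the call.
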
struct layer_data {
    VkInstance instance;

    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
    // The following are for keeping track of the temporary callbacks that can
    // be used in vkCreateInstance and vkDestroyInstance:
    uint32_t num_tmp_callbacks;
    VkDebugReportCallbackCreateInfoEXT *tmp_dbg_create_infos;
    VkDebugReportCallbackEXT *tmp_callbacks;
    counter<VkCommandBuffer> c_VkCommandBuffer;
    counter<VkDevice> c_VkDevice;
    counter<VkInstance> c_VkInstance;
    counter<VkQueue> c_VkQueue;
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
    counter<VkBuffer> c_VkBuffer;
    counter<VkBufferView> c_VkBufferView;
    counter<VkCommandPool> c_VkCommandPool;
    counter<VkDescriptorPool> c_VkDescriptorPool;
    counter<VkDescriptorSet> c_VkDescriptorSet;
    counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
    counter<VkDeviceMemory> c_VkDeviceMemory;
    counter<VkEvent> c_VkEvent;
    counter<VkFence> c_VkFence;
    counter<VkFramebuffer> c_VkFramebuffer;
    counter<VkImage> c_VkImage;
    counter<VkImageView> c_VkImageView;
    counter<VkPipeline> c_VkPipeline;
    counter<VkPipelineCache> c_VkPipelineCache;
    counter<VkPipelineLayout> c_VkPipelineLayout;
    counter<VkQueryPool> c_VkQueryPool;
    counter<VkRenderPass> c_VkRenderPass;
    counter<VkSampler> c_VkSampler;
    counter<VkSemaphore> c_VkSemaphore;
    counter<VkShaderModule> c_VkShaderModule;
    counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
    counter<VkObjectTableNVX> c_VkObjectTableNVX;
    counter<VkIndirectCommandsLayoutNVX> c_VkIndirectCommandsLayoutNVX;
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
    counter<uint64_t> c_uint64_t;
#endif // DISTINCT_NONDISPATCHABLE_HANDLES

    layer_data()
        : report_data(nullptr), num_tmp_callbacks(0), tmp_dbg_create_infos(nullptr), tmp_callbacks(nullptr),
          c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
          c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
          c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
          c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
          c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
          c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
          c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
          c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
          c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
          c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
          c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
          c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT), c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
          c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
          c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
          c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
          c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
          c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
          c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
          c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
          c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
          c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
          c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
          c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
          c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT),
          c_VkObjectTableNVX("VkObjectTableNVX", VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT),
          c_VkIndirectCommandsLayoutNVX("VkIndirectCommandsLayoutNVX", VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
          c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    {}
};

#define WRAPPER(type)                                                                                                  \
    static void startWriteObject(struct layer_data *my_data, type object) {                                           \
        my_data->c_##type.startWrite(my_data->report_data, object);                                                    \
    }                                                                                                                  \
    static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); } \
    static void startReadObject(struct layer_data *my_data, type object) {                                            \
        my_data->c_##type.startRead(my_data->report_data, object);                                                     \
    }                                                                                                                  \
    static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }

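// For example, WRAPPER(VkDevice) below expands to four static helpers of the
// form:
//
//   static void startWriteObject(struct layer_data *my_data, VkDevice object) {
//       my_data->c_VkDevice.startWrite(my_data->report_data, object);
//   }
//
// so every tracked handle type gets a uniform start/finish read/write interface.
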
WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
WRAPPER(VkObjectTableNVX)
WRAPPER(VkIndirectCommandsLayoutNVX)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES

static std::unordered_map<void *, layer_data *> layer_data_map;
static std::mutex command_pool_lock;
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;

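// layer_data_map is keyed by dispatch key; callers are expected to look up
// per-instance/per-device state with the usual layer helper pattern (an
// illustrative sketch using the get_my_data_ptr/get_dispatch_key helpers from
// the layer utility headers):
//
//   layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
//
// command_pool_map lets the VkCommandBuffer wrappers below also mark the
// owning VkCommandPool in use, since recording into a command buffer
// implicitly uses its pool.
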
// VkCommandBuffer wrappers also need to check for implicit use of the
// command buffer's command pool.
static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    if (lockPool) {
        std::unique_lock<std::mutex> lock(command_pool_lock);
        VkCommandPool pool = command_pool_map[object];
        lock.unlock();
        startWriteObject(my_data, pool);
    }
    my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
}
static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    my_data->c_VkCommandBuffer.finishWrite(object);
    if (lockPool) {
        std::unique_lock<std::mutex> lock(command_pool_lock);
        VkCommandPool pool = command_pool_map[object];
        lock.unlock();
        finishWriteObject(my_data, pool);
    }
}
static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    std::unique_lock<std::mutex> lock(command_pool_lock);
    VkCommandPool pool = command_pool_map[object];
    lock.unlock();
    startReadObject(my_data, pool);
    my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
}
static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    my_data->c_VkCommandBuffer.finishRead(object);
    std::unique_lock<std::mutex> lock(command_pool_lock);
    VkCommandPool pool = command_pool_map[object];
    lock.unlock();
    finishReadObject(my_data, pool);
}
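
// A minimal sketch of how an intercepted entry point would use these wrappers
// (hypothetical interception; the real call sites live in the generated layer
// code):
//
//   VKAPI_ATTR VkResult VKAPI_CALL vkEndCommandBuffer(VkCommandBuffer commandBuffer) {
//       layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
//       startWriteObject(my_data, commandBuffer); // also records a write on the owning pool
//       VkResult result = my_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
//       finishWriteObject(my_data, commandBuffer);
//       return result;
//   }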
#endif // THREADING_H