/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * The Materials are Confidential Information as defined by the Khronos
 * Membership Agreement until designated non-confidential by Khronos, at which
 * point this condition clause shall be removed.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS.
 *
 * Author: Cody Northrop <cody@lunarg.com>
 * Author: Mike Stroyan <mike@LunarG.com>
 */

#ifndef THREADING_H
#define THREADING_H
#include <unordered_map> // needed for std::unordered_map used by counter<> and the handle maps below
#include <vector>
#include "vk_layer_config.h"
#include "vk_layer_logging.h"

#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || \
    defined(__aarch64__) || defined(__powerpc64__)
// If pointers are 64-bit, then there can be separate counters for each
// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedef'ed to uint64_t.
#define DISTINCT_NONDISPATCHABLE_HANDLES
#endif
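
// (On 32-bit builds, VK_DEFINE_NON_DISPATCHABLE_HANDLE makes every
// non-dispatchable handle an alias of uint64_t, so instantiations such as
// counter<VkBuffer> and counter<VkImage> would collapse into one and the same
// counter<uint64_t>; a single shared counter is used instead, see layer_data
// below.)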

// Threading checker error codes
typedef enum _THREADING_CHECKER_ERROR
{
    THREADING_CHECKER_NONE,                // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS,    // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE, // Object used recursively within a single thread
} THREADING_CHECKER_ERROR;

struct object_use_data {
    loader_platform_thread_id thread; // thread currently using the object
    int reader_count;                 // number of concurrent readers
    int writer_count;                 // number of concurrent writers
};
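
// Illustrative states: {reader_count = 2, writer_count = 0} is two concurrent
// readers (safe); {reader_count = 0, writer_count = 2} records a write/write
// collision that has already been reported and then allowed to proceed.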

struct layer_data;

static int threadingLockInitialized = 0;
static loader_platform_thread_mutex threadingLock;
static loader_platform_thread_cond threadingCond;
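
// The mutex and condition variable are expected to be set up once during layer
// initialization. A minimal sketch, assuming the loader_platform helpers from
// vk_loader_platform.h (the hook lives in the layer's .cpp; the function name
// here is illustrative, not the layer's actual entry point):
//   static void initThreadingLocks(void) {
//       if (!threadingLockInitialized) {
//           loader_platform_thread_create_mutex(&threadingLock);
//           loader_platform_thread_init_cond(&threadingCond);
//           threadingLockInitialized = 1;
//       }
//   }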

template <typename T> class counter {
    public:
    const char *typeName;
    VkDebugReportObjectTypeEXT objectType;
    std::unordered_map<T, object_use_data> uses;
    void startWrite(debug_report_data *report_data, T object)
    {
        VkBool32 skipCall = VK_FALSE;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        loader_platform_thread_lock_mutex(&threadingLock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record writer thread.
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 0;
            use_data->writer_count = 1;
            use_data->thread = tid;
        } else {
            struct object_use_data *use_data = &uses[object];
            if (use_data->reader_count == 0) {
                // There are no readers. Two writers just collided.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, reinterpret_cast<uint64_t>(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to the object instead of skipping the call.
                        while (uses.find(object) != uses.end()) {
                            loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->writer_count += 1;
                    }
                } else {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, reinterpret_cast<uint64_t>(object),
                                        /*location*/ 0, THREADING_CHECKER_SINGLE_THREAD_REUSE, "THREADING",
                                        "THREADING ERROR : object of type %s is recursively used in thread %ld",
                                        typeName, tid);
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            } else {
                // There are readers. This writer collided with them.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, reinterpret_cast<uint64_t>(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to the object instead of skipping the call.
                        while (uses.find(object) != uses.end()) {
                            loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->writer_count += 1;
                    }
                } else {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, reinterpret_cast<uint64_t>(object),
                                        /*location*/ 0, THREADING_CHECKER_SINGLE_THREAD_REUSE, "THREADING",
                                        "THREADING ERROR : object of type %s is recursively used in thread %ld",
                                        typeName, tid);
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            }
        }
        loader_platform_thread_unlock_mutex(&threadingLock);
    }

    void finishWrite(T object)
    {
        // This writer is done with the object
        loader_platform_thread_lock_mutex(&threadingLock);
        uses[object].writer_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        loader_platform_thread_cond_broadcast(&threadingCond);
        loader_platform_thread_unlock_mutex(&threadingLock);
    }

    void startRead(debug_report_data *report_data, T object) {
        VkBool32 skipCall = VK_FALSE;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        loader_platform_thread_lock_mutex(&threadingLock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record the first reader.
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 1;
            use_data->writer_count = 0;
            use_data->thread = tid;
        } else if (uses[object].writer_count > 0) {
            // There is a writer of the object. This reader collided with it.
            skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, reinterpret_cast<uint64_t>(object),
                                /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                typeName, uses[object].thread, tid);
            if (skipCall) {
                // Wait for thread-safe access to the object instead of skipping the call.
                while (uses.find(object) != uses.end()) {
                    loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                }
                // There is now no current use of the object. Record the first reader.
                struct object_use_data *use_data = &uses[object];
                use_data->reader_count = 1;
                use_data->writer_count = 0;
                use_data->thread = tid;
            } else {
                // Continue with an unsafe use of the object.
                uses[object].reader_count += 1;
            }
        } else {
            // There are other readers of the object. Increase the reader count.
            uses[object].reader_count += 1;
        }
        loader_platform_thread_unlock_mutex(&threadingLock);
    }
    void finishRead(T object) {
        // This reader is done with the object
        loader_platform_thread_lock_mutex(&threadingLock);
        uses[object].reader_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        loader_platform_thread_cond_broadcast(&threadingCond);
        loader_platform_thread_unlock_mutex(&threadingLock);
    }
    counter(const char *name = "",
            VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
        typeName = name;
        objectType = type;
    }
};
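
// Minimal usage sketch for a counter (illustrative only; the layer drives
// these through the startWriteObject/finishWriteObject wrappers below):
//   counter<VkQueue> c_queue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
//   c_queue.startWrite(report_data, queue); // report or wait out any collision
//   // ... call down the dispatch chain ...
//   c_queue.finishWrite(queue);             // wake any threads waiting on the object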

struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
    // Counters for dispatchable handles
    counter<VkCommandBuffer> c_VkCommandBuffer;
    counter<VkDevice> c_VkDevice;
    counter<VkInstance> c_VkInstance;
    counter<VkQueue> c_VkQueue;
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
    counter<VkBuffer> c_VkBuffer;
    counter<VkBufferView> c_VkBufferView;
    counter<VkCommandPool> c_VkCommandPool;
    counter<VkDescriptorPool> c_VkDescriptorPool;
    counter<VkDescriptorSet> c_VkDescriptorSet;
    counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
    counter<VkDeviceMemory> c_VkDeviceMemory;
    counter<VkEvent> c_VkEvent;
    counter<VkFence> c_VkFence;
    counter<VkFramebuffer> c_VkFramebuffer;
    counter<VkImage> c_VkImage;
    counter<VkImageView> c_VkImageView;
    counter<VkPipeline> c_VkPipeline;
    counter<VkPipelineCache> c_VkPipelineCache;
    counter<VkPipelineLayout> c_VkPipelineLayout;
    counter<VkQueryPool> c_VkQueryPool;
    counter<VkRenderPass> c_VkRenderPass;
    counter<VkSampler> c_VkSampler;
    counter<VkSemaphore> c_VkSemaphore;
    counter<VkShaderModule> c_VkShaderModule;
    counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
    // All non-dispatchable handles share one counter on 32-bit builds
    counter<uint64_t> c_uint64_t;
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    layer_data() :
        report_data(nullptr),
        device_dispatch_table(nullptr),
        instance_dispatch_table(nullptr),
        c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
        c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
        c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
        c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
        c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
        c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
        c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
        c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
        c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
        c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
        c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
        c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT),
        c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
        c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
        c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
        c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
        c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
        c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
        c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
        c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
        c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
        c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
        c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
        c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
        c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
        c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    {}
};

#define WRAPPER(type) \
    static void startWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.startWrite(my_data->report_data, object); } \
    static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); } \
    static void startReadObject(struct layer_data *my_data, type object) { my_data->c_##type.startRead(my_data->report_data, object); } \
    static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }
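
// For example, WRAPPER(VkDevice) expands to four helpers of this shape:
//   static void startWriteObject(struct layer_data *my_data, VkDevice object) {
//       my_data->c_VkDevice.startWrite(my_data->report_data, object);
//   }
// and likewise finishWriteObject, startReadObject, and finishReadObject.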

WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES

static std::unordered_map<void *, layer_data *> layer_data_map;
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;

// The VkCommandBuffer overloads also check for implicit use of the command pool
static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true)
{
    if (lockPool) {
        // Writing a command buffer implicitly writes its command pool
        startWriteObject(my_data, command_pool_map[object]);
    }
    my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
}
static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true)
{
    my_data->c_VkCommandBuffer.finishWrite(object);
    if (lockPool) {
        finishWriteObject(my_data, command_pool_map[object]);
    }
}
static void startReadObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = false)
{
    if (lockPool) {
        startReadObject(my_data, command_pool_map[object]);
    }
    my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
}
static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = false)
{
    my_data->c_VkCommandBuffer.finishRead(object);
    if (lockPool) {
        finishReadObject(my_data, command_pool_map[object]);
    }
}
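
// Illustrative call pattern from an intercepted entry point (a sketch; the
// real intercepts live in the generated layer source, and get_dispatch_key/
// get_my_data_ptr are assumed to come from the layer utility headers):
//   VKAPI_ATTR VkResult VKAPI_CALL
//   vkBeginCommandBuffer(VkCommandBuffer commandBuffer, const VkCommandBufferBeginInfo *pBeginInfo) {
//       layer_data *my_data = get_my_data_ptr(get_dispatch_key(commandBuffer), layer_data_map);
//       startWriteObject(my_data, commandBuffer); // also write-locks the command pool
//       VkResult result = my_data->device_dispatch_table->BeginCommandBuffer(commandBuffer, pBeginInfo);
//       finishWriteObject(my_data, commandBuffer);
//       return result;
//   }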
#endif // THREADING_H