/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * The Materials are Confidential Information as defined by the Khronos
 * Membership Agreement until designated non-confidential by Khronos, at which
 * point this condition clause shall be removed.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS.
 *
 * Author: Cody Northrop <cody@lunarg.com>
 * Author: Mike Stroyan <mike@LunarG.com>
 */

#ifndef THREADING_H
#define THREADING_H

#include <unordered_map>
#include <vector>

#include "vk_layer_config.h"
#include "vk_layer_logging.h"

// Threading checker error codes
typedef enum _THREADING_CHECKER_ERROR {
    THREADING_CHECKER_NONE,                // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS,    // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE, // Object used simultaneously by recursion in a single thread
} THREADING_CHECKER_ERROR;

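// Per-object bookkeeping, guarded by threadingLock below: counts of in-flight
// readers and writers on a handle, plus the thread most recently recorded as
// using it.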
struct object_use_data {
    loader_platform_thread_id thread;
    int reader_count;
    int writer_count;
};

struct layer_data;
using namespace std;

static int threadingLockInitialized = 0;
static loader_platform_thread_mutex threadingLock;
static loader_platform_thread_cond threadingCond;

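// counter<T> tracks every live use of a handle of type T. startWrite/startRead
// record a use and log THREADING_CHECKER errors when uses overlap unsafely;
// finishWrite/finishRead retire the use and wake any blocked waiters.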
template <typename T> class counter {
  public:
    const char *typeName;
    VkDebugReportObjectTypeEXT objectType;
    unordered_map<T, object_use_data> uses;
    void startWrite(debug_report_data *report_data, T object) {
        VkBool32 skipCall = VK_FALSE;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        loader_platform_thread_lock_mutex(&threadingLock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record writer thread.
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 0;
            use_data->writer_count = 1;
            use_data->thread = tid;
        } else {
            struct object_use_data *use_data = &uses[object];
            if (use_data->reader_count == 0) {
                // There are no readers. Two writers just collided.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType,
                                        reinterpret_cast<uint64_t>(object), /*location*/ 0,
                                        THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to the object instead of skipping the call.
                        while (uses.find(object) != uses.end()) {
                            loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->writer_count += 1;
                    }
                } else {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType,
                                        reinterpret_cast<uint64_t>(object), /*location*/ 0,
                                        THREADING_CHECKER_SINGLE_THREAD_REUSE, "THREADING",
                                        "THREADING ERROR : object of type %s is recursively used in thread %ld",
                                        typeName, tid);
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            } else {
                // There are readers. This writer collided with them.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType,
                                        reinterpret_cast<uint64_t>(object), /*location*/ 0,
                                        THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to the object instead of skipping the call.
                        while (uses.find(object) != uses.end()) {
                            loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->writer_count += 1;
                    }
                } else {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType,
                                        reinterpret_cast<uint64_t>(object), /*location*/ 0,
                                        THREADING_CHECKER_SINGLE_THREAD_REUSE, "THREADING",
                                        "THREADING ERROR : object of type %s is recursively used in thread %ld",
                                        typeName, tid);
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            }
        }
        loader_platform_thread_unlock_mutex(&threadingLock);
    }

    void finishWrite(T object) {
        // Object is no longer in use.
        loader_platform_thread_lock_mutex(&threadingLock);
        uses[object].writer_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use.
        loader_platform_thread_cond_broadcast(&threadingCond);
        loader_platform_thread_unlock_mutex(&threadingLock);
    }

    void startRead(debug_report_data *report_data, T object) {
        VkBool32 skipCall = VK_FALSE;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        loader_platform_thread_lock_mutex(&threadingLock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record reader count.
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 1;
            use_data->writer_count = 0;
            use_data->thread = tid;
        } else if (uses[object].writer_count > 0) {
            // There is a writer of the object.
            skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType,
                                reinterpret_cast<uint64_t>(object), /*location*/ 0,
                                THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                typeName, uses[object].thread, tid);
            if (skipCall) {
                // Wait for thread-safe access to the object instead of skipping the call.
                while (uses.find(object) != uses.end()) {
                    loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                }
                // There is no current use of the object. Record reader count.
                struct object_use_data *use_data = &uses[object];
                use_data->reader_count = 1;
                use_data->writer_count = 0;
                use_data->thread = tid;
            } else {
                uses[object].reader_count += 1;
            }
        } else {
            // There are other readers of the object. Increase reader count.
            uses[object].reader_count += 1;
        }
        loader_platform_thread_unlock_mutex(&threadingLock);
    }

    void finishRead(T object) {
        loader_platform_thread_lock_mutex(&threadingLock);
        uses[object].reader_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use.
        loader_platform_thread_cond_broadcast(&threadingCond);
        loader_platform_thread_unlock_mutex(&threadingLock);
    }

    counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
        typeName = name;
        objectType = type;
    }
};

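// Minimal sketch of how a counter brackets a dispatched call (illustrative
// only; the layer's real intercepts use the WRAPPER helpers below, and pTable
// here stands in for the next layer's dispatch table):
//
//     counter<VkQueue> c_queue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT);
//     c_queue.startWrite(report_data, queue);         // claim exclusive use
//     VkResult result = pTable->QueueWaitIdle(queue); // forward to the driver
//     c_queue.finishWrite(queue);                     // release and wake waiters
//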
struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
    counter<VkCommandBuffer> c_VkCommandBuffer;
    counter<VkDevice> c_VkDevice;
    counter<VkInstance> c_VkInstance;
    counter<VkQueue> c_VkQueue;
    counter<VkBuffer> c_VkBuffer;
    counter<VkBufferView> c_VkBufferView;
    counter<VkCommandPool> c_VkCommandPool;
    counter<VkDescriptorPool> c_VkDescriptorPool;
    counter<VkDescriptorSet> c_VkDescriptorSet;
    counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
    counter<VkDeviceMemory> c_VkDeviceMemory;
    counter<VkEvent> c_VkEvent;
    counter<VkFence> c_VkFence;
    counter<VkFramebuffer> c_VkFramebuffer;
    counter<VkImage> c_VkImage;
    counter<VkImageView> c_VkImageView;
    counter<VkPipeline> c_VkPipeline;
    counter<VkPipelineCache> c_VkPipelineCache;
    counter<VkPipelineLayout> c_VkPipelineLayout;
    counter<VkQueryPool> c_VkQueryPool;
    counter<VkRenderPass> c_VkRenderPass;
    counter<VkSampler> c_VkSampler;
    counter<VkSemaphore> c_VkSemaphore;
    counter<VkShaderModule> c_VkShaderModule;
    counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;

    layer_data()
        : report_data(nullptr),
          c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
          c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
          c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
          c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
          c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
          c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
          c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
          c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
          c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
          c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
          c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
          c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT),
          c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
          c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
          c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
          c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
          c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
          c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
          c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
          c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
          c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
          c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
          c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
          c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
          c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT) {}
};

#define WRAPPER(type) \
    static void startWriteObject(struct layer_data *my_data, type object) { \
        my_data->c_##type.startWrite(my_data->report_data, object); \
    } \
    static void finishWriteObject(struct layer_data *my_data, type object) { \
        my_data->c_##type.finishWrite(object); \
    } \
    static void startReadObject(struct layer_data *my_data, type object) { \
        my_data->c_##type.startRead(my_data->report_data, object); \
    } \
    static void finishReadObject(struct layer_data *my_data, type object) { \
        my_data->c_##type.finishRead(object); \
    }

WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)

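// A typical intercept brackets the dispatched call with the wrappers above
// (illustrative sketch only; the shipping intercepts are generated elsewhere,
// and get_dispatch_key()/get_my_data_ptr() are assumed from the common layer
// utilities):
//
//     VKAPI_ATTR void VKAPI_CALL
//     vkDestroyFence(VkDevice device, VkFence fence, const VkAllocationCallbacks *pAllocator) {
//         layer_data *my_data = get_my_data_ptr(get_dispatch_key(device), layer_data_map);
//         startReadObject(my_data, device);  // the device is used but not externally modified
//         startWriteObject(my_data, fence);  // destruction requires exclusive use
//         my_data->device_dispatch_table->DestroyFence(device, fence, pAllocator);
//         finishWriteObject(my_data, fence);
//         finishReadObject(my_data, device);
//     }
//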
static std::unordered_map<void *, layer_data *> layer_data_map;
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;

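// command_pool_map remembers the pool each command buffer was allocated from
// (recorded by the layer when buffers are allocated), since using a command
// buffer implicitly uses the pool that owns its storage.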
// VkCommandBuffer overloads: a command buffer use may also implicitly use the
// command pool it was allocated from, so these optionally guard the pool too.
static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    if (lockPool) {
        my_data->c_VkCommandPool.startWrite(my_data->report_data, command_pool_map[object]);
    }
    my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
}
static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    my_data->c_VkCommandBuffer.finishWrite(object);
    if (lockPool) {
        my_data->c_VkCommandPool.finishWrite(command_pool_map[object]);
    }
}
static void startReadObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = false) {
    if (lockPool) {
        my_data->c_VkCommandPool.startRead(my_data->report_data, command_pool_map[object]);
    }
    my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
}
static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = false) {
    my_data->c_VkCommandBuffer.finishRead(object);
    if (lockPool) {
        my_data->c_VkCommandPool.finishRead(command_pool_map[object]);
    }
}

#endif // THREADING_H