/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials
 * are furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included
 * in all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS
 *
 * Author: Cody Northrop <cody@lunarg.com>
 * Author: Mike Stroyan <mike@LunarG.com>
 */

#ifndef THREADING_H
#define THREADING_H
#include <unordered_map> // for std::unordered_map used throughout this header
#include <vector>
#include "vk_layer_config.h"
#include "vk_layer_logging.h"

#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || \
    defined(__aarch64__) || defined(__powerpc64__)
// If pointers are 64-bit, then there can be separate counters for each
// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedef uint64_t.
#define DISTINCT_NONDISPATCHABLE_HANDLES
#endif
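// This mirrors VK_DEFINE_NON_DISPATCHABLE_HANDLE in vk_platform.h: on 64-bit
// platforms each non-dispatchable handle is a distinct pointer type, so e.g.
// counter<VkBuffer> and counter<VkImage> are separate instantiations, while on
// 32-bit platforms every such handle is the same uint64_t typedef.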

// Threading checker error codes
typedef enum _THREADING_CHECKER_ERROR {
    THREADING_CHECKER_NONE,                // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS,    // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE, // Object used simultaneously by recursion in single thread
} THREADING_CHECKER_ERROR;

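// Bookkeeping for one tracked object: the thread currently using it and the
// number of read and write uses in flight.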
struct object_use_data {
    loader_platform_thread_id thread;
    int reader_count;
    int writer_count;
};

struct layer_data;

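// A single global mutex and condition variable protect every counter's map;
// the condition variable lets a blocked caller wait until a contended object
// is released by its current users.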
static int threadingLockInitialized = 0;
static loader_platform_thread_mutex threadingLock;
static loader_platform_thread_cond threadingCond;

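// counter<T> tracks all outstanding uses of objects of type T. startWrite and
// startRead flag simultaneous use from multiple threads; when the report
// callback asks for the call to be skipped, the layer instead blocks until the
// object is free, otherwise it proceeds with the unsafe use.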
template <typename T> class counter {
  public:
    const char *typeName;
    VkDebugReportObjectTypeEXT objectType;
    std::unordered_map<T, object_use_data> uses;
    void startWrite(debug_report_data *report_data, T object) {
        VkBool32 skipCall = VK_FALSE;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        loader_platform_thread_lock_mutex(&threadingLock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record writer thread.
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 0;
            use_data->writer_count = 1;
            use_data->thread = tid;
        } else {
            struct object_use_data *use_data = &uses[object];
            if (use_data->reader_count == 0) {
                // There are no readers. Two writers just collided.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses.find(object) != uses.end()) {
                            loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            } else {
                // There are readers. This writer collided with them.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses.find(object) != uses.end()) {
                            loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            }
        }
        loader_platform_thread_unlock_mutex(&threadingLock);
    }

    void finishWrite(T object) {
        // Object is no longer in use
        loader_platform_thread_lock_mutex(&threadingLock);
        uses[object].writer_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        loader_platform_thread_cond_broadcast(&threadingCond);
        loader_platform_thread_unlock_mutex(&threadingLock);
    }

    void startRead(debug_report_data *report_data, T object) {
        VkBool32 skipCall = VK_FALSE;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        loader_platform_thread_lock_mutex(&threadingLock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record reader count
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 1;
            use_data->writer_count = 0;
            use_data->thread = tid;
        } else if (uses[object].writer_count > 0 && uses[object].thread != tid) {
            // There is a writer of the object.
            skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld", typeName,
                                uses[object].thread, tid);
            if (skipCall) {
                // Wait for thread-safe access to object instead of skipping call.
                while (uses.find(object) != uses.end()) {
                    loader_platform_thread_cond_wait(&threadingCond, &threadingLock);
                }
                // There is no current use of the object. Record reader count
                struct object_use_data *use_data = &uses[object];
                use_data->reader_count = 1;
                use_data->writer_count = 0;
                use_data->thread = tid;
            } else {
                uses[object].reader_count += 1;
            }
        } else {
            // There are other readers of the object. Increase reader count
            uses[object].reader_count += 1;
        }
        loader_platform_thread_unlock_mutex(&threadingLock);
    }
    void finishRead(T object) {
        loader_platform_thread_lock_mutex(&threadingLock);
        uses[object].reader_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        loader_platform_thread_cond_broadcast(&threadingCond);
        loader_platform_thread_unlock_mutex(&threadingLock);
    }
    counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
        typeName = name;
        objectType = type;
    }
};

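// Per-instance/per-device layer state: dispatch tables, debug-report state,
// and one counter per handle type that the layer tracks.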
struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
    counter<VkCommandBuffer> c_VkCommandBuffer;
    counter<VkDevice> c_VkDevice;
    counter<VkInstance> c_VkInstance;
    counter<VkQueue> c_VkQueue;
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
    counter<VkBuffer> c_VkBuffer;
    counter<VkBufferView> c_VkBufferView;
    counter<VkCommandPool> c_VkCommandPool;
    counter<VkDescriptorPool> c_VkDescriptorPool;
    counter<VkDescriptorSet> c_VkDescriptorSet;
    counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
    counter<VkDeviceMemory> c_VkDeviceMemory;
    counter<VkEvent> c_VkEvent;
    counter<VkFence> c_VkFence;
    counter<VkFramebuffer> c_VkFramebuffer;
    counter<VkImage> c_VkImage;
    counter<VkImageView> c_VkImageView;
    counter<VkPipeline> c_VkPipeline;
    counter<VkPipelineCache> c_VkPipelineCache;
    counter<VkPipelineLayout> c_VkPipelineLayout;
    counter<VkQueryPool> c_VkQueryPool;
    counter<VkRenderPass> c_VkRenderPass;
    counter<VkSampler> c_VkSampler;
    counter<VkSemaphore> c_VkSemaphore;
    counter<VkShaderModule> c_VkShaderModule;
    counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
    counter<uint64_t> c_uint64_t;
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    layer_data()
        : report_data(nullptr), c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
          c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
          c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
          c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
          c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
          c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
          c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
          c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
          c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
          c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
          c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
          c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT), c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
          c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
          c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
          c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
          c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
          c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
          c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
          c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
          c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
          c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
          c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
          c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
          c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
          c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    {};
};

#define WRAPPER(type)                                                                                                          \
    static void startWriteObject(struct layer_data *my_data, type object) {                                                   \
        my_data->c_##type.startWrite(my_data->report_data, object);                                                           \
    }                                                                                                                          \
    static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); }         \
    static void startReadObject(struct layer_data *my_data, type object) {                                                    \
        my_data->c_##type.startRead(my_data->report_data, object);                                                            \
    }                                                                                                                          \
    static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }

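// A generated intercept brackets its dispatch call with these helpers, e.g.
// (sketch; the layer-data lookup shown here is illustrative, not part of this file):
//     startWriteObject(my_data, commandBuffer);
//     VkResult result = my_data->device_dispatch_table->EndCommandBuffer(commandBuffer);
//     finishWriteObject(my_data, commandBuffer);
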
WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES

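// layer_data_map is keyed by each object's dispatch key; command_pool_map
// remembers which pool each command buffer was allocated from so that pool
// usage can be tracked implicitly below.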
static std::unordered_map<void *, layer_data *> layer_data_map;
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;

// VkCommandBuffer needs a check for implicit use of its command pool:
// recording into a command buffer also uses the pool it was allocated from.
static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    if (lockPool) {
        loader_platform_thread_lock_mutex(&threadingLock);
        VkCommandPool pool = command_pool_map[object];
        loader_platform_thread_unlock_mutex(&threadingLock);
        startWriteObject(my_data, pool);
    }
    my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
}
static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    my_data->c_VkCommandBuffer.finishWrite(object);
    if (lockPool) {
        loader_platform_thread_lock_mutex(&threadingLock);
        VkCommandPool pool = command_pool_map[object];
        loader_platform_thread_unlock_mutex(&threadingLock);
        finishWriteObject(my_data, pool);
    }
}
static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    loader_platform_thread_lock_mutex(&threadingLock);
    VkCommandPool pool = command_pool_map[object];
    loader_platform_thread_unlock_mutex(&threadingLock);
    startReadObject(my_data, pool);
    my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
}
static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    my_data->c_VkCommandBuffer.finishRead(object);
    loader_platform_thread_lock_mutex(&threadingLock);
    VkCommandPool pool = command_pool_map[object];
    loader_platform_thread_unlock_mutex(&threadingLock);
    finishReadObject(my_data, pool);
}
#endif // THREADING_H