/* Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Cody Northrop <cody@lunarg.com>
 * Author: Mike Stroyan <mike@LunarG.com>
 */

#ifndef THREADING_H
#define THREADING_H
#include <condition_variable>
#include <mutex>
#include <unordered_map>
#include <vector>
#include "vk_layer_config.h"
#include "vk_layer_logging.h"

#if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || \
    defined(__aarch64__) || defined(__powerpc64__)
// If pointers are 64-bit, then there can be separate counters for each
// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedef uint64_t.
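// For example, in a 32-bit build VkBuffer and VkImage are both typedefs of
// uint64_t, so counter<VkBuffer> and counter<VkImage> would name the same
// instantiation; a single counter<uint64_t> then has to cover every
// non-dispatchable handle type.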
#define DISTINCT_NONDISPATCHABLE_HANDLES
#endif

// Threading checker error codes
typedef enum _THREADING_CHECKER_ERROR {
    THREADING_CHECKER_NONE,                // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS,    // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE, // Object used simultaneously by recursion in single thread
} THREADING_CHECKER_ERROR;

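// Bookkeeping for one object: the thread recorded as using it, and how many
// read and write uses of it are currently in flight.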
struct object_use_data {
    loader_platform_thread_id thread;
    int reader_count;
    int writer_count;
};

struct layer_data;

static std::mutex global_lock;
static std::condition_variable global_condition;

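// counter<T> tracks concurrent use of each handle of type T. Callers bracket
// work with startWrite/finishWrite for exclusive use and startRead/finishRead
// for shared use. On a cross-thread collision the checker logs a
// THREADING_CHECKER_MULTIPLE_THREADS error; if the logging callback asks for
// the call to be skipped, the colliding thread instead blocks on
// global_condition until the object becomes free.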
template <typename T> class counter {
  public:
    const char *typeName;
    VkDebugReportObjectTypeEXT objectType;
    std::unordered_map<T, object_use_data> uses;
    void startWrite(debug_report_data *report_data, T object) {
        bool skipCall = false;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        std::unique_lock<std::mutex> lock(global_lock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record writer thread.
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 0;
            use_data->writer_count = 1;
            use_data->thread = tid;
        } else {
            struct object_use_data *use_data = &uses[object];
            if (use_data->reader_count == 0) {
                // There are no readers. Two writers just collided.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses.find(object) != uses.end()) {
                            global_condition.wait(lock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            } else {
                // There are readers. This writer collided with them.
                if (use_data->thread != tid) {
                    skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                        /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                        "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld",
                                        typeName, use_data->thread, tid);
                    if (skipCall) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses.find(object) != uses.end()) {
                            global_condition.wait(lock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *use_data = &uses[object];
                        use_data->thread = tid;
                        use_data->reader_count = 0;
                        use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            }
        }
    }

    void finishWrite(T object) {
        // Object is no longer in use
        std::unique_lock<std::mutex> lock(global_lock);
        uses[object].writer_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        lock.unlock();
        global_condition.notify_all();
    }

    void startRead(debug_report_data *report_data, T object) {
        bool skipCall = false;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        std::unique_lock<std::mutex> lock(global_lock);
        if (uses.find(object) == uses.end()) {
            // There is no current use of the object. Record reader count
            struct object_use_data *use_data = &uses[object];
            use_data->reader_count = 1;
            use_data->writer_count = 0;
            use_data->thread = tid;
        } else if (uses[object].writer_count > 0 && uses[object].thread != tid) {
            // There is a writer of the object.
            skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING",
                                "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld", typeName,
                                uses[object].thread, tid);
            if (skipCall) {
                // Wait for thread-safe access to object instead of skipping call.
                while (uses.find(object) != uses.end()) {
                    global_condition.wait(lock);
                }
                // There is no current use of the object. Record reader count
                struct object_use_data *use_data = &uses[object];
                use_data->reader_count = 1;
                use_data->writer_count = 0;
                use_data->thread = tid;
            } else {
                uses[object].reader_count += 1;
            }
        } else {
            // There are other readers of the object. Increase reader count
            uses[object].reader_count += 1;
        }
    }
    void finishRead(T object) {
        std::unique_lock<std::mutex> lock(global_lock);
        uses[object].reader_count -= 1;
        if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) {
            uses.erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        lock.unlock();
        global_condition.notify_all();
    }
    counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) {
        typeName = name;
        objectType = type;
    }
};

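// Per-dispatchable-object layer state: the dispatch tables plus one counter
// instance for every handle type the checker knows how to track.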
struct layer_data {
    debug_report_data *report_data;
    std::vector<VkDebugReportCallbackEXT> logging_callback;
    VkLayerDispatchTable *device_dispatch_table;
    VkLayerInstanceDispatchTable *instance_dispatch_table;
    counter<VkCommandBuffer> c_VkCommandBuffer;
    counter<VkDevice> c_VkDevice;
    counter<VkInstance> c_VkInstance;
    counter<VkQueue> c_VkQueue;
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
    counter<VkBuffer> c_VkBuffer;
    counter<VkBufferView> c_VkBufferView;
    counter<VkCommandPool> c_VkCommandPool;
    counter<VkDescriptorPool> c_VkDescriptorPool;
    counter<VkDescriptorSet> c_VkDescriptorSet;
    counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout;
    counter<VkDeviceMemory> c_VkDeviceMemory;
    counter<VkEvent> c_VkEvent;
    counter<VkFence> c_VkFence;
    counter<VkFramebuffer> c_VkFramebuffer;
    counter<VkImage> c_VkImage;
    counter<VkImageView> c_VkImageView;
    counter<VkPipeline> c_VkPipeline;
    counter<VkPipelineCache> c_VkPipelineCache;
    counter<VkPipelineLayout> c_VkPipelineLayout;
    counter<VkQueryPool> c_VkQueryPool;
    counter<VkRenderPass> c_VkRenderPass;
    counter<VkSampler> c_VkSampler;
    counter<VkSemaphore> c_VkSemaphore;
    counter<VkShaderModule> c_VkShaderModule;
    counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT;
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
    counter<uint64_t> c_uint64_t;
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    layer_data()
        : report_data(nullptr), c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT),
          c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT),
          c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT),
          c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT),
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
          c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT),
          c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT),
          c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT),
          c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT),
          c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT),
          c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT),
          c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT),
          c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT), c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT),
          c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT),
          c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT),
          c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT),
          c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT),
          c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT),
          c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT),
          c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT),
          c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT),
          c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT),
          c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT),
          c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT),
          c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
          c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES
    {};
};

#define WRAPPER(type)                                                                                                  \
    static void startWriteObject(struct layer_data *my_data, type object) {                                           \
        my_data->c_##type.startWrite(my_data->report_data, object);                                                    \
    }                                                                                                                  \
    static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); } \
    static void startReadObject(struct layer_data *my_data, type object) {                                            \
        my_data->c_##type.startRead(my_data->report_data, object);                                                     \
    }                                                                                                                  \
    static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }

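// Each WRAPPER(type) invocation below stamps out the four helpers for that
// handle type; e.g., WRAPPER(VkQueue) defines startWriteObject(layer_data *, VkQueue),
// finishWriteObject, startReadObject, and finishReadObject overloads that all
// forward to the c_VkQueue counter.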
WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
#else  // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif // DISTINCT_NONDISPATCHABLE_HANDLES

static std::unordered_map<void *, layer_data *> layer_data_map;
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;

// VkCommandBuffer functions also implicitly use the VkCommandPool that the
// command buffer was allocated from, so the pool is locked alongside the buffer.
static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    if (lockPool) {
        std::unique_lock<std::mutex> lock(global_lock);
        VkCommandPool pool = command_pool_map[object];
        lock.unlock();
        startWriteObject(my_data, pool);
    }
    my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object);
}
static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) {
    my_data->c_VkCommandBuffer.finishWrite(object);
    if (lockPool) {
        std::unique_lock<std::mutex> lock(global_lock);
        VkCommandPool pool = command_pool_map[object];
        lock.unlock();
        finishWriteObject(my_data, pool);
    }
}
static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    std::unique_lock<std::mutex> lock(global_lock);
    VkCommandPool pool = command_pool_map[object];
    lock.unlock();
    startReadObject(my_data, pool);
    my_data->c_VkCommandBuffer.startRead(my_data->report_data, object);
}
static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) {
    my_data->c_VkCommandBuffer.finishRead(object);
    std::unique_lock<std::mutex> lock(global_lock);
    VkCommandPool pool = command_pool_map[object];
    lock.unlock();
    finishReadObject(my_data, pool);
}
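
// Usage sketch (hypothetical interceptor, not part of this header): a layer
// entry point brackets the dispatched call so that concurrent use of the same
// VkQueue is detected. get_my_data_ptr and get_dispatch_key are assumed here
// from the common layer utilities.
//
//     VKAPI_ATTR VkResult VKAPI_CALL QueueSubmit(VkQueue queue, uint32_t submitCount,
//                                                const VkSubmitInfo *pSubmits, VkFence fence) {
//         layer_data *my_data = get_my_data_ptr(get_dispatch_key(queue), layer_data_map);
//         startWriteObject(my_data, queue);
//         VkResult result = my_data->device_dispatch_table->QueueSubmit(queue, submitCount, pSubmits, fence);
//         finishWriteObject(my_data, queue);
//         return result;
//     }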
#endif // THREADING_H