Mark Lobodzinski | 288e4f7 | 2016-02-02 15:55:36 -0700 | [diff] [blame] | 1 | /* Copyright (c) 2015-2016 The Khronos Group Inc. |
| 2 | * Copyright (c) 2015-2016 Valve Corporation |
| 3 | * Copyright (c) 2015-2016 LunarG, Inc. |
Mike Stroyan | b326d2c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 4 | * |
Mark Lobodzinski | 288e4f7 | 2016-02-02 15:55:36 -0700 | [diff] [blame] | 5 | * Permission is hereby granted, free of charge, to any person obtaining a copy |
| 6 | * of this software and/or associated documentation files (the "Materials"), to |
| 7 | * deal in the Materials without restriction, including without limitation the |
| 8 | * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or |
| 9 | * sell copies of the Materials, and to permit persons to whom the Materials |
| 10 | * are furnished to do so, subject to the following conditions: |
Mike Stroyan | b326d2c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 11 | * |
Mark Lobodzinski | 288e4f7 | 2016-02-02 15:55:36 -0700 | [diff] [blame] | 12 | * The above copyright notice(s) and this permission notice shall be included |
| 13 | * in all copies or substantial portions of the Materials. |
Mike Stroyan | b326d2c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 14 | * |
Mark Lobodzinski | 288e4f7 | 2016-02-02 15:55:36 -0700 | [diff] [blame] | 15 | * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
Mike Stroyan | b326d2c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
Mark Lobodzinski | 288e4f7 | 2016-02-02 15:55:36 -0700 | [diff] [blame] | 17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. |
| 18 | * |
| 19 | * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, |
| 20 | * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR |
| 21 | * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE |
| 22 | * USE OR OTHER DEALINGS IN THE MATERIALS |
Courtney Goeltzenleuchter | 96cd795 | 2015-10-30 11:14:30 -0600 | [diff] [blame] | 23 | * |
| 24 | * Author: Cody Northrop <cody@lunarg.com> |
| 25 | * Author: Mike Stroyan <mike@LunarG.com> |
Mike Stroyan | b326d2c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 26 | */ |
Mark Lobodzinski | 288e4f7 | 2016-02-02 15:55:36 -0700 | [diff] [blame] | 27 | |
Mike Stroyan | 90a166e | 2015-08-10 16:42:53 -0600 | [diff] [blame] | 28 | #ifndef THREADING_H |
| 29 | #define THREADING_H |
#include <condition_variable>
#include <mutex>
#include <unordered_map>
#include <vector>

#include "vk_layer_config.h"
#include "vk_layer_logging.h"
Mike Stroyan | b326d2c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 35 | |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 36 | #if defined(__LP64__) || defined(_WIN64) || defined(__x86_64__) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || \ |
| 37 | defined(__aarch64__) || defined(__powerpc64__) |
Mike Stroyan | c630ec3 | 2016-01-29 15:09:04 -0700 | [diff] [blame] | 38 | // If pointers are 64-bit, then there can be separate counters for each |
| 39 | // NONDISPATCHABLE_HANDLE type. Otherwise they are all typedef uint64_t. |
| 40 | #define DISTINCT_NONDISPATCHABLE_HANDLES |
| 41 | #endif |
| 42 | |
Mike Stroyan | b326d2c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 43 | // Draw State ERROR codes |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 44 | typedef enum _THREADING_CHECKER_ERROR { |
| 45 | THREADING_CHECKER_NONE, // Used for INFO & other non-error messages |
| 46 | THREADING_CHECKER_MULTIPLE_THREADS, // Object used simultaneously by multiple threads |
| 47 | THREADING_CHECKER_SINGLE_THREAD_REUSE, // Object used simultaneously by recursion in single thread |
Mike Stroyan | b326d2c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 48 | } THREADING_CHECKER_ERROR; |
| 49 | |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 50 | struct object_use_data { |
| 51 | loader_platform_thread_id thread; |
| 52 | int reader_count; |
| 53 | int writer_count; |
| 54 | }; |
| 55 | |
| 56 | struct layer_data; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 57 | |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 58 | static std::mutex global_lock; |
| 59 | static std::condition_variable global_condition; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 60 | |
| 61 | template <typename T> class counter { |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 62 | public: |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 63 | const char *typeName; |
| 64 | VkDebugReportObjectTypeEXT objectType; |
Mike Stroyan | 941448a | 2016-01-29 15:33:21 -0700 | [diff] [blame] | 65 | std::unordered_map<T, object_use_data> uses; |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 66 | void startWrite(debug_report_data *report_data, T object) { |
Dustin Graves | bb84994 | 2016-04-05 13:48:15 -0600 | [diff] [blame] | 67 | bool skipCall = false; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 68 | loader_platform_thread_id tid = loader_platform_get_thread_id(); |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 69 | std::unique_lock<std::mutex> lock(global_lock); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 70 | if (uses.find(object) == uses.end()) { |
| 71 | // There is no current use of the object. Record writer thread. |
| 72 | struct object_use_data *use_data = &uses[object]; |
| 73 | use_data->reader_count = 0; |
| 74 | use_data->writer_count = 1; |
| 75 | use_data->thread = tid; |
| 76 | } else { |
| 77 | struct object_use_data *use_data = &uses[object]; |
| 78 | if (use_data->reader_count == 0) { |
| 79 | // There are no readers. Two writers just collided. |
| 80 | if (use_data->thread != tid) { |
Dustin Graves | 868726c | 2016-02-05 16:06:21 -0700 | [diff] [blame] | 81 | skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 82 | /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING", |
| 83 | "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld", |
| 84 | typeName, use_data->thread, tid); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 85 | if (skipCall) { |
| 86 | // Wait for thread-safe access to object instead of skipping call. |
| 87 | while (uses.find(object) != uses.end()) { |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 88 | global_condition.wait(lock); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 89 | } |
| 90 | // There is now no current use of the object. Record writer thread. |
| 91 | struct object_use_data *use_data = &uses[object]; |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 92 | use_data->thread = tid; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 93 | use_data->reader_count = 0; |
| 94 | use_data->writer_count = 1; |
| 95 | } else { |
| 96 | // Continue with an unsafe use of the object. |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 97 | use_data->thread = tid; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 98 | use_data->writer_count += 1; |
| 99 | } |
| 100 | } else { |
Mike Stroyan | c0a1e46 | 2016-02-05 09:11:32 -0700 | [diff] [blame] | 101 | // This is either safe multiple use in one call, or recursive use. |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 102 | // There is no way to make recursion safe. Just forge ahead. |
| 103 | use_data->writer_count += 1; |
| 104 | } |
| 105 | } else { |
| 106 | // There are readers. This writer collided with them. |
| 107 | if (use_data->thread != tid) { |
Dustin Graves | 868726c | 2016-02-05 16:06:21 -0700 | [diff] [blame] | 108 | skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 109 | /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING", |
| 110 | "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld", |
| 111 | typeName, use_data->thread, tid); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 112 | if (skipCall) { |
| 113 | // Wait for thread-safe access to object instead of skipping call. |
| 114 | while (uses.find(object) != uses.end()) { |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 115 | global_condition.wait(lock); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 116 | } |
| 117 | // There is now no current use of the object. Record writer thread. |
| 118 | struct object_use_data *use_data = &uses[object]; |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 119 | use_data->thread = tid; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 120 | use_data->reader_count = 0; |
| 121 | use_data->writer_count = 1; |
| 122 | } else { |
| 123 | // Continue with an unsafe use of the object. |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 124 | use_data->thread = tid; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 125 | use_data->writer_count += 1; |
| 126 | } |
| 127 | } else { |
Mike Stroyan | c0a1e46 | 2016-02-05 09:11:32 -0700 | [diff] [blame] | 128 | // This is either safe multiple use in one call, or recursive use. |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 129 | // There is no way to make recursion safe. Just forge ahead. |
| 130 | use_data->writer_count += 1; |
| 131 | } |
| 132 | } |
| 133 | } |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 134 | } |
| 135 | |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 136 | void finishWrite(T object) { |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 137 | // Object is no longer in use |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 138 | std::unique_lock<std::mutex> lock(global_lock); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 139 | uses[object].writer_count -= 1; |
| 140 | if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) { |
| 141 | uses.erase(object); |
| 142 | } |
| 143 | // Notify any waiting threads that this object may be safe to use |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 144 | lock.unlock(); |
| 145 | global_condition.notify_all(); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 146 | } |
| 147 | |
| 148 | void startRead(debug_report_data *report_data, T object) { |
Dustin Graves | bb84994 | 2016-04-05 13:48:15 -0600 | [diff] [blame] | 149 | bool skipCall = false; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 150 | loader_platform_thread_id tid = loader_platform_get_thread_id(); |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 151 | std::unique_lock<std::mutex> lock(global_lock); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 152 | if (uses.find(object) == uses.end()) { |
| 153 | // There is no current use of the object. Record reader count |
| 154 | struct object_use_data *use_data = &uses[object]; |
| 155 | use_data->reader_count = 1; |
| 156 | use_data->writer_count = 0; |
| 157 | use_data->thread = tid; |
Mike Stroyan | c0a1e46 | 2016-02-05 09:11:32 -0700 | [diff] [blame] | 158 | } else if (uses[object].writer_count > 0 && uses[object].thread != tid) { |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 159 | // There is a writer of the object. |
Dustin Graves | 868726c | 2016-02-05 16:06:21 -0700 | [diff] [blame] | 160 | skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 161 | /*location*/ 0, THREADING_CHECKER_MULTIPLE_THREADS, "THREADING", |
| 162 | "THREADING ERROR : object of type %s is simultaneously used in thread %ld and thread %ld", typeName, |
| 163 | uses[object].thread, tid); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 164 | if (skipCall) { |
| 165 | // Wait for thread-safe access to object instead of skipping call. |
| 166 | while (uses.find(object) != uses.end()) { |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 167 | global_condition.wait(lock); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 168 | } |
| 169 | // There is no current use of the object. Record reader count |
| 170 | struct object_use_data *use_data = &uses[object]; |
| 171 | use_data->reader_count = 1; |
| 172 | use_data->writer_count = 0; |
| 173 | use_data->thread = tid; |
| 174 | } else { |
| 175 | uses[object].reader_count += 1; |
| 176 | } |
| 177 | } else { |
| 178 | // There are other readers of the object. Increase reader count |
| 179 | uses[object].reader_count += 1; |
| 180 | } |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 181 | } |
| 182 | void finishRead(T object) { |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 183 | std::unique_lock<std::mutex> lock(global_lock); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 184 | uses[object].reader_count -= 1; |
| 185 | if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) { |
| 186 | uses.erase(object); |
| 187 | } |
| 188 | // Notify and waiting threads that this object may be safe to use |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 189 | lock.unlock(); |
| 190 | global_condition.notify_all(); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 191 | } |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 192 | counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) { |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 193 | typeName = name; |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 194 | objectType = type; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 195 | } |
| 196 | }; |
| 197 | |
Cody Northrop | 73bb657 | 2015-09-28 15:09:32 -0600 | [diff] [blame] | 198 | struct layer_data { |
Mike Stroyan | 90a166e | 2015-08-10 16:42:53 -0600 | [diff] [blame] | 199 | debug_report_data *report_data; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 200 | std::vector<VkDebugReportCallbackEXT> logging_callback; |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 201 | VkLayerDispatchTable *device_dispatch_table; |
| 202 | VkLayerInstanceDispatchTable *instance_dispatch_table; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 203 | counter<VkCommandBuffer> c_VkCommandBuffer; |
| 204 | counter<VkDevice> c_VkDevice; |
| 205 | counter<VkInstance> c_VkInstance; |
| 206 | counter<VkQueue> c_VkQueue; |
Mike Stroyan | c630ec3 | 2016-01-29 15:09:04 -0700 | [diff] [blame] | 207 | #ifdef DISTINCT_NONDISPATCHABLE_HANDLES |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 208 | counter<VkBuffer> c_VkBuffer; |
| 209 | counter<VkBufferView> c_VkBufferView; |
| 210 | counter<VkCommandPool> c_VkCommandPool; |
| 211 | counter<VkDescriptorPool> c_VkDescriptorPool; |
| 212 | counter<VkDescriptorSet> c_VkDescriptorSet; |
| 213 | counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout; |
| 214 | counter<VkDeviceMemory> c_VkDeviceMemory; |
| 215 | counter<VkEvent> c_VkEvent; |
| 216 | counter<VkFence> c_VkFence; |
| 217 | counter<VkFramebuffer> c_VkFramebuffer; |
| 218 | counter<VkImage> c_VkImage; |
| 219 | counter<VkImageView> c_VkImageView; |
| 220 | counter<VkPipeline> c_VkPipeline; |
| 221 | counter<VkPipelineCache> c_VkPipelineCache; |
| 222 | counter<VkPipelineLayout> c_VkPipelineLayout; |
| 223 | counter<VkQueryPool> c_VkQueryPool; |
| 224 | counter<VkRenderPass> c_VkRenderPass; |
| 225 | counter<VkSampler> c_VkSampler; |
| 226 | counter<VkSemaphore> c_VkSemaphore; |
| 227 | counter<VkShaderModule> c_VkShaderModule; |
| 228 | counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT; |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 229 | #else // DISTINCT_NONDISPATCHABLE_HANDLES |
Mike Stroyan | c630ec3 | 2016-01-29 15:09:04 -0700 | [diff] [blame] | 230 | counter<uint64_t> c_uint64_t; |
| 231 | #endif // DISTINCT_NONDISPATCHABLE_HANDLES |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 232 | layer_data() |
| 233 | : report_data(nullptr), c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT), |
| 234 | c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT), |
| 235 | c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT), |
| 236 | c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT), |
Mike Stroyan | c630ec3 | 2016-01-29 15:09:04 -0700 | [diff] [blame] | 237 | #ifdef DISTINCT_NONDISPATCHABLE_HANDLES |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 238 | c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT), |
| 239 | c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT), |
| 240 | c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT), |
| 241 | c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT), |
| 242 | c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT), |
| 243 | c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT), |
| 244 | c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT), |
| 245 | c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT), c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT), |
| 246 | c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT), |
| 247 | c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT), |
| 248 | c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT), |
| 249 | c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT), |
| 250 | c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT), |
| 251 | c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT), |
| 252 | c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT), |
| 253 | c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT), |
| 254 | c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT), |
| 255 | c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT), |
| 256 | c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT), |
| 257 | c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT) |
| 258 | #else // DISTINCT_NONDISPATCHABLE_HANDLES |
| 259 | c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) |
Mike Stroyan | c630ec3 | 2016-01-29 15:09:04 -0700 | [diff] [blame] | 260 | #endif // DISTINCT_NONDISPATCHABLE_HANDLES |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 261 | {}; |
Cody Northrop | 73bb657 | 2015-09-28 15:09:32 -0600 | [diff] [blame] | 262 | }; |
Mike Stroyan | 90a166e | 2015-08-10 16:42:53 -0600 | [diff] [blame] | 263 | |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 264 | #define WRAPPER(type) \ |
| 265 | static void startWriteObject(struct layer_data *my_data, type object) { \ |
| 266 | my_data->c_##type.startWrite(my_data->report_data, object); \ |
| 267 | } \ |
| 268 | static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); } \ |
| 269 | static void startReadObject(struct layer_data *my_data, type object) { \ |
| 270 | my_data->c_##type.startRead(my_data->report_data, object); \ |
| 271 | } \ |
| 272 | static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); } |
Mike Stroyan | 90a166e | 2015-08-10 16:42:53 -0600 | [diff] [blame] | 273 | |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 274 | WRAPPER(VkDevice) |
| 275 | WRAPPER(VkInstance) |
| 276 | WRAPPER(VkQueue) |
Mike Stroyan | c630ec3 | 2016-01-29 15:09:04 -0700 | [diff] [blame] | 277 | #ifdef DISTINCT_NONDISPATCHABLE_HANDLES |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 278 | WRAPPER(VkBuffer) |
| 279 | WRAPPER(VkBufferView) |
| 280 | WRAPPER(VkCommandPool) |
| 281 | WRAPPER(VkDescriptorPool) |
| 282 | WRAPPER(VkDescriptorSet) |
| 283 | WRAPPER(VkDescriptorSetLayout) |
| 284 | WRAPPER(VkDeviceMemory) |
| 285 | WRAPPER(VkEvent) |
| 286 | WRAPPER(VkFence) |
| 287 | WRAPPER(VkFramebuffer) |
| 288 | WRAPPER(VkImage) |
| 289 | WRAPPER(VkImageView) |
| 290 | WRAPPER(VkPipeline) |
| 291 | WRAPPER(VkPipelineCache) |
| 292 | WRAPPER(VkPipelineLayout) |
| 293 | WRAPPER(VkQueryPool) |
| 294 | WRAPPER(VkRenderPass) |
| 295 | WRAPPER(VkSampler) |
| 296 | WRAPPER(VkSemaphore) |
| 297 | WRAPPER(VkShaderModule) |
| 298 | WRAPPER(VkDebugReportCallbackEXT) |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 299 | #else // DISTINCT_NONDISPATCHABLE_HANDLES |
Mike Stroyan | c630ec3 | 2016-01-29 15:09:04 -0700 | [diff] [blame] | 300 | WRAPPER(uint64_t) |
| 301 | #endif // DISTINCT_NONDISPATCHABLE_HANDLES |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 302 | |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 303 | static std::unordered_map<void *, layer_data *> layer_data_map; |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 304 | static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map; |
| 305 | |
| 306 | // VkCommandBuffer needs check for implicit use of command pool |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 307 | static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) { |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 308 | if (lockPool) { |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 309 | std::unique_lock<std::mutex> lock(global_lock); |
Mike Stroyan | ef2b6b2 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 310 | VkCommandPool pool = command_pool_map[object]; |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 311 | lock.unlock(); |
Mike Stroyan | ef2b6b2 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 312 | startWriteObject(my_data, pool); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 313 | } |
| 314 | my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object); |
| 315 | } |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 316 | static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) { |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 317 | my_data->c_VkCommandBuffer.finishWrite(object); |
| 318 | if (lockPool) { |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 319 | std::unique_lock<std::mutex> lock(global_lock); |
Mike Stroyan | ef2b6b2 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 320 | VkCommandPool pool = command_pool_map[object]; |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 321 | lock.unlock(); |
Mike Stroyan | ef2b6b2 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 322 | finishWriteObject(my_data, pool); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 323 | } |
| 324 | } |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 325 | static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) { |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 326 | std::unique_lock<std::mutex> lock(global_lock); |
Mike Stroyan | ef2b6b2 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 327 | VkCommandPool pool = command_pool_map[object]; |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 328 | lock.unlock(); |
Mike Stroyan | ef2b6b2 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 329 | startReadObject(my_data, pool); |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 330 | my_data->c_VkCommandBuffer.startRead(my_data->report_data, object); |
| 331 | } |
Jon Ashburn | 491a3cd | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 332 | static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) { |
Mike Stroyan | 8849f9a | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 333 | my_data->c_VkCommandBuffer.finishRead(object); |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 334 | std::unique_lock<std::mutex> lock(global_lock); |
Mike Stroyan | ef2b6b2 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 335 | VkCommandPool pool = command_pool_map[object]; |
Jeremy Hayes | df40567 | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 336 | lock.unlock(); |
Mike Stroyan | ef2b6b2 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 337 | finishReadObject(my_data, pool); |
Mike Stroyan | 90a166e | 2015-08-10 16:42:53 -0600 | [diff] [blame] | 338 | } |
| 339 | #endif // THREADING_H |