| Mark Lobodzinski | 6eda00a | 2016-02-02 15:55:36 -0700 | [diff] [blame] | 1 | /* Copyright (c) 2015-2016 The Khronos Group Inc. |
| 2 | * Copyright (c) 2015-2016 Valve Corporation |
| 3 | * Copyright (c) 2015-2016 LunarG, Inc. |
| Mike Stroyan | 3712d5c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 4 | * |
| Jon Ashburn | 3ebf125 | 2016-04-19 11:30:31 -0600 | [diff] [blame] | 5 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | * you may not use this file except in compliance with the License. |
| 7 | * You may obtain a copy of the License at |
| Mike Stroyan | 3712d5c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 8 | * |
| Jon Ashburn | 3ebf125 | 2016-04-19 11:30:31 -0600 | [diff] [blame] | 9 | * http://www.apache.org/licenses/LICENSE-2.0 |
| Mike Stroyan | 3712d5c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 10 | * |
| Jon Ashburn | 3ebf125 | 2016-04-19 11:30:31 -0600 | [diff] [blame] | 11 | * Unless required by applicable law or agreed to in writing, software |
| 12 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | * See the License for the specific language governing permissions and |
| 15 | * limitations under the License. |
| Courtney Goeltzenleuchter | 0555952 | 2015-10-30 11:14:30 -0600 | [diff] [blame] | 16 | * |
| 17 | * Author: Cody Northrop <cody@lunarg.com> |
| 18 | * Author: Mike Stroyan <mike@LunarG.com> |
| Mike Stroyan | 3712d5c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 19 | */ |
| Mark Lobodzinski | 6eda00a | 2016-02-02 15:55:36 -0700 | [diff] [blame] | 20 | |
| Mike Stroyan | 313f7e6 | 2015-08-10 16:42:53 -0600 | [diff] [blame] | 21 | #ifndef THREADING_H |
| 22 | #define THREADING_H |
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <type_traits>
#include <unordered_map>
#include <vector>
#include "vk_layer_config.h"
#include "vk_layer_logging.h"
| Mike Stroyan | 3712d5c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 28 | |
// Expand the macro once on a phony name so we can inspect, at compile time,
// what a non-dispatchable handle actually is on this platform.
VK_DEFINE_NON_DISPATCHABLE_HANDLE(DISTINCT_NONDISPATCHABLE_PHONY_HANDLE)
// The following line must match the vulkan_core.h condition guarding VK_DEFINE_NON_DISPATCHABLE_HANDLE
#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || \
    defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
// If pointers are 64-bit, then there can be separate counters for each
// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedef uint64_t.
#define DISTINCT_NONDISPATCHABLE_HANDLES
// Make sure we catch any disagreement between us and the vulkan definition
static_assert(std::is_pointer<DISTINCT_NONDISPATCHABLE_PHONY_HANDLE>::value,
              "Mismatched non-dispatchable handle handle, expected pointer type.");
#else
// Make sure we catch any disagreement between us and the vulkan definition
static_assert(std::is_same<uint64_t, DISTINCT_NONDISPATCHABLE_PHONY_HANDLE>::value,
              "Mismatched non-dispatchable handle handle, expected uint64_t.");
#endif
| 44 | |
// Threading-checker error codes, passed as the message code of log_msg reports.
enum THREADING_CHECKER_ERROR {
    THREADING_CHECKER_NONE,                 // Used for INFO & other non-error messages
    THREADING_CHECKER_MULTIPLE_THREADS,     // Object used simultaneously by multiple threads
    THREADING_CHECKER_SINGLE_THREAD_REUSE,  // Object used simultaneously by recursion in single thread
};
| Mike Stroyan | 3712d5c | 2015-04-02 11:59:05 -0600 | [diff] [blame] | 51 | |
// Bookkeeping for one in-use handle: the thread that most recently claimed it
// and how many shared (reader) / exclusive (writer) uses are in flight.
// Note: members are not default-initialized; counter<> assigns all fields
// whenever it creates an entry.
struct object_use_data {
    loader_platform_thread_id thread;  // last thread to start a use of the object
    int reader_count;                  // concurrent shared uses
    int writer_count;                  // exclusive uses (> 1 on same-thread recursion)
};

// Defined later in this header; forward-declared for helpers that take it.
struct layer_data;
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 59 | |
namespace threading {
// Whether a vulkan call is currently in flight, and whether two calls have
// ever been observed in flight at once. std::atomic replaces the previous
// 'volatile bool': volatile does not synchronize threads, so the old
// concurrent reads/writes were a data race (UB). Atomics make them
// well-defined without changing the detection logic.
// NOTE(review): these are non-inline definitions in a header; including this
// header from more than one TU would violate the ODR — preserved as-is.
std::atomic<bool> vulkan_in_use(false);
std::atomic<bool> vulkan_multi_threaded(false);

// Starting check if an application is using vulkan from multiple threads.
// Returns true once multi-threaded use has been detected (sticky), else false.
inline bool startMultiThread() {
    if (vulkan_multi_threaded) {
        return true;
    }
    // Atomically claim "in use"; if it was already set, another call is
    // executing concurrently, so the app is multi-threaded.
    if (vulkan_in_use.exchange(true)) {
        vulkan_multi_threaded = true;
        return true;
    }
    return false;
}

// Finishing check if an application is using vulkan from multiple threads.
inline void finishMultiThread() { vulkan_in_use = false; }
}  // namespace threading
| Mike Stroyan | 0b64aee | 2016-07-13 10:10:25 -0600 | [diff] [blame] | 79 | |
| Mark Lobodzinski | 64318ba | 2017-01-26 13:34:13 -0700 | [diff] [blame] | 80 | template <typename T> |
| 81 | class counter { |
| 82 | public: |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 83 | const char *typeName; |
| 84 | VkDebugReportObjectTypeEXT objectType; |
| Mike Stroyan | 1a08001 | 2016-01-29 15:33:21 -0700 | [diff] [blame] | 85 | std::unordered_map<T, object_use_data> uses; |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 86 | std::mutex counter_lock; |
| 87 | std::condition_variable counter_condition; |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 88 | void startWrite(debug_report_data *report_data, T object) { |
| Mike Stroyan | 9e2c72a | 2017-05-01 11:10:07 -0600 | [diff] [blame] | 89 | if (object == VK_NULL_HANDLE) { |
| 90 | return; |
| 91 | } |
| Dustin Graves | 080069b | 2016-04-05 13:48:15 -0600 | [diff] [blame] | 92 | bool skipCall = false; |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 93 | loader_platform_thread_id tid = loader_platform_get_thread_id(); |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 94 | std::unique_lock<std::mutex> lock(counter_lock); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 95 | if (uses.find(object) == uses.end()) { |
| 96 | // There is no current use of the object. Record writer thread. |
| 97 | struct object_use_data *use_data = &uses[object]; |
| 98 | use_data->reader_count = 0; |
| 99 | use_data->writer_count = 1; |
| 100 | use_data->thread = tid; |
| 101 | } else { |
| 102 | struct object_use_data *use_data = &uses[object]; |
| 103 | if (use_data->reader_count == 0) { |
| 104 | // There are no readers. Two writers just collided. |
| 105 | if (use_data->thread != tid) { |
| Mark Lobodzinski | b1fd9d1 | 2018-03-30 14:26:00 -0600 | [diff] [blame^] | 106 | skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), |
| Mark Lobodzinski | 729a8d3 | 2017-01-26 12:16:30 -0700 | [diff] [blame] | 107 | THREADING_CHECKER_MULTIPLE_THREADS, "THREADING", |
| Karl Schultz | 2e5ed33 | 2017-12-12 10:33:01 -0500 | [diff] [blame] | 108 | "THREADING ERROR : object of type %s is simultaneously used in " |
| 109 | "thread 0x%" PRIx64 " and thread 0x%" PRIx64, |
| 110 | typeName, (uint64_t)use_data->thread, (uint64_t)tid); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 111 | if (skipCall) { |
| 112 | // Wait for thread-safe access to object instead of skipping call. |
| 113 | while (uses.find(object) != uses.end()) { |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 114 | counter_condition.wait(lock); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 115 | } |
| 116 | // There is now no current use of the object. Record writer thread. |
| Karl Schultz | 47dd59d | 2017-01-20 13:19:20 -0700 | [diff] [blame] | 117 | struct object_use_data *new_use_data = &uses[object]; |
| 118 | new_use_data->thread = tid; |
| 119 | new_use_data->reader_count = 0; |
| 120 | new_use_data->writer_count = 1; |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 121 | } else { |
| 122 | // Continue with an unsafe use of the object. |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 123 | use_data->thread = tid; |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 124 | use_data->writer_count += 1; |
| 125 | } |
| 126 | } else { |
| Mike Stroyan | c877450 | 2016-02-05 09:11:32 -0700 | [diff] [blame] | 127 | // This is either safe multiple use in one call, or recursive use. |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 128 | // There is no way to make recursion safe. Just forge ahead. |
| 129 | use_data->writer_count += 1; |
| 130 | } |
| 131 | } else { |
| 132 | // There are readers. This writer collided with them. |
| 133 | if (use_data->thread != tid) { |
| Mark Lobodzinski | b1fd9d1 | 2018-03-30 14:26:00 -0600 | [diff] [blame^] | 134 | skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), |
| Mark Lobodzinski | 729a8d3 | 2017-01-26 12:16:30 -0700 | [diff] [blame] | 135 | THREADING_CHECKER_MULTIPLE_THREADS, "THREADING", |
| Karl Schultz | 2e5ed33 | 2017-12-12 10:33:01 -0500 | [diff] [blame] | 136 | "THREADING ERROR : object of type %s is simultaneously used in " |
| 137 | "thread 0x%" PRIx64 " and thread 0x%" PRIx64, |
| 138 | typeName, (uint64_t)use_data->thread, (uint64_t)tid); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 139 | if (skipCall) { |
| 140 | // Wait for thread-safe access to object instead of skipping call. |
| 141 | while (uses.find(object) != uses.end()) { |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 142 | counter_condition.wait(lock); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 143 | } |
| 144 | // There is now no current use of the object. Record writer thread. |
| Karl Schultz | 47dd59d | 2017-01-20 13:19:20 -0700 | [diff] [blame] | 145 | struct object_use_data *new_use_data = &uses[object]; |
| 146 | new_use_data->thread = tid; |
| 147 | new_use_data->reader_count = 0; |
| 148 | new_use_data->writer_count = 1; |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 149 | } else { |
| 150 | // Continue with an unsafe use of the object. |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 151 | use_data->thread = tid; |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 152 | use_data->writer_count += 1; |
| 153 | } |
| 154 | } else { |
| Mike Stroyan | c877450 | 2016-02-05 09:11:32 -0700 | [diff] [blame] | 155 | // This is either safe multiple use in one call, or recursive use. |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 156 | // There is no way to make recursion safe. Just forge ahead. |
| 157 | use_data->writer_count += 1; |
| 158 | } |
| 159 | } |
| 160 | } |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 161 | } |
| 162 | |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 163 | void finishWrite(T object) { |
| Mike Stroyan | 9e2c72a | 2017-05-01 11:10:07 -0600 | [diff] [blame] | 164 | if (object == VK_NULL_HANDLE) { |
| 165 | return; |
| 166 | } |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 167 | // Object is no longer in use |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 168 | std::unique_lock<std::mutex> lock(counter_lock); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 169 | uses[object].writer_count -= 1; |
| 170 | if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) { |
| 171 | uses.erase(object); |
| 172 | } |
| 173 | // Notify any waiting threads that this object may be safe to use |
| Jeremy Hayes | b350beb | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 174 | lock.unlock(); |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 175 | counter_condition.notify_all(); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 176 | } |
| 177 | |
| 178 | void startRead(debug_report_data *report_data, T object) { |
| Mike Stroyan | 9e2c72a | 2017-05-01 11:10:07 -0600 | [diff] [blame] | 179 | if (object == VK_NULL_HANDLE) { |
| 180 | return; |
| 181 | } |
| Dustin Graves | 080069b | 2016-04-05 13:48:15 -0600 | [diff] [blame] | 182 | bool skipCall = false; |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 183 | loader_platform_thread_id tid = loader_platform_get_thread_id(); |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 184 | std::unique_lock<std::mutex> lock(counter_lock); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 185 | if (uses.find(object) == uses.end()) { |
| 186 | // There is no current use of the object. Record reader count |
| 187 | struct object_use_data *use_data = &uses[object]; |
| 188 | use_data->reader_count = 1; |
| 189 | use_data->writer_count = 0; |
| 190 | use_data->thread = tid; |
| Mike Stroyan | c877450 | 2016-02-05 09:11:32 -0700 | [diff] [blame] | 191 | } else if (uses[object].writer_count > 0 && uses[object].thread != tid) { |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 192 | // There is a writer of the object. |
| Mark Lobodzinski | b1fd9d1 | 2018-03-30 14:26:00 -0600 | [diff] [blame^] | 193 | skipCall |= log_msg(report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object), |
| Mark Lobodzinski | 729a8d3 | 2017-01-26 12:16:30 -0700 | [diff] [blame] | 194 | THREADING_CHECKER_MULTIPLE_THREADS, "THREADING", |
| Karl Schultz | 2e5ed33 | 2017-12-12 10:33:01 -0500 | [diff] [blame] | 195 | "THREADING ERROR : object of type %s is simultaneously used in " |
| 196 | "thread 0x%" PRIx64 " and thread 0x%" PRIx64, |
| 197 | typeName, (uint64_t)uses[object].thread, (uint64_t)tid); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 198 | if (skipCall) { |
| 199 | // Wait for thread-safe access to object instead of skipping call. |
| 200 | while (uses.find(object) != uses.end()) { |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 201 | counter_condition.wait(lock); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 202 | } |
| 203 | // There is no current use of the object. Record reader count |
| 204 | struct object_use_data *use_data = &uses[object]; |
| 205 | use_data->reader_count = 1; |
| 206 | use_data->writer_count = 0; |
| 207 | use_data->thread = tid; |
| 208 | } else { |
| 209 | uses[object].reader_count += 1; |
| 210 | } |
| 211 | } else { |
| 212 | // There are other readers of the object. Increase reader count |
| 213 | uses[object].reader_count += 1; |
| 214 | } |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 215 | } |
| 216 | void finishRead(T object) { |
| Mike Stroyan | 9e2c72a | 2017-05-01 11:10:07 -0600 | [diff] [blame] | 217 | if (object == VK_NULL_HANDLE) { |
| 218 | return; |
| 219 | } |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 220 | std::unique_lock<std::mutex> lock(counter_lock); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 221 | uses[object].reader_count -= 1; |
| 222 | if ((uses[object].reader_count == 0) && (uses[object].writer_count == 0)) { |
| 223 | uses.erase(object); |
| 224 | } |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 225 | // Notify any waiting threads that this object may be safe to use |
| Jeremy Hayes | b350beb | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 226 | lock.unlock(); |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 227 | counter_condition.notify_all(); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 228 | } |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 229 | counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) { |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 230 | typeName = name; |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 231 | objectType = type; |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 232 | } |
| 233 | }; |
| 234 | |
| Cody Northrop | 55443ef | 2015-09-28 15:09:32 -0600 | [diff] [blame] | 235 | struct layer_data { |
| Chia-I Wu | 59d0a33 | 2016-05-16 11:21:03 +0800 | [diff] [blame] | 236 | VkInstance instance; |
| 237 | |
| Mike Stroyan | 313f7e6 | 2015-08-10 16:42:53 -0600 | [diff] [blame] | 238 | debug_report_data *report_data; |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 239 | std::vector<VkDebugReportCallbackEXT> logging_callback; |
| Mark Young | 6ba8abe | 2017-11-09 10:37:04 -0700 | [diff] [blame] | 240 | std::vector<VkDebugUtilsMessengerEXT> logging_messenger; |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 241 | VkLayerDispatchTable *device_dispatch_table; |
| 242 | VkLayerInstanceDispatchTable *instance_dispatch_table; |
| Mark Young | 6ba8abe | 2017-11-09 10:37:04 -0700 | [diff] [blame] | 243 | |
| Ian Elliott | ed6b5ac | 2016-04-28 09:08:13 -0600 | [diff] [blame] | 244 | // The following are for keeping track of the temporary callbacks that can |
| 245 | // be used in vkCreateInstance and vkDestroyInstance: |
| Mark Young | 6ba8abe | 2017-11-09 10:37:04 -0700 | [diff] [blame] | 246 | uint32_t num_tmp_report_callbacks; |
| 247 | VkDebugReportCallbackCreateInfoEXT *tmp_report_create_infos; |
| 248 | VkDebugReportCallbackEXT *tmp_report_callbacks; |
| 249 | uint32_t num_tmp_debug_messengers; |
| 250 | VkDebugUtilsMessengerCreateInfoEXT *tmp_messenger_create_infos; |
| 251 | VkDebugUtilsMessengerEXT *tmp_debug_messengers; |
| 252 | |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 253 | counter<VkCommandBuffer> c_VkCommandBuffer; |
| 254 | counter<VkDevice> c_VkDevice; |
| 255 | counter<VkInstance> c_VkInstance; |
| 256 | counter<VkQueue> c_VkQueue; |
| Mike Stroyan | 31c50c8 | 2016-01-29 15:09:04 -0700 | [diff] [blame] | 257 | #ifdef DISTINCT_NONDISPATCHABLE_HANDLES |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 258 | counter<VkBuffer> c_VkBuffer; |
| 259 | counter<VkBufferView> c_VkBufferView; |
| 260 | counter<VkCommandPool> c_VkCommandPool; |
| 261 | counter<VkDescriptorPool> c_VkDescriptorPool; |
| 262 | counter<VkDescriptorSet> c_VkDescriptorSet; |
| 263 | counter<VkDescriptorSetLayout> c_VkDescriptorSetLayout; |
| 264 | counter<VkDeviceMemory> c_VkDeviceMemory; |
| 265 | counter<VkEvent> c_VkEvent; |
| 266 | counter<VkFence> c_VkFence; |
| 267 | counter<VkFramebuffer> c_VkFramebuffer; |
| 268 | counter<VkImage> c_VkImage; |
| 269 | counter<VkImageView> c_VkImageView; |
| 270 | counter<VkPipeline> c_VkPipeline; |
| 271 | counter<VkPipelineCache> c_VkPipelineCache; |
| 272 | counter<VkPipelineLayout> c_VkPipelineLayout; |
| 273 | counter<VkQueryPool> c_VkQueryPool; |
| 274 | counter<VkRenderPass> c_VkRenderPass; |
| 275 | counter<VkSampler> c_VkSampler; |
| 276 | counter<VkSemaphore> c_VkSemaphore; |
| 277 | counter<VkShaderModule> c_VkShaderModule; |
| 278 | counter<VkDebugReportCallbackEXT> c_VkDebugReportCallbackEXT; |
| Mark Lobodzinski | 2d58982 | 2016-12-12 09:44:34 -0700 | [diff] [blame] | 279 | counter<VkObjectTableNVX> c_VkObjectTableNVX; |
| Mark Lobodzinski | 729a8d3 | 2017-01-26 12:16:30 -0700 | [diff] [blame] | 280 | counter<VkIndirectCommandsLayoutNVX> c_VkIndirectCommandsLayoutNVX; |
| Mark Lobodzinski | 9c14780 | 2017-02-10 08:34:54 -0700 | [diff] [blame] | 281 | counter<VkDisplayKHR> c_VkDisplayKHR; |
| 282 | counter<VkDisplayModeKHR> c_VkDisplayModeKHR; |
| 283 | counter<VkSurfaceKHR> c_VkSurfaceKHR; |
| 284 | counter<VkSwapchainKHR> c_VkSwapchainKHR; |
| Mark Young | 0f183a8 | 2017-02-28 09:58:04 -0700 | [diff] [blame] | 285 | counter<VkDescriptorUpdateTemplateKHR> c_VkDescriptorUpdateTemplateKHR; |
| Mike Schuchardt | a6b8bdb | 2017-09-05 16:10:20 -0600 | [diff] [blame] | 286 | counter<VkValidationCacheEXT> c_VkValidationCacheEXT; |
| Lenny Komow | b79f04a | 2017-09-18 17:07:00 -0600 | [diff] [blame] | 287 | counter<VkSamplerYcbcrConversionKHR> c_VkSamplerYcbcrConversionKHR; |
| Mark Young | 6ba8abe | 2017-11-09 10:37:04 -0700 | [diff] [blame] | 288 | counter<VkDebugUtilsMessengerEXT> c_VkDebugUtilsMessengerEXT; |
| Mark Lobodzinski | 64318ba | 2017-01-26 13:34:13 -0700 | [diff] [blame] | 289 | #else // DISTINCT_NONDISPATCHABLE_HANDLES |
| Mike Stroyan | 31c50c8 | 2016-01-29 15:09:04 -0700 | [diff] [blame] | 290 | counter<uint64_t> c_uint64_t; |
| Mark Lobodzinski | 64318ba | 2017-01-26 13:34:13 -0700 | [diff] [blame] | 291 | #endif // DISTINCT_NONDISPATCHABLE_HANDLES |
| Mark Lobodzinski | 2d58982 | 2016-12-12 09:44:34 -0700 | [diff] [blame] | 292 | |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 293 | layer_data() |
| Mark Lobodzinski | 64318ba | 2017-01-26 13:34:13 -0700 | [diff] [blame] | 294 | : report_data(nullptr), |
| Mark Young | 6ba8abe | 2017-11-09 10:37:04 -0700 | [diff] [blame] | 295 | num_tmp_report_callbacks(0), |
| 296 | tmp_report_create_infos(nullptr), |
| 297 | tmp_report_callbacks(nullptr), |
| 298 | num_tmp_debug_messengers(0), |
| 299 | tmp_messenger_create_infos(nullptr), |
| 300 | tmp_debug_messengers(nullptr), |
| Ian Elliott | ed6b5ac | 2016-04-28 09:08:13 -0600 | [diff] [blame] | 301 | c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT), |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 302 | c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT), |
| 303 | c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT), |
| 304 | c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT), |
| Mike Stroyan | 31c50c8 | 2016-01-29 15:09:04 -0700 | [diff] [blame] | 305 | #ifdef DISTINCT_NONDISPATCHABLE_HANDLES |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 306 | c_VkBuffer("VkBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT), |
| 307 | c_VkBufferView("VkBufferView", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT), |
| 308 | c_VkCommandPool("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT), |
| 309 | c_VkDescriptorPool("VkDescriptorPool", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT), |
| 310 | c_VkDescriptorSet("VkDescriptorSet", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT), |
| 311 | c_VkDescriptorSetLayout("VkDescriptorSetLayout", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT), |
| 312 | c_VkDeviceMemory("VkDeviceMemory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT), |
| Mark Lobodzinski | 64318ba | 2017-01-26 13:34:13 -0700 | [diff] [blame] | 313 | c_VkEvent("VkEvent", VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT), |
| 314 | c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT), |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 315 | c_VkFramebuffer("VkFramebuffer", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT), |
| 316 | c_VkImage("VkImage", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT), |
| 317 | c_VkImageView("VkImageView", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT), |
| 318 | c_VkPipeline("VkPipeline", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT), |
| 319 | c_VkPipelineCache("VkPipelineCache", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT), |
| 320 | c_VkPipelineLayout("VkPipelineLayout", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT), |
| 321 | c_VkQueryPool("VkQueryPool", VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT), |
| 322 | c_VkRenderPass("VkRenderPass", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT), |
| 323 | c_VkSampler("VkSampler", VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT), |
| 324 | c_VkSemaphore("VkSemaphore", VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT), |
| 325 | c_VkShaderModule("VkShaderModule", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT), |
| Mark Lobodzinski | 2d58982 | 2016-12-12 09:44:34 -0700 | [diff] [blame] | 326 | c_VkDebugReportCallbackEXT("VkDebugReportCallbackEXT", VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT), |
| 327 | c_VkObjectTableNVX("VkObjectTableNVX", VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT), |
| Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 328 | c_VkIndirectCommandsLayoutNVX("VkIndirectCommandsLayoutNVX", |
| 329 | VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT), |
| Mark Lobodzinski | 9c14780 | 2017-02-10 08:34:54 -0700 | [diff] [blame] | 330 | c_VkDisplayKHR("VkDisplayKHR", VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT), |
| 331 | c_VkDisplayModeKHR("VkDisplayModeKHR", VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT), |
| 332 | c_VkSurfaceKHR("VkSurfaceKHR", VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT), |
| Mark Young | 0f183a8 | 2017-02-28 09:58:04 -0700 | [diff] [blame] | 333 | c_VkSwapchainKHR("VkSwapchainKHR", VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT), |
| Dave Houlton | a9df0ce | 2018-02-07 10:51:23 -0700 | [diff] [blame] | 334 | c_VkDescriptorUpdateTemplateKHR("VkDescriptorUpdateTemplateKHR", |
| 335 | VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_KHR_EXT), |
| Mark Young | 6ba8abe | 2017-11-09 10:37:04 -0700 | [diff] [blame] | 336 | c_VkSamplerYcbcrConversionKHR("VkSamplerYcbcrConversionKHR", |
| 337 | VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_KHR_EXT), |
| 338 | c_VkDebugUtilsMessengerEXT("VkDebugUtilsMessengerEXT", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) |
| Mark Lobodzinski | 64318ba | 2017-01-26 13:34:13 -0700 | [diff] [blame] | 339 | #else // DISTINCT_NONDISPATCHABLE_HANDLES |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 340 | c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT) |
| Mark Lobodzinski | 64318ba | 2017-01-26 13:34:13 -0700 | [diff] [blame] | 341 | #endif // DISTINCT_NONDISPATCHABLE_HANDLES |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 342 | {}; |
| Cody Northrop | 55443ef | 2015-09-28 15:09:32 -0600 | [diff] [blame] | 343 | }; |
| Mike Stroyan | 313f7e6 | 2015-08-10 16:42:53 -0600 | [diff] [blame] | 344 | |
// Generates the four thin dispatch helpers (start/finish x write/read) that
// route a handle of the given type to its per-type counter in layer_data.
#define WRAPPER(type)                                                                                                  \
    static void startWriteObject(struct layer_data *my_data, type object) {                                            \
        my_data->c_##type.startWrite(my_data->report_data, object);                                                    \
    }                                                                                                                  \
    static void finishWriteObject(struct layer_data *my_data, type object) { my_data->c_##type.finishWrite(object); }  \
    static void startReadObject(struct layer_data *my_data, type object) {                                             \
        my_data->c_##type.startRead(my_data->report_data, object);                                                     \
    }                                                                                                                  \
    static void finishReadObject(struct layer_data *my_data, type object) { my_data->c_##type.finishRead(object); }

// One wrapper set per tracked handle type; dispatchable handles first.
// (VkCommandBuffer is handled by hand below because it implicitly uses its pool.)
WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(VkBuffer)
WRAPPER(VkBufferView)
WRAPPER(VkCommandPool)
WRAPPER(VkDescriptorPool)
WRAPPER(VkDescriptorSet)
WRAPPER(VkDescriptorSetLayout)
WRAPPER(VkDeviceMemory)
WRAPPER(VkEvent)
WRAPPER(VkFence)
WRAPPER(VkFramebuffer)
WRAPPER(VkImage)
WRAPPER(VkImageView)
WRAPPER(VkPipeline)
WRAPPER(VkPipelineCache)
WRAPPER(VkPipelineLayout)
WRAPPER(VkQueryPool)
WRAPPER(VkRenderPass)
WRAPPER(VkSampler)
WRAPPER(VkSemaphore)
WRAPPER(VkShaderModule)
WRAPPER(VkDebugReportCallbackEXT)
WRAPPER(VkObjectTableNVX)
WRAPPER(VkIndirectCommandsLayoutNVX)
WRAPPER(VkDisplayKHR)
WRAPPER(VkDisplayModeKHR)
WRAPPER(VkSurfaceKHR)
WRAPPER(VkSwapchainKHR)
WRAPPER(VkDescriptorUpdateTemplateKHR)
WRAPPER(VkValidationCacheEXT)
WRAPPER(VkSamplerYcbcrConversionKHR)
WRAPPER(VkDebugUtilsMessengerEXT)
#else   // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif  // DISTINCT_NONDISPATCHABLE_HANDLES
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 393 | |
// Layer bookkeeping per dispatch key (presumably the loader's dispatch key —
// the lookup side is not visible in this header).
static std::unordered_map<void *, layer_data *> layer_data_map;
// Guards command_pool_map.
static std::mutex command_pool_lock;
// Pool each command buffer was allocated from, so a buffer access can also
// be counted against the owning pool.
static std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;
| 397 | |
| 398 | // VkCommandBuffer needs check for implicit use of command pool |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 399 | static void startWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) { |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 400 | if (lockPool) { |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 401 | std::unique_lock<std::mutex> lock(command_pool_lock); |
| Mike Stroyan | ae8e8a7 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 402 | VkCommandPool pool = command_pool_map[object]; |
| Jeremy Hayes | b350beb | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 403 | lock.unlock(); |
| Mike Stroyan | ae8e8a7 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 404 | startWriteObject(my_data, pool); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 405 | } |
| 406 | my_data->c_VkCommandBuffer.startWrite(my_data->report_data, object); |
| 407 | } |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 408 | static void finishWriteObject(struct layer_data *my_data, VkCommandBuffer object, bool lockPool = true) { |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 409 | my_data->c_VkCommandBuffer.finishWrite(object); |
| 410 | if (lockPool) { |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 411 | std::unique_lock<std::mutex> lock(command_pool_lock); |
| Mike Stroyan | ae8e8a7 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 412 | VkCommandPool pool = command_pool_map[object]; |
| Jeremy Hayes | b350beb | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 413 | lock.unlock(); |
| Mike Stroyan | ae8e8a7 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 414 | finishWriteObject(my_data, pool); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 415 | } |
| 416 | } |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 417 | static void startReadObject(struct layer_data *my_data, VkCommandBuffer object) { |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 418 | std::unique_lock<std::mutex> lock(command_pool_lock); |
| Mike Stroyan | ae8e8a7 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 419 | VkCommandPool pool = command_pool_map[object]; |
| Jeremy Hayes | b350beb | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 420 | lock.unlock(); |
| Mike Stroyan | ae8e8a7 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 421 | startReadObject(my_data, pool); |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 422 | my_data->c_VkCommandBuffer.startRead(my_data->report_data, object); |
| 423 | } |
| Jon Ashburn | 5484e0c | 2016-03-08 17:48:44 -0700 | [diff] [blame] | 424 | static void finishReadObject(struct layer_data *my_data, VkCommandBuffer object) { |
| Mike Stroyan | 845bdc4 | 2015-11-02 15:30:20 -0700 | [diff] [blame] | 425 | my_data->c_VkCommandBuffer.finishRead(object); |
| Mike Stroyan | b3dd790 | 2016-06-30 13:21:37 -0600 | [diff] [blame] | 426 | std::unique_lock<std::mutex> lock(command_pool_lock); |
| Mike Stroyan | ae8e8a7 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 427 | VkCommandPool pool = command_pool_map[object]; |
| Jeremy Hayes | b350beb | 2016-04-12 13:48:52 -0600 | [diff] [blame] | 428 | lock.unlock(); |
| Mike Stroyan | ae8e8a7 | 2016-02-08 10:27:55 -0700 | [diff] [blame] | 429 | finishReadObject(my_data, pool); |
| Mike Stroyan | 313f7e6 | 2015-08-10 16:42:53 -0600 | [diff] [blame] | 430 | } |
| Mark Lobodzinski | 64318ba | 2017-01-26 13:34:13 -0700 | [diff] [blame] | 431 | #endif // THREADING_H |