#!/usr/bin/python3 -i
#
# Copyright (c) 2015-2019 The Khronos Group Inc.
# Copyright (c) 2015-2019 Valve Corporation
# Copyright (c) 2015-2019 LunarG, Inc.
# Copyright (c) 2015-2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Mike Stroyan <stroyan@google.com>
# Author: Mark Lobodzinski <mark@lunarg.com>

import os,re,sys
from generator import *
from common_codegen import *

# ThreadGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by ThreadOutputGenerator objects during threading
# layer generation.
#
# Additional members
#   prefixText - list of strings to prefix generated header with
#     (usually a copyright statement + calling convention macros).
#   protectFile - True if multiple inclusion protection should be
#     generated (based on the filename) around the entire header.
#   protectFeature - True if #ifndef..#endif protection should be
#     generated around a feature interface in the header file.
#   genFuncPointers - True if function pointer typedefs should be
#     generated
#   protectProto - If conditional protection should be generated
#     around prototype declarations, set to either '#ifdef'
#     to require opt-in (#ifdef protectProtoStr) or '#ifndef'
#     to require opt-out (#ifndef protectProtoStr). Otherwise
#     set to None.
#   protectProtoStr - #ifdef/#ifndef symbol to use around prototype
#     declarations, if protectProto is set
#   apicall - string to use for the function declaration prefix,
#     such as APICALL on Windows.
#   apientry - string to use for the calling convention macro,
#     in typedefs, such as APIENTRY.
#   apientryp - string to use for the calling convention macro
#     in function pointer typedefs, such as APIENTRYP.
#   indentFuncProto - True if prototype declarations should put each
#     parameter on a separate line
#   indentFuncPointer - True if typedefed function pointers should put each
#     parameter on a separate line
#   alignFuncParam - if nonzero and parameters are being put on a
#     separate line, align parameter names at the specified column
class ThreadGeneratorOptions(GeneratorOptions):
    def __init__(self,
                 conventions = None,
                 filename = None,
                 directory = '.',
                 apiname = None,
                 profile = None,
                 versions = '.*',
                 emitversions = '.*',
                 defaultExtensions = None,
                 addExtensions = None,
                 removeExtensions = None,
                 emitExtensions = None,
                 sortProcedure = regSortFeatures,
                 prefixText = "",
                 genFuncPointers = True,
                 protectFile = True,
                 protectFeature = True,
                 apicall = '',
                 apientry = '',
                 apientryp = '',
                 indentFuncProto = True,
                 indentFuncPointer = False,
                 alignFuncParam = 0,
                 expandEnumerants = True):
        GeneratorOptions.__init__(self, conventions, filename, directory, apiname, profile,
                                  versions, emitversions, defaultExtensions,
                                  addExtensions, removeExtensions, emitExtensions, sortProcedure)
        self.prefixText = prefixText
        self.genFuncPointers = genFuncPointers
        self.protectFile = protectFile
        self.protectFeature = protectFeature
        self.apicall = apicall
        self.apientry = apientry
        self.apientryp = apientryp
        self.indentFuncProto = indentFuncProto
        self.indentFuncPointer = indentFuncPointer
        self.alignFuncParam = alignFuncParam
        self.expandEnumerants = expandEnumerants

# ThreadOutputGenerator - subclass of OutputGenerator.
# Generates Thread checking framework
#
# ---- methods ----
# ThreadOutputGenerator(errFile, warnFile, diagFile) - args as for
#   OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class ThreadOutputGenerator(OutputGenerator):
    """Generate specified API interfaces in a specific style, such as a C header"""

    inline_copyright_message = """
// This file is ***GENERATED***. Do Not Edit.
// See thread_safety_generator.py for modifications.

/* Copyright (c) 2015-2019 The Khronos Group Inc.
 * Copyright (c) 2015-2019 Valve Corporation
 * Copyright (c) 2015-2019 LunarG, Inc.
 * Copyright (c) 2015-2019 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Author: Mark Lobodzinski <mark@lunarg.com>
 */"""

    # Note that the inline_custom_header_preamble template below contains three embedded template expansion identifiers.
    # These get replaced with generated code sections, and are labeled:
    #  o COUNTER_CLASS_DEFINITIONS_TEMPLATE
    #  o COUNTER_CLASS_INSTANCES_TEMPLATE
    #  o COUNTER_CLASS_BODIES_TEMPLATE
    inline_custom_header_preamble = """
#pragma once

#include <condition_variable>
#include <mutex>
#include <vector>
#include <unordered_set>
#include <string>

VK_DEFINE_NON_DISPATCHABLE_HANDLE(DISTINCT_NONDISPATCHABLE_PHONY_HANDLE)
// The following condition must match the vulkan_core.h condition guarding VK_DEFINE_NON_DISPATCHABLE_HANDLE
#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || \
    defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
// If pointers are 64-bit, then there can be separate counters for each
// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedef uint64_t.
#define DISTINCT_NONDISPATCHABLE_HANDLES
// Make sure we catch any disagreement between us and the vulkan definition
static_assert(std::is_pointer<DISTINCT_NONDISPATCHABLE_PHONY_HANDLE>::value,
              "Mismatched non-dispatchable handle, expected pointer type.");
#else
// Make sure we catch any disagreement between us and the vulkan definition
static_assert(std::is_same<uint64_t, DISTINCT_NONDISPATCHABLE_PHONY_HANDLE>::value,
              "Mismatched non-dispatchable handle, expected uint64_t.");
#endif

// Suppress unused warning on Linux
#if defined(__GNUC__)
#define DECORATE_UNUSED __attribute__((unused))
#else
#define DECORATE_UNUSED
#endif

// clang-format off
static const char DECORATE_UNUSED *kVUID_Threading_Info = "UNASSIGNED-Threading-Info";
static const char DECORATE_UNUSED *kVUID_Threading_MultipleThreads = "UNASSIGNED-Threading-MultipleThreads";
static const char DECORATE_UNUSED *kVUID_Threading_SingleThreadReuse = "UNASSIGNED-Threading-SingleThreadReuse";
// clang-format on

#undef DECORATE_UNUSED

struct object_use_data {
    loader_platform_thread_id thread;
    int reader_count;
    int writer_count;
};

// This is a wrapper around unordered_map that optimizes for the common case
// of only containing a single element. The "first" element's use is stored
// inline in the class and doesn't require hashing or memory (de)allocation.
// TODO: Consider generalizing this from one element to N elements (where N
// is a template parameter).
template <typename Key, typename T>
class small_unordered_map {

    bool first_data_allocated;
    Key first_data_key;
    T first_data;

    std::unordered_map<Key, T> uses;

public:
    small_unordered_map() : first_data_allocated(false) {}

    bool contains(const Key& object) const {
        if (first_data_allocated && object == first_data_key) {
            return true;
        // check size() first to avoid hashing object unnecessarily.
        } else if (uses.size() == 0) {
            return false;
        } else {
            return uses.find(object) != uses.end();
        }
    }

    T& operator[](const Key& object) {
        if (first_data_allocated && first_data_key == object) {
            return first_data;
        } else if (!first_data_allocated && uses.size() == 0) {
            first_data_allocated = true;
            first_data_key = object;
            return first_data;
        } else {
            return uses[object];
        }
    }

    typename std::unordered_map<Key, T>::size_type erase(const Key& object) {
        if (first_data_allocated && first_data_key == object) {
            first_data_allocated = false;
            return 1;
        } else {
            return uses.erase(object);
        }
    }
};
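
// Illustrative usage sketch (not part of the exported interface): a counter
// that tracks, say, VkFence handles typically touches only one fence per call,
// so the single entry stays inline and never hits the fallback map:
//
//     small_unordered_map<VkFence, object_use_data> uses;
//     uses[fence].writer_count = 1;   // first key: stored inline, no hashing or allocation
//     uses.erase(fence);              // frees the inline slot for reuse
//
// Only when a second, different key is inserted while the first is still live
// does the std::unordered_map fallback get involved.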

#define THREAD_SAFETY_BUCKETS_LOG2 6
#define THREAD_SAFETY_BUCKETS (1 << THREAD_SAFETY_BUCKETS_LOG2)

template <typename T> inline uint32_t ThreadSafetyHashObject(T object)
{
    uint32_t hash = (uint32_t)(uint64_t)object;
    hash ^= (hash >> THREAD_SAFETY_BUCKETS_LOG2) ^ (hash >> (2*THREAD_SAFETY_BUCKETS_LOG2));
    hash &= (THREAD_SAFETY_BUCKETS-1);
    return hash;
}
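// Illustrative note: the hash truncates the handle to 32 bits, folds higher bits
// down with two shifted XORs, and masks to the low THREAD_SAFETY_BUCKETS_LOG2 bits,
// yielding a bucket index in [0, THREAD_SAFETY_BUCKETS - 1] that spreads handles
// across the per-bucket locks used below.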

template <typename T>
class counter {
public:
    const char *typeName;
    VkDebugReportObjectTypeEXT objectType;
    debug_report_data **report_data;

    // Per-bucket locking, to reduce contention.
    small_unordered_map<T, object_use_data> uses[THREAD_SAFETY_BUCKETS];
    std::mutex counter_lock[THREAD_SAFETY_BUCKETS];
    std::condition_variable counter_condition[THREAD_SAFETY_BUCKETS];

    void StartWrite(T object) {
        if (object == VK_NULL_HANDLE) {
            return;
        }
        uint32_t h = ThreadSafetyHashObject(object);
        bool skip = false;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        std::unique_lock<std::mutex> lock(counter_lock[h]);
        if (!uses[h].contains(object)) {
            // There is no current use of the object. Record writer thread.
            struct object_use_data *use_data = &uses[h][object];
            use_data->reader_count = 0;
            use_data->writer_count = 1;
            use_data->thread = tid;
        } else {
            struct object_use_data *use_data = &uses[h][object];
            if (use_data->reader_count == 0) {
                // There are no readers. Two writers just collided.
                if (use_data->thread != tid) {
                    skip |= log_msg(*report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                    kVUID_Threading_MultipleThreads,
                                    "THREADING ERROR : object of type %s is simultaneously used in "
                                    "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
                                    typeName, (uint64_t)use_data->thread, (uint64_t)tid);
                    if (skip) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses[h].contains(object)) {
                            counter_condition[h].wait(lock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *new_use_data = &uses[h][object];
                        new_use_data->thread = tid;
                        new_use_data->reader_count = 0;
                        new_use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            } else {
                // There are readers. This writer collided with them.
                if (use_data->thread != tid) {
                    skip |= log_msg(*report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                                    kVUID_Threading_MultipleThreads,
                                    "THREADING ERROR : object of type %s is simultaneously used in "
                                    "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
                                    typeName, (uint64_t)use_data->thread, (uint64_t)tid);
                    if (skip) {
                        // Wait for thread-safe access to object instead of skipping call.
                        while (uses[h].contains(object)) {
                            counter_condition[h].wait(lock);
                        }
                        // There is now no current use of the object. Record writer thread.
                        struct object_use_data *new_use_data = &uses[h][object];
                        new_use_data->thread = tid;
                        new_use_data->reader_count = 0;
                        new_use_data->writer_count = 1;
                    } else {
                        // Continue with an unsafe use of the object.
                        use_data->thread = tid;
                        use_data->writer_count += 1;
                    }
                } else {
                    // This is either safe multiple use in one call, or recursive use.
                    // There is no way to make recursion safe. Just forge ahead.
                    use_data->writer_count += 1;
                }
            }
        }
    }

    void FinishWrite(T object) {
        if (object == VK_NULL_HANDLE) {
            return;
        }
        uint32_t h = ThreadSafetyHashObject(object);
        // Object is no longer in use
        std::unique_lock<std::mutex> lock(counter_lock[h]);
        uses[h][object].writer_count -= 1;
        if ((uses[h][object].reader_count == 0) && (uses[h][object].writer_count == 0)) {
            uses[h].erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        lock.unlock();
        counter_condition[h].notify_all();
    }

    void StartRead(T object) {
        if (object == VK_NULL_HANDLE) {
            return;
        }
        uint32_t h = ThreadSafetyHashObject(object);
        bool skip = false;
        loader_platform_thread_id tid = loader_platform_get_thread_id();
        std::unique_lock<std::mutex> lock(counter_lock[h]);
        if (!uses[h].contains(object)) {
            // There is no current use of the object. Record reader count
            struct object_use_data *use_data = &uses[h][object];
            use_data->reader_count = 1;
            use_data->writer_count = 0;
            use_data->thread = tid;
        } else if (uses[h][object].writer_count > 0 && uses[h][object].thread != tid) {
            // There is a writer of the object.
            skip |= log_msg(*report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
                            kVUID_Threading_MultipleThreads,
                            "THREADING ERROR : object of type %s is simultaneously used in "
                            "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
                            typeName, (uint64_t)uses[h][object].thread, (uint64_t)tid);
            if (skip) {
                // Wait for thread-safe access to object instead of skipping call.
                while (uses[h].contains(object)) {
                    counter_condition[h].wait(lock);
                }
                // There is no current use of the object. Record reader count
                struct object_use_data *use_data = &uses[h][object];
                use_data->reader_count = 1;
                use_data->writer_count = 0;
                use_data->thread = tid;
            } else {
                uses[h][object].reader_count += 1;
            }
        } else {
            // There are other readers of the object. Increase reader count
            uses[h][object].reader_count += 1;
        }
    }
    void FinishRead(T object) {
        if (object == VK_NULL_HANDLE) {
            return;
        }
        uint32_t h = ThreadSafetyHashObject(object);
        std::unique_lock<std::mutex> lock(counter_lock[h]);
        uses[h][object].reader_count -= 1;
        if ((uses[h][object].reader_count == 0) && (uses[h][object].writer_count == 0)) {
            uses[h].erase(object);
        }
        // Notify any waiting threads that this object may be safe to use
        lock.unlock();
        counter_condition[h].notify_all();
    }
    counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, debug_report_data **rep_data = nullptr) {
        typeName = name;
        objectType = type;
        report_data = rep_data;
    }
};
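
// Illustrative usage sketch (not emitted by the layer): a counter brackets every
// externally-synchronized use of a handle, flagging concurrent use from another thread.
//
//     counter<VkFence> c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, &report_data);
//     c_VkFence.StartWrite(fence);    // reports if another thread currently holds 'fence'
//     // ... call down the dispatch chain ...
//     c_VkFence.FinishWrite(fence);
//
// StartRead/FinishRead work the same way but permit any number of concurrent readers.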


class ThreadSafety : public ValidationObject {
public:

    // Override chassis read/write locks for this validation object
    // This override constructs a deferred lock, i.e. the mutex is not actually acquired.
    std::unique_lock<std::mutex> write_lock() {
        return std::unique_lock<std::mutex>(validation_object_mutex, std::defer_lock);
    }

    std::mutex command_pool_lock[THREAD_SAFETY_BUCKETS];
    std::unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map[THREAD_SAFETY_BUCKETS];

    counter<VkCommandBuffer> c_VkCommandBuffer;
    counter<VkDevice> c_VkDevice;
    counter<VkInstance> c_VkInstance;
    counter<VkQueue> c_VkQueue;
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES

    // Special entry to allow tracking of command pool Reset and Destroy
    counter<VkCommandPool> c_VkCommandPoolContents;
COUNTER_CLASS_DEFINITIONS_TEMPLATE

#else   // DISTINCT_NONDISPATCHABLE_HANDLES
    // Special entry to allow tracking of command pool Reset and Destroy
    counter<uint64_t> c_VkCommandPoolContents;

    counter<uint64_t> c_uint64_t;
#endif  // DISTINCT_NONDISPATCHABLE_HANDLES

    ThreadSafety()
        : c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, &report_data),
          c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, &report_data),
          c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, &report_data),
          c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, &report_data),
          c_VkCommandPoolContents("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, &report_data),

#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
COUNTER_CLASS_INSTANCES_TEMPLATE


#else   // DISTINCT_NONDISPATCHABLE_HANDLES
          c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, &report_data)
#endif  // DISTINCT_NONDISPATCHABLE_HANDLES
    {};

#define WRAPPER(type) \
    void StartWriteObject(type object) { \
        c_##type.StartWrite(object); \
    } \
    void FinishWriteObject(type object) { \
        c_##type.FinishWrite(object); \
    } \
    void StartReadObject(type object) { \
        c_##type.StartRead(object); \
    } \
    void FinishReadObject(type object) { \
        c_##type.FinishRead(object); \
    }
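
// For reference, WRAPPER(VkDevice) expands to the four forwarding members:
//     void StartWriteObject(VkDevice object)  { c_VkDevice.StartWrite(object); }
//     void FinishWriteObject(VkDevice object) { c_VkDevice.FinishWrite(object); }
//     void StartReadObject(VkDevice object)   { c_VkDevice.StartRead(object); }
//     void FinishReadObject(VkDevice object)  { c_VkDevice.FinishRead(object); }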

WRAPPER(VkDevice)
WRAPPER(VkInstance)
WRAPPER(VkQueue)
#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
COUNTER_CLASS_BODIES_TEMPLATE

#else   // DISTINCT_NONDISPATCHABLE_HANDLES
WRAPPER(uint64_t)
#endif  // DISTINCT_NONDISPATCHABLE_HANDLES

    // VkCommandBuffer needs check for implicit use of command pool
    void StartWriteObject(VkCommandBuffer object, bool lockPool = true) {
        if (lockPool) {
            uint32_t h = ThreadSafetyHashObject(object);
            std::unique_lock<std::mutex> lock(command_pool_lock[h]);
            VkCommandPool pool = command_pool_map[h][object];
            lock.unlock();
            StartWriteObject(pool);
        }
        c_VkCommandBuffer.StartWrite(object);
    }
    void FinishWriteObject(VkCommandBuffer object, bool lockPool = true) {
        c_VkCommandBuffer.FinishWrite(object);
        if (lockPool) {
            uint32_t h = ThreadSafetyHashObject(object);
            std::unique_lock<std::mutex> lock(command_pool_lock[h]);
            VkCommandPool pool = command_pool_map[h][object];
            lock.unlock();
            FinishWriteObject(pool);
        }
    }
    void StartReadObject(VkCommandBuffer object) {
        uint32_t h = ThreadSafetyHashObject(object);
        std::unique_lock<std::mutex> lock(command_pool_lock[h]);
        VkCommandPool pool = command_pool_map[h][object];
        lock.unlock();
        // We set up a read guard against the "Contents" counter to catch conflict vs. vkResetCommandPool and vkDestroyCommandPool
        // while *not* establishing a read guard against the command pool counter itself to avoid false positives for
        // non-externally sync'd command buffers
        c_VkCommandPoolContents.StartRead(pool);
        c_VkCommandBuffer.StartRead(object);
    }
    void FinishReadObject(VkCommandBuffer object) {
        uint32_t h = ThreadSafetyHashObject(object);
        c_VkCommandBuffer.FinishRead(object);
        std::unique_lock<std::mutex> lock(command_pool_lock[h]);
        VkCommandPool pool = command_pool_map[h][object];
        lock.unlock();
        c_VkCommandPoolContents.FinishRead(pool);
    } """


    inline_custom_source_preamble = """
void ThreadSafety::PreCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
                                                       VkCommandBuffer *pCommandBuffers) {
    StartReadObject(device);
    StartWriteObject(pAllocateInfo->commandPool);
}

void ThreadSafety::PostCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
                                                        VkCommandBuffer *pCommandBuffers, VkResult result) {
    FinishReadObject(device);
    FinishWriteObject(pAllocateInfo->commandPool);

    // Record mapping from command buffer to command pool
    if (pCommandBuffers) {
        for (uint32_t index = 0; index < pAllocateInfo->commandBufferCount; index++) {
            uint32_t h = ThreadSafetyHashObject(pCommandBuffers[index]);
            std::lock_guard<std::mutex> lock(command_pool_lock[h]);
            command_pool_map[h][pCommandBuffers[index]] = pAllocateInfo->commandPool;
        }
    }
}

void ThreadSafety::PreCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                       VkDescriptorSet *pDescriptorSets) {
    StartReadObject(device);
    StartWriteObject(pAllocateInfo->descriptorPool);
    // Host access to pAllocateInfo::descriptorPool must be externally synchronized
}

void ThreadSafety::PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
                                                        VkDescriptorSet *pDescriptorSets, VkResult result) {
    FinishReadObject(device);
    FinishWriteObject(pAllocateInfo->descriptorPool);
    // Host access to pAllocateInfo::descriptorPool must be externally synchronized
}

void ThreadSafety::PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                                   const VkCommandBuffer *pCommandBuffers) {
    const bool lockCommandPool = false;  // pool is already directly locked
    StartReadObject(device);
    StartWriteObject(commandPool);
    if (pCommandBuffers) {
        // Even though we're immediately "finishing" below, we still are testing for concurrency with any call in process
        // so this isn't a no-op
        for (uint32_t index = 0; index < commandBufferCount; index++) {
            StartWriteObject(pCommandBuffers[index], lockCommandPool);
        }
        // The driver may immediately reuse command buffers in another thread.
        // These updates need to be done before calling down to the driver.
        for (uint32_t index = 0; index < commandBufferCount; index++) {
            FinishWriteObject(pCommandBuffers[index], lockCommandPool);
        }
        // Holding the lock for the shortest time while we update the map
        for (uint32_t index = 0; index < commandBufferCount; index++) {
            uint32_t h = ThreadSafetyHashObject(pCommandBuffers[index]);
            std::lock_guard<std::mutex> lock(command_pool_lock[h]);
            command_pool_map[h].erase(pCommandBuffers[index]);
        }
    }
}

void ThreadSafety::PostCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
                                                    const VkCommandBuffer *pCommandBuffers) {
    FinishReadObject(device);
    FinishWriteObject(commandPool);
}

void ThreadSafety::PreCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
    StartReadObject(device);
    StartWriteObject(commandPool);
    // Check for any uses of non-externally sync'd command buffers (for example from vkCmdExecuteCommands)
    c_VkCommandPoolContents.StartWrite(commandPool);
    // Host access to commandPool must be externally synchronized
}

void ThreadSafety::PostCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags, VkResult result) {
    FinishReadObject(device);
    FinishWriteObject(commandPool);
    c_VkCommandPoolContents.FinishWrite(commandPool);
    // Host access to commandPool must be externally synchronized
}

void ThreadSafety::PreCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    StartReadObject(device);
    StartWriteObject(commandPool);
    // Check for any uses of non-externally sync'd command buffers (for example from vkCmdExecuteCommands)
    c_VkCommandPoolContents.StartWrite(commandPool);
    // Host access to commandPool must be externally synchronized
}

void ThreadSafety::PostCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
    FinishReadObject(device);
    FinishWriteObject(commandPool);
    c_VkCommandPoolContents.FinishWrite(commandPool);
}

// GetSwapchainImages can return a non-zero count with a NULL pSwapchainImages pointer. Let's avoid crashes by ignoring
// pSwapchainImages.
void ThreadSafety::PreCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                      VkImage *pSwapchainImages) {
    StartReadObject(device);
    StartReadObject(swapchain);
}

void ThreadSafety::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
                                                       VkImage *pSwapchainImages, VkResult result) {
    FinishReadObject(device);
    FinishReadObject(swapchain);
}

"""


    # This is an ordered list of sections in the header file.
    ALL_SECTIONS = ['command']
    def __init__(self,
                 errFile = sys.stderr,
                 warnFile = sys.stderr,
                 diagFile = sys.stdout):
        OutputGenerator.__init__(self, errFile, warnFile, diagFile)
        # Internal state - accumulators for different inner block text
        self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
        self.non_dispatchable_types = set()
        self.object_to_debug_report_type = {
            'VkInstance' : 'VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT',
            'VkPhysicalDevice' : 'VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT',
            'VkDevice' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT',
            'VkQueue' : 'VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT',
            'VkSemaphore' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT',
            'VkCommandBuffer' : 'VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT',
            'VkFence' : 'VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT',
            'VkDeviceMemory' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT',
            'VkBuffer' : 'VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT',
            'VkImage' : 'VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT',
            'VkEvent' : 'VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT',
            'VkQueryPool' : 'VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT',
            'VkBufferView' : 'VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT',
            'VkImageView' : 'VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT',
            'VkShaderModule' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT',
            'VkPipelineCache' : 'VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT',
            'VkPipelineLayout' : 'VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT',
            'VkRenderPass' : 'VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT',
            'VkPipeline' : 'VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT',
            'VkDescriptorSetLayout' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT',
            'VkSampler' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT',
            'VkDescriptorPool' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT',
            'VkDescriptorSet' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT',
            'VkFramebuffer' : 'VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT',
            'VkCommandPool' : 'VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT',
            'VkSurfaceKHR' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT',
            'VkSwapchainKHR' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT',
            'VkDisplayKHR' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT',
            'VkDisplayModeKHR' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT',
            'VkObjectTableNVX' : 'VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT',
            'VkIndirectCommandsLayoutNVX' : 'VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT',
            'VkSamplerYcbcrConversion' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT',
            'VkDescriptorUpdateTemplate' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT',
            'VkAccelerationStructureNV' : 'VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT',
            'VkDebugReportCallbackEXT' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT',
            'VkValidationCacheEXT' : 'VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT' }

    # Check if the parameter passed in is a pointer to an array
    def paramIsArray(self, param):
        return param.attrib.get('len') is not None

    # Check if the parameter passed in is a pointer
    def paramIsPointer(self, param):
        ispointer = False
        for elem in param:
            if elem.tag == 'type' and elem.tail is not None and '*' in elem.tail:
                ispointer = True
        return ispointer
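
    # Illustrative only: in the Vulkan registry (vk.xml) a command parameter element
    # looks roughly like
    #     <param externsync="true"><type>VkFence</type> <name>fence</name></param>
    #     <param len="fenceCount">const <type>VkFence</type>* <name>pFences</name></param>
    # paramIsArray() keys off the 'len' attribute (second example), and
    # paramIsPointer() off a '*' in the tail text following the <type> element.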

    def makeThreadUseBlock(self, cmd, functionprefix):
        """Generate the thread-safety Start/Finish tracking block for a <command> element's parameters"""
        paramdecl = ''
        # Find and add any parameters that are thread unsafe
        params = cmd.findall('param')
        for param in params:
            paramname = param.find('name')
            if False: # self.paramIsPointer(param):
                paramdecl += '    // not watching use of pointer ' + paramname.text + '\n'
            else:
                externsync = param.attrib.get('externsync')
                if externsync == 'true':
                    if self.paramIsArray(param):
                        paramdecl += 'if (' + paramname.text + ') {\n'
                        paramdecl += '    for (uint32_t index=0; index < ' + param.attrib.get('len') + '; index++) {\n'
                        paramdecl += '        ' + functionprefix + 'WriteObject(' + paramname.text + '[index]);\n'
                        paramdecl += '    }\n'
                        paramdecl += '}\n'
                    else:
                        paramdecl += functionprefix + 'WriteObject(' + paramname.text + ');\n'
                elif (param.attrib.get('externsync')):
                    if self.paramIsArray(param):
                        # Externsync can list pointers to arrays of members to synchronize
                        paramdecl += 'if (' + paramname.text + ') {\n'
                        paramdecl += '    for (uint32_t index=0; index < ' + param.attrib.get('len') + '; index++) {\n'
                        second_indent = '    '
                        for member in externsync.split(","):
                            # Replace first empty [] in member name with index
                            element = member.replace('[]','[index]',1)
                            if '[]' in element:
                                # TODO: These null checks can be removed if threading ends up behind parameter
                                #       validation in layer order
                                element_ptr = element.split('[]')[0]
                                paramdecl += '    if (' + element_ptr + ') {\n'
                                # Replace any second empty [] in element name with inner array index based on mapping array
                                # names like "pSomeThings[]" to "someThingCount" array size. This could be more robust by
                                # mapping a param member name to a struct type and "len" attribute.
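                                # For example (illustrative), an externsync member such as
                                # "pBindInfo[].pWaitSemaphores[]" becomes "pBindInfo[index].pWaitSemaphores[]"
                                # above, and the inner loop bound derived below works out to
                                # "pBindInfo[index].waitSemaphoreCount".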
                                limit = element[0:element.find('s[]')] + 'Count'
                                dotp = limit.rfind('.p')
                                limit = limit[0:dotp+1] + limit[dotp+2:dotp+3].lower() + limit[dotp+3:]
                                paramdecl += '        for (uint32_t index2=0; index2 < '+limit+'; index2++) {\n'
                                element = element.replace('[]','[index2]')
                                second_indent = '        '
                                paramdecl += '    ' + second_indent + functionprefix + 'WriteObject(' + element + ');\n'
                                paramdecl += '        }\n'
                                paramdecl += '    }\n'
                            else:
                                paramdecl += '    ' + second_indent + functionprefix + 'WriteObject(' + element + ');\n'
                        paramdecl += '    }\n'
                        paramdecl += '}\n'
                    else:
                        # externsync can list members to synchronize
                        for member in externsync.split(","):
                            member = str(member).replace("::", "->")
                            member = str(member).replace(".", "->")
                            paramdecl += '    ' + functionprefix + 'WriteObject(' + member + ');\n'
                else:
                    paramtype = param.find('type')
                    if paramtype is not None:
                        paramtype = paramtype.text
                    else:
                        paramtype = 'None'
                    if paramtype in self.handle_types and paramtype != 'VkPhysicalDevice':
                        if self.paramIsArray(param) and ('pPipelines' != paramname.text):
                            # Add pointer dereference for array counts that are pointer values
                            dereference = ''
                            for candidate in params:
                                if param.attrib.get('len') == candidate.find('name').text:
                                    if self.paramIsPointer(candidate):
                                        dereference = '*'
                            param_len = str(param.attrib.get('len')).replace("::", "->")
                            paramdecl += 'if (' + paramname.text + ') {\n'
                            paramdecl += '    for (uint32_t index = 0; index < ' + dereference + param_len + '; index++) {\n'
                            paramdecl += '        ' + functionprefix + 'ReadObject(' + paramname.text + '[index]);\n'
                            paramdecl += '    }\n'
                            paramdecl += '}\n'
                        elif not self.paramIsPointer(param):
                            # Pointer params are often being created.
                            # They are not being read from.
                            paramdecl += functionprefix + 'ReadObject(' + paramname.text + ');\n'
        explicitexternsyncparams = cmd.findall("param[@externsync]")
        if (explicitexternsyncparams is not None):
            for param in explicitexternsyncparams:
                externsyncattrib = param.attrib.get('externsync')
                paramname = param.find('name')
                paramdecl += '// Host access to '
                if externsyncattrib == 'true':
                    if self.paramIsArray(param):
                        paramdecl += 'each member of ' + paramname.text
                    elif self.paramIsPointer(param):
                        paramdecl += 'the object referenced by ' + paramname.text
                    else:
                        paramdecl += paramname.text
                else:
                    paramdecl += externsyncattrib
                paramdecl += ' must be externally synchronized\n'

        # Find and add any "implicit" parameters that are thread unsafe
        implicitexternsyncparams = cmd.find('implicitexternsyncparams')
        if (implicitexternsyncparams is not None):
            for elem in implicitexternsyncparams:
                paramdecl += '// '
                paramdecl += elem.text
                paramdecl += ' must be externally synchronized between host accesses\n'

        if (paramdecl == ''):
            return None
        else:
            return paramdecl
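
    # Illustrative only: for a command like vkDestroyFence(device, fence, pAllocator),
    # where 'fence' is marked externsync in the registry, makeThreadUseBlock(cmd, 'Start')
    # returns roughly:
    #     StartReadObject(device);
    #     StartWriteObject(fence);
    #     // Host access to fence must be externally synchronized
    # and the 'Finish' variant produces the matching FinishReadObject/FinishWriteObject calls.
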
    def beginFile(self, genOpts):
        OutputGenerator.beginFile(self, genOpts)

        # Initialize members that require the tree
        self.handle_types = GetHandleTypes(self.registry.tree)

        # TODO: LUGMAL -- remove this and add our copyright
        # User-supplied prefix text, if any (list of strings)
        write(self.inline_copyright_message, file=self.outFile)

        self.header_file = (genOpts.filename == 'thread_safety.h')
        self.source_file = (genOpts.filename == 'thread_safety.cpp')

        if not self.header_file and not self.source_file:
            print("Error: Output Filenames have changed, update generator source.\n")
            sys.exit(1)

        if self.source_file:
            write('#include "chassis.h"', file=self.outFile)
            write('#include "thread_safety.h"', file=self.outFile)
            self.newline()
            write(self.inline_custom_source_preamble, file=self.outFile)


    def endFile(self):

        # Create class definitions
        counter_class_defs = ''
        counter_class_instances = ''
        counter_class_bodies = ''

        for obj in sorted(self.non_dispatchable_types):
            counter_class_defs += '    counter<%s> c_%s;\n' % (obj, obj)
            if obj in self.object_to_debug_report_type:
                obj_type = self.object_to_debug_report_type[obj]
            else:
                obj_type = 'VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT'
            counter_class_instances += '          c_%s("%s", %s, &report_data),\n' % (obj, obj, obj_type)
            counter_class_bodies += 'WRAPPER(%s)\n' % obj
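
        # Illustrative only: for VkFence the loop above produces
        #     counter_class_defs      ->  '    counter<VkFence> c_VkFence;'
        #     counter_class_instances ->  '          c_VkFence("VkFence", VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT, &report_data),'
        #     counter_class_bodies    ->  'WRAPPER(VkFence)'
        # which replace the three *_TEMPLATE markers in inline_custom_header_preamble below.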
        if self.header_file:
            class_def = self.inline_custom_header_preamble.replace('COUNTER_CLASS_DEFINITIONS_TEMPLATE', counter_class_defs)
            class_def = class_def.replace('COUNTER_CLASS_INSTANCES_TEMPLATE', counter_class_instances[:-2]) # Kill last comma
            class_def = class_def.replace('COUNTER_CLASS_BODIES_TEMPLATE', counter_class_bodies)
            write(class_def, file=self.outFile)
        write('\n'.join(self.sections['command']), file=self.outFile)
        if self.header_file:
            write('};', file=self.outFile)

        # Finish processing in superclass
        OutputGenerator.endFile(self)

    def beginFeature(self, interface, emit):
        #write('// starting beginFeature', file=self.outFile)
        # Start processing in superclass
        OutputGenerator.beginFeature(self, interface, emit)
        # C-specific
        # Accumulate includes, defines, types, enums, function pointer typedefs,
        # and function prototypes separately for this feature. They're only
        # printed in endFeature().
        self.featureExtraProtect = GetFeatureProtect(interface)
        if (self.featureExtraProtect is not None):
            self.appendSection('command', '\n#ifdef %s' % self.featureExtraProtect)

        #write('// ending beginFeature', file=self.outFile)
    def endFeature(self):
        # C-specific
        if (self.emit):
            if (self.featureExtraProtect is not None):
                self.appendSection('command', '#endif // %s' % self.featureExtraProtect)
        # Finish processing in superclass
        OutputGenerator.endFeature(self)
    #
    # Append a definition to the specified section
    def appendSection(self, section, text):
        self.sections[section].append(text)
    #
    # Type generation
    def genType(self, typeinfo, name, alias):
        OutputGenerator.genType(self, typeinfo, name, alias)
        if self.handle_types.IsNonDispatchable(name):
            self.non_dispatchable_types.add(name)
    #
    # Struct (e.g. C "struct" type) generation.
    # This is a special case of the <type> tag where the contents are
    # interpreted as a set of <member> tags instead of freeform C
    # type declarations. The <member> tags are just like <param>
    # tags - they are a declaration of a struct or union member.
    # Only simple member declarations are supported (no nested
    # structs etc.)
    def genStruct(self, typeinfo, typeName, alias):
        OutputGenerator.genStruct(self, typeinfo, typeName, alias)
        body = 'typedef ' + typeinfo.elem.get('category') + ' ' + typeName + ' {\n'
        # paramdecl = self.makeCParamDecl(typeinfo.elem, self.genOpts.alignFuncParam)
        for member in typeinfo.elem.findall('.//member'):
            body += self.makeCParamDecl(member, self.genOpts.alignFuncParam)
            body += ';\n'
        body += '} ' + typeName + ';\n'
        self.appendSection('struct', body)
    #
    # Group (e.g. C "enum" type) generation.
    # These are concatenated together with other types.
    def genGroup(self, groupinfo, groupName, alias):
        pass
    # Enumerant generation
    # <enum> tags may specify their values in several ways, but are usually
    # just integers.
    def genEnum(self, enuminfo, name, alias):
        pass
    #
    # Command generation
    def genCmd(self, cmdinfo, name, alias):
        # Commands shadowed by interface functions and therefore not implemented here
        special_functions = [
            'vkCreateDevice',
            'vkCreateInstance',
            'vkAllocateCommandBuffers',
            'vkFreeCommandBuffers',
            'vkResetCommandPool',
            'vkDestroyCommandPool',
            'vkAllocateDescriptorSets',
            'vkQueuePresentKHR',
            'vkGetSwapchainImagesKHR',
        ]
        if name == 'vkQueuePresentKHR' or (name in special_functions and self.source_file):
            return

        if (("DebugMarker" in name or "DebugUtilsObject" in name) and "EXT" in name):
            self.appendSection('command', '// TODO - not wrapping EXT function ' + name)
            return

        # Determine first if this function needs to be intercepted
        startthreadsafety = self.makeThreadUseBlock(cmdinfo.elem, 'Start')
        if startthreadsafety is None:
            return
        finishthreadsafety = self.makeThreadUseBlock(cmdinfo.elem, 'Finish')

        OutputGenerator.genCmd(self, cmdinfo, name, alias)

        # setup common to call wrappers
        # first parameter is always dispatchable
        dispatchable_type = cmdinfo.elem.find('param/type').text
        dispatchable_name = cmdinfo.elem.find('param/name').text

        decls = self.makeCDecls(cmdinfo.elem)

        result_type = cmdinfo.elem.find('proto/type')

        if self.source_file:
            pre_decl = decls[0][:-1]
            pre_decl = pre_decl.split("VKAPI_CALL ")[1]
            pre_decl = 'void ThreadSafety::PreCallRecord' + pre_decl + ' {'

            # PreCallRecord
            self.appendSection('command', '')
            self.appendSection('command', pre_decl)
            self.appendSection('command', "    " + "\n    ".join(str(startthreadsafety).rstrip().split("\n")))
            self.appendSection('command', '}')

            # PostCallRecord
            post_decl = pre_decl.replace('PreCallRecord', 'PostCallRecord')
            if result_type.text == 'VkResult':
                post_decl = post_decl.replace(')', ',\n    VkResult                                    result)')
            self.appendSection('command', '')
            self.appendSection('command', post_decl)
            self.appendSection('command', "    " + "\n    ".join(str(finishthreadsafety).rstrip().split("\n")))
            self.appendSection('command', '}')

        if self.header_file:
            pre_decl = decls[0][:-1]
            pre_decl = pre_decl.split("VKAPI_CALL ")[1]
            pre_decl = 'void PreCallRecord' + pre_decl + ';'

            # PreCallRecord
            self.appendSection('command', '')
            self.appendSection('command', pre_decl)

            # PostCallRecord
            post_decl = pre_decl.replace('PreCallRecord', 'PostCallRecord')
            if result_type.text == 'VkResult':
                post_decl = post_decl.replace(')', ',\n    VkResult                                    result)')
            self.appendSection('command', '')
            self.appendSection('command', post_decl)
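
        # Illustrative only: for a VkResult-returning command such as vkGetFenceStatus,
        # the header branch above ends up appending declarations roughly like
        #     void PreCallRecordGetFenceStatus(VkDevice device, VkFence fence);
        #     void PostCallRecordGetFenceStatus(VkDevice device, VkFence fence, VkResult result);
        # (the "vk" prefix is dropped by the makeProtoName override below).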

    #
    # override makeProtoName to drop the "vk" prefix
    def makeProtoName(self, name, tail):
        return self.genOpts.apientry + name[2:] + tail
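    # Illustrative only: makeProtoName('vkCreateSampler', '') returns 'CreateSampler'
    # (prefixed by whatever calling-convention string genOpts.apientry supplies).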