#!/usr/bin/python3 -i
#
# Copyright (c) 2015-2019 The Khronos Group Inc.
# Copyright (c) 2015-2019 Valve Corporation
# Copyright (c) 2015-2019 LunarG, Inc.
# Copyright (c) 2015-2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Mike Stroyan <stroyan@google.com>
# Author: Mark Lobodzinski <mark@lunarg.com>
# Standard-library modules used throughout the generator.
import os
import re
import sys

# Project-local modules: the Khronos registry generator framework and the
# shared code-generation helpers used by all Vulkan-ValidationLayers generators.
from generator import *
from common_codegen import *
Mark Lobodzinskiff910992016-10-11 14:29:52 -060026
# ThreadGeneratorOptions - subclass of GeneratorOptions.
#
# Adds options used by ThreadOutputGenerator objects during threading
# layer generation.
#
# Additional members
#   prefixText - list of strings to prefix generated header with
#     (usually a copyright statement + calling convention macros).
#   protectFile - True if multiple inclusion protection should be
#     generated (based on the filename) around the entire header.
#   protectFeature - True if #ifndef..#endif protection should be
#     generated around a feature interface in the header file.
#   genFuncPointers - True if function pointer typedefs should be
#     generated
#   protectProto - If conditional protection should be generated
#     around prototype declarations, set to either '#ifdef'
#     to require opt-in (#ifdef protectProtoStr) or '#ifndef'
#     to require opt-out (#ifndef protectProtoStr). Otherwise
#     set to None.
#   protectProtoStr - #ifdef/#ifndef symbol to use around prototype
#     declarations, if protectProto is set
#   apicall - string to use for the function declaration prefix,
#     such as APICALL on Windows.
#   apientry - string to use for the calling convention macro,
#     in typedefs, such as APIENTRY.
#   apientryp - string to use for the calling convention macro
#     in function pointer typedefs, such as APIENTRYP.
#   indentFuncProto - True if prototype declarations should put each
#     parameter on a separate line
#   indentFuncPointer - True if typedefed function pointers should put each
#     parameter on a separate line
#   alignFuncParam - if nonzero and parameters are being put on a
#     separate line, align parameter names at the specified column
class ThreadGeneratorOptions(GeneratorOptions):
    """Options controlling generation of the thread-safety validation layer.

    Extends the registry framework's GeneratorOptions with formatting knobs
    consumed by ThreadOutputGenerator (header protection, calling-convention
    macros, prototype indentation, etc.).  All parameters are optional and
    keep the framework's conventional defaults, so existing callers are
    unaffected.
    """
    def __init__(self,
                 conventions = None,
                 filename = None,
                 directory = '.',
                 apiname = None,
                 profile = None,
                 versions = '.*',
                 emitversions = '.*',
                 defaultExtensions = None,
                 addExtensions = None,
                 removeExtensions = None,
                 emitExtensions = None,
                 sortProcedure = regSortFeatures,
                 prefixText = "",
                 genFuncPointers = True,
                 protectFile = True,
                 protectFeature = True,
                 apicall = '',
                 apientry = '',
                 apientryp = '',
                 indentFuncProto = True,
                 indentFuncPointer = False,
                 alignFuncParam = 0,
                 expandEnumerants = True):
        # Registry-level options (versions, extensions, sort order) are
        # handled entirely by the base class.
        GeneratorOptions.__init__(self, conventions, filename, directory, apiname, profile,
                                  versions, emitversions, defaultExtensions,
                                  addExtensions, removeExtensions, emitExtensions, sortProcedure)
        # Layer-specific presentation options (documented in the comment
        # block preceding this class).
        self.prefixText = prefixText
        self.genFuncPointers = genFuncPointers
        self.protectFile = protectFile
        self.protectFeature = protectFeature
        self.apicall = apicall
        self.apientry = apientry
        self.apientryp = apientryp
        self.indentFuncProto = indentFuncProto
        self.indentFuncPointer = indentFuncPointer
        self.alignFuncParam = alignFuncParam
        self.expandEnumerants = expandEnumerants
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600100
# ThreadOutputGenerator - subclass of OutputGenerator.
# Generates Thread checking framework
#
# ---- methods ----
# ThreadOutputGenerator(errFile, warnFile, diagFile) - args as for
#   OutputGenerator. Defines additional internal state.
# ---- methods overriding base class ----
# beginFile(genOpts)
# endFile()
# beginFeature(interface, emit)
# endFeature()
# genType(typeinfo,name)
# genStruct(typeinfo,name)
# genGroup(groupinfo,name)
# genEnum(enuminfo, name)
# genCmd(cmdinfo)
class ThreadOutputGenerator(OutputGenerator):
    """Generate specified API interfaces in a specific style, such as a C header"""
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700119
120 inline_copyright_message = """
121// This file is ***GENERATED***. Do Not Edit.
Petr Kraus6e6ff3e2019-08-09 18:28:13 +0200122// See thread_safety_generator.py for modifications.
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700123
Mark Lobodzinskicd05c1e2019-01-17 15:33:46 -0700124/* Copyright (c) 2015-2019 The Khronos Group Inc.
125 * Copyright (c) 2015-2019 Valve Corporation
126 * Copyright (c) 2015-2019 LunarG, Inc.
127 * Copyright (c) 2015-2019 Google Inc.
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700128 *
129 * Licensed under the Apache License, Version 2.0 (the "License");
130 * you may not use this file except in compliance with the License.
131 * You may obtain a copy of the License at
132 *
133 * http://www.apache.org/licenses/LICENSE-2.0
134 *
135 * Unless required by applicable law or agreed to in writing, software
136 * distributed under the License is distributed on an "AS IS" BASIS,
137 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
138 * See the License for the specific language governing permissions and
139 * limitations under the License.
140 *
141 * Author: Mark Lobodzinski <mark@lunarg.com>
142 */"""
143
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700144 # Note that the inline_custom_header_preamble template below contains three embedded template expansion identifiers.
145 # These get replaced with generated code sections, and are labeled:
146 # o COUNTER_CLASS_DEFINITIONS_TEMPLATE
147 # o COUNTER_CLASS_INSTANCES_TEMPLATE
148 # o COUNTER_CLASS_BODIES_TEMPLATE
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700149 inline_custom_header_preamble = """
150#pragma once
151
Jeff Bolzd8610ec2019-08-14 16:49:01 -0500152#include <chrono>
Jeff Bolzade10b22019-08-14 23:09:00 -0500153#include <thread>
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700154#include <mutex>
155#include <vector>
156#include <unordered_set>
157#include <string>
158
159VK_DEFINE_NON_DISPATCHABLE_HANDLE(DISTINCT_NONDISPATCHABLE_PHONY_HANDLE)
160// The following line must match the vulkan_core.h condition guarding VK_DEFINE_NON_DISPATCHABLE_HANDLE
161#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || \
162 defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
163// If pointers are 64-bit, then there can be separate counters for each
164// NONDISPATCHABLE_HANDLE type. Otherwise they are all typedef uint64_t.
165#define DISTINCT_NONDISPATCHABLE_HANDLES
166// Make sure we catch any disagreement between us and the vulkan definition
167static_assert(std::is_pointer<DISTINCT_NONDISPATCHABLE_PHONY_HANDLE>::value,
168 "Mismatched non-dispatchable handle handle, expected pointer type.");
169#else
170// Make sure we catch any disagreement between us and the vulkan definition
171static_assert(std::is_same<uint64_t, DISTINCT_NONDISPATCHABLE_PHONY_HANDLE>::value,
172 "Mismatched non-dispatchable handle handle, expected uint64_t.");
173#endif
174
175// Suppress unused warning on Linux
176#if defined(__GNUC__)
177#define DECORATE_UNUSED __attribute__((unused))
178#else
179#define DECORATE_UNUSED
180#endif
181
182// clang-format off
183static const char DECORATE_UNUSED *kVUID_Threading_Info = "UNASSIGNED-Threading-Info";
184static const char DECORATE_UNUSED *kVUID_Threading_MultipleThreads = "UNASSIGNED-Threading-MultipleThreads";
185static const char DECORATE_UNUSED *kVUID_Threading_SingleThreadReuse = "UNASSIGNED-Threading-SingleThreadReuse";
186// clang-format on
187
188#undef DECORATE_UNUSED
189
190struct object_use_data {
191 loader_platform_thread_id thread;
192 int reader_count;
193 int writer_count;
194};
195
Jeff Bolz1cb6fd6f2019-02-03 21:58:14 -0600196// This is a wrapper around unordered_map that optimizes for the common case
197// of only containing a single element. The "first" element's use is stored
198// inline in the class and doesn't require hashing or memory (de)allocation.
199// TODO: Consider generalizing this from one element to N elements (where N
200// is a template parameter).
201template <typename Key, typename T>
202class small_unordered_map {
203
204 bool first_data_allocated;
205 Key first_data_key;
206 T first_data;
207
208 std::unordered_map<Key, T> uses;
209
210public:
211 small_unordered_map() : first_data_allocated(false) {}
212
213 bool contains(const Key& object) const {
214 if (first_data_allocated && object == first_data_key) {
215 return true;
216 // check size() first to avoid hashing object unnecessarily.
217 } else if (uses.size() == 0) {
218 return false;
219 } else {
220 return uses.find(object) != uses.end();
221 }
222 }
223
224 T& operator[](const Key& object) {
225 if (first_data_allocated && first_data_key == object) {
226 return first_data;
227 } else if (!first_data_allocated && uses.size() == 0) {
228 first_data_allocated = true;
229 first_data_key = object;
230 return first_data;
231 } else {
232 return uses[object];
233 }
234 }
235
236 typename std::unordered_map<Key, T>::size_type erase(const Key& object) {
237 if (first_data_allocated && first_data_key == object) {
238 first_data_allocated = false;
239 return 1;
240 } else {
241 return uses.erase(object);
242 }
243 }
244};
245
Jeff Bolz9bd5fb32019-08-12 13:53:41 -0500246#define THREAD_SAFETY_BUCKETS_LOG2 6
247#define THREAD_SAFETY_BUCKETS (1 << THREAD_SAFETY_BUCKETS_LOG2)
248
249template <typename T> inline uint32_t ThreadSafetyHashObject(T object)
250{
Jeff Bolzade10b22019-08-14 23:09:00 -0500251 uint64_t u64 = (uint64_t)(uintptr_t)object;
252 uint32_t hash = (uint32_t)(u64 >> 32) + (uint32_t)u64;
Jeff Bolz9bd5fb32019-08-12 13:53:41 -0500253 hash ^= (hash >> THREAD_SAFETY_BUCKETS_LOG2) ^ (hash >> (2*THREAD_SAFETY_BUCKETS_LOG2));
254 hash &= (THREAD_SAFETY_BUCKETS-1);
255 return hash;
256}
257
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700258template <typename T>
259class counter {
260public:
261 const char *typeName;
262 VkDebugReportObjectTypeEXT objectType;
263 debug_report_data **report_data;
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700264
Jeff Bolz9bd5fb32019-08-12 13:53:41 -0500265 // Per-bucket locking, to reduce contention.
Jeff Bolzade10b22019-08-14 23:09:00 -0500266 struct CounterBucket {
267 small_unordered_map<T, object_use_data> uses;
268 std::mutex counter_lock;
269 };
270
271 CounterBucket buckets[THREAD_SAFETY_BUCKETS];
Jeff Bolz2711f532019-08-16 12:30:08 -0500272 CounterBucket &GetBucket(T object)
Jeff Bolzade10b22019-08-14 23:09:00 -0500273 {
274 return buckets[ThreadSafetyHashObject(object)];
275 }
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700276
277 void StartWrite(T object) {
278 if (object == VK_NULL_HANDLE) {
279 return;
280 }
Jeff Bolz2711f532019-08-16 12:30:08 -0500281 auto &bucket = GetBucket(object);
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700282 bool skip = false;
283 loader_platform_thread_id tid = loader_platform_get_thread_id();
Jeff Bolzade10b22019-08-14 23:09:00 -0500284 std::unique_lock<std::mutex> lock(bucket.counter_lock);
285 if (!bucket.uses.contains(object)) {
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700286 // There is no current use of the object. Record writer thread.
Jeff Bolzade10b22019-08-14 23:09:00 -0500287 struct object_use_data *use_data = &bucket.uses[object];
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700288 use_data->reader_count = 0;
289 use_data->writer_count = 1;
290 use_data->thread = tid;
291 } else {
Jeff Bolzade10b22019-08-14 23:09:00 -0500292 struct object_use_data *use_data = &bucket.uses[object];
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700293 if (use_data->reader_count == 0) {
294 // There are no readers. Two writers just collided.
295 if (use_data->thread != tid) {
296 skip |= log_msg(*report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
297 kVUID_Threading_MultipleThreads,
298 "THREADING ERROR : object of type %s is simultaneously used in "
299 "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
300 typeName, (uint64_t)use_data->thread, (uint64_t)tid);
301 if (skip) {
Jeff Bolz2711f532019-08-16 12:30:08 -0500302 WaitForObjectIdle(bucket, object, lock);
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700303 // There is now no current use of the object. Record writer thread.
Jeff Bolzade10b22019-08-14 23:09:00 -0500304 struct object_use_data *new_use_data = &bucket.uses[object];
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700305 new_use_data->thread = tid;
306 new_use_data->reader_count = 0;
307 new_use_data->writer_count = 1;
308 } else {
309 // Continue with an unsafe use of the object.
310 use_data->thread = tid;
311 use_data->writer_count += 1;
312 }
313 } else {
314 // This is either safe multiple use in one call, or recursive use.
315 // There is no way to make recursion safe. Just forge ahead.
316 use_data->writer_count += 1;
317 }
318 } else {
319 // There are readers. This writer collided with them.
320 if (use_data->thread != tid) {
321 skip |= log_msg(*report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
322 kVUID_Threading_MultipleThreads,
323 "THREADING ERROR : object of type %s is simultaneously used in "
324 "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
325 typeName, (uint64_t)use_data->thread, (uint64_t)tid);
326 if (skip) {
Jeff Bolz2711f532019-08-16 12:30:08 -0500327 WaitForObjectIdle(bucket, object, lock);
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700328 // There is now no current use of the object. Record writer thread.
Jeff Bolzade10b22019-08-14 23:09:00 -0500329 struct object_use_data *new_use_data = &bucket.uses[object];
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700330 new_use_data->thread = tid;
331 new_use_data->reader_count = 0;
332 new_use_data->writer_count = 1;
333 } else {
334 // Continue with an unsafe use of the object.
335 use_data->thread = tid;
336 use_data->writer_count += 1;
337 }
338 } else {
339 // This is either safe multiple use in one call, or recursive use.
340 // There is no way to make recursion safe. Just forge ahead.
341 use_data->writer_count += 1;
342 }
343 }
344 }
345 }
346
347 void FinishWrite(T object) {
348 if (object == VK_NULL_HANDLE) {
349 return;
350 }
Jeff Bolz2711f532019-08-16 12:30:08 -0500351 auto &bucket = GetBucket(object);
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700352 // Object is no longer in use
Jeff Bolzade10b22019-08-14 23:09:00 -0500353 std::unique_lock<std::mutex> lock(bucket.counter_lock);
354 struct object_use_data *use_data = &bucket.uses[object];
Jeff Bolzd8610ec2019-08-14 16:49:01 -0500355 use_data->writer_count -= 1;
356 if ((use_data->reader_count == 0) && (use_data->writer_count == 0)) {
Jeff Bolzade10b22019-08-14 23:09:00 -0500357 bucket.uses.erase(object);
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700358 }
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700359 }
360
361 void StartRead(T object) {
362 if (object == VK_NULL_HANDLE) {
363 return;
364 }
Jeff Bolz2711f532019-08-16 12:30:08 -0500365 auto &bucket = GetBucket(object);
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700366 bool skip = false;
367 loader_platform_thread_id tid = loader_platform_get_thread_id();
Jeff Bolzade10b22019-08-14 23:09:00 -0500368 std::unique_lock<std::mutex> lock(bucket.counter_lock);
369 if (!bucket.uses.contains(object)) {
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700370 // There is no current use of the object. Record reader count
Jeff Bolzade10b22019-08-14 23:09:00 -0500371 struct object_use_data *use_data = &bucket.uses[object];
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700372 use_data->reader_count = 1;
373 use_data->writer_count = 0;
374 use_data->thread = tid;
Jeff Bolzade10b22019-08-14 23:09:00 -0500375 } else if (bucket.uses[object].writer_count > 0 && bucket.uses[object].thread != tid) {
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700376 // There is a writer of the object.
Petr Krause20affe2019-08-09 18:31:48 +0200377 skip |= log_msg(*report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, objectType, (uint64_t)(object),
378 kVUID_Threading_MultipleThreads,
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700379 "THREADING ERROR : object of type %s is simultaneously used in "
380 "thread 0x%" PRIx64 " and thread 0x%" PRIx64,
Jeff Bolzade10b22019-08-14 23:09:00 -0500381 typeName, (uint64_t)bucket.uses[object].thread, (uint64_t)tid);
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700382 if (skip) {
Jeff Bolz2711f532019-08-16 12:30:08 -0500383 WaitForObjectIdle(bucket, object, lock);
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700384 // There is no current use of the object. Record reader count
Jeff Bolzade10b22019-08-14 23:09:00 -0500385 struct object_use_data *use_data = &bucket.uses[object];
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700386 use_data->reader_count = 1;
387 use_data->writer_count = 0;
388 use_data->thread = tid;
389 } else {
Jeff Bolzade10b22019-08-14 23:09:00 -0500390 bucket.uses[object].reader_count += 1;
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700391 }
392 } else {
393 // There are other readers of the object. Increase reader count
Jeff Bolzade10b22019-08-14 23:09:00 -0500394 bucket.uses[object].reader_count += 1;
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700395 }
396 }
397 void FinishRead(T object) {
398 if (object == VK_NULL_HANDLE) {
399 return;
400 }
Jeff Bolz2711f532019-08-16 12:30:08 -0500401 auto &bucket = GetBucket(object);
Jeff Bolzade10b22019-08-14 23:09:00 -0500402 std::unique_lock<std::mutex> lock(bucket.counter_lock);
403 struct object_use_data *use_data = &bucket.uses[object];
Jeff Bolzd8610ec2019-08-14 16:49:01 -0500404 use_data->reader_count -= 1;
405 if ((use_data->reader_count == 0) && (use_data->writer_count == 0)) {
Jeff Bolzade10b22019-08-14 23:09:00 -0500406 bucket.uses.erase(object);
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700407 }
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700408 }
409 counter(const char *name = "", VkDebugReportObjectTypeEXT type = VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, debug_report_data **rep_data = nullptr) {
410 typeName = name;
411 objectType = type;
412 report_data = rep_data;
413 }
Jeff Bolz2711f532019-08-16 12:30:08 -0500414
415private:
416 void WaitForObjectIdle(CounterBucket &bucket, T object, std::unique_lock<std::mutex> &lock) {
417 // Wait for thread-safe access to object instead of skipping call.
418 // Don't use condition_variable to wait because it should be extremely
419 // rare to have collisions, but signaling would be very frequent.
420 while (bucket.uses.contains(object)) {
421 lock.unlock();
422 std::this_thread::sleep_for(std::chrono::microseconds(1));
423 lock.lock();
424 }
425 }
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700426};
427
428
429
430class ThreadSafety : public ValidationObject {
431public:
432
433 // Override chassis read/write locks for this validation object
Jeremy Hayesd4a3ec32019-01-29 14:42:08 -0700434 // This override takes a deferred lock. i.e. it is not acquired.
435 std::unique_lock<std::mutex> write_lock() {
436 return std::unique_lock<std::mutex>(validation_object_mutex, std::defer_lock);
437 }
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700438
Jeff Bolzade10b22019-08-14 23:09:00 -0500439 // Per-bucket locking, to reduce contention.
440 struct CommandBufferBucket {
441 std::mutex command_pool_lock;
442 small_unordered_map<VkCommandBuffer, VkCommandPool> command_pool_map;
443 };
444
445 CommandBufferBucket buckets[THREAD_SAFETY_BUCKETS];
Jeff Bolz2711f532019-08-16 12:30:08 -0500446 CommandBufferBucket &GetBucket(VkCommandBuffer object)
Jeff Bolzade10b22019-08-14 23:09:00 -0500447 {
448 return buckets[ThreadSafetyHashObject(object)];
449 }
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700450
451 counter<VkCommandBuffer> c_VkCommandBuffer;
452 counter<VkDevice> c_VkDevice;
453 counter<VkInstance> c_VkInstance;
454 counter<VkQueue> c_VkQueue;
455#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
456
457 // Special entry to allow tracking of command pool Reset and Destroy
458 counter<VkCommandPool> c_VkCommandPoolContents;
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700459COUNTER_CLASS_DEFINITIONS_TEMPLATE
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700460
461#else // DISTINCT_NONDISPATCHABLE_HANDLES
462 // Special entry to allow tracking of command pool Reset and Destroy
463 counter<uint64_t> c_VkCommandPoolContents;
464
465 counter<uint64_t> c_uint64_t;
466#endif // DISTINCT_NONDISPATCHABLE_HANDLES
467
468 ThreadSafety()
469 : c_VkCommandBuffer("VkCommandBuffer", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT, &report_data),
470 c_VkDevice("VkDevice", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT, &report_data),
471 c_VkInstance("VkInstance", VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT, &report_data),
472 c_VkQueue("VkQueue", VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT, &report_data),
473 c_VkCommandPoolContents("VkCommandPool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT, &report_data),
474
475#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700476COUNTER_CLASS_INSTANCES_TEMPLATE
477
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700478
479#else // DISTINCT_NONDISPATCHABLE_HANDLES
480 c_uint64_t("NON_DISPATCHABLE_HANDLE", VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT, &report_data)
481#endif // DISTINCT_NONDISPATCHABLE_HANDLES
482 {};
483
484#define WRAPPER(type) \
485 void StartWriteObject(type object) { \
486 c_##type.StartWrite(object); \
487 } \
488 void FinishWriteObject(type object) { \
489 c_##type.FinishWrite(object); \
490 } \
491 void StartReadObject(type object) { \
492 c_##type.StartRead(object); \
493 } \
494 void FinishReadObject(type object) { \
495 c_##type.FinishRead(object); \
496 }
497
498WRAPPER(VkDevice)
499WRAPPER(VkInstance)
500WRAPPER(VkQueue)
501#ifdef DISTINCT_NONDISPATCHABLE_HANDLES
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700502COUNTER_CLASS_BODIES_TEMPLATE
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700503
504#else // DISTINCT_NONDISPATCHABLE_HANDLES
505WRAPPER(uint64_t)
506#endif // DISTINCT_NONDISPATCHABLE_HANDLES
507
508 // VkCommandBuffer needs check for implicit use of command pool
509 void StartWriteObject(VkCommandBuffer object, bool lockPool = true) {
510 if (lockPool) {
Jeff Bolz2711f532019-08-16 12:30:08 -0500511 auto &bucket = GetBucket(object);
Jeff Bolzade10b22019-08-14 23:09:00 -0500512 std::unique_lock<std::mutex> lock(bucket.command_pool_lock);
513 VkCommandPool pool = bucket.command_pool_map[object];
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700514 lock.unlock();
515 StartWriteObject(pool);
516 }
517 c_VkCommandBuffer.StartWrite(object);
518 }
519 void FinishWriteObject(VkCommandBuffer object, bool lockPool = true) {
520 c_VkCommandBuffer.FinishWrite(object);
521 if (lockPool) {
Jeff Bolz2711f532019-08-16 12:30:08 -0500522 auto &bucket = GetBucket(object);
Jeff Bolzade10b22019-08-14 23:09:00 -0500523 std::unique_lock<std::mutex> lock(bucket.command_pool_lock);
524 VkCommandPool pool = bucket.command_pool_map[object];
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700525 lock.unlock();
526 FinishWriteObject(pool);
527 }
528 }
529 void StartReadObject(VkCommandBuffer object) {
Jeff Bolz2711f532019-08-16 12:30:08 -0500530 auto &bucket = GetBucket(object);
Jeff Bolzade10b22019-08-14 23:09:00 -0500531 std::unique_lock<std::mutex> lock(bucket.command_pool_lock);
532 VkCommandPool pool = bucket.command_pool_map[object];
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700533 lock.unlock();
534 // We set up a read guard against the "Contents" counter to catch conflict vs. vkResetCommandPool and vkDestroyCommandPool
535 // while *not* establishing a read guard against the command pool counter itself to avoid false postives for
536 // non-externally sync'd command buffers
537 c_VkCommandPoolContents.StartRead(pool);
538 c_VkCommandBuffer.StartRead(object);
539 }
540 void FinishReadObject(VkCommandBuffer object) {
Jeff Bolz2711f532019-08-16 12:30:08 -0500541 auto &bucket = GetBucket(object);
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700542 c_VkCommandBuffer.FinishRead(object);
Jeff Bolzade10b22019-08-14 23:09:00 -0500543 std::unique_lock<std::mutex> lock(bucket.command_pool_lock);
544 VkCommandPool pool = bucket.command_pool_map[object];
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700545 lock.unlock();
546 c_VkCommandPoolContents.FinishRead(pool);
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700547 } """
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700548
549
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700550 inline_custom_source_preamble = """
551void ThreadSafety::PreCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
552 VkCommandBuffer *pCommandBuffers) {
553 StartReadObject(device);
554 StartWriteObject(pAllocateInfo->commandPool);
555}
556
557void ThreadSafety::PostCallRecordAllocateCommandBuffers(VkDevice device, const VkCommandBufferAllocateInfo *pAllocateInfo,
Mark Lobodzinskicd05c1e2019-01-17 15:33:46 -0700558 VkCommandBuffer *pCommandBuffers, VkResult result) {
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700559 FinishReadObject(device);
560 FinishWriteObject(pAllocateInfo->commandPool);
561
562 // Record mapping from command buffer to command pool
John Zulaufc9f979e2019-04-29 08:51:15 -0600563 if(pCommandBuffers) {
John Zulaufc9f979e2019-04-29 08:51:15 -0600564 for (uint32_t index = 0; index < pAllocateInfo->commandBufferCount; index++) {
Jeff Bolz2711f532019-08-16 12:30:08 -0500565 auto &bucket = GetBucket(pCommandBuffers[index]);
Jeff Bolzade10b22019-08-14 23:09:00 -0500566 std::lock_guard<std::mutex> lock(bucket.command_pool_lock);
567 bucket.command_pool_map[pCommandBuffers[index]] = pAllocateInfo->commandPool;
John Zulaufc9f979e2019-04-29 08:51:15 -0600568 }
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700569 }
570}
571
572void ThreadSafety::PreCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
573 VkDescriptorSet *pDescriptorSets) {
574 StartReadObject(device);
575 StartWriteObject(pAllocateInfo->descriptorPool);
576 // Host access to pAllocateInfo::descriptorPool must be externally synchronized
577}
578
579void ThreadSafety::PostCallRecordAllocateDescriptorSets(VkDevice device, const VkDescriptorSetAllocateInfo *pAllocateInfo,
Mark Lobodzinskicd05c1e2019-01-17 15:33:46 -0700580 VkDescriptorSet *pDescriptorSets, VkResult result) {
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700581 FinishReadObject(device);
582 FinishWriteObject(pAllocateInfo->descriptorPool);
583 // Host access to pAllocateInfo::descriptorPool must be externally synchronized
584}
585
586void ThreadSafety::PreCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
587 const VkCommandBuffer *pCommandBuffers) {
588 const bool lockCommandPool = false; // pool is already directly locked
589 StartReadObject(device);
590 StartWriteObject(commandPool);
John Zulaufc9f979e2019-04-29 08:51:15 -0600591 if(pCommandBuffers) {
592 // Even though we're immediately "finishing" below, we still are testing for concurrency with any call in process
593 // so this isn't a no-op
594 for (uint32_t index = 0; index < commandBufferCount; index++) {
595 StartWriteObject(pCommandBuffers[index], lockCommandPool);
596 }
597 // The driver may immediately reuse command buffers in another thread.
598 // These updates need to be done before calling down to the driver.
599 for (uint32_t index = 0; index < commandBufferCount; index++) {
600 FinishWriteObject(pCommandBuffers[index], lockCommandPool);
601 }
602 // Holding the lock for the shortest time while we update the map
John Zulaufc9f979e2019-04-29 08:51:15 -0600603 for (uint32_t index = 0; index < commandBufferCount; index++) {
Jeff Bolz2711f532019-08-16 12:30:08 -0500604 auto &bucket = GetBucket(pCommandBuffers[index]);
Jeff Bolzade10b22019-08-14 23:09:00 -0500605 std::lock_guard<std::mutex> lock(bucket.command_pool_lock);
606 bucket.command_pool_map.erase(pCommandBuffers[index]);
John Zulaufc9f979e2019-04-29 08:51:15 -0600607 }
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700608 }
609}
610
611void ThreadSafety::PostCallRecordFreeCommandBuffers(VkDevice device, VkCommandPool commandPool, uint32_t commandBufferCount,
612 const VkCommandBuffer *pCommandBuffers) {
613 FinishReadObject(device);
614 FinishWriteObject(commandPool);
615}
616
617void ThreadSafety::PreCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags) {
618 StartReadObject(device);
619 StartWriteObject(commandPool);
620 // Check for any uses of non-externally sync'd command buffers (for example from vkCmdExecuteCommands)
621 c_VkCommandPoolContents.StartWrite(commandPool);
622 // Host access to commandPool must be externally synchronized
623}
624
Mark Lobodzinskicd05c1e2019-01-17 15:33:46 -0700625void ThreadSafety::PostCallRecordResetCommandPool(VkDevice device, VkCommandPool commandPool, VkCommandPoolResetFlags flags, VkResult result) {
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700626 FinishReadObject(device);
627 FinishWriteObject(commandPool);
628 c_VkCommandPoolContents.FinishWrite(commandPool);
629 // Host access to commandPool must be externally synchronized
630}
631
632void ThreadSafety::PreCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
633 StartReadObject(device);
634 StartWriteObject(commandPool);
635 // Check for any uses of non-externally sync'd command buffers (for example from vkCmdExecuteCommands)
636 c_VkCommandPoolContents.StartWrite(commandPool);
637 // Host access to commandPool must be externally synchronized
638}
639
640void ThreadSafety::PostCallRecordDestroyCommandPool(VkDevice device, VkCommandPool commandPool, const VkAllocationCallbacks *pAllocator) {
641 FinishReadObject(device);
642 FinishWriteObject(commandPool);
643 c_VkCommandPoolContents.FinishWrite(commandPool);
644}
645
Mark Lobodzinski8925c052018-12-18 12:41:15 -0700646// GetSwapchainImages can return a non-zero count with a NULL pSwapchainImages pointer. Let's avoid crashes by ignoring
647// pSwapchainImages.
648void ThreadSafety::PreCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
649 VkImage *pSwapchainImages) {
650 StartReadObject(device);
651 StartReadObject(swapchain);
652}
653
654void ThreadSafety::PostCallRecordGetSwapchainImagesKHR(VkDevice device, VkSwapchainKHR swapchain, uint32_t *pSwapchainImageCount,
Mark Lobodzinskicd05c1e2019-01-17 15:33:46 -0700655 VkImage *pSwapchainImages, VkResult result) {
Mark Lobodzinski8925c052018-12-18 12:41:15 -0700656 FinishReadObject(device);
657 FinishReadObject(swapchain);
658}
659
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700660"""
661
662
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600663 # This is an ordered list of sections in the header file.
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700664 ALL_SECTIONS = ['command']
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600665 def __init__(self,
666 errFile = sys.stderr,
667 warnFile = sys.stderr,
668 diagFile = sys.stdout):
669 OutputGenerator.__init__(self, errFile, warnFile, diagFile)
670 # Internal state - accumulators for different inner block text
671 self.sections = dict([(section, []) for section in self.ALL_SECTIONS])
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700672 self.non_dispatchable_types = set()
673 self.object_to_debug_report_type = {
674 'VkInstance' : 'VK_DEBUG_REPORT_OBJECT_TYPE_INSTANCE_EXT',
675 'VkPhysicalDevice' : 'VK_DEBUG_REPORT_OBJECT_TYPE_PHYSICAL_DEVICE_EXT',
676 'VkDevice' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT',
677 'VkQueue' : 'VK_DEBUG_REPORT_OBJECT_TYPE_QUEUE_EXT',
678 'VkSemaphore' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT',
679 'VkCommandBuffer' : 'VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT',
680 'VkFence' : 'VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT',
681 'VkDeviceMemory' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT',
682 'VkBuffer' : 'VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT',
683 'VkImage' : 'VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT',
684 'VkEvent' : 'VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT',
685 'VkQueryPool' : 'VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT',
686 'VkBufferView' : 'VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_VIEW_EXT',
687 'VkImageView' : 'VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT',
688 'VkShaderModule' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT',
689 'VkPipelineCache' : 'VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_CACHE_EXT',
690 'VkPipelineLayout' : 'VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT',
691 'VkRenderPass' : 'VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT',
692 'VkPipeline' : 'VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT',
693 'VkDescriptorSetLayout' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT_EXT',
694 'VkSampler' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT',
695 'VkDescriptorPool' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_POOL_EXT',
696 'VkDescriptorSet' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT',
697 'VkFramebuffer' : 'VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT',
698 'VkCommandPool' : 'VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT',
699 'VkSurfaceKHR' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SURFACE_KHR_EXT',
700 'VkSwapchainKHR' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT',
701 'VkDisplayKHR' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_KHR_EXT',
702 'VkDisplayModeKHR' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DISPLAY_MODE_KHR_EXT',
703 'VkObjectTableNVX' : 'VK_DEBUG_REPORT_OBJECT_TYPE_OBJECT_TABLE_NVX_EXT',
704 'VkIndirectCommandsLayoutNVX' : 'VK_DEBUG_REPORT_OBJECT_TYPE_INDIRECT_COMMANDS_LAYOUT_NVX_EXT',
705 'VkSamplerYcbcrConversion' : 'VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_YCBCR_CONVERSION_EXT',
706 'VkDescriptorUpdateTemplate' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_EXT',
707 'VkAccelerationStructureNV' : 'VK_DEBUG_REPORT_OBJECT_TYPE_ACCELERATION_STRUCTURE_NV_EXT',
708 'VkDebugReportCallbackEXT' : 'VK_DEBUG_REPORT_OBJECT_TYPE_DEBUG_REPORT_EXT',
709 'VkValidationCacheEXT' : 'VK_DEBUG_REPORT_OBJECT_TYPE_VALIDATION_CACHE_EXT' }
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600710
711 # Check if the parameter passed in is a pointer to an array
712 def paramIsArray(self, param):
713 return param.attrib.get('len') is not None
714
715 # Check if the parameter passed in is a pointer
716 def paramIsPointer(self, param):
717 ispointer = False
718 for elem in param:
Raul Tambre7b300182019-05-04 11:25:14 +0300719 if elem.tag == 'type' and elem.tail is not None and '*' in elem.tail:
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600720 ispointer = True
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600721 return ispointer
Mark Lobodzinski60b77b32017-02-14 09:16:56 -0700722
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600723 def makeThreadUseBlock(self, cmd, functionprefix):
724 """Generate C function pointer typedef for <command> Element"""
725 paramdecl = ''
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600726 # Find and add any parameters that are thread unsafe
727 params = cmd.findall('param')
728 for param in params:
729 paramname = param.find('name')
730 if False: # self.paramIsPointer(param):
731 paramdecl += ' // not watching use of pointer ' + paramname.text + '\n'
732 else:
733 externsync = param.attrib.get('externsync')
734 if externsync == 'true':
735 if self.paramIsArray(param):
John Zulauf03208642019-04-24 14:40:41 -0600736 paramdecl += 'if (' + paramname.text + ') {\n'
737 paramdecl += ' for (uint32_t index=0; index < ' + param.attrib.get('len') + '; index++) {\n'
738 paramdecl += ' ' + functionprefix + 'WriteObject(' + paramname.text + '[index]);\n'
739 paramdecl += ' }\n'
Mark Lobodzinski716a4f92018-11-16 08:54:20 -0700740 paramdecl += '}\n'
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600741 else:
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700742 paramdecl += functionprefix + 'WriteObject(' + paramname.text + ');\n'
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600743 elif (param.attrib.get('externsync')):
744 if self.paramIsArray(param):
745 # Externsync can list pointers to arrays of members to synchronize
John Zulauf03208642019-04-24 14:40:41 -0600746 paramdecl += 'if (' + paramname.text + ') {\n'
747 paramdecl += ' for (uint32_t index=0; index < ' + param.attrib.get('len') + '; index++) {\n'
748 second_indent = ' '
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600749 for member in externsync.split(","):
750 # Replace first empty [] in member name with index
751 element = member.replace('[]','[index]',1)
752 if '[]' in element:
Mark Lobodzinski6d495662019-02-15 11:54:53 -0700753 # TODO: These null checks can be removed if threading ends up behind parameter
754 # validation in layer order
755 element_ptr = element.split('[]')[0]
John Zulauf03208642019-04-24 14:40:41 -0600756 paramdecl += ' if (' + element_ptr + ') {\n'
Mark Lobodzinski6d495662019-02-15 11:54:53 -0700757 # Replace any second empty [] in element name with inner array index based on mapping array
758 # names like "pSomeThings[]" to "someThingCount" array size. This could be more robust by
759 # mapping a param member name to a struct type and "len" attribute.
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600760 limit = element[0:element.find('s[]')] + 'Count'
761 dotp = limit.rfind('.p')
762 limit = limit[0:dotp+1] + limit[dotp+2:dotp+3].lower() + limit[dotp+3:]
John Zulauf03208642019-04-24 14:40:41 -0600763 paramdecl += ' for (uint32_t index2=0; index2 < '+limit+'; index2++) {\n'
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600764 element = element.replace('[]','[index2]')
John Zulauf03208642019-04-24 14:40:41 -0600765 second_indent = ' '
Mark Lobodzinski6d495662019-02-15 11:54:53 -0700766 paramdecl += ' ' + second_indent + functionprefix + 'WriteObject(' + element + ');\n'
John Zulauf03208642019-04-24 14:40:41 -0600767 paramdecl += ' }\n'
Mark Lobodzinski6d495662019-02-15 11:54:53 -0700768 paramdecl += ' }\n'
Mark Lobodzinski6d495662019-02-15 11:54:53 -0700769 else:
770 paramdecl += ' ' + second_indent + functionprefix + 'WriteObject(' + element + ');\n'
John Zulauf03208642019-04-24 14:40:41 -0600771 paramdecl += ' }\n'
Mark Lobodzinski716a4f92018-11-16 08:54:20 -0700772 paramdecl += '}\n'
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600773 else:
774 # externsync can list members to synchronize
775 for member in externsync.split(","):
776 member = str(member).replace("::", "->")
Mark Lobodzinski9c147802017-02-10 08:34:54 -0700777 member = str(member).replace(".", "->")
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700778 paramdecl += ' ' + functionprefix + 'WriteObject(' + member + ');\n'
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600779 else:
780 paramtype = param.find('type')
781 if paramtype is not None:
782 paramtype = paramtype.text
783 else:
784 paramtype = 'None'
Mike Schuchardtf8690262019-07-11 10:08:33 -0700785 if paramtype in self.handle_types and paramtype != 'VkPhysicalDevice':
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600786 if self.paramIsArray(param) and ('pPipelines' != paramname.text):
Mark Lobodzinski9c147802017-02-10 08:34:54 -0700787 # Add pointer dereference for array counts that are pointer values
788 dereference = ''
789 for candidate in params:
790 if param.attrib.get('len') == candidate.find('name').text:
791 if self.paramIsPointer(candidate):
792 dereference = '*'
Mark Lobodzinski60b77b32017-02-14 09:16:56 -0700793 param_len = str(param.attrib.get('len')).replace("::", "->")
John Zulauf03208642019-04-24 14:40:41 -0600794 paramdecl += 'if (' + paramname.text + ') {\n'
795 paramdecl += ' for (uint32_t index = 0; index < ' + dereference + param_len + '; index++) {\n'
796 paramdecl += ' ' + functionprefix + 'ReadObject(' + paramname.text + '[index]);\n'
797 paramdecl += ' }\n'
Mark Lobodzinski716a4f92018-11-16 08:54:20 -0700798 paramdecl += '}\n'
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600799 elif not self.paramIsPointer(param):
800 # Pointer params are often being created.
801 # They are not being read from.
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700802 paramdecl += functionprefix + 'ReadObject(' + paramname.text + ');\n'
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600803 explicitexternsyncparams = cmd.findall("param[@externsync]")
804 if (explicitexternsyncparams is not None):
805 for param in explicitexternsyncparams:
806 externsyncattrib = param.attrib.get('externsync')
807 paramname = param.find('name')
Mark Lobodzinski716a4f92018-11-16 08:54:20 -0700808 paramdecl += '// Host access to '
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600809 if externsyncattrib == 'true':
810 if self.paramIsArray(param):
811 paramdecl += 'each member of ' + paramname.text
812 elif self.paramIsPointer(param):
813 paramdecl += 'the object referenced by ' + paramname.text
814 else:
815 paramdecl += paramname.text
816 else:
817 paramdecl += externsyncattrib
818 paramdecl += ' must be externally synchronized\n'
819
820 # Find and add any "implicit" parameters that are thread unsafe
821 implicitexternsyncparams = cmd.find('implicitexternsyncparams')
822 if (implicitexternsyncparams is not None):
823 for elem in implicitexternsyncparams:
Mark Lobodzinski716a4f92018-11-16 08:54:20 -0700824 paramdecl += '// '
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600825 paramdecl += elem.text
826 paramdecl += ' must be externally synchronized between host accesses\n'
827
828 if (paramdecl == ''):
829 return None
830 else:
831 return paramdecl
832 def beginFile(self, genOpts):
833 OutputGenerator.beginFile(self, genOpts)
Mike Schuchardt09a1c752019-06-20 12:04:38 -0700834
835 # Initialize members that require the tree
836 self.handle_types = GetHandleTypes(self.registry.tree)
837
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700838 # TODO: LUGMAL -- remove this and add our copyright
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600839 # User-supplied prefix text, if any (list of strings)
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700840 write(self.inline_copyright_message, file=self.outFile)
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700841
842 self.header_file = (genOpts.filename == 'thread_safety.h')
843 self.source_file = (genOpts.filename == 'thread_safety.cpp')
844
845 if not self.header_file and not self.source_file:
846 print("Error: Output Filenames have changed, update generator source.\n")
847 sys.exit(1)
848
849 if self.source_file:
850 write('#include "chassis.h"', file=self.outFile)
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700851 write('#include "thread_safety.h"', file=self.outFile)
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600852 self.newline()
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700853 write(self.inline_custom_source_preamble, file=self.outFile)
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700854
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700855
856 def endFile(self):
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700857
858 # Create class definitions
859 counter_class_defs = ''
860 counter_class_instances = ''
861 counter_class_bodies = ''
862
Mike Schuchardtaed5ac32019-06-21 09:03:31 -0700863 for obj in sorted(self.non_dispatchable_types):
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700864 counter_class_defs += ' counter<%s> c_%s;\n' % (obj, obj)
865 if obj in self.object_to_debug_report_type:
866 obj_type = self.object_to_debug_report_type[obj]
867 else:
868 obj_type = 'VK_DEBUG_REPORT_OBJECT_TYPE_UNKNOWN_EXT'
869 counter_class_instances += ' c_%s("%s", %s, &report_data),\n' % (obj, obj, obj_type)
870 counter_class_bodies += 'WRAPPER(%s)\n' % obj
871 if self.header_file:
872 class_def = self.inline_custom_header_preamble.replace('COUNTER_CLASS_DEFINITIONS_TEMPLATE', counter_class_defs)
873 class_def = class_def.replace('COUNTER_CLASS_INSTANCES_TEMPLATE', counter_class_instances[:-2]) # Kill last comma
874 class_def = class_def.replace('COUNTER_CLASS_BODIES_TEMPLATE', counter_class_bodies)
875 write(class_def, file=self.outFile)
876 write('\n'.join(self.sections['command']), file=self.outFile)
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700877 if self.header_file:
878 write('};', file=self.outFile)
879
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600880 # Finish processing in superclass
881 OutputGenerator.endFile(self)
Mark Lobodzinski706e52b2018-12-11 13:21:52 -0700882
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600883 def beginFeature(self, interface, emit):
884 #write('// starting beginFeature', file=self.outFile)
885 # Start processing in superclass
886 OutputGenerator.beginFeature(self, interface, emit)
887 # C-specific
888 # Accumulate includes, defines, types, enums, function pointer typedefs,
889 # end function prototypes separately for this feature. They're only
890 # printed in endFeature().
Mark Lobodzinski62f71562017-10-24 13:41:18 -0600891 self.featureExtraProtect = GetFeatureProtect(interface)
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700892 if (self.featureExtraProtect is not None):
893 self.appendSection('command', '\n#ifdef %s' % self.featureExtraProtect)
894
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600895 #write('// ending beginFeature', file=self.outFile)
896 def endFeature(self):
897 # C-specific
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600898 if (self.emit):
MichaƂ Janiszewski3c3ce9e2018-10-30 23:25:21 +0100899 if (self.featureExtraProtect is not None):
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700900 self.appendSection('command', '#endif // %s' % self.featureExtraProtect)
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600901 # Finish processing in superclass
902 OutputGenerator.endFeature(self)
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600903 #
904 # Append a definition to the specified section
    def appendSection(self, section, text):
        """Append text to the accumulator for *section* (raises KeyError for sections not in ALL_SECTIONS)."""
        self.sections[section].append(text)
907 #
908 # Type generation
Mike Schuchardtf375c7c2017-12-28 11:23:48 -0700909 def genType(self, typeinfo, name, alias):
Mark Lobodzinski796454c2018-12-11 16:10:55 -0700910 OutputGenerator.genType(self, typeinfo, name, alias)
Mike Schuchardtf8690262019-07-11 10:08:33 -0700911 if self.handle_types.IsNonDispatchable(name):
912 self.non_dispatchable_types.add(name)
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600913 #
914 # Struct (e.g. C "struct" type) generation.
915 # This is a special case of the <type> tag where the contents are
916 # interpreted as a set of <member> tags instead of freeform C
917 # C type declarations. The <member> tags are just like <param>
918 # tags - they are a declaration of a struct or union member.
919 # Only simple member declarations are supported (no nested
920 # structs etc.)
Mike Schuchardtf375c7c2017-12-28 11:23:48 -0700921 def genStruct(self, typeinfo, typeName, alias):
922 OutputGenerator.genStruct(self, typeinfo, typeName, alias)
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600923 body = 'typedef ' + typeinfo.elem.get('category') + ' ' + typeName + ' {\n'
924 # paramdecl = self.makeCParamDecl(typeinfo.elem, self.genOpts.alignFuncParam)
925 for member in typeinfo.elem.findall('.//member'):
926 body += self.makeCParamDecl(member, self.genOpts.alignFuncParam)
927 body += ';\n'
928 body += '} ' + typeName + ';\n'
929 self.appendSection('struct', body)
930 #
931 # Group (e.g. C "enum" type) generation.
932 # These are concatenated together with other types.
Mike Schuchardtf375c7c2017-12-28 11:23:48 -0700933 def genGroup(self, groupinfo, groupName, alias):
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600934 pass
935 # Enumerant generation
936 # <enum> tags may specify their values in several ways, but are usually
937 # just integers.
Mike Schuchardtf375c7c2017-12-28 11:23:48 -0700938 def genEnum(self, enuminfo, name, alias):
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600939 pass
940 #
941 # Command generation
Mike Schuchardtf375c7c2017-12-28 11:23:48 -0700942 def genCmd(self, cmdinfo, name, alias):
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600943 # Commands shadowed by interface functions and are not implemented
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600944 special_functions = [
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600945 'vkCreateDevice',
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600946 'vkCreateInstance',
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600947 'vkAllocateCommandBuffers',
948 'vkFreeCommandBuffers',
John Zulaufe28aa342018-10-24 12:18:39 -0600949 'vkResetCommandPool',
950 'vkDestroyCommandPool',
Mark Lobodzinski3bd82ad2017-02-16 11:45:27 -0700951 'vkAllocateDescriptorSets',
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700952 'vkQueuePresentKHR',
Mark Lobodzinski8925c052018-12-18 12:41:15 -0700953 'vkGetSwapchainImagesKHR',
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600954 ]
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700955 if name == 'vkQueuePresentKHR' or (name in special_functions and self.source_file):
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600956 return
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700957
958 if (("DebugMarker" in name or "DebugUtilsObject" in name) and "EXT" in name):
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600959 self.appendSection('command', '// TODO - not wrapping EXT function ' + name)
960 return
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700961
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600962 # Determine first if this function needs to be intercepted
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700963 startthreadsafety = self.makeThreadUseBlock(cmdinfo.elem, 'Start')
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600964 if startthreadsafety is None:
965 return
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700966 finishthreadsafety = self.makeThreadUseBlock(cmdinfo.elem, 'Finish')
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600967
Mike Schuchardtf375c7c2017-12-28 11:23:48 -0700968 OutputGenerator.genCmd(self, cmdinfo, name, alias)
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700969
Mark Lobodzinskiff910992016-10-11 14:29:52 -0600970 # setup common to call wrappers
971 # first parameter is always dispatchable
972 dispatchable_type = cmdinfo.elem.find('param/type').text
973 dispatchable_name = cmdinfo.elem.find('param/name').text
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700974
975 decls = self.makeCDecls(cmdinfo.elem)
976
Mark Lobodzinskicd05c1e2019-01-17 15:33:46 -0700977 result_type = cmdinfo.elem.find('proto/type')
978
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700979 if self.source_file:
980 pre_decl = decls[0][:-1]
981 pre_decl = pre_decl.split("VKAPI_CALL ")[1]
982 pre_decl = 'void ThreadSafety::PreCallRecord' + pre_decl + ' {'
983
984 # PreCallRecord
985 self.appendSection('command', '')
986 self.appendSection('command', pre_decl)
987 self.appendSection('command', " " + "\n ".join(str(startthreadsafety).rstrip().split("\n")))
988 self.appendSection('command', '}')
989
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700990 # PostCallRecord
Mark Lobodzinskicd05c1e2019-01-17 15:33:46 -0700991 post_decl = pre_decl.replace('PreCallRecord', 'PostCallRecord')
992 if result_type.text == 'VkResult':
993 post_decl = post_decl.replace(')', ',\n VkResult result)')
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -0700994 self.appendSection('command', '')
995 self.appendSection('command', post_decl)
996 self.appendSection('command', " " + "\n ".join(str(finishthreadsafety).rstrip().split("\n")))
997 self.appendSection('command', '}')
998
999 if self.header_file:
1000 pre_decl = decls[0][:-1]
1001 pre_decl = pre_decl.split("VKAPI_CALL ")[1]
1002 pre_decl = 'void PreCallRecord' + pre_decl + ';'
1003
1004 # PreCallRecord
1005 self.appendSection('command', '')
1006 self.appendSection('command', pre_decl)
1007
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -07001008 # PostCallRecord
Mark Lobodzinskicd05c1e2019-01-17 15:33:46 -07001009 post_decl = pre_decl.replace('PreCallRecord', 'PostCallRecord')
1010 if result_type.text == 'VkResult':
1011 post_decl = post_decl.replace(')', ',\n VkResult result)')
Mark Lobodzinski1f2ba262018-12-04 14:15:47 -07001012 self.appendSection('command', '')
1013 self.appendSection('command', post_decl)
1014
Mark Lobodzinskiff910992016-10-11 14:29:52 -06001015 #
1016 # override makeProtoName to drop the "vk" prefix
1017 def makeProtoName(self, name, tail):
1018 return self.genOpts.apientry + name[2:] + tail