#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Khronos Group Inc.
# Copyright (c) 2015-2016 Valve Corporation
# Copyright (c) 2015-2016 LunarG, Inc.
# Copyright (c) 2015-2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tobin Ehlis <tobine@google.com>

import argparse
import os
import sys
import platform

# vk_validation_stats.py overview
# This script generates statistics on the state of validation code based on
# information parsed from the source files and the database file.
# Here's what it currently does:
#  1. Parse vk_validation_error_database.txt to store the claimed state of validation checks
#  2. Parse vk_validation_error_messages.h to verify the actual checks in the header vs. the
#     claimed state of the checks
#  3. Parse source files to identify which checks are implemented and verify that this
#     exactly matches the list of checks claimed to be implemented in the database
#  4. Parse test file(s) and verify that reported tests exist
#  5. Report stats on the number of checks, implemented checks, and duplicated checks
#
# TODO:
#  1. Report the number of existing checks that don't yet use the new, unique enums
#  2. Use the notes field to store custom fields (like TODO) and print those out here
#  3. Update test code to check for errors using the new, unique enums instead of strings

db_file = 'vk_validation_error_database.txt'
layer_source_files = [
    'core_validation.cpp',
    'descriptor_sets.cpp',
    'parameter_validation.cpp',
    'object_tracker.cpp',
]
header_file = 'vk_validation_error_messages.h'
# TODO : Don't hardcode linux path format if we want this to run on windows
test_file = '../tests/layer_validation_tests.cpp'
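# NOTE: the paths above are all relative, so the script is presumably meant to be run
# from the layer source directory that contains these files, e.g.:
#   python3 vk_validation_stats.py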


class ValidationDatabase:
    def __init__(self, filename=db_file):
        self.db_file = filename
        self.delimiter = '~^~'
        self.db_dict = {} # complete dict of all db values per error enum
        # specialized data structs with slices of complete dict
        self.db_implemented_enums = [] # list of all error enums claiming to be implemented in database file
        self.db_enum_to_tests = {} # dict where enum is key to lookup list of tests implementing the enum
        #self.src_implemented_enums
    def read(self):
        """Read a database file into internal data structures; each line has the format <enum>~^~<implemented Y|N?>~^~<testname>~^~<api>~^~<errormsg>~^~<notes>"""
        #db_dict = {} # This is a simple db of just enum->errormsg, the same as is created from spec
        #max_id = 0
        with open(self.db_file, "r") as infile:
            for line in infile:
                line = line.strip()
                if line.startswith('#') or '' == line:
                    continue
                db_line = line.split(self.delimiter)
                if len(db_line) != 6:
                    print("ERROR: Bad database line doesn't have 6 elements: %s" % (line))
                error_enum = db_line[0]
                implemented = db_line[1]
                testname = db_line[2]
                api = db_line[3]
                error_str = db_line[4]
                note = db_line[5]
                # Read complete database contents into our class var for later use
                self.db_dict[error_enum] = {}
                self.db_dict[error_enum]['check_implemented'] = implemented
                self.db_dict[error_enum]['testname'] = testname
                self.db_dict[error_enum]['api'] = api
                self.db_dict[error_enum]['error_string'] = error_str
                self.db_dict[error_enum]['note'] = note
                # Now build custom data structs
                if 'Y' == implemented:
                    self.db_implemented_enums.append(error_enum)
                if testname.lower() not in ['unknown', 'none']:
                    self.db_enum_to_tests[error_enum] = testname.split(',')
                    #if len(self.db_enum_to_tests[error_enum]) > 1:
                    #    print "Found check %s that has multiple tests: %s" % (error_enum, self.db_enum_to_tests[error_enum])
                    #else:
                    #    print "Check %s has single test: %s" % (error_enum, self.db_enum_to_tests[error_enum])
                #unique_id = int(db_line[0].split('_')[-1])
                #if unique_id > max_id:
                #    max_id = unique_id
        #print "Found %d total enums in database" % (len(self.db_dict.keys()))
        #print "Found %d enums claiming to be implemented in source" % (len(self.db_implemented_enums))
        #print "Found %d enums claiming to have tests implemented" % (len(self.db_enum_to_tests.keys()))

class ValidationHeader:
    def __init__(self, filename=header_file):
        self.filename = filename
        self.enums = []
    def read(self):
        """Read the unique error enum header file into internal data structures"""
        grab_enums = False
        with open(self.filename, "r") as infile:
            for line in infile:
                line = line.strip()
                if 'enum UNIQUE_VALIDATION_ERROR_CODE {' in line:
                    grab_enums = True
                    continue
                if grab_enums:
                    if 'VALIDATION_ERROR_MAX_ENUM' in line:
                        grab_enums = False
                        break # done
                    if 'VALIDATION_ERROR_' in line:
                        enum = line.split(' = ')[0]
                        self.enums.append(enum)
        #print "Found %d error enums. First is %s and last is %s." % (len(self.enums), self.enums[0], self.enums[-1])

class ValidationSource:
    def __init__(self, source_file_list):
        self.source_files = source_file_list
        self.enum_count_dict = {} # dict mapping enum values to the count of how many times each is used
    def parse(self):
        duplicate_checks = 0
        for sf in self.source_files:
            with open(sf) as f:
                for line in f:
                    if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
                        continue
                    # Find enums
                    #if 'VALIDATION_ERROR_' in line and True not in [ignore in line for ignore in ['[VALIDATION_ERROR_', 'UNIQUE_VALIDATION_ERROR_CODE']]:
                    if 'VALIDATION_ERROR_' in line and 'UNIQUE_VALIDATION_ERROR_CODE' not in line:
                        # Need to isolate the validation error enum
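                        # The enum typically appears as an argument in a call, so trailing
                        # ',', ')', or ';' characters are stripped to isolate the bare enum name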
                        #print("Line has check:%s" % (line))
                        line_list = line.split()
                        enum = ''
                        for token in line_list:
                            if 'VALIDATION_ERROR_' in token and '[VALIDATION_ERROR_' not in token:
                                enum = token.strip(',);')
                                break
                        if enum != '':
                            if enum not in self.enum_count_dict:
                                self.enum_count_dict[enum] = 1
                                #print "Found enum %s implemented for first time in file %s" % (enum, sf)
                            else:
                                self.enum_count_dict[enum] = self.enum_count_dict[enum] + 1
                                #print "Found enum %s implemented for %d time in file %s" % (enum, self.enum_count_dict[enum], sf)
                                duplicate_checks = duplicate_checks + 1
                        #else:
                            #print("Didn't find actual check in line:%s" % (line))
        #print "Found %d unique implemented checks and %d are duplicated at least once" % (len(self.enum_count_dict.keys()), duplicate_checks)

# Class to parse the validation layer test source and store testnames
# TODO: Enhance class to detect use of unique error enums in the test
class TestParser:
    def __init__(self, test_file_list, test_group_name=['VkLayerTest', 'VkPositiveLayerTest', 'VkWsiEnabledLayerTest']):
        self.test_files = test_file_list
        self.tests_set = set()
        self.test_trigger_txt_list = []
        for tg in test_group_name:
            self.test_trigger_txt_list.append('TEST_F(%s' % tg)
        #print('Test trigger test list: %s' % (self.test_trigger_txt_list))

    # Parse test files into internal data struct
    def parse(self):
        # For each test file, parse test names into set
        grab_next_line = False # handle a testname on a separate line from the TEST_F wildcard
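        # Test declarations are gtest TEST_F macros; the test name may be on the same line
        # as the fixture or wrapped onto the following line (names here are illustrative):
        #   TEST_F(VkLayerTest, SomeTestName) {
        #   TEST_F(VkLayerTest,
        #          SomeOtherTestName) {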
        for test_file in self.test_files:
            with open(test_file) as tf:
                for line in tf:
                    if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
                        continue

                    if True in [ttt in line for ttt in self.test_trigger_txt_list]:
                        #print('Test wildcard in line: %s' % (line))
                        testname = line.split(',')[-1]
                        testname = testname.strip().strip(' {)')
                        #print('Inserting test: "%s"' % (testname))
                        if ('' == testname):
                            grab_next_line = True
                            continue
                        self.tests_set.add(testname)
                    if grab_next_line: # test name on its own line
                        grab_next_line = False
                        testname = line.strip().strip(' {)')
                        self.tests_set.add(testname)

# Little helper class for coloring cmd line output
class bcolors:

    def __init__(self):
        self.GREEN = '\033[0;32m'
        self.RED = '\033[0;31m'
        self.YELLOW = '\033[1;33m'
        self.ENDC = '\033[0m'
        if 'Linux' != platform.system():
            self.GREEN = ''
            self.RED = ''
            self.YELLOW = ''
            self.ENDC = ''

    def green(self):
        return self.GREEN

    def red(self):
        return self.RED

    def yellow(self):
        return self.YELLOW

    def endc(self):
        return self.ENDC

def main(argv=None):
    # parse db
    val_db = ValidationDatabase()
    val_db.read()
    # parse header
    val_header = ValidationHeader()
    val_header.read()
    # Create parser for layer files
    val_source = ValidationSource(layer_source_files)
    val_source.parse()
    # Parse test files
    test_parser = TestParser([test_file, ])
    test_parser.parse()

    # Process stats - Just doing this inline in main, could make a fancy class to handle
    # all the processing of data and then get results from that
    txt_color = bcolors()
    print("Validation Statistics")
    # First give number of checks in db & header and report any discrepancies
    db_enums = len(val_db.db_dict.keys())
    hdr_enums = len(val_header.enums)
    print(" Database file includes %d unique checks" % (db_enums))
    print(" Header file declares %d unique checks" % (hdr_enums))
    tmp_db_dict = dict(val_db.db_dict)
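    # Cross-check by popping each header enum out of the copy: enums that fail to pop are
    # missing from the database, and anything left over afterwards is in the database only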
    db_missing = []
    for enum in val_header.enums:
        if not tmp_db_dict.pop(enum, False):
            db_missing.append(enum)
    if db_enums == hdr_enums and len(db_missing) == 0 and len(tmp_db_dict.keys()) == 0:
        print(txt_color.green() + " Database and Header match, GREAT!" + txt_color.endc())
    else:
        print(txt_color.red() + " Uh oh, Database doesn't match Header :(" + txt_color.endc())
        if len(db_missing) != 0:
            print(txt_color.red() + " The following checks are in header but missing from database:" + txt_color.endc())
            for missing_enum in db_missing:
                print(txt_color.red() + " %s" % (missing_enum) + txt_color.endc())
        if len(tmp_db_dict.keys()) != 0:
            print(txt_color.red() + " The following checks are in database but haven't been declared in the header:" + txt_color.endc())
            for extra_enum in tmp_db_dict:
                print(txt_color.red() + " %s" % (extra_enum) + txt_color.endc())
    # Report out claimed implemented checks vs. found actual implemented checks
    imp_not_found = [] # Checks claimed to be implemented in DB file but not found in source
    imp_not_claimed = [] # Checks found implemented but not claimed to be in DB
    multiple_uses = False # Flag if any enums are used multiple times
    for db_imp in val_db.db_implemented_enums:
        if db_imp not in val_source.enum_count_dict:
            imp_not_found.append(db_imp)
    for src_enum in val_source.enum_count_dict:
        if val_source.enum_count_dict[src_enum] > 1:
            multiple_uses = True
        if src_enum not in val_db.db_implemented_enums:
            imp_not_claimed.append(src_enum)
    print(" Database file claims that %d checks (%s) are implemented in source." % (len(val_db.db_implemented_enums), "{0:.0f}%".format(float(len(val_db.db_implemented_enums))/db_enums * 100)))
    if len(imp_not_found) == 0 and len(imp_not_claimed) == 0:
        print(txt_color.green() + " All checks claimed to be implemented in Database were found in source, and all source checks are claimed in Database, GREAT!" + txt_color.endc())
    else:
        print(txt_color.red() + " Uh oh, checks claimed to be implemented in Database don't match Source :(" + txt_color.endc())
        if len(imp_not_found) != 0:
            print(txt_color.red() + " The following checks are claimed to be implemented in Database, but weren't found in source:" + txt_color.endc())
            for not_imp_enum in imp_not_found:
                print(txt_color.red() + " %s" % (not_imp_enum) + txt_color.endc())
        if len(imp_not_claimed) != 0:
            print(txt_color.red() + " The following checks are implemented in source, but not claimed to be in Database:" + txt_color.endc())
            for imp_enum in imp_not_claimed:
                print(txt_color.red() + " %s" % (imp_enum) + txt_color.endc())
    if multiple_uses:
        print(txt_color.yellow() + " Note that some checks are used multiple times. These may be good candidates for new valid usage spec language." + txt_color.endc())
        print(txt_color.yellow() + " Here is a list of each check used multiple times with its number of uses:" + txt_color.endc())
        for enum in val_source.enum_count_dict:
            if val_source.enum_count_dict[enum] > 1:
                print(txt_color.yellow() + " %s: %d" % (enum, val_source.enum_count_dict[enum]) + txt_color.endc())
    # Now check that tests claimed to be implemented are actual test names
    bad_testnames = []
    for enum in val_db.db_enum_to_tests:
        for testname in val_db.db_enum_to_tests[enum]:
            if testname not in test_parser.tests_set:
                bad_testnames.append(testname)
    print(" Database file claims that %d checks have tests written." % len(val_db.db_enum_to_tests))
    if len(bad_testnames) == 0:
        print(txt_color.green() + " All claimed tests have valid names. That's good!" + txt_color.endc())
    else:
        print(txt_color.red() + " The following testnames in Database appear to be invalid:" + txt_color.endc())
        for bt in bad_testnames:
            print(txt_color.red() + " %s" % (bt) + txt_color.endc())

    return 0

if __name__ == "__main__":
    sys.exit(main())