#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Khronos Group Inc.
# Copyright (c) 2015-2016 Valve Corporation
# Copyright (c) 2015-2016 LunarG, Inc.
# Copyright (c) 2015-2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Tobin Ehlis <tobine@google.com>

import argparse
import os
import sys
import platform

# vk_validation_stats.py overview
# This script generates statistics on the state of the validation code, based on
# information parsed from the source files and from the database file.
# Here's what it currently does:
#  1. Parse vk_validation_error_database.txt to store the claimed state of the validation checks
#  2. Parse vk_validation_error_messages.h to verify the actual checks in the header vs. the
#     claimed state of the checks
#  3. Parse the source files to identify which checks are implemented and verify that this
#     exactly matches the list of checks claimed to be implemented in the database
#  4. Parse the test file(s) and verify that the reported tests exist
#  5. Report stats on the number of checks, implemented checks, and duplicated checks
#
# TODO:
#  1. Also report the number of existing checks that don't yet use the new, unique enums
#  2. Use the notes field to store custom fields (like TODO) and print those out here
#  3. Update the test code to verify errors using the new, unique enums instead of error strings
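#
# Usage note (assumption, not enforced by the script): the file paths below are relative,
# so run this script from the directory that contains the database, header, and layer
# source files, e.g.:
#   python3 vk_validation_stats.py
# No command-line arguments are parsed at this time.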

db_file = 'vk_validation_error_database.txt'
layer_source_files = [
    'core_validation.cpp',
    'descriptor_sets.cpp',
    'parameter_validation.cpp',
    'object_tracker.cpp',
    'image.cpp'
]
header_file = 'vk_validation_error_messages.h'
# TODO: Don't hardcode the Linux path format if we want this to run on Windows
test_file = '../tests/layer_validation_tests.cpp'


class ValidationDatabase:
    def __init__(self, filename=db_file):
        self.db_file = filename
        self.delimiter = '~^~'
        self.db_dict = {} # complete dict of all db values per error enum
        # specialized data structs with slices of complete dict
        self.db_implemented_enums = [] # list of all error enums claimed to be implemented in the database file
        self.db_enum_to_tests = {} # dict where enum is the key to look up the list of tests implementing the enum
        #self.src_implemented_enums
    def read(self):
        """Read the database file into internal data structures; each line has the format <enum>~^~<implemented Y|N?>~^~<testname>~^~<api>~^~<errormsg>~^~<note>"""
        #db_dict = {} # This is a simple db of just enum->errormsg, the same as is created from spec
        #max_id = 0
        with open(self.db_file, "r") as infile:
            for line in infile:
                line = line.strip()
                if line.startswith('#') or '' == line:
                    continue
                db_line = line.split(self.delimiter)
                if len(db_line) != 6:
                    print("ERROR: Bad database line doesn't have 6 elements: %s" % (line))
                    continue
                error_enum = db_line[0]
                implemented = db_line[1]
                testname = db_line[2]
                api = db_line[3]
                error_str = db_line[4]
                note = db_line[5]
                # Read complete database contents into our class var for later use
                self.db_dict[error_enum] = {}
                self.db_dict[error_enum]['check_implemented'] = implemented
                self.db_dict[error_enum]['testname'] = testname
                self.db_dict[error_enum]['api'] = api
                self.db_dict[error_enum]['error_string'] = error_str
                self.db_dict[error_enum]['note'] = note
                # Now build custom data structs
                if 'Y' == implemented:
                    self.db_implemented_enums.append(error_enum)
                if testname.lower() not in ['unknown', 'none']:
                    self.db_enum_to_tests[error_enum] = testname.split(',')
                    #if len(self.db_enum_to_tests[error_enum]) > 1:
                    #    print("Found check %s that has multiple tests: %s" % (error_enum, self.db_enum_to_tests[error_enum]))
                    #else:
                    #    print("Check %s has single test: %s" % (error_enum, self.db_enum_to_tests[error_enum]))
                #unique_id = int(db_line[0].split('_')[-1])
                #if unique_id > max_id:
                #    max_id = unique_id
        #print("Found %d total enums in database" % (len(self.db_dict.keys())))
        #print("Found %d enums claiming to be implemented in source" % (len(self.db_implemented_enums)))
        #print("Found %d enums claiming to have tests implemented" % (len(self.db_enum_to_tests.keys())))

class ValidationHeader:
    def __init__(self, filename=header_file):
        self.filename = filename
        self.enums = []
    def read(self):
        """Read the unique error enum header file into internal data structures"""
        grab_enums = False
        with open(self.filename, "r") as infile:
            for line in infile:
                line = line.strip()
                if 'enum UNIQUE_VALIDATION_ERROR_CODE {' in line:
                    grab_enums = True
                    continue
                if grab_enums:
                    if 'VALIDATION_ERROR_MAX_ENUM' in line:
                        grab_enums = False
                        break # done
                    if 'VALIDATION_ERROR_' in line:
                        enum = line.split(' = ')[0]
                        self.enums.append(enum)
        #print("Found %d error enums. First is %s and last is %s." % (len(self.enums), self.enums[0], self.enums[-1]))

class ValidationSource:
    def __init__(self, source_file_list):
        self.source_files = source_file_list
        self.enum_count_dict = {} # dict of enum values to the count of how many times each is used
    def parse(self):
        duplicate_checks = 0
        for sf in self.source_files:
            with open(sf) as f:
                for line in f:
                    if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
                        continue
                    # Find enums
                    #if 'VALIDATION_ERROR_' in line and True not in [ignore in line for ignore in ['[VALIDATION_ERROR_', 'UNIQUE_VALIDATION_ERROR_CODE']]:
                    if 'VALIDATION_ERROR_' in line and 'UNIQUE_VALIDATION_ERROR_CODE' not in line:
                        # Need to isolate the validation error enum
                        #print("Line has check:%s" % (line))
                        line_list = line.split()
                        enum = ''
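                        # Take the first bare VALIDATION_ERROR_* token on the line; tokens containing
                        # '[VALIDATION_ERROR_' (presumably validation_error_map[...] message lookups) are
                        # skipped so that a single check isn't counted twice.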
                        for token in line_list:
                            if 'VALIDATION_ERROR_' in token and '[VALIDATION_ERROR_' not in token:
                                enum = token.strip(',);')
                                break
                        if enum != '':
                            if enum not in self.enum_count_dict:
                                self.enum_count_dict[enum] = 1
                                #print("Found enum %s implemented for first time in file %s" % (enum, sf))
                            else:
                                self.enum_count_dict[enum] = self.enum_count_dict[enum] + 1
                                #print("Found enum %s implemented for %d time in file %s" % (enum, self.enum_count_dict[enum], sf))
                                duplicate_checks = duplicate_checks + 1
                        #else:
                            #print("Didn't find actual check in line:%s" % (line))
        #print("Found %d unique implemented checks and %d are duplicated at least once" % (len(self.enum_count_dict.keys()), duplicate_checks))

# Class to parse the validation layer test source and store testnames
# TODO: Enhance class to detect use of unique error enums in the tests
class TestParser:
    def __init__(self, test_file_list, test_group_name=['VkLayerTest', 'VkPositiveLayerTest', 'VkWsiEnabledLayerTest']):
        self.test_files = test_file_list
        self.tests_set = set()
        self.test_trigger_txt_list = []
        for tg in test_group_name:
            self.test_trigger_txt_list.append('TEST_F(%s' % tg)
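        # Each trigger string matches GoogleTest definitions such as (hypothetical test name):
        #   TEST_F(VkLayerTest, ExampleTestName) {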
        #print('Test trigger text list: %s' % (self.test_trigger_txt_list))

    # Parse test files into internal data struct
    def parse(self):
        # For each test file, parse the test names into a set
        grab_next_line = False # handle a testname on a separate line from the TEST_F trigger
        for test_file in self.test_files:
            with open(test_file) as tf:
                for line in tf:
                    if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
                        continue

                    if True in [ttt in line for ttt in self.test_trigger_txt_list]:
                        #print('Test trigger in line: %s' % (line))
                        testname = line.split(',')[-1]
                        testname = testname.strip().strip(' {)')
                        #print('Inserting test: "%s"' % (testname))
                        if ('' == testname):
                            grab_next_line = True
                            continue
                        self.tests_set.add(testname)
                    if grab_next_line: # test name is on its own line
                        grab_next_line = False
                        testname = line.strip().strip(' {)')
                        self.tests_set.add(testname)

# Little helper class for coloring cmd line output
class bcolors:

    def __init__(self):
        self.GREEN = '\033[0;32m'
        self.RED = '\033[0;31m'
        self.YELLOW = '\033[1;33m'
        self.ENDC = '\033[0m'
        if 'Linux' != platform.system():
            self.GREEN = ''
            self.RED = ''
            self.YELLOW = ''
            self.ENDC = ''

    def green(self):
        return self.GREEN

    def red(self):
        return self.RED

    def yellow(self):
        return self.YELLOW

    def endc(self):
        return self.ENDC


def main(argv=None):
    # parse db
    val_db = ValidationDatabase()
    val_db.read()
    # parse header
    val_header = ValidationHeader()
    val_header.read()
    # Create parser for layer files
    val_source = ValidationSource(layer_source_files)
    val_source.parse()
    # Parse test files
    test_parser = TestParser([test_file, ])
    test_parser.parse()

    # Process stats - Just doing this inline in main, could make a fancy class to handle
    # all the processing of data and then get results from that
    txt_color = bcolors()
    print("Validation Statistics")
    # First give number of checks in db & header and report any discrepancies
    db_enums = len(val_db.db_dict.keys())
    hdr_enums = len(val_header.enums)
    print(" Database file includes %d unique checks" % (db_enums))
    print(" Header file declares %d unique checks" % (hdr_enums))
    tmp_db_dict = val_db.db_dict
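    # Note: this is the same dict object, not a copy, so the pop() calls below also remove
    # entries from val_db.db_dict; that's okay here because db_dict isn't read again after this.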
    db_missing = []
    for enum in val_header.enums:
        if not tmp_db_dict.pop(enum, False):
            db_missing.append(enum)
    if db_enums == hdr_enums and len(db_missing) == 0 and len(tmp_db_dict.keys()) == 0:
        print(txt_color.green() + "  Database and Header match, GREAT!" + txt_color.endc())
    else:
        print(txt_color.red() + "  Uh oh, Database doesn't match Header :(" + txt_color.endc())
        if len(db_missing) != 0:
            print(txt_color.red() + "  The following checks are in the header but missing from the database:" + txt_color.endc())
            for missing_enum in db_missing:
                print(txt_color.red() + "    %s" % (missing_enum) + txt_color.endc())
        if len(tmp_db_dict.keys()) != 0:
            print(txt_color.red() + "  The following checks are in the database but haven't been declared in the header:" + txt_color.endc())
            for extra_enum in tmp_db_dict:
                print(txt_color.red() + "    %s" % (extra_enum) + txt_color.endc())
    # Report out claimed implemented checks vs. found actual implemented checks
    imp_not_found = [] # Checks claimed to be implemented in the DB file but not found in source
    imp_not_claimed = [] # Checks found implemented in source but not claimed to be implemented in the DB
    multiple_uses = False # Flag if any enums are used multiple times
    for db_imp in val_db.db_implemented_enums:
        if db_imp not in val_source.enum_count_dict:
            imp_not_found.append(db_imp)
    for src_enum in val_source.enum_count_dict:
        if val_source.enum_count_dict[src_enum] > 1:
            multiple_uses = True
        if src_enum not in val_db.db_implemented_enums:
            imp_not_claimed.append(src_enum)
    print(" Database file claims that %d checks (%s) are implemented in source." % (len(val_db.db_implemented_enums), "{0:.0f}%".format(float(len(val_db.db_implemented_enums))/db_enums * 100)))
    if len(imp_not_found) == 0 and len(imp_not_claimed) == 0:
        print(txt_color.green() + "  All checks claimed to be implemented in the Database were found in source, and no implemented source checks are missing from the Database, GREAT!" + txt_color.endc())
    else:
        print(txt_color.red() + "  Uh oh, the Database's claimed implemented checks don't match the Source :(" + txt_color.endc())
        if len(imp_not_found) != 0:
            print(txt_color.red() + "  The following %d checks are claimed to be implemented in the Database, but weren't found in source:" % (len(imp_not_found)) + txt_color.endc())
            for not_imp_enum in imp_not_found:
                print(txt_color.red() + "    %s" % (not_imp_enum) + txt_color.endc())
        if len(imp_not_claimed) != 0:
            print(txt_color.red() + "  The following checks are implemented in source, but not claimed to be implemented in the Database:" + txt_color.endc())
            for imp_enum in imp_not_claimed:
                print(txt_color.red() + "    %s" % (imp_enum) + txt_color.endc())
    if multiple_uses:
        print(txt_color.yellow() + "  Note that some checks are used multiple times. These may be good candidates for new valid usage spec language." + txt_color.endc())
        print(txt_color.yellow() + "  Here is a list of each check used multiple times with its number of uses:" + txt_color.endc())
        for enum in val_source.enum_count_dict:
            if val_source.enum_count_dict[enum] > 1:
                print(txt_color.yellow() + "    %s: %d" % (enum, val_source.enum_count_dict[enum]) + txt_color.endc())
    # Now check that tests claimed to be implemented are actual test names
    bad_testnames = []
    for enum in val_db.db_enum_to_tests:
        for testname in val_db.db_enum_to_tests[enum]:
            if testname not in test_parser.tests_set:
                bad_testnames.append(testname)
    print(" Database file claims that %d checks have tests written." % len(val_db.db_enum_to_tests))
    if len(bad_testnames) == 0:
        print(txt_color.green() + "  All claimed tests have valid names. That's good!" + txt_color.endc())
    else:
        print(txt_color.red() + "  The following testnames in the Database appear to be invalid:" + txt_color.endc())
        for bt in bad_testnames:
            print(txt_color.red() + "    %s" % (bt) + txt_color.endc())

    return 0

if __name__ == "__main__":
    sys.exit(main())