Tobin Ehlis | 35308dd | 2016-10-31 13:27:36 -0600 | [diff] [blame] | 1 | #!/usr/bin/env python3 |
| 2 | # Copyright (c) 2015-2016 The Khronos Group Inc. |
| 3 | # Copyright (c) 2015-2016 Valve Corporation |
| 4 | # Copyright (c) 2015-2016 LunarG, Inc. |
| 5 | # Copyright (c) 2015-2016 Google Inc. |
| 6 | # |
| 7 | # Licensed under the Apache License, Version 2.0 (the "License"); |
| 8 | # you may not use this file except in compliance with the License. |
| 9 | # You may obtain a copy of the License at |
| 10 | # |
| 11 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 12 | # |
| 13 | # Unless required by applicable law or agreed to in writing, software |
| 14 | # distributed under the License is distributed on an "AS IS" BASIS, |
| 15 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 16 | # See the License for the specific language governing permissions and |
| 17 | # limitations under the License. |
| 18 | # |
| 19 | # Author: Tobin Ehlis <tobine@google.com> |
| 20 | |
| 21 | import argparse |
| 22 | import os |
| 23 | import sys |
| 24 | import platform |
| 25 | |
| 26 | # vk_validation_stats.py overview |
| 27 | # This script is intended to generate statistics on the state of validation code |
| 28 | # based on information parsed from the source files and the database file |
| 29 | # Here's what it currently does: |
| 30 | # 1. Parse vk_validation_error_database.txt to store claimed state of validation checks |
| 31 | # 2. Parse vk_validation_error_messages.h to verify the actual checks in header vs. the |
| 32 | # claimed state of the checks |
| 33 | # 3. Parse source files to identify which checks are implemented and verify that this |
| 34 | # exactly matches the list of checks claimed to be implemented in the database |
| 35 | # 4. Parse test file(s) and verify that reported tests exist |
| 36 | # 5. Report out stats on number of checks, implemented checks, and duplicated checks |
| 37 | # |
Tobin Ehlis | 20e3258 | 2016-12-05 14:50:03 -0700 | [diff] [blame] | 38 | # If a mis-match is found during steps 2, 3, or 4, then the script exits w/ a non-zero error code |
| 39 | # otherwise, the script will exit(0) |
| 40 | # |
Tobin Ehlis | 35308dd | 2016-10-31 13:27:36 -0600 | [diff] [blame] | 41 | # TODO: |
| 42 | # 1. Would also like to report out number of existing checks that don't yet use new, unique enum |
| 43 | # 2. Could use notes to store custom fields (like TODO) and print those out here |
| 44 | # 3. Update test code to check if tests use new, unique enums to check for errors instead of strings |
| 45 | |
# Default input files parsed by this script; all paths are relative to the
# layers source directory where the script is expected to be run.
db_file = 'vk_validation_error_database.txt'
# Layer source files that are scanned for actual uses of VALIDATION_ERROR_* enums
layer_source_files = [
'core_validation.cpp',
'descriptor_sets.cpp',
'parameter_validation.cpp',
'object_tracker.cpp',
'image.cpp',
'swapchain.cpp'
]
# Header that declares the unique error enum (UNIQUE_VALIDATION_ERROR_CODE)
header_file = 'vk_validation_error_messages.h'
# TODO : Don't hardcode linux path format if we want this to run on windows
test_file = '../tests/layer_validation_tests.cpp'
| 58 | |
| 59 | |
class ValidationDatabase:
    """Parse the validation error database file into lookup structures.

    Each non-comment database line has six fields separated by '~^~':
    <enum>~^~<implemented Y|N|?>~^~<testname>~^~<api>~^~<errormsg>~^~<note>
    """
    def __init__(self, filename=None):
        # Default resolved lazily so the module-level db_file constant is only
        # required when no explicit filename is given (backward compatible).
        self.db_file = filename if filename is not None else db_file
        self.delimiter = '~^~'
        self.db_dict = {} # complete dict of all db values per error enum
        # specialized data structs with slices of complete dict
        self.db_implemented_enums = [] # list of all error enums claiming to be implemented in database file
        self.db_enum_to_tests = {} # dict where enum is key to lookup list of tests implementing the enum
    def read(self):
        """Read a database file into internal data structures, format of each line is <enum><implemented Y|N?><testname><api><errormsg><notes>"""
        with open(self.db_file, "r") as infile:
            for line in infile:
                line = line.strip()
                # Skip comments and blank lines
                if line.startswith('#') or '' == line:
                    continue
                db_line = line.split(self.delimiter)
                if len(db_line) != 6:
                    print("ERROR: Bad database line doesn't have 6 elements: %s" % (line))
                    # Fix: skip the malformed line; previously execution fell
                    # through and raised IndexError on db_line[1..5] below.
                    continue
                error_enum = db_line[0]
                implemented = db_line[1]
                testname = db_line[2]
                api = db_line[3]
                error_str = db_line[4]
                note = db_line[5]
                # Read complete database contents into our class var for later use
                self.db_dict[error_enum] = {}
                self.db_dict[error_enum]['check_implemented'] = implemented
                self.db_dict[error_enum]['testname'] = testname
                self.db_dict[error_enum]['api'] = api
                self.db_dict[error_enum]['error_string'] = error_str
                self.db_dict[error_enum]['note'] = note
                # Now build custom data structs
                if 'Y' == implemented:
                    self.db_implemented_enums.append(error_enum)
                # 'unknown'/'none' are placeholder testnames meaning no test exists yet
                if testname.lower() not in ['unknown', 'none']:
                    self.db_enum_to_tests[error_enum] = testname.split(',')
| 109 | |
class ValidationHeader:
    """Parse the unique error enum header and collect declared enum names."""
    def __init__(self, filename=None):
        # Fix: honor the filename parameter; previously the parameter was
        # ignored and the module-level header_file was always used.
        self.filename = filename if filename is not None else header_file
        self.enums = []
    def read(self):
        """Read unique error enum header file into internal data structures"""
        grab_enums = False  # True while inside the enum declaration block
        with open(self.filename, "r") as infile:
            for line in infile:
                line = line.strip()
                if 'enum UNIQUE_VALIDATION_ERROR_CODE {' in line:
                    grab_enums = True
                    continue
                if grab_enums:
                    if 'VALIDATION_ERROR_MAX_ENUM' in line:
                        grab_enums = False
                        break # done
                    elif 'VALIDATION_ERROR_UNDEFINED' in line:
                        # Sentinel value, not a real check
                        continue
                    elif 'VALIDATION_ERROR_' in line:
                        enum = line.split(' = ')[0]
                        self.enums.append(enum)
| 133 | |
class ValidationSource:
    """Scan layer source files for actual uses of VALIDATION_ERROR_* enums."""
    def __init__(self, source_file_list):
        self.source_files = source_file_list
        self.enum_count_dict = {} # dict of enum values to the count of how much they're used, and location of where they're used
        # 1790 is a special case that provides an exception when an extension is enabled. No specific error is flagged, but the exception is handled so add it here
        self.enum_count_dict['VALIDATION_ERROR_01790'] = {}
        self.enum_count_dict['VALIDATION_ERROR_01790']['count'] = 1
        # Fix: 'file_line' must be initialized here, otherwise any real use of
        # this enum in source takes the duplicate branch in parse() and raises
        # KeyError when appending to the missing list.
        self.enum_count_dict['VALIDATION_ERROR_01790']['file_line'] = []
    def parse(self):
        duplicate_checks = 0
        for sf in self.source_files:
            line_num = 0
            with open(sf) as f:
                for line in f:
                    line_num = line_num + 1
                    # Skip comment lines
                    if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
                        continue
                    # Find enums; leading space filters out declarations/brackets
                    if ' VALIDATION_ERROR_' in line:
                        # Need to isolate the validation error enum
                        line_list = line.split()
                        enum_list = []
                        # Renamed loop var from 'str' to avoid shadowing the builtin
                        for token in line_list:
                            if 'VALIDATION_ERROR_' in token and True not in [ignore_str in token for ignore_str in ['[VALIDATION_ERROR_', 'VALIDATION_ERROR_UNDEFINED', 'UNIQUE_VALIDATION_ERROR_CODE']]:
                                enum_list.append(token.strip(',);'))
                        for enum in enum_list:
                            if enum != '':
                                if enum not in self.enum_count_dict:
                                    self.enum_count_dict[enum] = {}
                                    self.enum_count_dict[enum]['count'] = 1
                                    self.enum_count_dict[enum]['file_line'] = []
                                    self.enum_count_dict[enum]['file_line'].append('%s,%d' % (sf, line_num))
                                else:
                                    self.enum_count_dict[enum]['count'] = self.enum_count_dict[enum]['count'] + 1
                                    self.enum_count_dict[enum]['file_line'].append('%s,%d' % (sf, line_num))
                                    duplicate_checks = duplicate_checks + 1
| 177 | |
| 178 | # Class to parse the validation layer test source and store testnames |
| 179 | # TODO: Enhance class to detect use of unique error enums in the test |
class TestParser:
    """Parse layer test source files and collect the set of test names.

    NOTE(review): this class is defined a second time later in this file;
    that later definition shadows this one at import time.
    """
    def __init__(self, test_file_list, test_group_name=None):
        if test_group_name is None:
            # Default resolved here to avoid a shared mutable default argument
            test_group_name = ['VkLayerTest', 'VkPositiveLayerTest', 'VkWsiEnabledLayerTest']
        self.test_files = test_file_list
        self.tests_set = set()
        self.test_trigger_txt_list = []
        for tg in test_group_name:
            self.test_trigger_txt_list.append('TEST_F(%s' % tg)

    # Parse test files into internal data struct
    def parse(self):
        # For each test file, parse test names into set
        grab_next_line = False # handle testname on separate line than wildcard
        for test_file in self.test_files:
            with open(test_file) as tf:
                for line in tf:
                    if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
                        continue

                    if True in [ttt in line for ttt in self.test_trigger_txt_list]:
                        testname = line.split(',')[-1]
                        testname = testname.strip().strip(' {)')
                        if ('' == testname):
                            # Test name is on the following line
                            grab_next_line = True
                            continue
                        self.tests_set.add(testname)
                    if grab_next_line: # test name on its own line
                        grab_next_line = False
                        # Fix: read the name from the current line; the old code
                        # re-stripped the stale empty testname from the trigger
                        # line, so split-line test names were never captured.
                        testname = line.strip().strip(' {)')
                        self.tests_set.add(testname)
| 212 | |
| 213 | # Little helper class for coloring cmd line output |
class bcolors:
    """ANSI escape codes for colored terminal output.

    Codes are only emitted on Linux; on any other platform every attribute
    is the empty string so output stays uncolored.
    """

    def __init__(self):
        colorize = 'Linux' == platform.system()
        self.GREEN = '\033[0;32m' if colorize else ''
        self.RED = '\033[0;31m' if colorize else ''
        self.YELLOW = '\033[1;33m' if colorize else ''
        self.ENDC = '\033[0m' if colorize else ''

    def green(self):
        """Return the escape code that starts green text."""
        return self.GREEN

    def red(self):
        """Return the escape code that starts red text."""
        return self.RED

    def yellow(self):
        """Return the escape code that starts yellow text."""
        return self.YELLOW

    def endc(self):
        """Return the escape code that resets coloring."""
        return self.ENDC
| 238 | |
| 239 | # Class to parse the validation layer test source and store testnames |
class TestParser:
    """Parse layer test source files and collect the set of test names.

    NOTE(review): this is a duplicate of the TestParser defined earlier in
    this file; being later, this definition is the one actually used.
    """
    def __init__(self, test_file_list, test_group_name=None):
        if test_group_name is None:
            # Default resolved here to avoid a shared mutable default argument
            test_group_name = ['VkLayerTest', 'VkPositiveLayerTest', 'VkWsiEnabledLayerTest']
        self.test_files = test_file_list
        self.tests_set = set()
        self.test_trigger_txt_list = []
        for tg in test_group_name:
            self.test_trigger_txt_list.append('TEST_F(%s' % tg)

    # Parse test files into internal data struct
    def parse(self):
        # For each test file, parse test names into set
        grab_next_line = False # handle testname on separate line than wildcard
        for test_file in self.test_files:
            with open(test_file) as tf:
                for line in tf:
                    if True in [line.strip().startswith(comment) for comment in ['//', '/*']]:
                        continue

                    if True in [ttt in line for ttt in self.test_trigger_txt_list]:
                        testname = line.split(',')[-1]
                        testname = testname.strip().strip(' {)')
                        if ('' == testname):
                            # Test name is on the following line
                            grab_next_line = True
                            continue
                        self.tests_set.add(testname)
                    if grab_next_line: # test name on its own line
                        grab_next_line = False
                        # Fix: read the name from the current line; the old code
                        # re-stripped the stale empty testname from the trigger
                        # line, so split-line test names were never captured.
                        testname = line.strip().strip(' {)')
                        self.tests_set.add(testname)
| 272 | |
def main(argv=None):
    """Cross-check validation database, header, layer sources, and tests.

    Prints a statistics report and returns 0 when everything is consistent,
    1 when any mismatch is found (so callers can use it as an exit code).
    """
    result = 0 # Non-zero result indicates an error case
    # parse db
    val_db = ValidationDatabase()
    val_db.read()
    # parse header
    val_header = ValidationHeader()
    val_header.read()
    # Create parser for layer files
    val_source = ValidationSource(layer_source_files)
    val_source.parse()
    # Parse test files
    test_parser = TestParser([test_file, ])
    test_parser.parse()

    # Process stats - Just doing this inline in main, could make a fancy class to handle
    # all the processing of data and then get results from that
    txt_color = bcolors()
    print("Validation Statistics")
    # First give number of checks in db & header and report any discrepancies
    db_enums = len(val_db.db_dict.keys())
    hdr_enums = len(val_header.enums)
    print(" Database file includes %d unique checks" % (db_enums))
    print(" Header file declares %d unique checks" % (hdr_enums))
    # Fix: pop from a shallow copy; previously this aliased val_db.db_dict and
    # the pops below emptied the database object's own dict as a side effect.
    tmp_db_dict = dict(val_db.db_dict)
    db_missing = []
    for enum in val_header.enums:
        if not tmp_db_dict.pop(enum, False):
            db_missing.append(enum)
    if db_enums == hdr_enums and len(db_missing) == 0 and len(tmp_db_dict.keys()) == 0:
        print(txt_color.green() + " Database and Header match, GREAT!" + txt_color.endc())
    else:
        print(txt_color.red() + " Uh oh, Database doesn't match Header :(" + txt_color.endc())
        result = 1
        if len(db_missing) != 0:
            print(txt_color.red() + " The following checks are in header but missing from database:" + txt_color.endc())
            for missing_enum in db_missing:
                print(txt_color.red() + " %s" % (missing_enum) + txt_color.endc())
        if len(tmp_db_dict.keys()) != 0:
            print(txt_color.red() + " The following checks are in database but haven't been declared in the header:" + txt_color.endc())
            for extra_enum in tmp_db_dict:
                print(txt_color.red() + " %s" % (extra_enum) + txt_color.endc())
    # Report out claimed implemented checks vs. found actual implemented checks
    imp_not_found = [] # Checks claimed to implemented in DB file but no source found
    imp_not_claimed = [] # Checks found implemented but not claimed to be in DB
    multiple_uses = False # Flag if any enums are used multiple times
    for db_imp in val_db.db_implemented_enums:
        if db_imp not in val_source.enum_count_dict:
            imp_not_found.append(db_imp)
    for src_enum in val_source.enum_count_dict:
        if val_source.enum_count_dict[src_enum]['count'] > 1:
            multiple_uses = True
        if src_enum not in val_db.db_implemented_enums:
            imp_not_claimed.append(src_enum)
    print(" Database file claims that %d checks (%s) are implemented in source." % (len(val_db.db_implemented_enums), "{0:.0f}%".format(float(len(val_db.db_implemented_enums))/db_enums * 100)))
    if len(imp_not_found) == 0 and len(imp_not_claimed) == 0:
        print(txt_color.green() + " All claimed Database implemented checks have been found in source, and no source checks aren't claimed in Database, GREAT!" + txt_color.endc())
    else:
        result = 1
        print(txt_color.red() + " Uh oh, Database claimed implemented don't match Source :(" + txt_color.endc())
        if len(imp_not_found) != 0:
            print(txt_color.red() + " The following %d checks are claimed to be implemented in Database, but weren't found in source:" % (len(imp_not_found)) + txt_color.endc())
            for not_imp_enum in imp_not_found:
                print(txt_color.red() + " %s" % (not_imp_enum) + txt_color.endc())
        if len(imp_not_claimed) != 0:
            print(txt_color.red() + " The following checks are implemented in source, but not claimed to be in Database:" + txt_color.endc())
            for imp_enum in imp_not_claimed:
                print(txt_color.red() + " %s" % (imp_enum) + txt_color.endc())
    if multiple_uses:
        print(txt_color.yellow() + " Note that some checks are used multiple times. These may be good candidates for new valid usage spec language." + txt_color.endc())
        print(txt_color.yellow() + " Here is a list of each check used multiple times with its number of uses:" + txt_color.endc())
        for enum in val_source.enum_count_dict:
            if val_source.enum_count_dict[enum]['count'] > 1:
                print(txt_color.yellow() + " %s: %d uses in file,line:" % (enum, val_source.enum_count_dict[enum]['count']) + txt_color.endc())
                for file_line in val_source.enum_count_dict[enum]['file_line']:
                    print(txt_color.yellow() + " \t%s" % (file_line) + txt_color.endc())
    # Now check that tests claimed to be implemented are actual test names
    bad_testnames = []
    for enum in val_db.db_enum_to_tests:
        for testname in val_db.db_enum_to_tests[enum]:
            if testname not in test_parser.tests_set:
                bad_testnames.append(testname)
    print(" Database file claims that %d checks have tests written." % len(val_db.db_enum_to_tests))
    if len(bad_testnames) == 0:
        print(txt_color.green() + " All claimed tests have valid names. That's good!" + txt_color.endc())
    else:
        # Fix: close the red escape code; it previously bled into later output
        print(txt_color.red() + " The following testnames in Database appear to be invalid:" + txt_color.endc())
        result = 1
        for bt in bad_testnames:
            print(txt_color.red() + " %s" % (bt) + txt_color.endc())

    return result
Tobin Ehlis | 35308dd | 2016-10-31 13:27:36 -0600 | [diff] [blame] | 365 | |
# Propagate main()'s result as the process exit code (non-zero on mismatch)
if __name__ == "__main__":
    sys.exit(main())