| William Henning | c540b21 | 2018-05-17 13:15:17 -0600 | [diff] [blame] | 1 | #!/usr/bin/python3 |
| 2 | # |
| 3 | # Copyright (c) 2018 Google Inc. |
| 4 | # |
| 5 | # Licensed under the Apache License, Version 2.0 (the "License"); |
| 6 | # you may not use this file except in compliance with the License. |
| 7 | # You may obtain a copy of the License at |
| 8 | # |
| 9 | # http://www.apache.org/licenses/LICENSE-2.0 |
| 10 | # |
| 11 | # Unless required by applicable law or agreed to in writing, software |
| 12 | # distributed under the License is distributed on an "AS IS" BASIS, |
| 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | # See the License for the specific language governing permissions and |
| 15 | # limitations under the License. |
| 16 | # |
| 17 | # Author: William Henning <whenning@google.com> |
| 18 | # |
# This script parses the validation layers test continuous integration output
# and reports the number of tests that passed, failed, output unexpected errors,
# or were skipped. As such, the script is only designed to parse the output
# generated by the existing CI implementation.
| 23 | # |
| 24 | # usage: |
| 25 | # for profile in tests/device_profiles/*.json; do echo Testing with |
| 26 | # profile $profile; VK_LAYER_PATH=DEVSIM_AND_VALIDATION_PATHS |
| 27 | # VK_DEVSIM_FILE=$profile VK_ICD_FILENAMES=MOCK_ICD_PATH |
| 28 | # ./build/tests/vk_layer_validation_tests --devsim; done |
| 29 | # | python3 parse_test_results.py [--fail_on_skip] [--fail_on_unexpected] |
| 30 | # |
| 31 | # --fail_on_skip causes the script to exit with a non-zero exit code if a test |
| 32 | # didn't run on any device profile |
| 33 | # |
| 34 | # --fail_on_unexpected causes the script to exit with a non-zero exit code if |
| 35 | # a test printed unexpected errors |
| 36 | # |
| 37 | |
| 38 | import argparse |
| 39 | import re |
| 40 | import sys |
| 41 | from collections import defaultdict |
| 42 | |
class OutputStats(object):
    """Accumulates per-test, per-profile results parsed from validation CI output.

    Feed each line of the CI log to match(); call print_summary() once at
    end of input to print counts and learn whether the run should fail CI.
    """

    def __init__(self):
        # Device profile currently under test, taken from the
        # "Testing with profile ..." banner printed by the CI driver loop.
        self.current_profile = ""
        # Name of the gtest case currently running, or "" between tests.
        self.current_test = ""
        # Raw output captured for the running test, dumped if the test dies.
        self.current_test_output = ""
        # test_results[test_name][profile] -> "pass" | "fail" | "skip"
        self.test_results = defaultdict(defaultdict)
        # unexpected_errors[test_name][profile] -> "true" when the test
        # printed a line starting with "Unexpected: ".
        self.unexpected_errors = defaultdict(defaultdict)

    def match(self, line):
        """Dispatch one line of CI output to every pattern matcher."""
        self.new_profile_match(line)
        self.test_suite_end_match(line)
        self.start_test_match(line)
        # Capture the line as part of the running test's output (the RUN line
        # itself is included because start_test_match runs first).
        if self.current_test != "":
            self.current_test_output += line
        self.skip_test_match(line)
        self.pass_test_match(line)
        self.fail_test_match(line)
        self.unexpected_error_match(line)

    def print_summary(self, skip_is_failure, unexpected_is_failure):
        """Print per-test summaries and totals.

        Args:
            skip_is_failure: treat a test that ran on no profile as a failure.
            unexpected_is_failure: treat unexpected error output as a failure.

        Returns:
            True if the run should be reported as a CI failure.
        """
        if self.current_test != "":
            # Input ended mid-test; the test binary most likely crashed.
            self.test_died()

        passed_tests = 0
        skipped_tests = 0
        failed_tests = 0
        unexpected_error_tests = 0
        did_fail = False

        for test_name, results in self.test_results.items():
            skipped_profiles = 0
            passed_profiles = 0
            failed_profiles = 0
            unexpected_error_profiles = 0
            for profile, result in results.items():
                if result == "pass":
                    passed_profiles += 1
                if result == "fail":
                    failed_profiles += 1
                if result == "skip":
                    skipped_profiles += 1
                if self.unexpected_errors.get(test_name, {}).get(profile, "") == "true":
                    unexpected_error_profiles += 1
            # A test fails overall if it failed on any profile; it counts as
            # skipped only if every profile skipped it.
            if failed_profiles != 0:
                print("TEST FAILED:", test_name)
                failed_tests += 1
            elif skipped_profiles == len(results):
                print("TEST SKIPPED ALL DEVICES:", test_name)
                skipped_tests += 1
            else:
                passed_tests += 1
            if unexpected_error_profiles != 0:
                print("UNEXPECTED ERRORS:", test_name)
                unexpected_error_tests += 1
        num_tests = len(self.test_results)
        print("PASSED: ", passed_tests, "/", num_tests, " tests")
        if skipped_tests != 0:
            did_fail |= skip_is_failure
            print("NEVER RAN: ", skipped_tests, "/", num_tests, " tests")
        if failed_tests != 0:
            did_fail = True
            print("FAILED: ", failed_tests, "/", num_tests, "tests")
        if unexpected_error_tests != 0:
            did_fail |= unexpected_is_failure
            # Typo fix: was "UNEXPECTED OUPUT".
            print("UNEXPECTED OUTPUT: ", unexpected_error_tests, "/", num_tests, "tests")
        return did_fail

    def new_profile_match(self, line):
        """Record the active device profile from a 'Testing with profile' banner."""
        # Run the regex once and reuse the match (was searched twice).
        profile_match = re.search(r'Testing with profile .*/(.*)', line)
        if profile_match is not None:
            self.current_profile = profile_match.group(1)

    def test_suite_end_match(self, line):
        """Detect a gtest suite banner line such as '[----------]'."""
        if re.search(r'\[-*\]', line) is not None:
            if self.current_test != "":
                # Here we see a message that starts [----------] before another test
                # finished running. This should mean that that other test died.
                self.test_died()

    def start_test_match(self, line):
        """Record the start of a gtest case from a '[ RUN      ]' line."""
        if re.search(r'\[ RUN\s*\]', line) is not None:
            # This parser doesn't handle the case where one test's start comes
            # between another test's start and result.
            assert self.current_test == ""
            self.current_test = re.search(r'] (.*)', line).group(1)
            self.current_test_output = ""

    def skip_test_match(self, line):
        """Mark the running test as skipped on the current profile."""
        # Guard on current_test (matching fail_test_match) so a stray
        # "TEST SKIPPED" outside any test cannot create a phantom "" entry.
        if re.search(r'TEST SKIPPED', line) is not None and self.current_test != "":
            self.test_results[self.current_test][self.current_profile] = "skip"

    def pass_test_match(self, line):
        """Mark the running test as passed, unless it was already skipped."""
        if re.search(r'\[\s*OK \]', line) is not None:
            # If gtest says the test passed, check if it was skipped before marking it passed
            if self.test_results.get(self.current_test, {}).get(self.current_profile, "") != "skip":
                self.test_results[self.current_test][self.current_profile] = "pass"
            self.current_test = ""

    def fail_test_match(self, line):
        """Mark the running test as failed on the current profile."""
        if re.search(r'\[\s*FAILED\s*\]', line) is not None and self.current_test != "":
            self.test_results[self.current_test][self.current_profile] = "fail"
            self.current_test = ""

    def unexpected_error_match(self, line):
        """Record that the running test emitted an 'Unexpected: ' error line."""
        if re.search(r'^Unexpected: ', line) is not None and self.current_test != "":
            self.unexpected_errors[self.current_test][self.current_profile] = "true"

    def test_died(self):
        """Report a crashed test's captured output and abort with exit code 1."""
        print("A test likely crashed. Testing is being aborted.")
        print("Final test output: ")
        print(self.current_test_output)
        sys.exit(1)
| 155 | |
def main():
    """Parse validation layer test output from stdin and print a summary.

    Exits with status 1 when the parsed results constitute a CI failure
    (any test failed, or --fail_on_skip / --fail_on_unexpected conditions hit).
    """
    parser = argparse.ArgumentParser(description='Parse the output from validation layer tests.')
    parser.add_argument('--fail_on_skip', action='store_true', help="Makes the script exit with a "
                        "non-zero exit code if a test didn't run on any device profile.")
    parser.add_argument('--fail_on_unexpected', action='store_true', help="Makes the script exit "
                        "with a non-zero exit code if a test causes unexpected errors.")
    args = parser.parse_args()

    stats = OutputStats()
    for line in sys.stdin:
        stats.match(line)
    failed = stats.print_summary(args.fail_on_skip, args.fail_on_unexpected)
    # Idiom fix: was `if failed == True:`.
    if failed:
        print("\nFAILED CI")
        sys.exit(1)
| 171 | |
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()