#!/usr/bin/python2.6
#
# Copyright 2010 Google Inc. All Rights Reserved.

"""Script to compare a baseline results file to new results files."""

__author__ = "raymes@google.com (Raymes Khoury)"

import sys


PASS = "pass"
FAIL = "fail"
NOT_EXECUTED = "not executed"


def Usage():
  """Prints a usage message and exits with an error."""
  print ("Usage: %s baseline_results new_results1 new_results2 ..."
         % sys.argv[0])
  sys.exit(1)


def parse_results(results_filenames):
  """Reads results files into a list of [test_name, status] rows.

  Blank lines are skipped; every other line is stripped and split on tabs.
  """
  results = []
  for filename in results_filenames:
    results_file = open(filename, "rb")
    for line in results_file:
      if line.strip() != "":
        results.append(line.strip().split("\t"))
    results_file.close()
  return results
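
# For illustration (hypothetical contents): a file holding the two lines
# "test_a\tpass" and "test_b\tfail" parses to
# [["test_a", "pass"], ["test_b", "fail"]].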


def ParseResults(baseline_file, new_result_files):
  """Pairs up baseline and new results by test name.

  Returns (baseline_results, new_results, test_status, regressions), where
  test_status maps each test name to an (actual, expected) status pair and
  regressions lists every test whose actual status differs from its expected
  one.
  """
  baseline_results = parse_results([baseline_file])
  new_results = parse_results(new_result_files)

  test_status = {}

  # Record the actual status of each executed test; the expected status is
  # filled in from the baseline below.
  for new_result in new_results:
    test_status[new_result[0]] = (new_result[1], NOT_EXECUTED)

  for baseline_result in baseline_results:
    if baseline_result[0] in test_status:
      # The status pairs are immutable tuples, so rebuild the pair with the
      # baseline status in the "expected" slot.
      actual = test_status[baseline_result[0]][0]
      test_status[baseline_result[0]] = (actual, baseline_result[1])
    else:
      test_status[baseline_result[0]] = (NOT_EXECUTED, baseline_result[1])

  regressions = []
  for result in test_status.keys():
    if test_status[result][0] != test_status[result][1]:
      regressions.append(result)

  return (baseline_results, new_results, test_status, regressions)
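
# Worked example (hypothetical data): with a baseline of "test_a\tpass" and
# "test_b\tpass" and a new run of "test_a\tpass" and "test_b\tfail",
# test_status ends up as {"test_a": ("pass", "pass"),
# "test_b": ("fail", "pass")} and regressions as ["test_b"].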


def _CountPassFail(results):
  """Counts passes and failures in a list of [test_name, status] rows."""
  num_passes = 0
  num_failures = 0
  for result in results:
    if result[1] == PASS:
      num_passes += 1
    else:
      num_failures += 1
  return (num_passes, num_failures)


def GenerateResultsStatistics(baseline_file, new_result_files):
  """Returns (num_tests_executed, num_passes, num_failures, num_regressions)."""
  (baseline_results, new_results,
   test_status, regressions) = ParseResults(baseline_file, new_result_files)

  num_tests_executed = len(new_results)
  num_regressions = len(regressions)
  (num_passes, num_failures) = _CountPassFail(new_results)

  return (num_tests_executed, num_passes, num_failures, num_regressions)
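
# Continuing the hypothetical two-test example, the statistics come out as
# (2, 1, 1, 1): two tests executed, one pass, one failure, one regression.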


def GenerateResultsReport(baseline_file, new_result_files):
  """Builds a plain-text report: summary counts, regressions, then all tests."""
  (baseline_results, new_results,
   test_status, regressions) = ParseResults(baseline_file, new_result_files)

  num_tests_executed = len(new_results)
  num_regressions = len(regressions)
  (num_passes, num_failures) = _CountPassFail(new_results)

  report = ""
  report += "Test summary\n"
  report += "Tests executed: " + str(num_tests_executed) + "\n"
  report += "Passes: " + str(num_passes) + "\n"
  report += "Failures: " + str(num_failures) + "\n"
  report += "Regressions: " + str(num_regressions) + "\n\n"
  report += "-------------------------\n\n"
  report += "Regressions\n"
  report += "Test name\t\tExpected result\t\tActual result\n"
  for regression in regressions:
    report += "%s\t\t%s\t\t%s\n" % (regression, test_status[regression][1],
                                    test_status[regression][0])
  report += "\n"
  report += "-------------------------\n\n"
  report += "All tests\n"
  report += "Test name\t\tExpected result\t\tActual result\n"
  for result in test_status.keys():
    report += "%s\t\t%s\t\t%s\n" % (result, test_status[result][1],
                                    test_status[result][0])
  return report
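
# For the hypothetical two-test run, the report starts:
#
#   Test summary
#   Tests executed: 2
#   Passes: 1
#   Failures: 1
#   Regressions: 1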


def Main(argv):
  # A baseline file plus at least one new results file is required.
  if len(argv) < 3:
    Usage()

  print GenerateResultsReport(argv[1], argv[2:])


if __name__ == "__main__":
  Main(sys.argv)