# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

#
# Most of this file was ported over from Blink's
# webkitpy/layout_tests/layout_package/json_results_generator_unittest.py
#
import unittest
import json

from pylib.results.flakiness_dashboard import json_results_generator

| 16 | class JSONGeneratorTest(unittest.TestCase): |
| 17 | |
| 18 | def setUp(self): |
| 19 | self.builder_name = 'DUMMY_BUILDER_NAME' |
| 20 | self.build_name = 'DUMMY_BUILD_NAME' |
| 21 | self.build_number = 'DUMMY_BUILDER_NUMBER' |
| 22 | |
| 23 | # For archived results. |
| 24 | self._json = None |
| 25 | self._num_runs = 0 |
| 26 | self._tests_set = set([]) |
| 27 | self._test_timings = {} |
| 28 | self._failed_count_map = {} |
| 29 | |
| 30 | self._PASS_count = 0 |
| 31 | self._DISABLED_count = 0 |
| 32 | self._FLAKY_count = 0 |
| 33 | self._FAILS_count = 0 |
| 34 | self._fixable_count = 0 |
| 35 | |
| 36 | self._orig_write_json = json_results_generator.WriteJSON |
| 37 | |
| 38 | # unused arguments ... pylint: disable=W0613 |
| 39 | def _WriteJSONStub(json_object, file_path, callback=None): |
| 40 | pass |
| 41 | |
| 42 | json_results_generator.WriteJSON = _WriteJSONStub |
| 43 | |
| 44 | def tearDown(self): |
| 45 | json_results_generator.WriteJSON = self._orig_write_json |
| 46 | |
| 47 | def _TestJSONGeneration(self, passed_tests_list, failed_tests_list): |
| 48 | tests_set = set(passed_tests_list) | set(failed_tests_list) |
| 49 | |
| 50 | DISABLED_tests = set([t for t in tests_set |
| 51 | if t.startswith('DISABLED_')]) |
| 52 | FLAKY_tests = set([t for t in tests_set |
| 53 | if t.startswith('FLAKY_')]) |
| 54 | FAILS_tests = set([t for t in tests_set |
| 55 | if t.startswith('FAILS_')]) |
| 56 | PASS_tests = tests_set - (DISABLED_tests | FLAKY_tests | FAILS_tests) |
| 57 | |
| 58 | failed_tests = set(failed_tests_list) - DISABLED_tests |
| 59 | failed_count_map = dict([(t, 1) for t in failed_tests]) |
| 60 | |
| 61 | test_timings = {} |
| 62 | i = 0 |
| 63 | for test in tests_set: |
| 64 | test_timings[test] = float(self._num_runs * 100 + i) |
| 65 | i += 1 |
| 66 | |
| 67 | test_results_map = dict() |
| 68 | for test in tests_set: |
| 69 | test_results_map[test] = json_results_generator.TestResult( |
| 70 | test, failed=(test in failed_tests), |
| 71 | elapsed_time=test_timings[test]) |
| 72 | |
| 73 | generator = json_results_generator.JSONResultsGeneratorBase( |
| 74 | self.builder_name, self.build_name, self.build_number, |
| 75 | '', |
| 76 | None, # don't fetch past json results archive |
| 77 | test_results_map) |
| 78 | |
| 79 | failed_count_map = dict([(t, 1) for t in failed_tests]) |
| 80 | |
| 81 | # Test incremental json results |
| 82 | incremental_json = generator.GetJSON() |
| 83 | self._VerifyJSONResults( |
| 84 | tests_set, |
| 85 | test_timings, |
| 86 | failed_count_map, |
| 87 | len(PASS_tests), |
| 88 | len(DISABLED_tests), |
| 89 | len(FLAKY_tests), |
| 90 | len(DISABLED_tests | failed_tests), |
| 91 | incremental_json, |
| 92 | 1) |
| 93 | |
| 94 | # We don't verify the results here, but at least we make sure the code |
| 95 | # runs without errors. |
| 96 | generator.GenerateJSONOutput() |
| 97 | generator.GenerateTimesMSFile() |
| 98 | |
| 99 | def _VerifyJSONResults(self, tests_set, test_timings, failed_count_map, |
| 100 | PASS_count, DISABLED_count, FLAKY_count, |
| 101 | fixable_count, json_obj, num_runs): |
| 102 | # Aliasing to a short name for better access to its constants. |
| 103 | JRG = json_results_generator.JSONResultsGeneratorBase |
| 104 | |
| 105 | self.assertIn(JRG.VERSION_KEY, json_obj) |
| 106 | self.assertIn(self.builder_name, json_obj) |
| 107 | |
| 108 | buildinfo = json_obj[self.builder_name] |
| 109 | self.assertIn(JRG.FIXABLE, buildinfo) |
| 110 | self.assertIn(JRG.TESTS, buildinfo) |
| 111 | self.assertEqual(len(buildinfo[JRG.BUILD_NUMBERS]), num_runs) |
| 112 | self.assertEqual(buildinfo[JRG.BUILD_NUMBERS][0], self.build_number) |
| 113 | |
| 114 | if tests_set or DISABLED_count: |
| 115 | fixable = {} |
| 116 | for fixable_items in buildinfo[JRG.FIXABLE]: |
| 117 | for (result_type, count) in fixable_items.iteritems(): |
| 118 | if result_type in fixable: |
| 119 | fixable[result_type] = fixable[result_type] + count |
| 120 | else: |
| 121 | fixable[result_type] = count |
| 122 | |
| 123 | if PASS_count: |
| 124 | self.assertEqual(fixable[JRG.PASS_RESULT], PASS_count) |
| 125 | else: |
| 126 | self.assertTrue(JRG.PASS_RESULT not in fixable or |
| 127 | fixable[JRG.PASS_RESULT] == 0) |
| 128 | if DISABLED_count: |
| 129 | self.assertEqual(fixable[JRG.SKIP_RESULT], DISABLED_count) |
| 130 | else: |
| 131 | self.assertTrue(JRG.SKIP_RESULT not in fixable or |
| 132 | fixable[JRG.SKIP_RESULT] == 0) |
| 133 | if FLAKY_count: |
| 134 | self.assertEqual(fixable[JRG.FLAKY_RESULT], FLAKY_count) |
| 135 | else: |
| 136 | self.assertTrue(JRG.FLAKY_RESULT not in fixable or |
| 137 | fixable[JRG.FLAKY_RESULT] == 0) |
| 138 | |
| 139 | if failed_count_map: |
| 140 | tests = buildinfo[JRG.TESTS] |
| 141 | for test_name in failed_count_map.iterkeys(): |
| 142 | test = self._FindTestInTrie(test_name, tests) |
| 143 | |
| 144 | failed = 0 |
| 145 | for result in test[JRG.RESULTS]: |
| 146 | if result[1] == JRG.FAIL_RESULT: |
| 147 | failed += result[0] |
| 148 | self.assertEqual(failed_count_map[test_name], failed) |
| 149 | |
| 150 | timing_count = 0 |
| 151 | for timings in test[JRG.TIMES]: |
| 152 | if timings[1] == test_timings[test_name]: |
| 153 | timing_count = timings[0] |
| 154 | self.assertEqual(1, timing_count) |
| 155 | |
| 156 | if fixable_count: |
| 157 | self.assertEqual(sum(buildinfo[JRG.FIXABLE_COUNT]), fixable_count) |
| 158 | |
| 159 | def _FindTestInTrie(self, path, trie): |
| 160 | nodes = path.split('/') |
| 161 | sub_trie = trie |
| 162 | for node in nodes: |
| 163 | self.assertIn(node, sub_trie) |
| 164 | sub_trie = sub_trie[node] |
| 165 | return sub_trie |
| 166 | |
| 167 | def testJSONGeneration(self): |
| 168 | self._TestJSONGeneration([], []) |
| 169 | self._TestJSONGeneration(['A1', 'B1'], []) |
| 170 | self._TestJSONGeneration([], ['FAILS_A2', 'FAILS_B2']) |
| 171 | self._TestJSONGeneration(['DISABLED_A3', 'DISABLED_B3'], []) |
| 172 | self._TestJSONGeneration(['A4'], ['B4', 'FAILS_C4']) |
| 173 | self._TestJSONGeneration(['DISABLED_C5', 'DISABLED_D5'], ['A5', 'B5']) |
| 174 | self._TestJSONGeneration( |
| 175 | ['A6', 'B6', 'FAILS_C6', 'DISABLED_E6', 'DISABLED_F6'], |
| 176 | ['FAILS_D6']) |
| 177 | |
| 178 | # Generate JSON with the same test sets. (Both incremental results and |
| 179 | # archived results must be updated appropriately.) |
| 180 | self._TestJSONGeneration( |
| 181 | ['A', 'FLAKY_B', 'DISABLED_C'], |
| 182 | ['FAILS_D', 'FLAKY_E']) |
| 183 | self._TestJSONGeneration( |
| 184 | ['A', 'DISABLED_C', 'FLAKY_E'], |
| 185 | ['FLAKY_B', 'FAILS_D']) |
| 186 | self._TestJSONGeneration( |
| 187 | ['FLAKY_B', 'DISABLED_C', 'FAILS_D'], |
| 188 | ['A', 'FLAKY_E']) |
| 189 | |
| 190 | def testHierarchicalJSNGeneration(self): |
| 191 | # FIXME: Re-work tests to be more comprehensible and comprehensive. |
| 192 | self._TestJSONGeneration(['foo/A'], ['foo/B', 'bar/C']) |
| 193 | |
| 194 | def testTestTimingsTrie(self): |
| 195 | individual_test_timings = [] |
| 196 | individual_test_timings.append( |
| 197 | json_results_generator.TestResult( |
| 198 | 'foo/bar/baz.html', |
| 199 | elapsed_time=1.2)) |
| 200 | individual_test_timings.append( |
| 201 | json_results_generator.TestResult('bar.html', elapsed_time=0.0001)) |
| 202 | trie = json_results_generator.TestTimingsTrie(individual_test_timings) |
| 203 | |
| 204 | expected_trie = { |
| 205 | 'bar.html': 0, |
| 206 | 'foo': { |
| 207 | 'bar': { |
| 208 | 'baz.html': 1200, |
| 209 | } |
| 210 | } |
| 211 | } |
| 212 | |
| 213 | self.assertEqual(json.dumps(trie), json.dumps(expected_trie)) |