Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 1 | #!/usr/bin/env python2 |
| 2 | """Generate summary report for ChromeOS toolchain waterfalls.""" |
| 3 | |
| 4 | # Desired future features (to be added): |
| 5 | # - arguments to allow generating only the main waterfall report, |
| 6 | # or only the rotating builder reports, or only the failures |
| 7 | # report; or the waterfall reports without the failures report. |
| 8 | # - Better way of figuring out which dates/builds to generate |
| 9 | # reports for: probably an argument specifying a date or a date |
| 10 | # range, then use something like the new buildbot utils to |
| 11 | # query the build logs to find the right build numbers for the |
| 12 | # builders for the specified dates. |
| 13 | # - Store/get the json/data files in mobiletc-prebuild's x20 area. |
| 14 | # - Update data in json file to reflect, for each testsuite, which |
| 15 | # tests are not expected to run on which boards; update this |
| 16 | # script to use that data appropriately. |
| 17 | # - Make sure user's prodaccess is up-to-date before trying to use |
| 18 | # this script. |
| 19 | # - Add some nice formatting/highlighting to reports. |
| 20 | |
from __future__ import print_function

import argparse
import calendar
import getpass
import json
import os
import re
import shutil
import sys
import time

from cros_utils import command_executer
| 33 | |
| 34 | # All the test suites whose data we might want for the reports. |
| 35 | TESTS = ( |
| 36 | ('bvt-inline', 'HWTest'), |
| 37 | ('bvt-cq', 'HWTest'), |
| 38 | ('toolchain-tests', 'HWTest'), |
| 39 | ('security', 'HWTest'), |
| 40 | ('kernel_daily_regression', 'HWTest'), |
| 41 | ('kernel_daily_benchmarks', 'HWTest'),) |
| 42 | |
| 43 | # The main waterfall builders, IN THE ORDER IN WHICH WE WANT THEM |
| 44 | # LISTED IN THE REPORT. |
| 45 | WATERFALL_BUILDERS = [ |
| 46 | 'amd64-gcc-toolchain', 'arm-gcc-toolchain', 'arm64-gcc-toolchain', |
| 47 | 'x86-gcc-toolchain', 'amd64-llvm-toolchain', 'arm-llvm-toolchain', |
| 48 | 'arm64-llvm-toolchain', 'x86-llvm-toolchain', 'amd64-llvm-next-toolchain', |
| 49 | 'arm-llvm-next-toolchain', 'arm64-llvm-next-toolchain', |
| 50 | 'x86-llvm-next-toolchain' |
| 51 | ] |
| 52 | |
Manoj Gupta | 9c0b33b | 2016-12-15 14:52:25 -0800 | [diff] [blame] | 53 | DATA_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-report-data/' |
| 54 | ARCHIVE_DIR = '/google/data/rw/users/mo/mobiletc-prebuild/waterfall-reports/' |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 55 | DOWNLOAD_DIR = '/tmp/waterfall-logs' |
Caroline Tice | e02e9f8 | 2016-12-01 13:14:41 -0800 | [diff] [blame] | 56 | MAX_SAVE_RECORDS = 7 |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 57 | BUILD_DATA_FILE = '%s/build-data.txt' % DATA_DIR |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 58 | GCC_ROTATING_BUILDER = 'gcc_toolchain' |
| 59 | LLVM_ROTATING_BUILDER = 'llvm_next_toolchain' |
| 60 | ROTATING_BUILDERS = [GCC_ROTATING_BUILDER, LLVM_ROTATING_BUILDER] |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 61 | |
| 62 | # For int-to-string date conversion. Note, the index of the month in this |
| 63 | # list needs to correspond to the month's integer value. i.e. 'Sep' must |
| 64 | # be as MONTHS[9]. |
| 65 | MONTHS = [ |
| 66 | '', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', |
| 67 | 'Nov', 'Dec' |
| 68 | ] |
| 69 | |
| 70 | |
| 71 | def format_date(int_date): |
| 72 | """Convert an integer date to a string date. YYYYMMDD -> YYYY-MMM-DD""" |
| 73 | |
| 74 | if int_date == 0: |
| 75 | return 'today' |
| 76 | |
| 77 | tmp_date = int_date |
| 78 | day = tmp_date % 100 |
| 79 | tmp_date = tmp_date / 100 |
| 80 | month = tmp_date % 100 |
| 81 | year = tmp_date / 100 |
| 82 | |
| 83 | month_str = MONTHS[month] |
| 84 | date_str = '%d-%s-%d' % (year, month_str, day) |
| 85 | return date_str |
| 86 | |
| 87 | |
Caroline Tice | e02e9f8 | 2016-12-01 13:14:41 -0800 | [diff] [blame] | 88 | def EmailReport(report_file, report_type, date): |
| 89 | subject = '%s Waterfall Summary report, %s' % (report_type, date) |
| 90 | email_to = getpass.getuser() |
Rahul Chaudhry | 213f3c0 | 2016-12-06 10:47:05 -0800 | [diff] [blame] | 91 | sendgmr_path = '/google/data/ro/projects/gws-sre/sendgmr' |
| 92 | command = ('%s --to=%s@google.com --subject="%s" --body_file=%s' % |
| 93 | (sendgmr_path, email_to, subject, report_file)) |
Caroline Tice | e02e9f8 | 2016-12-01 13:14:41 -0800 | [diff] [blame] | 94 | command_executer.GetCommandExecuter().RunCommand(command) |
| 95 | |
| 96 | |
| 97 | def PruneOldFailures(failure_dict, int_date): |
| 98 | earliest_date = int_date - MAX_SAVE_RECORDS |
| 99 | for suite in failure_dict: |
| 100 | suite_dict = failure_dict[suite] |
| 101 | test_keys_to_remove = [] |
| 102 | for test in suite_dict: |
| 103 | test_dict = suite_dict[test] |
| 104 | msg_keys_to_remove = [] |
| 105 | for msg in test_dict: |
| 106 | fails = test_dict[msg] |
| 107 | i = 0 |
| 108 | while i < len(fails) and fails[i][0] <= earliest_date: |
| 109 | i += 1 |
| 110 | new_fails = fails[i:] |
| 111 | test_dict[msg] = new_fails |
| 112 | if len(new_fails) == 0: |
| 113 | msg_keys_to_remove.append(msg) |
| 114 | |
| 115 | for k in msg_keys_to_remove: |
| 116 | del test_dict[k] |
| 117 | |
| 118 | suite_dict[test] = test_dict |
| 119 | if len(test_dict) == 0: |
| 120 | test_keys_to_remove.append(test) |
| 121 | |
| 122 | for k in test_keys_to_remove: |
| 123 | del suite_dict[k] |
| 124 | |
| 125 | failure_dict[suite] = suite_dict |
| 126 | |
| 127 | |
Yunlian Jiang | 6b8bacb | 2016-12-15 14:53:39 -0800 | [diff] [blame] | 128 | def GetBuildID(build_bot, date): |
| 129 | """Get the build id for a build_bot at a given date.""" |
| 130 | day = '{day:02d}'.format(day=date%100) |
| 131 | mon = MONTHS[date/100%100] |
| 132 | date_string = mon + ' ' + day |
| 133 | if build_bot in WATERFALL_BUILDERS: |
| 134 | url = 'https://uberchromegw.corp.google.com/i/chromeos/' + \ |
| 135 | 'builders/%s?numbuilds=200' % build_bot |
| 136 | if build_bot in ROTATING_BUILDERS: |
| 137 | url = 'https://uberchromegw.corp.google.com/i/chromiumos.tryserver/' + \ |
| 138 | 'builders/%s?numbuilds=200' % build_bot |
| 139 | command = 'sso_client %s' %url |
| 140 | retval = 1 |
| 141 | retry_time = 3 |
| 142 | while retval and retry_time: |
| 143 | retval, output, _ = \ |
| 144 | command_executer.GetCommandExecuter().RunCommandWOutput(command, \ |
| 145 | print_to_console=False) |
| 146 | retry_time -= 1 |
| 147 | |
| 148 | if retval: |
| 149 | return [] |
| 150 | |
| 151 | out = output.split('\n') |
| 152 | line_num = 0 |
| 153 | build_id = [] |
| 154 | # Parse the output like this |
| 155 | # <td>Dec 14 10:55</td> |
| 156 | # <td class="revision">??</td> |
| 157 | # <td failure</td><td><a href="../builders/gcc_toolchain/builds/109">#109</a> |
| 158 | while line_num < len(out): |
| 159 | if date_string in out[line_num]: |
| 160 | if line_num + 2 < len(out): |
| 161 | build_num_line = out[line_num + 2] |
| 162 | raw_num = re.findall(r'builds/\d+', build_num_line) |
| 163 | # raw_num is ['builds/109'] in the example. |
| 164 | if raw_num: |
| 165 | build_id.append(int(raw_num[0].split('/')[1])) |
| 166 | line_num += 1 |
| 167 | return build_id |
| 168 | |
| 169 | |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 170 | def GenerateFailuresReport(fail_dict, date): |
| 171 | filename = 'waterfall_report.failures.%s.txt' % date |
| 172 | date_string = format_date(date) |
| 173 | with open(filename, 'w') as out_file: |
| 174 | # Write failure report section. |
| 175 | out_file.write('\n\nSummary of Test Failures as of %s\n\n' % date_string) |
| 176 | |
| 177 | # We want to sort the errors and output them in order of the ones that occur |
| 178 | # most often. So we have to collect the data about all of them, then sort |
| 179 | # it. |
| 180 | error_groups = [] |
| 181 | for suite in fail_dict: |
| 182 | suite_dict = fail_dict[suite] |
| 183 | if suite_dict: |
| 184 | for test in suite_dict: |
| 185 | test_dict = suite_dict[test] |
| 186 | for err_msg in test_dict: |
| 187 | err_list = test_dict[err_msg] |
| 188 | sorted_list = sorted(err_list, key=lambda x: x[0], reverse=True) |
| 189 | err_group = [len(sorted_list), suite, test, err_msg, sorted_list] |
| 190 | error_groups.append(err_group) |
| 191 | |
| 192 | # Sort the errors by the number of errors of each type. Then output them in |
| 193 | # order. |
| 194 | sorted_errors = sorted(error_groups, key=lambda x: x[0], reverse=True) |
| 195 | for i in range(0, len(sorted_errors)): |
| 196 | err_group = sorted_errors[i] |
| 197 | suite = err_group[1] |
| 198 | test = err_group[2] |
| 199 | err_msg = err_group[3] |
| 200 | err_list = err_group[4] |
| 201 | out_file.write('Suite: %s\n' % suite) |
| 202 | out_file.write(' %s (%d failures)\n' % (test, len(err_list))) |
| 203 | out_file.write(' (%s)\n' % err_msg) |
| 204 | for i in range(0, len(err_list)): |
| 205 | err = err_list[i] |
| 206 | out_file.write(' %s, %s, %s\n' % (format_date(err[0]), err[1], |
| 207 | err[2])) |
| 208 | out_file.write('\n') |
| 209 | |
| 210 | print('Report generated in %s.' % filename) |
| 211 | return filename |
| 212 | |
| 213 | |
| 214 | def GenerateWaterfallReport(report_dict, fail_dict, waterfall_type, date, |
| 215 | omit_failures): |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 216 | """Write out the actual formatted report.""" |
| 217 | |
| 218 | filename = 'waterfall_report.%s_waterfall.%s.txt' % (waterfall_type, date) |
| 219 | |
| 220 | date_string = '' |
| 221 | date_list = report_dict['date'] |
| 222 | num_dates = len(date_list) |
| 223 | i = 0 |
| 224 | for d in date_list: |
| 225 | date_string += d |
| 226 | if i < num_dates - 1: |
| 227 | date_string += ', ' |
| 228 | i += 1 |
| 229 | |
| 230 | if waterfall_type == 'main': |
| 231 | report_list = WATERFALL_BUILDERS |
| 232 | else: |
| 233 | report_list = report_dict.keys() |
| 234 | |
| 235 | with open(filename, 'w') as out_file: |
| 236 | # Write Report Header |
| 237 | out_file.write('\nStatus of %s Waterfall Builds from %s\n\n' % |
| 238 | (waterfall_type, date_string)) |
| 239 | out_file.write(' ' |
| 240 | ' kernel kernel\n') |
| 241 | out_file.write(' Build bvt- bvt-cq ' |
| 242 | 'toolchain- security daily daily\n') |
| 243 | out_file.write(' status inline ' |
| 244 | ' tests regression benchmarks\n') |
| 245 | out_file.write(' [P/ F/ DR]* [P/ F /DR]* ' |
| 246 | '[P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]* [P/ F/ DR]*\n\n') |
| 247 | |
| 248 | # Write daily waterfall status section. |
| 249 | for i in range(0, len(report_list)): |
| 250 | builder = report_list[i] |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 251 | if builder == 'date': |
| 252 | continue |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 253 | |
| 254 | if builder not in report_dict: |
| 255 | out_file.write('Unable to find information for %s.\n\n' % builder) |
| 256 | continue |
| 257 | |
| 258 | build_dict = report_dict[builder] |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 259 | status = build_dict.get('build_status', 'bad') |
| 260 | inline = build_dict.get('bvt-inline', '[??/ ?? /??]') |
| 261 | cq = build_dict.get('bvt-cq', '[??/ ?? /??]') |
| 262 | inline_color = build_dict.get('bvt-inline-color', '') |
| 263 | cq_color = build_dict.get('bvt-cq-color', '') |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 264 | if 'x86' not in builder: |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 265 | toolchain = build_dict.get('toolchain-tests', '[??/ ?? /??]') |
| 266 | security = build_dict.get('security', '[??/ ?? /??]') |
| 267 | toolchain_color = build_dict.get('toolchain-tests-color', '') |
| 268 | security_color = build_dict.get('security-color', '') |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 269 | if 'gcc' in builder: |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 270 | regression = build_dict.get('kernel_daily_regression', '[??/ ?? /??]') |
| 271 | bench = build_dict.get('kernel_daily_benchmarks', '[??/ ?? /??]') |
| 272 | regression_color = build_dict.get('kernel_daily_regression-color', '') |
| 273 | bench_color = build_dict.get('kernel_daily_benchmarks-color', '') |
| 274 | out_file.write(' %6s %6s' |
| 275 | ' %6s %6s %6s %6s\n' % |
| 276 | (inline_color, cq_color, toolchain_color, |
| 277 | security_color, regression_color, bench_color)) |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 278 | out_file.write('%25s %3s %s %s %s %s %s %s\n' % (builder, status, |
| 279 | inline, cq, |
| 280 | toolchain, security, |
| 281 | regression, bench)) |
| 282 | else: |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 283 | out_file.write(' %6s %6s' |
| 284 | ' %6s %6s\n' % (inline_color, cq_color, |
| 285 | toolchain_color, |
| 286 | security_color)) |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 287 | out_file.write('%25s %3s %s %s %s %s\n' % (builder, status, inline, |
| 288 | cq, toolchain, security)) |
| 289 | else: |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 290 | out_file.write(' %6s %6s\n' % |
| 291 | (inline_color, cq_color)) |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 292 | out_file.write('%25s %3s %s %s\n' % (builder, status, inline, cq)) |
| 293 | if 'build_link' in build_dict: |
| 294 | out_file.write('%s\n\n' % build_dict['build_link']) |
| 295 | |
| 296 | out_file.write('\n\n*P = Number of tests in suite that Passed; F = ' |
| 297 | 'Number of tests in suite that Failed; DR = Number of tests' |
| 298 | ' in suite that Didn\'t Run.\n') |
| 299 | |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 300 | if omit_failures: |
| 301 | print('Report generated in %s.' % filename) |
| 302 | return filename |
| 303 | |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 304 | # Write failure report section. |
| 305 | out_file.write('\n\nSummary of Test Failures as of %s\n\n' % date_string) |
| 306 | |
| 307 | # We want to sort the errors and output them in order of the ones that occur |
| 308 | # most often. So we have to collect the data about all of them, then sort |
| 309 | # it. |
| 310 | error_groups = [] |
| 311 | for suite in fail_dict: |
| 312 | suite_dict = fail_dict[suite] |
| 313 | if suite_dict: |
| 314 | for test in suite_dict: |
| 315 | test_dict = suite_dict[test] |
| 316 | for err_msg in test_dict: |
| 317 | err_list = test_dict[err_msg] |
| 318 | sorted_list = sorted(err_list, key=lambda x: x[0], reverse=True) |
| 319 | err_group = [len(sorted_list), suite, test, err_msg, sorted_list] |
| 320 | error_groups.append(err_group) |
| 321 | |
| 322 | # Sort the errors by the number of errors of each type. Then output them in |
| 323 | # order. |
| 324 | sorted_errors = sorted(error_groups, key=lambda x: x[0], reverse=True) |
| 325 | for i in range(0, len(sorted_errors)): |
| 326 | err_group = sorted_errors[i] |
| 327 | suite = err_group[1] |
| 328 | test = err_group[2] |
| 329 | err_msg = err_group[3] |
| 330 | err_list = err_group[4] |
| 331 | out_file.write('Suite: %s\n' % suite) |
| 332 | out_file.write(' %s (%d failures)\n' % (test, len(err_list))) |
| 333 | out_file.write(' (%s)\n' % err_msg) |
| 334 | for i in range(0, len(err_list)): |
| 335 | err = err_list[i] |
| 336 | out_file.write(' %s, %s, %s\n' % (format_date(err[0]), err[1], |
| 337 | err[2])) |
| 338 | out_file.write('\n') |
| 339 | |
| 340 | print('Report generated in %s.' % filename) |
Caroline Tice | e02e9f8 | 2016-12-01 13:14:41 -0800 | [diff] [blame] | 341 | return filename |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 342 | |
| 343 | |
| 344 | def UpdateReport(report_dict, builder, test, report_date, build_link, |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 345 | test_summary, board, color): |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 346 | """Update the data in our report dictionary with current test's data.""" |
| 347 | |
| 348 | if 'date' not in report_dict: |
| 349 | report_dict['date'] = [report_date] |
| 350 | elif report_date not in report_dict['date']: |
| 351 | # It is possible that some of the builders started/finished on different |
| 352 | # days, so we allow for multiple dates in the reports. |
| 353 | report_dict['date'].append(report_date) |
| 354 | |
| 355 | build_key = '' |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 356 | if builder == GCC_ROTATING_BUILDER: |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 357 | build_key = '%s-gcc-toolchain' % board |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 358 | elif builder == LLVM_ROTATING_BUILDER: |
| 359 | build_key = '%s-llvm-next-toolchain' % board |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 360 | else: |
| 361 | build_key = builder |
| 362 | |
| 363 | if build_key not in report_dict.keys(): |
| 364 | build_dict = dict() |
| 365 | else: |
| 366 | build_dict = report_dict[build_key] |
| 367 | |
| 368 | if 'build_link' not in build_dict: |
| 369 | build_dict['build_link'] = build_link |
| 370 | |
| 371 | if 'date' not in build_dict: |
| 372 | build_dict['date'] = report_date |
| 373 | |
| 374 | if 'board' in build_dict and build_dict['board'] != board: |
| 375 | raise RuntimeError('Error: Two different boards (%s,%s) in one build (%s)!' |
| 376 | % (board, build_dict['board'], build_link)) |
| 377 | build_dict['board'] = board |
| 378 | |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 379 | color_key = '%s-color' % test |
| 380 | build_dict[color_key] = color |
| 381 | |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 382 | # Check to see if we already have a build status for this build_key |
| 383 | status = '' |
| 384 | if 'build_status' in build_dict.keys(): |
| 385 | # Use current build_status, unless current test failed (see below). |
| 386 | status = build_dict['build_status'] |
| 387 | |
| 388 | if not test_summary: |
| 389 | # Current test data was not available, so something was bad with build. |
| 390 | build_dict['build_status'] = 'bad' |
| 391 | build_dict[test] = '[ no data ]' |
| 392 | else: |
| 393 | build_dict[test] = test_summary |
| 394 | if not status: |
| 395 | # Current test ok; no other data, so assume build was ok. |
| 396 | build_dict['build_status'] = 'ok' |
| 397 | |
| 398 | report_dict[build_key] = build_dict |
| 399 | |
| 400 | |
| 401 | def UpdateBuilds(builds): |
| 402 | """Update the data in our build-data.txt file.""" |
| 403 | |
| 404 | # The build data file records the last build number for which we |
| 405 | # generated a report. When we generate the next report, we read |
| 406 | # this data and increment it to get the new data; when we finish |
| 407 | # generating the reports, we write the updated values into this file. |
| 408 | # NOTE: One side effect of doing this at the end: If the script |
| 409 | # fails in the middle of generating a report, this data does not get |
| 410 | # updated. |
| 411 | with open(BUILD_DATA_FILE, 'w') as fp: |
| 412 | gcc_max = 0 |
| 413 | llvm_max = 0 |
| 414 | for b in builds: |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 415 | if b[0] == GCC_ROTATING_BUILDER: |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 416 | gcc_max = max(gcc_max, b[1]) |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 417 | elif b[0] == LLVM_ROTATING_BUILDER: |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 418 | llvm_max = max(llvm_max, b[1]) |
| 419 | else: |
| 420 | fp.write('%s,%d\n' % (b[0], b[1])) |
| 421 | if gcc_max > 0: |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 422 | fp.write('%s,%d\n' % (GCC_ROTATING_BUILDER, gcc_max)) |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 423 | if llvm_max > 0: |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 424 | fp.write('%s,%d\n' % (LLVM_ROTATING_BUILDER, llvm_max)) |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 425 | |
| 426 | |
Yunlian Jiang | 6b8bacb | 2016-12-15 14:53:39 -0800 | [diff] [blame] | 427 | def GetBuilds(date=0): |
| 428 | """Get build id from builds.""" |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 429 | |
Yunlian Jiang | 6b8bacb | 2016-12-15 14:53:39 -0800 | [diff] [blame] | 430 | # If date is set, get the build id from waterfall. |
| 431 | builds = [] |
| 432 | |
| 433 | if date: |
| 434 | for builder in WATERFALL_BUILDERS + ROTATING_BUILDERS: |
| 435 | build_ids = GetBuildID(builder, date) |
| 436 | for build_id in build_ids: |
| 437 | builds.append((builder, build_id)) |
| 438 | return builds |
| 439 | |
| 440 | # If date is not set, we try to get the most recent builds. |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 441 | # Read the values of the last builds used to generate a report, and |
| 442 | # increment them appropriately, to get values for generating the |
| 443 | # current report. (See comments in UpdateBuilds). |
| 444 | with open(BUILD_DATA_FILE, 'r') as fp: |
| 445 | lines = fp.readlines() |
| 446 | |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 447 | for l in lines: |
| 448 | l = l.rstrip() |
| 449 | words = l.split(',') |
| 450 | builder = words[0] |
| 451 | build = int(words[1]) |
| 452 | builds.append((builder, build + 1)) |
| 453 | # NOTE: We are assuming here that there are always 2 daily builds in |
| 454 | # each of the rotating builders. I am not convinced this is a valid |
| 455 | # assumption. |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 456 | if builder in ROTATING_BUILDERS: |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 457 | builds.append((builder, build + 2)) |
| 458 | |
| 459 | return builds |
| 460 | |
| 461 | |
| 462 | def RecordFailures(failure_dict, platform, suite, builder, int_date, log_file, |
| 463 | build_num, failed): |
| 464 | """Read and update the stored data about test failures.""" |
| 465 | |
| 466 | # Get the dictionary for this particular test suite from the failures |
| 467 | # dictionary. |
| 468 | suite_dict = failure_dict[suite] |
| 469 | |
| 470 | # Read in the entire log file for this test/build. |
| 471 | with open(log_file, 'r') as in_file: |
| 472 | lines = in_file.readlines() |
| 473 | |
| 474 | # Update the entries in the failure dictionary for each test within this suite |
| 475 | # that failed. |
| 476 | for test in failed: |
| 477 | # Check to see if there is already an entry in the suite dictionary for this |
| 478 | # test; if so use that, otherwise create a new entry. |
| 479 | if test in suite_dict: |
| 480 | test_dict = suite_dict[test] |
| 481 | else: |
| 482 | test_dict = dict() |
| 483 | # Parse the lines from the log file, looking for lines that indicate this |
| 484 | # test failed. |
| 485 | msg = '' |
| 486 | for l in lines: |
| 487 | words = l.split() |
| 488 | if len(words) < 3: |
| 489 | continue |
| 490 | if ((words[0] == test and words[1] == 'ERROR:') or |
| 491 | (words[0] == 'provision' and words[1] == 'FAIL:')): |
| 492 | words = words[2:] |
| 493 | # Get the error message for the failure. |
| 494 | msg = ' '.join(words) |
| 495 | if not msg: |
| 496 | msg = 'Unknown_Error' |
| 497 | |
| 498 | # Look for an existing entry for this error message in the test dictionary. |
| 499 | # If found use that, otherwise create a new entry for this error message. |
| 500 | if msg in test_dict: |
| 501 | error_list = test_dict[msg] |
| 502 | else: |
| 503 | error_list = list() |
| 504 | # Create an entry for this new failure |
| 505 | new_item = [int_date, platform, builder, build_num] |
| 506 | # Add this failure to the error list if it's not already there. |
| 507 | if new_item not in error_list: |
| 508 | error_list.append([int_date, platform, builder, build_num]) |
| 509 | # Sort the error list by date. |
| 510 | error_list.sort(key=lambda x: x[0]) |
| 511 | # Calculate the earliest date to save; delete records for older failures. |
| 512 | earliest_date = int_date - MAX_SAVE_RECORDS |
| 513 | i = 0 |
Caroline Tice | e02e9f8 | 2016-12-01 13:14:41 -0800 | [diff] [blame] | 514 | while i < len(error_list) and error_list[i][0] <= earliest_date: |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 515 | i += 1 |
| 516 | if i > 0: |
| 517 | error_list = error_list[i:] |
| 518 | # Save the error list in the test's dictionary, keyed on error_msg. |
| 519 | test_dict[msg] = error_list |
| 520 | |
| 521 | # Save the updated test dictionary in the test_suite dictionary. |
| 522 | suite_dict[test] = test_dict |
| 523 | |
| 524 | # Save the updated test_suite dictionary in the failure dictionary. |
| 525 | failure_dict[suite] = suite_dict |
| 526 | |
| 527 | |
| 528 | def ParseLogFile(log_file, test_data_dict, failure_dict, test, builder, |
| 529 | build_num, build_link): |
| 530 | """Parse the log file from the given builder, build_num and test. |
| 531 | |
| 532 | Also adds the results for this test to our test results dictionary, |
| 533 | and calls RecordFailures, to update our test failure data. |
| 534 | """ |
| 535 | |
| 536 | lines = [] |
| 537 | with open(log_file, 'r') as infile: |
| 538 | lines = infile.readlines() |
| 539 | |
| 540 | passed = {} |
| 541 | failed = {} |
| 542 | not_run = {} |
| 543 | date = '' |
| 544 | status = '' |
| 545 | board = '' |
| 546 | num_provision_errors = 0 |
| 547 | build_ok = True |
| 548 | afe_line = '' |
| 549 | |
| 550 | for line in lines: |
| 551 | if line.rstrip() == '<title>404 Not Found</title>': |
| 552 | print('Warning: File for %s (build number %d), %s was not found.' % |
| 553 | (builder, build_num, test)) |
| 554 | build_ok = False |
| 555 | break |
| 556 | if '[ PASSED ]' in line: |
| 557 | test_name = line.split()[0] |
| 558 | if test_name != 'Suite': |
| 559 | passed[test_name] = True |
| 560 | elif '[ FAILED ]' in line: |
| 561 | test_name = line.split()[0] |
| 562 | if test_name == 'provision': |
| 563 | num_provision_errors += 1 |
| 564 | not_run[test_name] = True |
| 565 | elif test_name != 'Suite': |
| 566 | failed[test_name] = True |
| 567 | elif line.startswith('started: '): |
| 568 | date = line.rstrip() |
| 569 | date = date[9:] |
| 570 | date_obj = time.strptime(date, '%a %b %d %H:%M:%S %Y') |
| 571 | int_date = ( |
| 572 | date_obj.tm_year * 10000 + date_obj.tm_mon * 100 + date_obj.tm_mday) |
| 573 | date = time.strftime('%a %b %d %Y', date_obj) |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 574 | elif not status and line.startswith('status: '): |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 575 | status = line.rstrip() |
| 576 | words = status.split(':') |
| 577 | status = words[-1] |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 578 | elif line.find('Suite passed with a warning') != -1: |
| 579 | status = 'WARNING' |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 580 | elif line.startswith('@@@STEP_LINK@Link to suite@'): |
| 581 | afe_line = line.rstrip() |
| 582 | words = afe_line.split('@') |
| 583 | for w in words: |
| 584 | if w.startswith('http'): |
| 585 | afe_line = w |
| 586 | afe_line = afe_line.replace('&', '&') |
| 587 | elif 'INFO: RunCommand:' in line: |
| 588 | words = line.split() |
| 589 | for i in range(0, len(words) - 1): |
| 590 | if words[i] == '--board': |
| 591 | board = words[i + 1] |
| 592 | |
| 593 | test_dict = test_data_dict[test] |
| 594 | test_list = test_dict['tests'] |
| 595 | |
| 596 | if build_ok: |
| 597 | for t in test_list: |
| 598 | if not t in passed and not t in failed: |
| 599 | not_run[t] = True |
| 600 | |
| 601 | total_pass = len(passed) |
| 602 | total_fail = len(failed) |
| 603 | total_notrun = len(not_run) |
| 604 | |
| 605 | else: |
| 606 | total_pass = 0 |
| 607 | total_fail = 0 |
| 608 | total_notrun = 0 |
| 609 | status = 'Not found.' |
| 610 | if not build_ok: |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 611 | return [], date, board, 0, ' ' |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 612 | |
| 613 | build_dict = dict() |
| 614 | build_dict['id'] = build_num |
| 615 | build_dict['builder'] = builder |
| 616 | build_dict['date'] = date |
| 617 | build_dict['build_link'] = build_link |
| 618 | build_dict['total_pass'] = total_pass |
| 619 | build_dict['total_fail'] = total_fail |
| 620 | build_dict['total_not_run'] = total_notrun |
| 621 | build_dict['afe_job_link'] = afe_line |
| 622 | build_dict['provision_errors'] = num_provision_errors |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 623 | if status.strip() == 'SUCCESS': |
| 624 | build_dict['color'] = 'green ' |
| 625 | elif status.strip() == 'FAILURE': |
| 626 | build_dict['color'] = ' red ' |
| 627 | elif status.strip() == 'WARNING': |
| 628 | build_dict['color'] = 'orange' |
| 629 | else: |
| 630 | build_dict['color'] = ' ' |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 631 | |
| 632 | # Use YYYYMMDD (integer) as the build record key |
| 633 | if build_ok: |
| 634 | if board in test_dict: |
| 635 | board_dict = test_dict[board] |
| 636 | else: |
| 637 | board_dict = dict() |
| 638 | board_dict[int_date] = build_dict |
| 639 | |
| 640 | # Only keep the last 5 records (based on date) |
| 641 | keys_list = board_dict.keys() |
| 642 | if len(keys_list) > MAX_SAVE_RECORDS: |
| 643 | min_key = min(keys_list) |
| 644 | del board_dict[min_key] |
| 645 | |
| 646 | # Make sure changes get back into the main dictionary |
| 647 | test_dict[board] = board_dict |
| 648 | test_data_dict[test] = test_dict |
| 649 | |
| 650 | if len(failed) > 0: |
| 651 | RecordFailures(failure_dict, board, test, builder, int_date, log_file, |
| 652 | build_num, failed) |
| 653 | |
| 654 | summary_result = '[%2d/ %2d/ %2d]' % (total_pass, total_fail, total_notrun) |
| 655 | |
Caroline Tice | f0ad65c | 2016-11-29 10:40:23 -0800 | [diff] [blame] | 656 | return summary_result, date, board, int_date, build_dict['color'] |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 657 | |
| 658 | |
| 659 | def DownloadLogFile(builder, buildnum, test, test_family): |
| 660 | |
| 661 | ce = command_executer.GetCommandExecuter() |
| 662 | os.system('mkdir -p %s/%s/%s' % (DOWNLOAD_DIR, builder, test)) |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 663 | if builder in ROTATING_BUILDERS: |
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 664 | source = ('https://uberchromegw.corp.google.com/i/chromiumos.tryserver' |
| 665 | '/builders/%s/builds/%d/steps/%s%%20%%5B%s%%5D/logs/stdio' % |
| 666 | (builder, buildnum, test_family, test)) |
| 667 | build_link = ('https://uberchromegw.corp.google.com/i/chromiumos.tryserver' |
| 668 | '/builders/%s/builds/%d' % (builder, buildnum)) |
| 669 | else: |
| 670 | source = ('https://uberchromegw.corp.google.com/i/chromeos/builders/%s/' |
| 671 | 'builds/%d/steps/%s%%20%%5B%s%%5D/logs/stdio' % |
| 672 | (builder, buildnum, test_family, test)) |
| 673 | build_link = ('https://uberchromegw.corp.google.com/i/chromeos/builders/%s' |
| 674 | '/builds/%d' % (builder, buildnum)) |
| 675 | |
| 676 | target = '%s/%s/%s/%d' % (DOWNLOAD_DIR, builder, test, buildnum) |
| 677 | if not os.path.isfile(target) or os.path.getsize(target) == 0: |
| 678 | cmd = 'sso_client %s > %s' % (source, target) |
| 679 | status = ce.RunCommand(cmd) |
| 680 | if status != 0: |
| 681 | return '', '' |
| 682 | |
| 683 | return target, build_link |
| 684 | |
| 685 | |
Manoj Gupta | 6382452 | 2016-12-14 11:05:18 -0800 | [diff] [blame] | 686 | # Check for prodaccess. |
def CheckProdAccess():
  """Return True iff the user currently holds live prodaccess credentials.

  Runs 'prodcertstatus' and treats the credentials as valid only when the
  command succeeds and its output mentions an expiry time.
  """
  executer = command_executer.GetCommandExecuter()
  status, output, _ = executer.RunCommandWOutput('prodcertstatus')
  # A non-zero exit means prodcertstatus itself failed; otherwise a live
  # certificate reports when it 'expires'.
  return status == 0 and 'expires' in output
| 696 | |
| 697 | |
Caroline Tice | faa3c55 | 2016-12-13 11:29:59 -0800 | [diff] [blame] | 698 | def ValidOptions(parser, options): |
| 699 | too_many_options = False |
| 700 | if options.main: |
| 701 | if options.rotating or options.failures_report: |
| 702 | too_many_options = True |
| 703 | elif options.rotating and options.failures_report: |
| 704 | too_many_options = True |
| 705 | |
| 706 | if too_many_options: |
| 707 | parser.error('Can only specify one of --main, --rotating or' |
| 708 | ' --failures_report.') |
| 709 | |
| 710 | conflicting_failure_options = False |
| 711 | if options.failures_report and options.omit_failures: |
| 712 | conflicting_failure_options = True |
| 713 | parser.error('Cannot specify both --failures_report and --omit_failures.') |
| 714 | |
| 715 | return not too_many_options and not conflicting_failure_options |
| 716 | |
| 717 | |
def Main(argv):
  """Main function for this script.

  Parses the command-line arguments, verifies prodaccess, downloads and
  parses the builders' test logs, then generates/emails the requested
  waterfall and failure reports and (unless --no_update) writes the
  updated json data files back to disk.

  Args:
    argv: Command-line arguments (sys.argv[1:]).

  Returns:
    0 on success, 1 on bad options or missing prodaccess.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--main',
      dest='main',
      default=False,
      action='store_true',
      help='Generate report only for main waterfall '
      'builders.')
  parser.add_argument(
      '--rotating',
      dest='rotating',
      default=False,
      action='store_true',
      help='Generate report only for rotating builders.')
  parser.add_argument(
      '--failures_report',
      dest='failures_report',
      default=False,
      action='store_true',
      help='Only generate the failures section of the report.')
  parser.add_argument(
      '--omit_failures',
      dest='omit_failures',
      default=False,
      action='store_true',
      help='Do not generate the failures section of the report.')
  parser.add_argument(
      '--no_update',
      dest='no_update',
      default=False,
      action='store_true',
      help='Run reports, but do not update the data files.')
  parser.add_argument(
      '--date',
      dest='date',
      default=0,
      type=int,
      help='The date YYYYMMDD of waterfall report.')

  options = parser.parse_args(argv)

  if not ValidOptions(parser, options):
    return 1

  main_only = options.main
  rotating_only = options.rotating
  failures_report = options.failures_report
  omit_failures = options.omit_failures
  date = options.date

  test_data_dict = dict()
  failure_dict = dict()

  prod_access = CheckProdAccess()
  if not prod_access:
    print('ERROR: Please run prodaccess first.')
    # Was a bare 'return' (None); return 1 so failures are reported with a
    # non-zero status, consistent with the bad-options path above.
    return 1

  # Load the persisted per-test/per-board results and failure records.
  with open('%s/waterfall-test-data.json' % DATA_DIR, 'r') as input_file:
    test_data_dict = json.load(input_file)

  with open('%s/test-failure-data.json' % DATA_DIR, 'r') as fp:
    failure_dict = json.load(fp)

  builds = GetBuilds(date)

  waterfall_report_dict = dict()
  rotating_report_dict = dict()
  int_date = 0
  for test_desc in TESTS:
    test, test_family = test_desc
    for build in builds:
      (builder, buildnum) = build
      # Skip test/builder combinations that are not expected to run.
      if test.startswith('kernel') and 'llvm' in builder:
        continue
      if 'x86' in builder and not test.startswith('bvt'):
        continue
      target, build_link = DownloadLogFile(builder, buildnum, test, test_family)

      if os.path.exists(target):
        test_summary, report_date, board, tmp_date, color = ParseLogFile(
            target, test_data_dict, failure_dict, test, builder, buildnum,
            build_link)

        # ParseLogFile returns 0 for the date when it could not determine
        # one; keep the last real date seen.
        if tmp_date != 0:
          int_date = tmp_date

        if builder in ROTATING_BUILDERS:
          UpdateReport(rotating_report_dict, builder, test, report_date,
                       build_link, test_summary, board, color)
        else:
          UpdateReport(waterfall_report_dict, builder, test, report_date,
                       build_link, test_summary, board, color)

  PruneOldFailures(failure_dict, int_date)

  # Generate, email and archive the requested reports.
  if waterfall_report_dict and not rotating_only and not failures_report:
    main_report = GenerateWaterfallReport(waterfall_report_dict, failure_dict,
                                          'main', int_date, omit_failures)
    EmailReport(main_report, 'Main', format_date(int_date))
    shutil.copy(main_report, ARCHIVE_DIR)
  if rotating_report_dict and not main_only and not failures_report:
    rotating_report = GenerateWaterfallReport(rotating_report_dict,
                                              failure_dict, 'rotating',
                                              int_date, omit_failures)
    EmailReport(rotating_report, 'Rotating', format_date(int_date))
    shutil.copy(rotating_report, ARCHIVE_DIR)

  if failures_report:
    # Use a distinct name for the generated file so the boolean flag
    # 'failures_report' is not rebound to a filename string.
    failures_file = GenerateFailuresReport(failure_dict, int_date)
    EmailReport(failures_file, 'Failures', format_date(int_date))
    shutil.copy(failures_file, ARCHIVE_DIR)

  if not options.no_update:
    with open('%s/waterfall-test-data.json' % DATA_DIR, 'w') as out_file:
      json.dump(test_data_dict, out_file, indent=2)

    with open('%s/test-failure-data.json' % DATA_DIR, 'w') as out_file:
      json.dump(failure_dict, out_file, indent=2)

    UpdateBuilds(builds)

  return 0
Caroline Tice | 4846206 | 2016-11-18 16:49:00 -0800 | [diff] [blame] | 841 | |
| 842 | |
if __name__ == '__main__':
  # Propagate Main's return code instead of unconditionally exiting 0,
  # so callers (cron, shell scripts) can detect failures.  A None return
  # from Main still exits with status 0.
  sys.exit(Main(sys.argv[1:]))