#!/usr/bin/env python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Parses perf data files and creates chrome-based graph files from that data.

This script assumes that extract_perf.py was previously run to extract perf
test data from a database and then dump it into local text data files. This
script then parses the extracted perf data files and creates new data files
that can be directly read in by chrome's perf graphing infrastructure to
display perf graphs.

This script also generates a set of JavaScript/HTML overview pages that present
bird's-eye overviews of multiple perf graphs simultaneously.

Sample usage:
    python generate_perf_graphs.py -c -v

Run with -h to see the full set of command-line options.
"""

import fnmatch
import logging
import math
import optparse
import os
import re
import shutil
import simplejson
import sys

_SETTINGS = 'autotest_lib.frontend.settings'
os.environ['DJANGO_SETTINGS_MODULE'] = _SETTINGS

import common
from django.shortcuts import render_to_response

# Paths to files.
_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
_DATA_DIR = os.path.join(_SCRIPT_DIR, 'data')
_CURR_PID_FILE = os.path.join(_DATA_DIR, __file__ + '.curr_pid.txt')
_CHART_CONFIG_FILE = os.path.join(_SCRIPT_DIR, 'croschart_defaults.json')
_TEMPLATE_DIR = os.path.join(_SCRIPT_DIR, 'templates')

_GRAPH_DIR = os.path.join(_SCRIPT_DIR, '..', 'graphs')
_GRAPH_DATA_DIR = os.path.join(_GRAPH_DIR, 'data')
_COMPLETED_ID_FILE = os.path.join(_GRAPH_DATA_DIR, 'job_id_complete.txt')
_REV_NUM_FILE = os.path.join(_GRAPH_DATA_DIR, 'rev_num.txt')

# Values that can be configured through options.
# TODO(dennisjeffrey): Infer the tip-of-tree milestone dynamically once this
# issue is addressed: crosbug.com/38564.
_TOT_MILESTONE = 26
_OLDEST_MILESTONE_TO_GRAPH = 23

# Other values that can only be configured here in the code.
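# Note: these symlinks are created inside each per-test graph directory
# (<graph_dir>/data/rX/<platform>/<test_id>/), so the relative targets below
# climb four directory levels back up to <graph_dir>/ui.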
_SYMLINK_LIST = [
    ('report.html', '../../../../ui/cros_plotter.html'),
    ('js', '../../../../ui/js'),
]


def set_world_read_permissions(path):
    """Recursively sets the content of |path| to be world-readable.

    @param path: The string path.
    """
    logging.debug('Setting world-read permissions recursively on %s', path)
    os.chmod(path, 0755)
    for root, dirs, files in os.walk(path):
        for d in dirs:
            dname = os.path.join(root, d)
            if not os.path.islink(dname):
                os.chmod(dname, 0755)
        for f in files:
            fname = os.path.join(root, f)
            if not os.path.islink(fname):
                os.chmod(fname, 0755)


def remove_path(path):
    """Remove the given path (whether file or directory).

    @param path: The string path.
    """
    if os.path.isdir(path):
        shutil.rmtree(path)
        return
    try:
        os.remove(path)
    except OSError:
        pass


def symlink_force(link_name, target):
    """Create a symlink, replacing any existing file or link at |link_name|.

    @param link_name: The string name of the link to create.
    @param target: The string destination file to which the link should point.
    """
    try:
        os.unlink(link_name)
    except EnvironmentError:
        pass
    try:
        os.symlink(target, link_name)
    except OSError:
        remove_path(link_name)
        os.symlink(target, link_name)


def mean_and_standard_deviation(data):
    """Compute the mean and standard deviation of a list of numbers.

    @param data: A list of numeric values.

    @return A 2-tuple (mean, standard_deviation) computed from |data|.
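
    For example, using the sample standard deviation (dividing by n-1):
        mean_and_standard_deviation([1.0, 2.0, 3.0]) => (2.0, 1.0)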
    """
    n = len(data)
    if n == 0:
        return 0.0, 0.0
    mean = float(sum(data)) / n
    if n == 1:
        return mean, 0.0
    # Divide by n-1 to compute the "sample standard deviation".
    variance = sum([(element - mean) ** 2 for element in data]) / (n - 1)
    return mean, math.sqrt(variance)


def get_release_from_jobname(jobname):
    """Identifies the release number components from an autotest job name.

    For example:
        'lumpy-release-R21-2384.0.0_pyauto_perf' becomes (21, 2384, 0, 0).

    @param jobname: The string name of an autotest job.

    @return The 4-tuple containing components of the build release number, or
        None if those components cannot be identified from the |jobname|.
    """
    prog = re.compile(r'r(\d+)-(\d+)\.(\d+)\.(\d+)')
    m = prog.search(jobname.lower())
    if m:
        return (int(m.group(1)), int(m.group(2)), int(m.group(3)),
                int(m.group(4)))
    return None


def is_on_mainline_of_milestone(jobname, milestone):
    """Determines whether an autotest build is on mainline of a given milestone.

    @param jobname: The string name of an autotest job (containing the release
        number).
    @param milestone: The integer milestone number to consider.

    @return True if the given autotest job name is for a release number that
        is either (1) an ancestor of the specified milestone, or (2) on the
        main branch line of the given milestone. Returns False otherwise.
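
    For example, a job for R21-2384.0.0 counts toward milestone 23 because it
    predates the R23 branch point, whereas a job for R21-2384.1.0 does not,
    since a nonzero third or fourth release component indicates a build past
    its branch point.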
    """
    r = get_release_from_jobname(jobname)
    m = milestone
    # Handle garbage data that might exist.
    if any(item < 0 for item in r):
        raise Exception('Unexpected release info in job name: %s' % jobname)
    if m == r[0]:
        # Yes for jobs from the specified milestone itself.
        return True
    if r[0] < m and r[2] == 0 and r[3] == 0:
        # Yes for jobs from earlier milestones that were before their
        # respective branch points.
        return True
    return False


# TODO(dennisjeffrey): Determine whether or not we need all the values in the
# config file. Remove unnecessary ones and revise necessary ones as needed.
def create_config_js_file(path, test_name):
    """Creates a configuration file used by the performance graphs.

    @param path: The string path to the directory in which to create the file.
    @param test_name: The string name of the test associated with this config
        file.
    """
    config_content = render_to_response(
        os.path.join(_TEMPLATE_DIR, 'config.js'), locals()).content
    with open(os.path.join(path, 'config.js'), 'w') as f:
        f.write(config_content)


def output_graph_data_for_entry(test_name, graph_name, job_name, platform,
                                units, better_direction, url, perf_keys,
                                chart_keys, options, summary_id_to_rev_num):
    """Outputs data for a perf test result into appropriate graph data files.

    @param test_name: The string name of a test.
    @param graph_name: The string name of the graph associated with this
        result.
    @param job_name: The string name of the autotest job associated with this
        test result.
    @param platform: The string name of the platform associated with this test
        result.
    @param units: The string name of the units displayed on this graph.
    @param better_direction: A string representing whether better perf results
        are those that are "higher" or "lower".
    @param url: The string URL of a webpage documenting the current graph.
    @param perf_keys: A list of 2-tuples containing perf keys measured by the
        test, where the first tuple element is a string key name, and the
        second tuple element is the associated numeric perf value.
    @param chart_keys: A list of perf key names that need to be displayed in
        the current graph.
    @param options: An optparse.OptionParser options object.
    @param summary_id_to_rev_num: A dictionary mapping a string (representing
        a test/platform/release combination) to the next integer revision
        number to use in the graph data file.
    """
    # A string ID that is assumed to be unique across all charts.
    test_id = test_name + '__' + graph_name
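    # For example, a hypothetical test "desktopui_PageLoadTime" with graph
    # "load_time" yields the test_id "desktopui_PageLoadTime__load_time", and
    # its data for release 23 on platform "lumpy" lands in:
    #   <_GRAPH_DATA_DIR>/r23/lumpy/desktopui_PageLoadTime__load_time/
    #       load_time-summary.dat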

    release_num = get_release_from_jobname(job_name)
    if not release_num:
        logging.warning('Could not obtain release number for job name: %s',
                        job_name)
        return
    build_num = '%d.%d.%d.%d' % (release_num[0], release_num[1],
                                 release_num[2], release_num[3])

    # Filter out particular test runs that we explicitly do not want to
    # consider.
    # TODO(dennisjeffrey): Figure out a way to eliminate the need for these
    # special checks: crosbug.com/36685.
    if test_name == 'platform_BootPerfServer' and 'perfalerts' not in job_name:
        # Skip platform_BootPerfServer test results that do not come from the
        # "perfalerts" runs.
        return

    # Consider all releases for which this test result may need to be included
    # on a graph.
    start_release = max(release_num[0], options.oldest_milestone)
    for release in xrange(start_release, options.tot_milestone + 1):
        output_path = os.path.join(_GRAPH_DATA_DIR, 'r%d' % release, platform,
                                   test_id)
        summary_file = os.path.join(output_path, graph_name + '-summary.dat')

        # Set up the output directory if it doesn't already exist.
        if not os.path.exists(output_path):
            os.makedirs(output_path)

            # Create auxiliary files.
            create_config_js_file(output_path, test_name)
            open(summary_file, 'w').close()
            graphs = [{
                'name': graph_name,
                'units': units,
                'better_direction': better_direction,
                'info_url': url,
                'important': False,
            }]
            with open(os.path.join(output_path, 'graphs.dat'), 'w') as f:
                f.write(simplejson.dumps(graphs, indent=2))

            # Add symlinks to the plotting code.
            for slink, target in _SYMLINK_LIST:
                slink = os.path.join(output_path, slink)
                symlink_force(slink, target)

        # Write data to graph data file if it belongs in the current release.
        if is_on_mainline_of_milestone(job_name, release):
            entry = {}
            entry['traces'] = {}
            entry['ver'] = build_num

            key_to_vals = {}
            for perf_key in perf_keys:
                if perf_key[0] in chart_keys:
                    # Replace dashes with underscores so different lines show
                    # up as different colors in the graphs.
                    key = perf_key[0].replace('-', '_')
                    if key not in key_to_vals:
                        key_to_vals[key] = []
                    # There are some cases where results for
                    # platform_BootPerfServer are negative in reboot/shutdown
                    # times. Ignore these negative values.
                    if float(perf_key[1]) < 0.0:
                        continue
                    key_to_vals[key].append(perf_key[1])
            for key in key_to_vals:
                if len(key_to_vals[key]) == 1:
                    entry['traces'][key] = [key_to_vals[key][0], '0.0']
                else:
                    mean, std_dev = mean_and_standard_deviation(
                        map(float, key_to_vals[key]))
                    entry['traces'][key] = [str(mean), str(std_dev)]

            if entry['traces']:
                summary_id = '%s|%s|%s' % (test_id, platform, release)

                rev = summary_id_to_rev_num.get(summary_id, 0)
                summary_id_to_rev_num[summary_id] = rev + 1
                entry['rev'] = rev

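                # Each line appended to the summary file is one JSON object
                # describing a single build; an illustrative example
                # (hypothetical key and values):
                #   {"traces": {"page_load_time": ["250.0", "12.5"]},
                #    "rev": 12, "ver": "26.2384.0.0"}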
                with open(summary_file, 'a') as f:
                    f.write(simplejson.dumps(entry) + '\n')


def process_perf_data_files(file_names, test_name, completed_ids,
                            test_name_to_charts, options,
                            summary_id_to_rev_num):
    """Processes data files for a single test/platform.

    Multiple data files may exist if the given test name is associated with one
    or more old test names (i.e., the name of the test has changed over time).
    In this case, we treat all results from the specified files as if they came
    from a single test associated with the current test name.

    This function converts the data from the specified data files into new
    data files formatted in a way that can be graphed.

    @param file_names: A list of perf data files to process.
    @param test_name: The string name of the test associated with the file
        names to process.
    @param completed_ids: A dictionary of already-processed job IDs.
    @param test_name_to_charts: A dictionary mapping test names to a list of
        dictionaries, in which each dictionary contains information about a
        chart associated with the given test name.
    @param options: An optparse.OptionParser options object.
    @param summary_id_to_rev_num: A dictionary mapping a string (representing
        a test/platform/release combination) to an integer revision number.

    @return The number of newly-added graph data entries.
    """
    newly_added_count = 0
    for file_name in file_names:
        with open(file_name, 'r') as fp:
            for line in fp.readlines():
                info = simplejson.loads(line.strip())
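                # Each line is a JSON list of the form
                # [job_id, job_name, platform, perf_keys], e.g. (hypothetical
                # values):
                #   ["12345", "lumpy-release-R21-2384.0.0_pyauto_perf",
                #    "lumpy", [["milliseconds_NewTabPage", 1500.0]]]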
                job_id = info[0]
                job_name = info[1]
                platform = info[2]
                perf_keys = info[3]

                # Skip this job ID if it's already been processed.
                if job_id in completed_ids:
                    continue

                # Scan the desired charts and see if we need to output the
                # current line info to a graph output file.
                for chart in test_name_to_charts[test_name]:
                    graph_name = chart['graph_name']
                    units = chart['units']
                    better_direction = chart['better_direction']
                    url = chart['info_url']
                    chart_keys = chart['keys']

                    store_entry = False
                    for chart_key in chart_keys:
                        if chart_key in [x[0] for x in perf_keys]:
                            store_entry = True
                            break

                    if store_entry:
                        output_graph_data_for_entry(
                            test_name, graph_name, job_name, platform,
                            units, better_direction, url, perf_keys,
                            chart_keys, options, summary_id_to_rev_num)

                # Mark this job ID as having been processed. (Use a distinct
                # name for this file handle so it does not shadow |fp| above.)
                with open(_COMPLETED_ID_FILE, 'a') as id_file:
                    id_file.write(job_id + '\n')
                completed_ids[job_id] = True
                newly_added_count += 1

    return newly_added_count


def initialize_graph_dir(options):
    """Initialize/populate the directory that will serve the perf graphs.

    @param options: An optparse.OptionParser options object.
    """
    charts = simplejson.loads(open(_CHART_CONFIG_FILE, 'r').read())

    # Identify all the job IDs already processed in the graphs, so that we
    # don't add that data again.
    completed_ids = {}
    if os.path.exists(_COMPLETED_ID_FILE):
        with open(_COMPLETED_ID_FILE, 'r') as fp:
            job_ids = map(lambda x: x.strip(), fp.readlines())
            for job_id in job_ids:
                completed_ids[job_id] = True

    # Identify the next revision number to use in the graph data files for
    # each test/platform/release combination.
    summary_id_to_rev_num = {}
    if os.path.exists(_REV_NUM_FILE):
        with open(_REV_NUM_FILE, 'r') as fp:
            summary_id_to_rev_num = simplejson.loads(fp.read())
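            # The loaded dictionary is keyed by summary IDs of the form
            # "<test_id>|<platform>|<release>", e.g. (hypothetical entry):
            #   {"desktopui_PageLoadTime__load_time|lumpy|23": 42}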

    test_name_to_charts = {}
    test_names = set()
    test_name_to_old_names = {}
    # The _CHART_CONFIG_FILE should (and is assumed to) have one entry per
    # test_name. That entry should declare all graphs associated with the
    # given test_name.
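    # A config entry is assumed to look roughly like this (field values are
    # hypothetical):
    #   {"test_name": "desktopui_PageLoadTime",
    #    "old_test_names": ["desktopui_PageLoad"],
    #    "graphs": [{"graph_name": "load_time", "units": "ms",
    #                "better_direction": "lower", "info_url": "",
    #                "keys": ["milliseconds_PageLoad"]}]}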
    for chart in charts:
        test_name_to_charts[chart['test_name']] = chart['graphs']
        test_names.add(chart['test_name'])
        test_name_to_old_names[chart['test_name']] = (
            chart.get('old_test_names', []))

    # Scan all database data and format/output only the new data specified in
    # the graph JSON file.
    newly_added_count = 0
    for i, test_name in enumerate(test_names):
        logging.debug('Analyzing/converting data for test %d of %d: %s',
                      i + 1, len(test_names), test_name)

        test_data_dir = os.path.join(_DATA_DIR, test_name)
        if not os.path.exists(test_data_dir):
            logging.warning('No test data directory for test: %s', test_name)
            continue
        files = os.listdir(test_data_dir)
        for file_name in files:
            logging.debug('Processing perf platform data file: %s', file_name)

            # The current test may be associated with one or more old test
            # names for which perf results exist for the current platform.
            # If so, we need to consider those old perf results too, as being
            # associated with the current test/platform.
            files_to_process = [os.path.join(test_data_dir, file_name)]
            for old_test_name in test_name_to_old_names[test_name]:
                old_test_file_name = os.path.join(_DATA_DIR, old_test_name,
                                                  file_name)
                if os.path.exists(old_test_file_name):
                    logging.debug('(also processing this platform for old '
                                  'test name "%s")', old_test_name)
                    files_to_process.append(old_test_file_name)

            newly_added_count += process_perf_data_files(
                files_to_process, test_name, completed_ids,
                test_name_to_charts, options, summary_id_to_rev_num)

    # Store the latest revision numbers for each test/platform/release
    # combination, to be used on the next invocation of this script.
    with open(_REV_NUM_FILE, 'w') as fp:
        fp.write(simplejson.dumps(summary_id_to_rev_num, indent=2))

    logging.info('Added info for %d new jobs to the graphs!', newly_added_count)


def create_branch_platform_overview(graph_dir, branch, platform,
                                    branch_to_platform_to_test):
    """Create an overview webpage for the given branch/platform combination.

    @param graph_dir: The string directory containing the graphing files.
    @param branch: The string name of the milestone (branch).
    @param platform: The string name of the platform.
    @param branch_to_platform_to_test: A dictionary mapping branch names to
        another dictionary, which maps platform names to a list of test names.
    """
    branches = sorted(branch_to_platform_to_test.keys(), reverse=True)
    platform_to_tests = branch_to_platform_to_test[branch]
    platform_list = sorted(platform_to_tests)
    tests = []
    for test_id in sorted(platform_to_tests[platform]):
        has_data = False
        test_name = ''
        test_dir = os.path.join(graph_dir, 'data', branch, platform, test_id)
        data_file_names = fnmatch.filter(os.listdir(test_dir), '*-summary.dat')
        if len(data_file_names):
            txt_name = data_file_names[0]
            # The name of a test is of the form "X: Y", where X is the
            # autotest name and Y is the graph name. For example:
            # "platform_BootPerfServer: seconds_from_kernel".
            test_name = (test_id[:test_id.find('__')] + ': ' +
                         txt_name[:txt_name.find('-summary.dat')])
            file_name = os.path.join(test_dir, txt_name)
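            # Treat the summary file as containing real data only if it is
            # more than a few bytes long; an empty or near-empty file holds
            # no JSON entries.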
            has_data = os.path.getsize(file_name) > 3
        test_info = {
            'id': test_id,
            'name': test_name,
            'has_data': has_data
        }
        tests.append(test_info)

    # Special check for certain platforms. Will be removed once we remove
    # all links to the old-style perf graphs.
    # TODO(dennisjeffrey): Simplify the below code once the following bug
    # is addressed to standardize the platform names: crosbug.com/38521.
    platform_converted = 'snow' if platform == 'daisy' else platform
    platform_converted_2 = ('x86-' + platform if platform in
                            ['alex', 'mario', 'zgb'] else platform)

    # Output the overview page.
    page_content = render_to_response(
        os.path.join(_TEMPLATE_DIR, 'branch_platform_overview.html'),
        locals()).content
    file_name = os.path.join(graph_dir, '%s-%s.html' % (branch, platform))
    with open(file_name, 'w') as f:
        f.write(page_content)


def create_comparison_overview(compare_type, graph_dir, test_id, test_dir,
                               branch_to_platform_to_test):
    """Create an overview webpage to compare a test by platform or by branch.

    @param compare_type: The string type of comparison graph this is, either
        "platform" or "branch".
    @param graph_dir: The string directory containing the graphing files.
    @param test_id: The string unique ID for a test result.
    @param test_dir: The string directory name containing the test data.
    @param branch_to_platform_to_test: A dictionary mapping branch names to
        another dictionary, which maps platform names to a list of test names.
    """
    branches = sorted(branch_to_platform_to_test.keys())
    platforms = [x.keys() for x in branch_to_platform_to_test.values()]
    platforms = sorted(set([x for sublist in platforms for x in sublist]))

    autotest_name = test_id[:test_id.find('__')]

    text_file_names = fnmatch.filter(os.listdir(test_dir), '*-summary.dat')
    test_name = '???'
    if len(text_file_names):
        txt_name = text_file_names[0]
        test_name = txt_name[:txt_name.find('-summary.dat')]

    if compare_type == 'branch':
        outer_list_items = platforms
        inner_list_items = branches
        outer_item_type = 'platform'
    else:
        outer_list_items = reversed(branches)
        inner_list_items = platforms
        outer_item_type = 'branch'

    outer_list = []
    for outer_item in outer_list_items:
        inner_list = []
        for inner_item in inner_list_items:
            if outer_item_type == 'branch':
                branch = outer_item
                platform = inner_item
            else:
                branch = inner_item
                platform = outer_item
            has_data = False
            test_dir = os.path.join(graph_dir, 'data', branch, platform,
                                    test_id)
            if os.path.exists(test_dir):
                data_file_names = fnmatch.filter(os.listdir(test_dir),
                                                 '*-summary.dat')
                if len(data_file_names):
                    file_name = os.path.join(test_dir, data_file_names[0])
                    has_data = os.path.getsize(file_name) > 3
            info = {
                'inner_item': inner_item,
                'outer_item': outer_item,
                'branch': branch,
                'platform': platform,
                'has_data': has_data
            }
            inner_list.append(info)
        outer_list.append(inner_list)

    # Output the overview page.
    page_content = render_to_response(
        os.path.join(_TEMPLATE_DIR, 'compare_by_overview.html'),
        locals()).content
    if compare_type == 'branch':
        file_name = os.path.join(graph_dir, test_id + '_branch.html')
    else:
        file_name = os.path.join(graph_dir, test_id + '_platform.html')
    with open(file_name, 'w') as f:
        f.write(page_content)


def generate_overview_pages(graph_dir, options):
    """Create static overview webpages for all the perf graphs.

    @param graph_dir: The string directory containing all the graph data.
    @param options: An optparse.OptionParser options object.
    """
    # Identify all the milestone names for which we want overview pages.
    branches_dir = os.path.join(graph_dir, 'data')
    branches = os.listdir(branches_dir)
    branches = sorted(branches)
    branches = [x for x in branches
                if os.path.isdir(os.path.join(branches_dir, x)) and
                int(x[1:]) >= options.oldest_milestone]

    unique_tests = set()
    unique_test_to_dir = {}
    branch_to_platform_to_test = {}

    for branch in branches:
        platforms_dir = os.path.join(branches_dir, branch)
        if not os.path.isdir(platforms_dir):
            continue
        platforms = os.listdir(platforms_dir)

        platform_to_tests = {}
        for platform in platforms:
            tests_dir = os.path.join(platforms_dir, platform)
            tests = os.listdir(tests_dir)

            for test in tests:
                test_dir = os.path.join(tests_dir, test)
                unique_tests.add(test)
                unique_test_to_dir[test] = test_dir

            platform_to_tests[platform] = tests

        branch_to_platform_to_test[branch] = platform_to_tests

    for branch in branch_to_platform_to_test:
        platforms = branch_to_platform_to_test[branch]
        for platform in platforms:
            # Create overview page for this branch/platform combination.
            create_branch_platform_overview(
                graph_dir, branch, platform, branch_to_platform_to_test)

    # Make index.html a symlink to the most recent branch.
    latest_branch = branches[-1]
    first_plat_for_branch = sorted(
        branch_to_platform_to_test[latest_branch].keys())[0]
    symlink_force(
        os.path.join(graph_dir, 'index.html'),
        '%s-%s.html' % (latest_branch, first_plat_for_branch))

    # Now create overview pages for each test that compare by platform and by
    # branch.
    for test_id in unique_tests:
        for compare_type in ['branch', 'platform']:
            create_comparison_overview(
                compare_type, graph_dir, test_id, unique_test_to_dir[test_id],
                branch_to_platform_to_test)


def cleanup():
    """Cleans up when this script is done."""
    if os.path.isfile(_CURR_PID_FILE):
        os.remove(_CURR_PID_FILE)


def main():
    """Main function."""
    parser = optparse.OptionParser()
    parser.add_option('-t', '--tot-milestone', metavar='MSTONE', type='int',
                      default=_TOT_MILESTONE,
                      help='Tip-of-tree (most recent) milestone number. '
                           'Defaults to milestone %default (R%default).')
    parser.add_option('-o', '--oldest-milestone', metavar='MSTONE', type='int',
                      default=_OLDEST_MILESTONE_TO_GRAPH,
                      help='Oldest milestone number to display in the graphs. '
                           'Defaults to milestone %default (R%default).')
    parser.add_option('-c', '--clean', action='store_true', default=False,
                      help='Clean/delete existing graph files and then '
                           're-create them from scratch.')
    parser.add_option('-v', '--verbose', action='store_true', default=False,
                      help='Use verbose logging.')
    options, _ = parser.parse_args()

    log_level = logging.DEBUG if options.verbose else logging.INFO
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                        level=log_level)

    if not os.path.isdir(_DATA_DIR):
        logging.error('Could not find data directory "%s"', _DATA_DIR)
        logging.error('Did you forget to run extract_perf.py first?')
        sys.exit(1)

    common.die_if_already_running(_CURR_PID_FILE, logging)

    if options.clean:
        remove_path(_GRAPH_DIR)
    # Guard the makedirs call so that repeated incremental (non-clean) runs
    # do not fail when the graph data directory already exists.
    if not os.path.exists(_GRAPH_DATA_DIR):
        os.makedirs(_GRAPH_DATA_DIR)

    initialize_graph_dir(options)

    ui_dir = os.path.join(_GRAPH_DIR, 'ui')
    if not os.path.exists(ui_dir):
        logging.debug('Copying "ui" directory to %s', ui_dir)
        shutil.copytree(os.path.join(_SCRIPT_DIR, 'ui'), ui_dir)
    doc_dir = os.path.join(_GRAPH_DIR, 'doc')
    if not os.path.exists(doc_dir):
        logging.debug('Copying "doc" directory to %s', doc_dir)
        shutil.copytree(os.path.join(_SCRIPT_DIR, 'doc'), doc_dir)

    generate_overview_pages(_GRAPH_DIR, options)
    set_world_read_permissions(_GRAPH_DIR)

    cleanup()
    logging.info('All done!')


if __name__ == '__main__':
    main()