#!/usr/bin/env python
# Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Parses perf data files and creates chrome-based graph files from that data.

This script assumes that extract_perf.py was previously run to extract perf
test data from a database and then dump it into local text data files. This
script then parses the extracted perf data files and creates new data files
that can be directly read in by chrome's perf graphing infrastructure to
display perf graphs.

This script also generates a set of JavaScript/HTML overview pages that
present bird's-eye overviews of multiple perf graphs simultaneously.

Sample usage:
    python generate_perf_graphs.py -c -v

Run with -h to see the full set of command-line options.
"""

import fnmatch
import logging
import math
import optparse
import os
import re
import shutil
import simplejson
import sys

_SETTINGS = 'autotest_lib.frontend.settings'
os.environ['DJANGO_SETTINGS_MODULE'] = _SETTINGS

import common
from django.shortcuts import render_to_response

# Paths to files.
_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
_DATA_DIR = os.path.join(_SCRIPT_DIR, 'data')
_CURR_PID_FILE = os.path.join(_DATA_DIR, __file__ + '.curr_pid.txt')
_CHART_CONFIG_FILE = os.path.join(_SCRIPT_DIR, 'croschart_defaults.json')
_TEMPLATE_DIR = os.path.join(_SCRIPT_DIR, 'templates')

_GRAPH_DIR = os.path.join(_SCRIPT_DIR, '..', 'graphs')
_GRAPH_DATA_DIR = os.path.join(_GRAPH_DIR, 'data')
_COMPLETED_ID_FILE = os.path.join(_GRAPH_DATA_DIR, 'job_id_complete.txt')
_REV_NUM_FILE = os.path.join(_GRAPH_DATA_DIR, 'rev_num.txt')

# Values that can be configured through options.
# TODO(dennisjeffrey): Infer the tip-of-tree milestone dynamically once this
# issue is addressed: crosbug.com/38564.
_TOT_MILESTONE = 26
_OLDEST_MILESTONE_TO_GRAPH = 23

# Other values that can only be configured here in the code.
_SYMLINK_LIST = [
    ('report.html', '../../../../ui/cros_plotter.html'),
    ('js', '../../../../ui/js'),
]
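# The symlink targets above are relative to each per-test output directory,
# graphs/data/rNN/<platform>/<test_id>/ (see output_graph_data_for_entry), so
# the '../../../../ui/...' targets climb four levels back up to the graphs/ui/
# directory that main() copies into place.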


def set_world_read_permissions(path):
    """Recursively sets the content of |path| to be world-readable.

    @param path: The string path.
    """
    logging.debug('Setting world-read permissions recursively on %s', path)
    os.chmod(path, 0755)
    for root, dirs, files in os.walk(path):
        for d in dirs:
            dname = os.path.join(root, d)
            if not os.path.islink(dname):
                os.chmod(dname, 0755)
        for f in files:
            fname = os.path.join(root, f)
            if not os.path.islink(fname):
                os.chmod(fname, 0755)


def remove_path(path):
    """Remove the given path (whether file or directory).

    @param path: The string path.
    """
    if os.path.isdir(path):
        shutil.rmtree(path)
        return
    try:
        os.remove(path)
    except OSError:
        pass


def symlink_force(link_name, target):
    """Create a symlink, replacing whatever may already be at |link_name|.

    @param link_name: The string name of the link to create.
    @param target: The string destination file to which the link should point.
    """
    try:
        os.unlink(link_name)
    except EnvironmentError:
        pass
    try:
        os.symlink(target, link_name)
    except OSError:
        # os.unlink cannot remove a directory; clear it out and retry.
        remove_path(link_name)
        os.symlink(target, link_name)


def mean_and_standard_deviation(data):
    """Compute the mean and standard deviation of a list of numbers.

    @param data: A list of numeric values.

    @return A 2-tuple (mean, standard_deviation) computed from |data|.
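
    Example (illustrative; uses the n-1 "sample" standard deviation):
        >>> mean_and_standard_deviation([2.0, 4.0, 6.0])
        (4.0, 2.0)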
119 """
120 n = len(data)
121 if n == 0:
122 return 0.0, 0.0
123 mean = float(sum(data)) / n
124 if n == 1:
125 return mean, 0.0
126 # Divide by n-1 to compute "sample standard deviation".
127 variance = sum([(element - mean) ** 2 for element in data]) / (n - 1)
128 return mean, math.sqrt(variance)


def get_release_from_jobname(jobname):
    """Identifies the release number components from an autotest job name.

    For example:
    'lumpy-release-R21-2384.0.0_pyauto_perf' becomes (21, 2384, 0, 0).

    @param jobname: The string name of an autotest job.

    @return The 4-tuple containing components of the build release number, or
        None if those components cannot be identified from the |jobname|.
    """
    prog = re.compile(r'r(\d+)-(\d+)\.(\d+)\.(\d+)')
    m = prog.search(jobname.lower())
    if m:
        return (int(m.group(1)), int(m.group(2)), int(m.group(3)),
                int(m.group(4)))
    return None


def is_on_mainline_of_milestone(jobname, milestone):
    """Determines whether an autotest build is on the mainline of a milestone.

    @param jobname: The string name of an autotest job (containing release
        number).
    @param milestone: The integer milestone number to consider.

    @return True if the given autotest job name is for a release number that
        is either (1) an ancestor of the specified milestone, or (2) on the
        main branch line of the given milestone. Returns False otherwise.
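
    Examples (job names are made up for illustration):
        >>> is_on_mainline_of_milestone('lumpy-release-R21-2384.0.0_x', 23)
        True
        >>> is_on_mainline_of_milestone('lumpy-release-R21-2384.1.0_x', 23)
        False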
160 """
161 r = get_release_from_jobname(jobname)
162 m = milestone
163 # Handle garbage data that might exist.
164 if any(item < 0 for item in r):
165 raise Exception('Unexpected release info in job name: %s' % jobname)
166 if m == r[0]:
167 # Yes for jobs from the specified milestone itself.
168 return True
169 if r[0] < m and r[2] == 0 and r[3] == 0:
170 # Yes for jobs from earlier milestones that were before their respective
171 # branch points.
172 return True
173 return False


# TODO(dennisjeffrey): Determine whether or not we need all the values in the
# config file. Remove unnecessary ones and revise necessary ones as needed.
def create_config_js_file(path, test_name):
    """Creates a configuration file used by the performance graphs.

    @param path: The string path to the directory in which to create the file.
    @param test_name: The string name of the test associated with this config
        file.
    """
    config_content = render_to_response(
        os.path.join(_TEMPLATE_DIR, 'config.js'), locals()).content
    with open(os.path.join(path, 'config.js'), 'w') as f:
        f.write(config_content)


def output_graph_data_for_entry(test_name, graph_name, job_name, platform,
                                units, better_direction, url, perf_keys,
                                chart_keys, options, summary_id_to_rev_num):
    """Outputs data for a perf test result into appropriate graph data files.

    @param test_name: The string name of a test.
    @param graph_name: The string name of the graph associated with this
        result.
    @param job_name: The string name of the autotest job associated with this
        test result.
    @param platform: The string name of the platform associated with this test
        result.
    @param units: The string name of the units displayed on this graph.
    @param better_direction: A string representing whether better perf results
        are those that are "higher" or "lower".
    @param url: The string URL of a webpage documenting the current graph.
    @param perf_keys: A list of 2-tuples containing perf keys measured by the
        test, where the first tuple element is a string key name, and the
        second tuple element is the associated numeric perf value.
    @param chart_keys: A list of perf key names that need to be displayed in
        the current graph.
    @param options: An optparse.OptionParser options object.
    @param summary_id_to_rev_num: A dictionary mapping a string (representing
        a test/platform/release combination), to the next integer revision
        number to use in the graph data file.
    """
    # A string ID that is assumed to be unique across all charts.
    test_id = test_name + '__' + graph_name

    release_num = get_release_from_jobname(job_name)
    if not release_num:
        logging.warning('Could not obtain release number for job name: %s',
                        job_name)
        return
    build_num = '%d.%d.%d.%d' % (release_num[0], release_num[1],
                                 release_num[2], release_num[3])

    # Filter out particular test runs that we explicitly do not want to
    # consider.
    # TODO(dennisjeffrey): Figure out a way to eliminate the need for these
    # special checks: crosbug.com/36685.
    if test_name == 'platform_BootPerfServer' and 'perfalerts' not in job_name:
        # Skip platform_BootPerfServer test results that do not come from the
        # "perfalerts" runs.
        return

    # Consider all releases for which this test result may need to be
    # included on a graph.
    start_release = max(release_num[0], options.oldest_milestone)
    for release in xrange(start_release, options.tot_milestone + 1):
        output_path = os.path.join(_GRAPH_DATA_DIR, 'r%d' % release, platform,
                                   test_id)
        summary_file = os.path.join(output_path, graph_name + '-summary.dat')

        # Set up the output directory if it doesn't already exist.
        if not os.path.exists(output_path):
            os.makedirs(output_path)

            # Create auxiliary files.
            create_config_js_file(output_path, test_name)
            open(summary_file, 'w').close()
            graphs = [{
                'name': graph_name,
                'units': units,
                'better_direction': better_direction,
                'info_url': url,
                'important': False,
            }]
            with open(os.path.join(output_path, 'graphs.dat'), 'w') as f:
                f.write(simplejson.dumps(graphs, indent=2))

            # Add symlinks to the plotting code.
            for slink, target in _SYMLINK_LIST:
                slink = os.path.join(output_path, slink)
                symlink_force(slink, target)

        # Write data to graph data file if it belongs in the current release.
        if is_on_mainline_of_milestone(job_name, release):
            entry = {}
            entry['traces'] = {}
            entry['ver'] = build_num

            key_to_vals = {}
            for perf_key in perf_keys:
                if perf_key[0] in chart_keys:
                    # Replace dashes with underscores so different lines show
                    # up as different colors in the graphs.
                    key = perf_key[0].replace('-', '_')
                    if key not in key_to_vals:
                        key_to_vals[key] = []
                    # There are some cases where results for
                    # platform_BootPerfServer are negative in reboot/shutdown
                    # times. Ignore these negative values.
                    if float(perf_key[1]) < 0.0:
                        continue
                    key_to_vals[key].append(perf_key[1])
            for key in key_to_vals:
                if len(key_to_vals[key]) == 1:
                    entry['traces'][key] = [key_to_vals[key][0], '0.0']
                else:
                    mean, std_dev = mean_and_standard_deviation(
                        map(float, key_to_vals[key]))
                    entry['traces'][key] = [str(mean), str(std_dev)]

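            # Each appended summary line is a JSON object resembling (values
            # illustrative):
            #   {"traces": {"seconds_kernel_to_login": ["8.5", "0.3"]},
            #    "ver": "26.3701.0.0", "rev": 42}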
            if entry['traces']:
                summary_id = '%s|%s|%s' % (test_id, platform, release)

                rev = summary_id_to_rev_num.get(summary_id, 0)
                summary_id_to_rev_num[summary_id] = rev + 1
                entry['rev'] = rev

                with open(summary_file, 'a') as f:
                    f.write(simplejson.dumps(entry) + '\n')


def process_perf_data_file(file_name, test_name, completed_ids,
                           test_name_to_charts, options, summary_id_to_rev_num):
    """Processes a single perf data file to convert into graphable format.

    @param file_name: The string name of the perf data file to process.
    @param test_name: The string name of the test associated with the file
        name to process.
    @param completed_ids: A dictionary of already-processed job IDs.
    @param test_name_to_charts: A dictionary mapping test names to a list of
        dictionaries, in which each dictionary contains information about a
        chart associated with the given test name.
    @param options: An optparse.OptionParser options object.
    @param summary_id_to_rev_num: A dictionary mapping a string (representing
        a test/platform/release combination) to an integer revision number.

    @return The number of newly-added graph data entries.
    """
    newly_added_count = 0
    with open(file_name, 'r') as fp:
        for line in fp.readlines():
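            # Each line is a JSON list of the form (values illustrative):
            #   ["12345", "lumpy-release-R21-2384.0.0_pyauto_perf", "lumpy",
            #    [["seconds_kernel_to_login", "8.5"], ...]]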
            info = simplejson.loads(line.strip())
            job_id = info[0]
            job_name = info[1]
            platform = info[2]
            perf_keys = info[3]

            # Skip this job ID if it's already been processed.
            if job_id in completed_ids:
                continue

            # Scan the desired charts and see if we need to output the
            # current line info to a graph output file.
            for chart in test_name_to_charts[test_name]:
                graph_name = chart['graph_name']
                units = chart['units']
                better_direction = chart['better_direction']
                url = chart['info_url']
                chart_keys = chart['keys']

                store_entry = False
                for chart_key in chart_keys:
                    if chart_key in [x[0] for x in perf_keys]:
                        store_entry = True
                        break

                if store_entry:
                    output_graph_data_for_entry(
                        test_name, graph_name, job_name, platform,
                        units, better_direction, url, perf_keys,
                        chart_keys, options, summary_id_to_rev_num)

            # Mark this job ID as having been processed. (Use a distinct name
            # so the input file handle |fp| is not shadowed.)
            with open(_COMPLETED_ID_FILE, 'a') as id_file:
                id_file.write(job_id + '\n')
            completed_ids[job_id] = True
            newly_added_count += 1

    return newly_added_count


def initialize_graph_dir(options):
    """Initialize/populate the directory that will serve the perf graphs.

    @param options: An optparse.OptionParser options object.
    """
    charts = simplejson.loads(open(_CHART_CONFIG_FILE, 'r').read())

    # Identify all the job IDs already processed in the graphs, so that we
    # don't add that data again.
    completed_ids = {}
    if os.path.exists(_COMPLETED_ID_FILE):
        with open(_COMPLETED_ID_FILE, 'r') as fp:
            job_ids = map(lambda x: x.strip(), fp.readlines())
            for job_id in job_ids:
                completed_ids[job_id] = True

    # Identify the next revision number to use in the graph data files for
    # each test/platform/release combination.
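    # Keys look like "<test_id>|<platform>|<release>", for example (values
    # illustrative) "platform_BootPerfServer__boot_times|lumpy|26", matching
    # the summary_id format built in output_graph_data_for_entry.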
    summary_id_to_rev_num = {}
    if os.path.exists(_REV_NUM_FILE):
        with open(_REV_NUM_FILE, 'r') as fp:
            summary_id_to_rev_num = simplejson.loads(fp.read())

    test_name_to_charts = {}
    test_names = set()
    for chart in charts:
        if chart['test_name'] not in test_name_to_charts:
            test_name_to_charts[chart['test_name']] = []
        test_name_to_charts[chart['test_name']].append(chart)
        test_names.add(chart['test_name'])

    # Scan all database data and format/output only the new data specified in
    # the graph JSON file.
    newly_added_count = 0
    for i, test_name in enumerate(test_names):
        logging.debug('Analyzing/converting data for test %d of %d: %s',
                      i + 1, len(test_names), test_name)

        test_data_dir = os.path.join(_DATA_DIR, test_name)
        if not os.path.exists(test_data_dir):
            logging.warning('No test data directory for test: %s', test_name)
            continue
        files = os.listdir(test_data_dir)
        for file_name in files:
            logging.debug('Processing perf platform data file: %s', file_name)
            newly_added_count += process_perf_data_file(
                os.path.join(test_data_dir, file_name), test_name,
                completed_ids, test_name_to_charts, options,
                summary_id_to_rev_num)

    # Store the latest revision numbers for each test/platform/release
    # combination, to be used on the next invocation of this script.
    with open(_REV_NUM_FILE, 'w') as fp:
        fp.write(simplejson.dumps(summary_id_to_rev_num, indent=2))

    logging.info('Added info for %d new jobs to the graphs!', newly_added_count)


def create_branch_platform_overview(graph_dir, branch, platform,
                                    branch_to_platform_to_test):
    """Create an overview webpage for the given branch/platform combination.

    @param graph_dir: The string directory containing the graphing files.
    @param branch: The string name of the milestone (branch).
    @param platform: The string name of the platform.
    @param branch_to_platform_to_test: A dictionary mapping branch names to
        another dictionary, which maps platform names to a list of test names.
    """
    branches = sorted(branch_to_platform_to_test.keys(), reverse=True)
    platform_to_tests = branch_to_platform_to_test[branch]
    platform_list = sorted(platform_to_tests)
    tests = []
    for test_id in sorted(platform_to_tests[platform]):
        has_data = False
        test_name = ''
        test_dir = os.path.join(graph_dir, 'data', branch, platform, test_id)
        data_file_names = fnmatch.filter(os.listdir(test_dir), '*-summary.dat')
        if len(data_file_names):
            txt_name = data_file_names[0]
            # The name of a test is of the form "X: Y", where X is the
            # autotest name and Y is the graph name. For example:
            # "platform_BootPerfServer: seconds_from_kernel".
            test_name = (test_id[:test_id.find('__')] + ': ' +
                         txt_name[:txt_name.find('-summary.dat')])
            file_name = os.path.join(test_dir, txt_name)
            has_data = os.path.getsize(file_name) > 3
        test_info = {
            'id': test_id,
            'name': test_name,
            'has_data': has_data
        }
        tests.append(test_info)

    # Special check for certain platforms. Will be removed once we remove
    # all links to the old-style perf graphs.
    # TODO(dennisjeffrey): Simplify the below code once the following bug
    # is addressed to standardize the platform names: crosbug.com/38521.
    platform_converted = 'snow' if platform == 'daisy' else platform
    platform_converted_2 = ('x86-' + platform if platform in
                            ['alex', 'mario', 'zgb'] else platform)
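
    # Note: several locals assigned above (branches, platform_list,
    # platform_converted, platform_converted_2) look unused, but they appear
    # to be consumed by the branch_platform_overview.html template via the
    # locals() call below.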

    # Output the overview page.
    page_content = render_to_response(
        os.path.join(_TEMPLATE_DIR, 'branch_platform_overview.html'),
        locals()).content
    file_name = os.path.join(graph_dir, '%s-%s.html' % (branch, platform))
    with open(file_name, 'w') as f:
        f.write(page_content)


def create_comparison_overview(compare_type, graph_dir, test_id, test_dir,
                               branch_to_platform_to_test):
    """Create an overview webpage to compare a test by platform or by branch.

    @param compare_type: The string type of comparison graph this is, either
        "platform" or "branch".
    @param graph_dir: The string directory containing the graphing files.
    @param test_id: The string unique ID for a test result.
    @param test_dir: The string directory name containing the test data.
    @param branch_to_platform_to_test: A dictionary mapping branch names to
        another dictionary, which maps platform names to a list of test names.
    """
    branches = sorted(branch_to_platform_to_test.keys())
    platforms = [x.keys() for x in branch_to_platform_to_test.values()]
    platforms = sorted(set([x for sublist in platforms for x in sublist]))

    autotest_name = test_id[:test_id.find('__')]

    text_file_names = fnmatch.filter(os.listdir(test_dir), '*-summary.dat')
    test_name = '???'
    if len(text_file_names):
        txt_name = text_file_names[0]
        test_name = txt_name[:txt_name.find('-summary.dat')]

    if compare_type == 'branch':
        outer_list_items = platforms
        inner_list_items = branches
        outer_item_type = 'platform'
    else:
        outer_list_items = reversed(branches)
        inner_list_items = platforms
        outer_item_type = 'branch'

    outer_list = []
    for outer_item in outer_list_items:
        inner_list = []
        for inner_item in inner_list_items:
            if outer_item_type == 'branch':
                branch = outer_item
                platform = inner_item
            else:
                branch = inner_item
                platform = outer_item
            has_data = False
            test_dir = os.path.join(graph_dir, 'data', branch, platform,
                                    test_id)
            if os.path.exists(test_dir):
                data_file_names = fnmatch.filter(os.listdir(test_dir),
                                                 '*-summary.dat')
                if len(data_file_names):
                    file_name = os.path.join(test_dir, data_file_names[0])
                    has_data = os.path.getsize(file_name) > 3
            info = {
                'inner_item': inner_item,
                'outer_item': outer_item,
                'branch': branch,
                'platform': platform,
                'has_data': has_data
            }
            inner_list.append(info)
        outer_list.append(inner_list)

    # Output the overview page.
    page_content = render_to_response(
        os.path.join(_TEMPLATE_DIR, 'compare_by_overview.html'),
        locals()).content
    if compare_type == 'branch':
        file_name = os.path.join(graph_dir, test_id + '_branch.html')
    else:
        file_name = os.path.join(graph_dir, test_id + '_platform.html')
    with open(file_name, 'w') as f:
        f.write(page_content)


def generate_overview_pages(graph_dir, options):
    """Create static overview webpages for all the perf graphs.

    @param graph_dir: The string directory containing all the graph data.
    @param options: An optparse.OptionParser options object.
    """
    # Identify all the milestone names for which we want overview pages.
    branches_dir = os.path.join(graph_dir, 'data')
    branches = os.listdir(branches_dir)
    branches = sorted(branches)
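    # Branch directory names have the form "rNN" (created by
    # output_graph_data_for_entry), so strip the leading "r" when comparing
    # milestone numbers.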
    branches = [x for x in branches
                if os.path.isdir(os.path.join(branches_dir, x)) and
                int(x[1:]) >= options.oldest_milestone]

    unique_tests = set()
    unique_test_to_dir = {}
    branch_to_platform_to_test = {}

    for branch in branches:
        platforms_dir = os.path.join(branches_dir, branch)
        if not os.path.isdir(platforms_dir):
            continue
        platforms = os.listdir(platforms_dir)

        platform_to_tests = {}
        for platform in platforms:
            tests_dir = os.path.join(platforms_dir, platform)
            tests = os.listdir(tests_dir)

            for test in tests:
                test_dir = os.path.join(tests_dir, test)
                unique_tests.add(test)
                unique_test_to_dir[test] = test_dir

            platform_to_tests[platform] = tests

        branch_to_platform_to_test[branch] = platform_to_tests

    for branch in branch_to_platform_to_test:
        platforms = branch_to_platform_to_test[branch]
        for platform in platforms:
            # Create overview page for this branch/platform combination.
            create_branch_platform_overview(
                graph_dir, branch, platform, branch_to_platform_to_test)

    # Make index.html a symlink to the most recent branch.
    latest_branch = branches[-1]
    first_plat_for_branch = sorted(
        branch_to_platform_to_test[latest_branch].keys())[0]
    symlink_force(
        os.path.join(graph_dir, 'index.html'),
        '%s-%s.html' % (latest_branch, first_plat_for_branch))

    # Now create overview pages for each test that compare by platform and by
    # branch.
    for test_id in unique_tests:
        for compare_type in ['branch', 'platform']:
            create_comparison_overview(
                compare_type, graph_dir, test_id, unique_test_to_dir[test_id],
                branch_to_platform_to_test)


def cleanup():
    """Cleans up when this script is done."""
    if os.path.isfile(_CURR_PID_FILE):
        os.remove(_CURR_PID_FILE)


def main():
    """Main function."""
    parser = optparse.OptionParser()
    parser.add_option('-t', '--tot-milestone', metavar='MSTONE', type='int',
                      default=_TOT_MILESTONE,
                      help='Tip-of-tree (most recent) milestone number. '
                           'Defaults to milestone %default (R%default).')
    parser.add_option('-o', '--oldest-milestone', metavar='MSTONE', type='int',
                      default=_OLDEST_MILESTONE_TO_GRAPH,
                      help='Oldest milestone number to display in the graphs. '
                           'Defaults to milestone %default (R%default).')
    parser.add_option('-c', '--clean', action='store_true', default=False,
                      help='Clean/delete existing graph files and then '
                           're-create them from scratch.')
    parser.add_option('-v', '--verbose', action='store_true', default=False,
                      help='Use verbose logging.')
    options, _ = parser.parse_args()

    log_level = logging.DEBUG if options.verbose else logging.INFO
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
                        level=log_level)

    if not os.path.isdir(_DATA_DIR):
        logging.error('Could not find data directory "%s"', _DATA_DIR)
        logging.error('Did you forget to run extract_perf.py first?')
        sys.exit(1)

    common.die_if_already_running(_CURR_PID_FILE, logging)

    if options.clean:
        remove_path(_GRAPH_DIR)
        os.makedirs(_GRAPH_DATA_DIR)

    initialize_graph_dir(options)

    ui_dir = os.path.join(_GRAPH_DIR, 'ui')
    if not os.path.exists(ui_dir):
        logging.debug('Copying "ui" directory to %s', ui_dir)
        shutil.copytree(os.path.join(_SCRIPT_DIR, 'ui'), ui_dir)
    doc_dir = os.path.join(_GRAPH_DIR, 'doc')
    if not os.path.exists(doc_dir):
        logging.debug('Copying "doc" directory to %s', doc_dir)
        shutil.copytree(os.path.join(_SCRIPT_DIR, 'doc'), doc_dir)

    generate_overview_pages(_GRAPH_DIR, options)
    set_world_read_permissions(_GRAPH_DIR)

    cleanup()
    logging.info('All done!')


if __name__ == '__main__':
    main()