#!/usr/bin/env python
#
#                     The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
#===------------------------------------------------------------------------===#

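# Benchmark lld: run each benchmark's link under "perf stat" several times
# and submit the collected measurements to an LNT server.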
import os
import glob
import re
import subprocess
import json
import datetime
import argparse
import urllib
import urllib2

parser = argparse.ArgumentParser()
parser.add_argument('benchmark_directory')
parser.add_argument('--runs', type=int, default=10)
parser.add_argument('--wrapper', default='')
parser.add_argument('--machine', required=True)
parser.add_argument('--revision', required=True)
parser.add_argument('--threads', action='store_true')
parser.add_argument('--url', help='The LNT server URL to send the results to',
                    default='http://localhost:8000/db_default/v4/link/submitRun')
args = parser.parse_args()

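# A benchmark is a directory containing one or more response files. A plain
# response.txt has no variant; a response-<variant>.txt names a variant.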
class Bench:
    def __init__(self, directory, variant):
        self.directory = directory
        self.variant = variant
    def __str__(self):
        if not self.variant:
            return self.directory
        return '%s-%s' % (self.directory, self.variant)

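# Scan the subdirectories of the benchmark directory for response*.txt files
# and build a Bench for each one.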
def getBenchmarks():
    ret = []
    for i in glob.glob('*/response*.txt'):
        m = re.match(r'response-(.*)\.txt', os.path.basename(i))
        variant = m.groups()[0] if m else None
        ret.append(Bench(os.path.dirname(i), variant))
    return ret

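# perf prints counter values with thousands separators; strip the commas and
# parse the result as an int, falling back to float.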
def parsePerfNum(num):
    num = num.replace(b',', b'')
    try:
        return int(num)
    except ValueError:
        return float(num)

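# A "perf stat" counter line looks like "<value>  <event>  # <comment>".
# Return a {event: value} dict, or an empty dict for non-counter lines.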
def parsePerfLine(line):
    ret = {}
    line = line.split(b'#')[0].strip()
    if len(line) != 0:
        p = line.split()
        ret[p[1].strip().decode('ascii')] = parsePerfNum(p[0])
    return ret

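# Parse the complete "perf stat" output into a dict mapping each event name to
# its value, plus the wall-clock time under 'seconds-elapsed'.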
def parsePerf(output):
    ret = {}
    lines = [x.strip() for x in output.split(b'\n')]

    seconds = [x for x in lines if b'seconds time elapsed' in x][0]
    seconds = seconds.strip().split()[0].strip()
    ret['seconds-elapsed'] = parsePerfNum(seconds)

    measurement_lines = [x for x in lines if b'#' in x]
    for l in measurement_lines:
        ret.update(parsePerfLine(l))
    return ret

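# Run a command, returning its combined stdout and stderr; echo the output
# before re-raising if the command fails.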
def run(cmd):
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e.output)
        raise

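# Merge the measurements of a single run into the accumulator, which maps each
# metric name to the list of values seen so far.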
def combinePerfRun(acc, d):
    for k, v in d.items():
        a = acc.get(k, [])
        a.append(v)
        acc[k] = a

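# Run the link command args.runs times under "perf stat" (optionally prefixed
# by the comma-separated --wrapper command) and collect the per-run results.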
def perf(cmd):
    # Discard the first run to warm up any system cache.
    run(cmd)

    ret = {}
    wrapper_args = [x for x in args.wrapper.split(',') if x]
    for i in range(args.runs):
        os.unlink('t')
        out = run(wrapper_args + ['perf', 'stat'] + cmd)
        r = parsePerf(out)
        combinePerfRun(ret, r)
    os.unlink('t')
    return ret

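# Link one benchmark in its own directory with ../ld.lld, using the response
# file that matches the variant, and return the collected measurements.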
def runBench(bench):
    thread_arg = [] if args.threads else ['--no-threads']
    os.chdir(bench.directory)
    suffix = '-%s' % bench.variant if bench.variant else ''
    response = 'response' + suffix + '.txt'
    ret = perf(['../ld.lld', '@' + response, '-o', 't'] + thread_arg)
    ret['name'] = str(bench)
    os.chdir('..')
    return ret

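# Run every benchmark and wrap the results in an LNT "submit run" JSON
# document (format_version 2).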
def buildLntJson(benchmarks):
    start = datetime.datetime.utcnow().isoformat()
    tests = [runBench(b) for b in benchmarks]
    end = datetime.datetime.utcnow().isoformat()
    ret = {
        'format_version' : 2,
        'machine' : { 'name' : args.machine },
        'run' : {
            'start_time' : start,
            'end_time' : end,
            'llvm_project_revision': args.revision
        },
        'tests' : tests
    }
    return json.dumps(ret, sort_keys=True, indent=4)

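# POST the JSON document to the LNT submitRun URL as the form-encoded
# 'input_data' field.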
def submitToServer(data):
    data2 = urllib.urlencode({ 'input_data' : data }).encode('ascii')
    urllib2.urlopen(urllib2.Request(args.url, data2))

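# Change into the benchmark directory, run all benchmarks and upload the
# results.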
os.chdir(args.benchmark_directory)
data = buildLntJson(getBenchmarks())
submitToServer(data)