#!/usr/bin/env python
#
# The LLVM Compiler Infrastructure
#
# This file is distributed under the University of Illinois Open Source
# License. See LICENSE.TXT for details.
#
# ==------------------------------------------------------------------------==#

import os
import glob
import re
import subprocess
import json
import datetime
import argparse
import urllib
import urllib2

parser = argparse.ArgumentParser()
parser.add_argument('benchmark_directory')
parser.add_argument('--runs', type=int, default=10)
parser.add_argument('--wrapper', default='')
parser.add_argument('--machine', required=True)
parser.add_argument('--revision', required=True)
parser.add_argument('--threads', action='store_true')
parser.add_argument('--url', help='The LNT server URL to send the results to',
                    default='http://localhost:8000/db_default/v4/link/submitRun')
args = parser.parse_args()
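# The benchmark directory is expected to contain an ld.lld binary next to one
# subdirectory per benchmark, each holding a linker response file named
# response.txt or response-<variant>.txt (see getBenchmarks and runBench below).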

class Bench:
    def __init__(self, directory, variant):
        self.directory = directory
        self.variant = variant
    def __str__(self):
        if not self.variant:
            return self.directory
        return '%s-%s' % (self.directory, self.variant)

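# Each benchmark is identified by its directory plus an optional variant taken
# from the response file name. The names below are purely illustrative:
# 'chromium/response.txt' becomes Bench('chromium', None) and
# 'chromium/response-icf.txt' becomes Bench('chromium', 'icf').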
def getBenchmarks():
    ret = []
    for i in glob.glob('*/response*.txt'):
        m = re.match('response-(.*)\.txt', os.path.basename(i))
        variant = m.groups()[0] if m else None
        ret.append(Bench(os.path.dirname(i), variant))
    return ret

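# Perf prints counter values with thousands separators; parsePerfNum turns a
# value such as b'1,234,567' into 1234567 and b'0.42' into 0.42.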
def parsePerfNum(num):
    num = num.replace(b',',b'')
    try:
        return int(num)
    except ValueError:
        return float(num)

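# A 'perf stat' counter line looks roughly like
#   b'  9,765,432   instructions   #  1.23  insn per cycle'
# (numbers and spacing vary). Everything after '#' is ignored, and the first
# two remaining fields become one dictionary entry, e.g. {'instructions': 9765432}.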
def parsePerfLine(line):
    ret = {}
    line = line.split(b'#')[0].strip()
    if len(line) != 0:
        p = line.split()
        ret[p[1].strip().decode('ascii')] = parsePerfNum(p[0])
    return ret

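# parsePerf collects every counter line plus the summary that perf prints at
# the end, which looks roughly like b'  1.234567890 seconds time elapsed' and
# is reported here as 'seconds-elapsed'.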
def parsePerf(output):
    ret = {}
    lines = [x.strip() for x in output.split(b'\n')]

    seconds = [x for x in lines if b'seconds time elapsed' in x][0]
    seconds = seconds.strip().split()[0].strip()
    ret['seconds-elapsed'] = parsePerfNum(seconds)

    measurement_lines = [x for x in lines if b'#' in x]
    for l in measurement_lines:
        ret.update(parsePerfLine(l))
    return ret

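# run executes a command and returns its combined stdout and stderr, echoing
# the output before re-raising if the command fails.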
def run(cmd):
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e.output)
        raise e

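# combinePerfRun appends the measurements of one run to the per-metric lists in
# acc, so e.g. acc == {'instructions': [100]} combined with
# d == {'instructions': 110} becomes {'instructions': [100, 110]}.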
def combinePerfRun(acc, d):
    for k,v in d.items():
        a = acc.get(k, [])
        a.append(v)
        acc[k] = a

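# perf runs the command once to warm any caches, then args.runs more times
# under 'perf stat' (optionally prefixed by the comma-separated --wrapper
# command), deleting the 't' output file between runs. It returns a dictionary
# mapping each metric name to the list of values from the individual runs.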
def perf(cmd):
    # Discard the first run to warm up any system cache.
    run(cmd)

    ret = {}
    wrapper_args = [x for x in args.wrapper.split(',') if x]
    for i in range(args.runs):
        os.unlink('t')
        out = run(wrapper_args + ['perf', 'stat'] + cmd)
        r = parsePerf(out)
        combinePerfRun(ret, r)
    os.unlink('t')
    return ret

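# runBench links one benchmark from inside its own directory: it invokes the
# ld.lld next to the benchmark directories with the matching response file,
# writes the output to a scratch file named 't', and passes --no-threads
# unless --threads was given.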
def runBench(bench):
    thread_arg = [] if args.threads else ['--no-threads']
    os.chdir(bench.directory)
    suffix = '-%s' % bench.variant if bench.variant else ''
    response = 'response' + suffix + '.txt'
    ret = perf(['../ld.lld', '@' + response, '-o', 't'] + thread_arg)
    ret['name'] = str(bench)
    os.chdir('..')
    return ret

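# buildLntJson assembles an LNT 'format_version' 2 report. The resulting JSON
# looks roughly like this (all values below are illustrative):
# {
#     "format_version": 2,
#     "machine": {"name": "my-machine"},
#     "run": {"start_time": "...", "end_time": "...",
#             "llvm_project_revision": "r318000"},
#     "tests": [{"name": "chromium", "seconds-elapsed": [1.2, ...], ...}]
# }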
def buildLntJson(benchmarks):
    start = datetime.datetime.utcnow().isoformat()
    tests = [runBench(b) for b in benchmarks]
    end = datetime.datetime.utcnow().isoformat()
    ret = {
        'format_version' : 2,
        'machine' : { 'name' : args.machine },
        'run' : {
            'start_time' : start,
            'end_time' : end,
            'llvm_project_revision': args.revision
        },
        'tests' : tests
    }
    return json.dumps(ret, sort_keys=True, indent=4)

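# submitToServer posts the report as the 'input_data' form field of an HTTP
# POST request to the LNT submitRun URL given by --url.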
def submitToServer(data):
    data2 = urllib.urlencode({ 'input_data' : data }).encode('ascii')
    urllib2.urlopen(urllib2.Request(args.url, data2))

os.chdir(args.benchmark_directory)
data = buildLntJson(getBenchmarks())
submitToServer(data)