#!/usr/bin/env python3
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ==------------------------------------------------------------------------==#

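# Runs the lld benchmarks under perf stat and submits the results to an LNT
# server.  The benchmark directory is expected to contain one subdirectory
# per benchmark, each holding a linker response file named response.txt (or
# response-<variant>.txt for a named variant), plus an ld.lld binary at the
# top level that every benchmark invokes as ../ld.lld.
#
# Example invocation (machine name, revision, and path are illustrative):
#   benchmark.py --machine my-machine --revision 123456 /path/to/benchmarks
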
import os
import glob
import re
import subprocess
import json
import datetime
import argparse
import urllib.parse
import urllib.request

parser = argparse.ArgumentParser()
parser.add_argument('benchmark_directory')
parser.add_argument('--runs', type=int, default=10)
parser.add_argument('--wrapper', default='')
parser.add_argument('--machine', required=True)
parser.add_argument('--revision', required=True)
parser.add_argument('--threads', action='store_true')
parser.add_argument('--url', help='The LNT server URL to send the results to',
                    default='http://localhost:8000/db_default/v4/link/submitRun')
args = parser.parse_args()

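# A benchmark is a directory plus an optional variant name; the variant is
# taken from the response file suffix (response-<variant>.txt) and appended
# to the reported test name.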
class Bench:
    def __init__(self, directory, variant):
        self.directory = directory
        self.variant = variant

    def __str__(self):
        if not self.variant:
            return self.directory
        return '%s-%s' % (self.directory, self.variant)

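# Collect the benchmarks under the current directory: every response*.txt
# one level down defines one benchmark.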
def getBenchmarks():
    ret = []
    for i in glob.glob('*/response*.txt'):
        m = re.match(r'response-(.*)\.txt', os.path.basename(i))
        variant = m.groups()[0] if m else None
        ret.append(Bench(os.path.dirname(i), variant))
    return ret

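# perf stat reports one counter per line, e.g.
#   2,001,911,276      instructions              #    1.11  insn per cycle
# The helpers below strip the comma separators and turn each such line into
# a {counter-name: value} entry, keyed by the token after the number.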
def parsePerfNum(num):
    num = num.replace(b',', b'')
    try:
        return int(num)
    except ValueError:
        return float(num)

def parsePerfLine(line):
    ret = {}
    line = line.split(b'#')[0].strip()
    if len(line) != 0:
        p = line.split()
        ret[p[1].strip().decode('ascii')] = parsePerfNum(p[0])
    return ret

def parsePerf(output):
    ret = {}
    lines = [x.strip() for x in output.split(b'\n')]

    seconds = [x for x in lines if b'seconds time elapsed' in x][0]
    seconds = seconds.strip().split()[0].strip()
    ret['seconds-elapsed'] = parsePerfNum(seconds)

    measurement_lines = [x for x in lines if b'#' in x]
    for l in measurement_lines:
        ret.update(parsePerfLine(l))
    return ret

def run(cmd):
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e.output)
        raise e

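# Merge one run's counters into the accumulated results, so each counter
# maps to the list of values it took across all runs.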
def combinePerfRun(acc, d):
    for k, v in d.items():
        a = acc.get(k, [])
        a.append(v)
        acc[k] = a

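# Time cmd under perf stat args.runs times, after one untimed warm-up run.
# args.wrapper is a comma-separated command prefix prepended to perf (e.g. a
# CPU-pinning wrapper such as 'taskset,-c,0'); the output file 't' is
# removed between runs.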
def perf(cmd):
    # Discard the first run to warm up any system cache.
    run(cmd)

    ret = {}
    wrapper_args = [x for x in args.wrapper.split(',') if x]
    for i in range(args.runs):
        os.unlink('t')
        out = run(wrapper_args + ['perf', 'stat'] + cmd)
        r = parsePerf(out)
        combinePerfRun(ret, r)
    os.unlink('t')
    return ret

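# Run one benchmark: link from inside its directory with ../ld.lld and the
# response file, writing the output to 't', and label the result with the
# benchmark name.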
def runBench(bench):
    thread_arg = [] if args.threads else ['--no-threads']
    os.chdir(bench.directory)
    suffix = '-%s' % bench.variant if bench.variant else ''
    response = 'response' + suffix + '.txt'
    ret = perf(['../ld.lld', '@' + response, '-o', 't'] + thread_arg)
    ret['name'] = str(bench)
    os.chdir('..')
    return ret

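# Package the results as an LNT "format_version 2" report: one machine, one
# run bracketed by start/end timestamps and tagged with the revision under
# test, and one test record per benchmark.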
def buildLntJson(benchmarks):
    start = datetime.datetime.utcnow().isoformat()
    tests = [runBench(b) for b in benchmarks]
    end = datetime.datetime.utcnow().isoformat()
    ret = {
        'format_version': 2,
        'machine': {'name': args.machine},
        'run': {
            'start_time': start,
            'end_time': end,
            'llvm_project_revision': args.revision
        },
        'tests': tests
    }
    return json.dumps(ret, sort_keys=True, indent=4)

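# LNT's submitRun endpoint accepts the JSON report as a form-encoded POST
# with the report in the 'input_data' field.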
def submitToServer(data):
    data2 = urllib.parse.urlencode({'input_data': data}).encode('ascii')
    urllib.request.urlopen(urllib.request.Request(args.url, data2))

os.chdir(args.benchmark_directory)
data = buildLntJson(getBenchmarks())
submitToServer(data)