#!/usr/bin/env python
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ==------------------------------------------------------------------------==#

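"""Run each benchmark in a directory under "perf stat" and submit the results
to an LNT server.

Illustrative invocation (the machine name, revision, and benchmark directory
are hypothetical):

    benchmark.py --machine my-machine --revision a1b2c3d lld-speed-test

The optional --wrapper flag is a comma-separated command prefix prepended to
the perf invocation, e.g. --wrapper=taskset,-c,0.
"""
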
import os
import glob
import re
import subprocess
import json
import datetime
import argparse

# Support both the Python 3 and Python 2 locations of urlencode/urlopen.
try:
    from urllib.parse import urlencode
    from urllib.request import urlopen, Request
except ImportError:
    from urllib import urlencode
    from urllib2 import urlopen, Request


parser = argparse.ArgumentParser()
parser.add_argument("benchmark_directory")
parser.add_argument("--runs", type=int, default=10)
parser.add_argument("--wrapper", default="")
parser.add_argument("--machine", required=True)
parser.add_argument("--revision", required=True)
parser.add_argument("--threads", action="store_true")
parser.add_argument(
    "--url",
    help="The LNT server URL to send the results to",
    default="http://localhost:8000/db_default/v4/link/submitRun",
)
args = parser.parse_args()


class Bench:
    """A benchmark directory plus an optional response-file variant."""

    def __init__(self, directory, variant):
        self.directory = directory
        self.variant = variant

    def __str__(self):
        if not self.variant:
            return self.directory
        return "%s-%s" % (self.directory, self.variant)


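# Illustrative layout (directory and variant names are hypothetical):
# "chromium/response.txt" yields Bench("chromium", None), printed as
# "chromium"; "chromium/response-gc.txt" yields Bench("chromium", "gc"),
# printed as "chromium-gc".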
def getBenchmarks():
    ret = []
    for i in glob.glob("*/response*.txt"):
        m = re.match(r"response-(.*)\.txt", os.path.basename(i))
        variant = m.groups()[0] if m else None
        ret.append(Bench(os.path.dirname(i), variant))
    return ret


def parsePerfNum(num):
    # perf prints counter values with thousands separators, e.g. b"1,234,567".
    num = num.replace(b",", b"")
    try:
        return int(num)
    except ValueError:
        return float(num)


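# Illustrative "perf stat" output consumed by the two parsers below (the
# numbers are hypothetical). Each counter line maps its event name to the
# parsed value, and the summary line becomes "seconds-elapsed":
#
#          7,654,321      cycles          #  3.700 GHz
#         12,345,678      instructions    #  1.61  insn per cycle
#
#        1.234567890 seconds time elapsed
#
# -> {"cycles": 7654321, "instructions": 12345678, "seconds-elapsed": 1.23456789}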
def parsePerfLine(line):
    ret = {}
    # Keep only the part before the '#' comment; what remains is the counter
    # value followed by the event name.
    line = line.split(b"#")[0].strip()
    if len(line) != 0:
        p = line.split()
        ret[p[1].strip().decode("ascii")] = parsePerfNum(p[0])
    return ret


def parsePerf(output):
    ret = {}
    lines = [x.strip() for x in output.split(b"\n")]

    # perf stat ends with an "X.XXXXXXXXX seconds time elapsed" summary line.
    seconds = [x for x in lines if b"seconds time elapsed" in x][0]
    seconds = seconds.strip().split()[0].strip()
    ret["seconds-elapsed"] = parsePerfNum(seconds)

    # Counter lines are the ones annotated with a '#' comment.
    measurement_lines = [x for x in lines if b"#" in x]
    for l in measurement_lines:
        ret.update(parsePerfLine(l))
    return ret


def run(cmd):
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # check_output returns bytes; decode them for a readable message.
        print(e.output.decode("utf-8", "replace"))
        raise


def combinePerfRun(acc, d):
    # Collect each metric across runs into a per-key list, e.g. two runs
    # reporting {"cycles": 100} and {"cycles": 102} accumulate to
    # {"cycles": [100, 102]}.
    for k, v in d.items():
        a = acc.get(k, [])
        a.append(v)
        acc[k] = a


def perf(cmd):
    # Discard the first run to warm up any system cache.
    run(cmd)

    ret = {}
    wrapper_args = [x for x in args.wrapper.split(",") if x]
    for i in range(args.runs):
        # Delete the output of the previous run (the warm-up run created it)
        # so every measured link starts from the same state.
        os.unlink("t")
        out = run(wrapper_args + ["perf", "stat"] + cmd)
        r = parsePerf(out)
        combinePerfRun(ret, r)
    os.unlink("t")
    return ret


def runBench(bench):
    thread_arg = [] if args.threads else ["--no-threads"]
    os.chdir(bench.directory)
    suffix = "-%s" % bench.variant if bench.variant else ""
    response = "response" + suffix + ".txt"
    ret = perf(["../ld.lld", "@" + response, "-o", "t"] + thread_arg)
    ret["name"] = str(bench)
    os.chdir("..")
    return ret


def buildLntJson(benchmarks):
    start = datetime.datetime.utcnow().isoformat()
    tests = [runBench(b) for b in benchmarks]
    end = datetime.datetime.utcnow().isoformat()
    ret = {
        "format_version": 2,
        "machine": {"name": args.machine},
        "run": {
            "start_time": start,
            "end_time": end,
            "llvm_project_revision": args.revision,
        },
        "tests": tests,
    }
    return json.dumps(ret, sort_keys=True, indent=4)


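# Illustrative LNT payload built by buildLntJson above (all values are
# hypothetical):
#
#   {
#       "format_version": 2,
#       "machine": {"name": "my-machine"},
#       "run": {
#           "start_time": "2018-01-01T00:00:00",
#           "end_time": "2018-01-01T00:05:00",
#           "llvm_project_revision": "a1b2c3d"
#       },
#       "tests": [{"name": "chromium", "seconds-elapsed": [1.23, 1.25]}]
#   }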
def submitToServer(data):
    data2 = urlencode({"input_data": data}).encode("ascii")
    urlopen(Request(args.url, data2))


os.chdir(args.benchmark_directory)
data = buildLntJson(getBenchmarks())
submitToServer(data)