# xref: /llvm-project/llvm/utils/lit/lit/reports.py (revision 8507dbaec3f644b8a0c6291f097800d82a4f4b16)
import abc
import base64
import datetime
import itertools
import json
import os
import tempfile

from xml.sax.saxutils import quoteattr as quo

import lit.Test


def by_suite_and_test_path(test):
    # Suite names are not necessarily unique.  Include object identity in sort
    # key to avoid mixing tests of different suites.
    return (test.suite.name, id(test.suite), test.path_in_suite)

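# Illustrative usage sketch (not part of the original file), assuming the
# standard lit.Test objects: sorting with this key keeps two distinct suites
# that happen to share a name from interleaving in the report:
#
#   tests.sort(key=by_suite_and_test_path)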

class Report(object):
    def __init__(self, output_file):
        self.output_file = output_file
        # Set by the option parser later.
        self.use_unique_output_file_name = False

    def write_results(self, tests, elapsed):
        if self.use_unique_output_file_name:
            filename, ext = os.path.splitext(os.path.basename(self.output_file))
            fd, _ = tempfile.mkstemp(
                suffix=ext, prefix=f"{filename}.", dir=os.path.dirname(self.output_file)
            )
            report_file = os.fdopen(fd, "w")
        else:
            # Overwrite if the results already exist.
            report_file = open(self.output_file, "w")

        with report_file:
            self._write_results_to_file(tests, elapsed, report_file)

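    # Illustrative sketch (hypothetical file names, not from the original
    # source): with use_unique_output_file_name set, an output_file of
    # "results.json" produces a fresh sibling such as "results.k1x2q9.json",
    # since tempfile.mkstemp(suffix=".json", prefix="results.", dir=...)
    # never reuses an existing name.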
    @abc.abstractmethod
    def _write_results_to_file(self, tests, elapsed, file):
        """Write test results to the file object "file"."""
        pass


class JsonReport(Report):
    def _write_results_to_file(self, tests, elapsed, file):
        unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
        tests = [t for t in tests if t.result.code not in unexecuted_codes]
        # Construct the data we will write.
        data = {}
        # Encode the current lit version as a schema version.
        data["__version__"] = lit.__versioninfo__
        data["elapsed"] = elapsed
        # FIXME: Record some information on the lit configuration used?
        # FIXME: Record information from the individual test suites?

        # Encode the tests.
        data["tests"] = tests_data = []
        for test in tests:
            test_data = {
                "name": test.getFullName(),
                "code": test.result.code.name,
                "output": test.result.output,
                "elapsed": test.result.elapsed,
            }

            # Add test metrics, if present.
            if test.result.metrics:
                test_data["metrics"] = metrics_data = {}
                for key, value in test.result.metrics.items():
                    metrics_data[key] = value.todata()

            # Report micro-tests separately, if present.
            if test.result.microResults:
                for key, micro_test in test.result.microResults.items():
                    # Expand parent test name with micro test name.
                    parent_name = test.getFullName()
                    micro_full_name = parent_name + ":" + key

                    micro_test_data = {
                        "name": micro_full_name,
                        "code": micro_test.code.name,
                        "output": micro_test.output,
                        "elapsed": micro_test.elapsed,
                    }
                    if micro_test.metrics:
                        micro_test_data["metrics"] = micro_metrics_data = {}
                        for metric_key, metric_value in micro_test.metrics.items():
                            micro_metrics_data[metric_key] = metric_value.todata()

                    tests_data.append(micro_test_data)

            tests_data.append(test_data)

        json.dump(data, file, indent=2, sort_keys=True)
        file.write("\n")

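# Illustrative sketch of the JSON emitted by JsonReport (all values are
# hypothetical):
#
#   {
#     "__version__": [0, 1, 0],
#     "elapsed": 12.34,
#     "tests": [
#       {
#         "code": "PASS",
#         "elapsed": 0.25,
#         "name": "my-suite :: sub/a.test",
#         "output": "..."
#       }
#     ]
#   }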

_invalid_xml_chars_dict = {
    c: None for c in range(32) if chr(c) not in ("\t", "\n", "\r")
}


def remove_invalid_xml_chars(s):
    # According to the XML 1.0 spec, control characters other than
    # \t, \r, and \n are not permitted anywhere in the document
    # (https://www.w3.org/TR/xml/#charsets) and therefore this function
    # removes them to produce a valid XML document.
    #
    # Note: In XML 1.1 only \0 is illegal (https://www.w3.org/TR/xml11/#charsets)
    # but lit currently produces XML 1.0 output.
    return s.translate(_invalid_xml_chars_dict)

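# Illustrative example (not part of the original file):
#
#   remove_invalid_xml_chars("ok\x1b[0m\tdone") == "ok[0m\tdone"
#
# The escape byte \x1b is stripped while the tab, a legal XML 1.0
# character, survives.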

class XunitReport(Report):
    skipped_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED, lit.Test.UNSUPPORTED}

    def _write_results_to_file(self, tests, elapsed, file):
        tests.sort(key=by_suite_and_test_path)
        tests_by_suite = itertools.groupby(tests, lambda t: t.suite)

        file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
        file.write('<testsuites time="{time:.2f}">\n'.format(time=elapsed))
        for suite, test_iter in tests_by_suite:
            self._write_testsuite(file, suite, list(test_iter))
        file.write("</testsuites>\n")

    def _write_testsuite(self, file, suite, tests):
        skipped = 0
        failures = 0
        time = 0.0

        for t in tests:
            if t.result.code in self.skipped_codes:
                skipped += 1
            if t.isFailure():
                failures += 1
            time += t.result.elapsed or 0.0

        name = suite.config.name.replace(".", "-")
        file.write(
            f'<testsuite name={quo(name)} tests="{len(tests)}" failures="{failures}" skipped="{skipped}" time="{time:.2f}">\n'
        )
        for test in tests:
            self._write_test(file, test, name)
        file.write("</testsuite>\n")

    def _write_test(self, file, test, suite_name):
        path = "/".join(test.path_in_suite[:-1]).replace(".", "_")
        class_name = f"{suite_name}.{path or suite_name}"
        name = test.path_in_suite[-1]
        time = test.result.elapsed or 0.0
        file.write(
            f'<testcase classname={quo(class_name)} name={quo(name)} time="{time:.2f}"'
        )

        if test.isFailure():
            file.write(">\n  <failure><![CDATA[")
            output = test.result.output
            if isinstance(output, bytes):
                output = output.decode("utf-8", "ignore")
            # In the unlikely case that the output contains the CDATA
            # terminator we wrap it by creating a new CDATA block.
            output = output.replace("]]>", "]]]]><![CDATA[>")

            # Failing test output sometimes contains control characters like
            # \x1b (e.g. if there was some -fcolor-diagnostics output) which
            # are not allowed inside XML files.
            # This causes problems with CI systems: for example, the Jenkins
            # JUnit XML plugin will throw an exception when encountering those
            # characters, and similar problems also occur with GitLab CI.
            output = remove_invalid_xml_chars(output)
            file.write(output)
            file.write("]]></failure>\n</testcase>\n")
        elif test.result.code in self.skipped_codes:
            reason = self._get_skip_reason(test)
            file.write(f">\n  <skipped message={quo(reason)}/>\n</testcase>\n")
        else:
            file.write("/>\n")

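    # Illustrative note on the CDATA escape above (not from the original
    # source): an output of "a]]>b" is written as "a]]]]><![CDATA[>b", which
    # an XML parser reassembles into the original "a]]>b".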
    def _get_skip_reason(self, test):
        code = test.result.code
        if code == lit.Test.EXCLUDED:
            return "Test not selected (--filter, --max-tests)"
        if code == lit.Test.SKIPPED:
            return "User interrupt"

        assert code == lit.Test.UNSUPPORTED
        features = test.getMissingRequiredFeatures()
        if features:
            return "Missing required feature(s): " + ", ".join(features)
        return "Unsupported configuration"

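# Illustrative sketch of the XUnit document this report emits (names and
# timings are hypothetical):
#
#   <?xml version="1.0" encoding="UTF-8"?>
#   <testsuites time="12.34">
#   <testsuite name="my-suite" tests="2" failures="1" skipped="0" time="1.50">
#   <testcase classname="my-suite.sub_dir" name="a.test" time="0.50"/>
#   <testcase classname="my-suite.sub_dir" name="b.test" time="1.00">
#     <failure><![CDATA[...captured output...]]></failure>
#   </testcase>
#   </testsuite>
#   </testsuites>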

def gen_resultdb_test_entry(
    test_name, start_time, elapsed_time, test_output, result_code, is_expected
):
    test_data = {
        "testId": test_name,
        "start_time": datetime.datetime.fromtimestamp(start_time).isoformat() + "Z",
        "duration": "%.9fs" % elapsed_time,
        "summary_html": '<p><text-artifact artifact-id="artifact-content-in-request"></p>',
        "artifacts": {
            "artifact-content-in-request": {
                "contents": base64.b64encode(test_output.encode("utf-8")).decode(
                    "utf-8"
                ),
            },
        },
        "expected": is_expected,
    }
    if (
        result_code == lit.Test.PASS
        or result_code == lit.Test.XPASS
        or result_code == lit.Test.FLAKYPASS
    ):
        test_data["status"] = "PASS"
    elif result_code == lit.Test.FAIL or result_code == lit.Test.XFAIL:
        test_data["status"] = "FAIL"
    elif (
        result_code == lit.Test.UNSUPPORTED
        or result_code == lit.Test.SKIPPED
        or result_code == lit.Test.EXCLUDED
    ):
        test_data["status"] = "SKIP"
    elif result_code == lit.Test.UNRESOLVED or result_code == lit.Test.TIMEOUT:
        test_data["status"] = "ABORT"
    return test_data

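# Illustrative call (hypothetical values, not from the original file):
#
#   gen_resultdb_test_entry(
#       test_name="my-suite :: sub/a.test",
#       start_time=1700000000.0,
#       elapsed_time=0.25,
#       test_output="PASS: ...",
#       result_code=lit.Test.PASS,
#       is_expected=True,
#   )
#
# returns a dict with "status" == "PASS", "duration" == "0.250000000s", and
# the output base64-encoded under "artifacts".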

class ResultDBReport(Report):
    def _write_results_to_file(self, tests, elapsed, file):
        unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
        tests = [t for t in tests if t.result.code not in unexecuted_codes]
        data = {}
        data["__version__"] = lit.__versioninfo__
        data["elapsed"] = elapsed
        # Encode the tests.
        data["tests"] = tests_data = []
        for test in tests:
            tests_data.append(
                gen_resultdb_test_entry(
                    test_name=test.getFullName(),
                    start_time=test.result.start,
                    elapsed_time=test.result.elapsed,
                    test_output=test.result.output,
                    result_code=test.result.code,
                    is_expected=not test.result.code.isFailure,
                )
            )
            if test.result.microResults:
                for key, micro_test in test.result.microResults.items():
                    # Expand parent test name with micro test name.
                    parent_name = test.getFullName()
                    micro_full_name = parent_name + ":" + key + "microres"
                    tests_data.append(
                        gen_resultdb_test_entry(
                            test_name=micro_full_name,
                            start_time=micro_test.start
                            if micro_test.start
                            else test.result.start,
                            elapsed_time=micro_test.elapsed
                            if micro_test.elapsed
                            else test.result.elapsed,
                            test_output=micro_test.output,
                            result_code=micro_test.code,
                            is_expected=not micro_test.code.isFailure,
                        )
                    )

        json.dump(data, file, indent=2, sort_keys=True)
        file.write("\n")


class TimeTraceReport(Report):
    skipped_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED, lit.Test.UNSUPPORTED}

    def _write_results_to_file(self, tests, elapsed, file):
        # Find when first test started so we can make start times relative.
        first_start_time = min([t.result.start for t in tests])
        events = [
            self._get_test_event(x, first_start_time)
            for x in tests
            if x.result.code not in self.skipped_codes
        ]

        json_data = {"traceEvents": events}

        json.dump(json_data, file, indent=2, sort_keys=True)

    def _get_test_event(self, test, first_start_time):
        test_name = test.getFullName()
        elapsed_time = test.result.elapsed or 0.0
        start_time = test.result.start - first_start_time if test.result.start else 0.0
        pid = test.result.pid or 0
        return {
            "pid": pid,
            "tid": 1,
            "ph": "X",
            "ts": int(start_time * 1000000.0),
            "dur": int(elapsed_time * 1000000.0),
            "name": test_name,
        }

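# Illustrative note (not from the original file): each event uses the Chrome
# trace-event format ("ph": "X" complete events with microsecond "ts" and
# "dur"), so the resulting file can be opened in chrome://tracing or
# https://ui.perfetto.dev. A single hypothetical event:
#
#   {"pid": 1234, "tid": 1, "ph": "X", "ts": 0, "dur": 250000,
#    "name": "my-suite :: sub/a.test"}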