xref: /netbsd-src/external/apache2/llvm/dist/llvm/utils/lit/lit/reports.py (revision 82d56013d7b633d116a93943de88e08335357a7c)
1import itertools
2import json
3
4from xml.sax.saxutils import quoteattr as quo
5
6import lit.Test
7
8
def by_suite_and_test_path(test):
    """Sort key: group tests by their suite, then order by path in the suite.

    Suite names are not necessarily unique, so the suite's object identity
    is part of the key to keep tests of different suites from interleaving.
    """
    suite = test.suite
    return suite.name, id(suite), test.path_in_suite
13
14
class JsonReport(object):
    """Writes test results as a single JSON document.

    The document records the lit version (used as a schema version), the
    total elapsed time, and one entry per executed test.  Micro-tests are
    reported as separate, synthetic entries alongside their parent test.
    """

    def __init__(self, output_file):
        # Path the JSON document is written to by write_results().
        self.output_file = output_file

    def write_results(self, tests, elapsed):
        """Serialize `tests` to self.output_file as pretty-printed JSON.

        Tests that never executed (excluded or skipped) are omitted.
        """
        unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
        tests = [t for t in tests if t.result.code not in unexecuted_codes]
        # Construct the data we will write.
        data = {}
        # Encode the current lit version as a schema version.
        data['__version__'] = lit.__versioninfo__
        data['elapsed'] = elapsed
        # FIXME: Record some information on the lit configuration used?
        # FIXME: Record information from the individual test suites?

        # Encode the tests.
        data['tests'] = tests_data = []
        for test in tests:
            test_data = {
                'name': test.getFullName(),
                'code': test.result.code.name,
                'output': test.result.output,
                'elapsed': test.result.elapsed}

            # Add test metrics, if present.
            if test.result.metrics:
                test_data['metrics'] = metrics_data = {}
                for key, value in test.result.metrics.items():
                    metrics_data[key] = value.todata()

            # Report micro-tests separately, if present.
            # (Micro-test entries are appended before the parent entry,
            # preserving the historical output ordering.)
            if test.result.microResults:
                for micro_name, micro_test in test.result.microResults.items():
                    # Expand parent test name with micro test name.
                    micro_full_name = test.getFullName() + ':' + micro_name

                    micro_test_data = {
                        'name': micro_full_name,
                        'code': micro_test.code.name,
                        'output': micro_test.output,
                        'elapsed': micro_test.elapsed}
                    if micro_test.metrics:
                        micro_test_data['metrics'] = micro_metrics_data = {}
                        # Renamed from 'key'/'value' to avoid shadowing the
                        # enclosing micro-test loop variable.
                        for metric, metric_value in micro_test.metrics.items():
                            micro_metrics_data[metric] = metric_value.todata()

                    tests_data.append(micro_test_data)

            tests_data.append(test_data)

        with open(self.output_file, 'w') as file:
            json.dump(data, file, indent=2, sort_keys=True)
            file.write('\n')
69
70
71_invalid_xml_chars_dict = {c: None for c in range(32) if chr(c) not in ('\t', '\n', '\r')}
72
73
74def remove_invalid_xml_chars(s):
75    # According to the XML 1.0 spec, control characters other than
76    # \t,\r, and \n are not permitted anywhere in the document
77    # (https://www.w3.org/TR/xml/#charsets) and therefore this function
78    # removes them to produce a valid XML document.
79    #
80    # Note: In XML 1.1 only \0 is illegal (https://www.w3.org/TR/xml11/#charsets)
81    # but lit currently produces XML 1.0 output.
82    return s.translate(_invalid_xml_chars_dict)
83
84
class XunitReport(object):
    """Writes test results as JUnit-compatible XML (xUnit format).

    Tests are grouped into one <testsuite> element per lit test suite;
    failures carry the test output in a CDATA section, skipped tests carry
    a human-readable reason.
    """

    def __init__(self, output_file):
        # Path the XML document is written to by write_results().
        self.output_file = output_file
        # Result codes reported as <skipped> rather than as failures.
        self.skipped_codes = {lit.Test.EXCLUDED,
                              lit.Test.SKIPPED, lit.Test.UNSUPPORTED}

    def write_results(self, tests, elapsed):
        """Write all `tests` to self.output_file as one xUnit document."""
        # groupby() only groups adjacent items, so sort by suite first.
        tests.sort(key=by_suite_and_test_path)
        tests_by_suite = itertools.groupby(tests, lambda t: t.suite)

        with open(self.output_file, 'w') as file:
            file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
            file.write('<testsuites time="{time:.2f}">\n'.format(time=elapsed))
            for suite, test_iter in tests_by_suite:
                self._write_testsuite(file, suite, list(test_iter))
            file.write('</testsuites>\n')

    def _write_testsuite(self, file, suite, tests):
        """Write one <testsuite> element containing all of `tests`."""
        skipped = sum(1 for t in tests if t.result.code in self.skipped_codes)
        failures = sum(1 for t in tests if t.isFailure())

        # Periods would read as package separators to JUnit consumers.
        name = suite.config.name.replace('.', '-')
        file.write(f'<testsuite name={quo(name)} tests="{len(tests)}" failures="{failures}" skipped="{skipped}">\n')
        for test in tests:
            self._write_test(file, test, name)
        file.write('</testsuite>\n')

    def _write_test(self, file, test, suite_name):
        """Write one <testcase> element for `test`."""
        path = '/'.join(test.path_in_suite[:-1]).replace('.', '_')
        class_name = f'{suite_name}.{path or suite_name}'
        name = test.path_in_suite[-1]
        time = test.result.elapsed or 0.0
        file.write(f'<testcase classname={quo(class_name)} name={quo(name)} time="{time:.2f}"')

        if test.isFailure():
            file.write('>\n  <failure><![CDATA[')
            output = test.result.output
            # Decode BEFORE any string manipulation: bytes.replace() with
            # str arguments raises TypeError, so doing the CDATA escaping
            # first would crash on bytes output.
            if isinstance(output, bytes):
                output = output.decode("utf-8", 'ignore')

            # In the unlikely case that the output contains the CDATA
            # terminator we wrap it by creating a new CDATA block.
            output = output.replace(']]>', ']]]]><![CDATA[>')

            # Failing test output sometimes contains control characters like
            # \x1b (e.g. if there was some -fcolor-diagnostics output) which are
            # not allowed inside XML files.
            # This causes problems with CI systems: for example, the Jenkins
            # JUnit XML will throw an exception when encountering those
            # characters and similar problems also occur with GitLab CI.
            output = remove_invalid_xml_chars(output)
            file.write(output)
            file.write(']]></failure>\n</testcase>\n')
        elif test.result.code in self.skipped_codes:
            reason = self._get_skip_reason(test)
            file.write(f'>\n  <skipped message={quo(reason)}/>\n</testcase>\n')
        else:
            file.write('/>\n')

    def _get_skip_reason(self, test):
        """Return a human-readable reason why `test` did not run."""
        code = test.result.code
        if code == lit.Test.EXCLUDED:
            return 'Test not selected (--filter, --max-tests)'
        if code == lit.Test.SKIPPED:
            return 'User interrupt'

        assert code == lit.Test.UNSUPPORTED
        features = test.getMissingRequiredFeatures()
        if features:
            return 'Missing required feature(s): ' + ', '.join(features)
        return 'Unsupported configuration'
154
155
class TimeTraceReport(object):
    """Writes per-test timings as a Chrome ``traceEvents`` JSON trace.

    The resulting file can be loaded into chrome://tracing (or compatible
    viewers) to visualize when each test started and how long it ran.
    """

    def __init__(self, output_file):
        # Path the trace JSON is written to by write_results().
        self.output_file = output_file
        # Tests with these codes never actually ran; they are omitted
        # from the trace.
        self.skipped_codes = {lit.Test.EXCLUDED,
                              lit.Test.SKIPPED, lit.Test.UNSUPPORTED}

    def write_results(self, tests, elapsed):
        """Write one complete trace event per executed test."""
        # Find when the first test started so we can make start times
        # relative.  Unexecuted tests may have no start time, so skip
        # falsy starts; default=0.0 keeps an empty test list from raising
        # ValueError (min() of an empty sequence).
        first_start_time = min(
            (t.result.start for t in tests if t.result.start), default=0.0)
        events = [self._get_test_event(test, first_start_time)
                  for test in tests
                  if test.result.code not in self.skipped_codes]

        json_data = {'traceEvents': events}

        with open(self.output_file, "w") as time_trace_file:
            json.dump(json_data, time_trace_file, indent=2, sort_keys=True)

    def _get_test_event(self, test, first_start_time):
        """Build a complete-event ('X' phase) trace dict for `test`."""
        test_name = test.getFullName()
        elapsed_time = test.result.elapsed or 0.0
        start_time = test.result.start - first_start_time if test.result.start else 0.0
        pid = test.result.pid or 0
        return {
            'pid': pid,
            'tid': 1,
            'ph': 'X',
            # Trace timestamps and durations are in microseconds.
            'ts': int(start_time * 1000000.),
            'dur': int(elapsed_time * 1000000.),
            'name': test_name,
        }
186