1"""
2lit - LLVM Integrated Tester.
3
4See lit.pod for more information.
5"""

import itertools
import os
import platform
import sys
import time

import lit.cl_arguments
import lit.discovery
import lit.display
import lit.LitConfig
import lit.reports
import lit.run
import lit.Test
import lit.util
from lit.TestTimes import record_test_times


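# Top-level driver: parse arguments, build the LitConfig, discover tests,
# apply ordering/filtering/sharding, run the selected tests, then print
# results, write any requested reports, and exit with a status that reflects
# the outcome.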
def main(builtin_params={}):
    opts = lit.cl_arguments.parse_args()
    params = create_params(builtin_params, opts.user_params)
    is_windows = platform.system() == 'Windows'

    lit_config = lit.LitConfig.LitConfig(
        progname=os.path.basename(sys.argv[0]),
        path=opts.path,
        quiet=opts.quiet,
        useValgrind=opts.useValgrind,
        valgrindLeakCheck=opts.valgrindLeakCheck,
        valgrindArgs=opts.valgrindArgs,
        noExecute=opts.noExecute,
        debug=opts.debug,
        isWindows=is_windows,
        params=params,
        config_prefix=opts.configPrefix,
        echo_all_commands=opts.echoAllCommands)

    discovered_tests = lit.discovery.find_tests_for_inputs(lit_config, opts.test_paths,
                                                           opts.indirectlyRunCheck)
    if not discovered_tests:
        sys.stderr.write('error: did not discover any tests for provided path(s)\n')
        sys.exit(2)

    if opts.show_suites or opts.show_tests:
        print_discovered(discovered_tests, opts.show_suites, opts.show_tests)
        sys.exit(0)

    if opts.show_used_features:
        features = set(itertools.chain.from_iterable(t.getUsedFeatures() for t in discovered_tests))
        print(' '.join(sorted(features)))
        sys.exit(0)

    # Command line overrides configuration for maxIndividualTestTime.
    if opts.maxIndividualTestTime is not None:  # `not None` is important (default: 0)
        if opts.maxIndividualTestTime != lit_config.maxIndividualTestTime:
            lit_config.note(('The test suite configuration requested an individual'
                ' test timeout of {0} seconds but a timeout of {1} seconds was'
                ' requested on the command line. Forcing timeout to be {1}'
                ' seconds')
                .format(lit_config.maxIndividualTestTime,
                        opts.maxIndividualTestTime))
            lit_config.maxIndividualTestTime = opts.maxIndividualTestTime

    determine_order(discovered_tests, opts.order)

    selected_tests = [t for t in discovered_tests if
        opts.filter.search(t.getFullName()) and not
        opts.filter_out.search(t.getFullName())]

    if not selected_tests:
        sys.stderr.write('error: filter did not match any tests '
                         '(of %d discovered).  ' % len(discovered_tests))
        if opts.allow_empty_runs:
            sys.stderr.write("Suppressing error because '--allow-empty-runs' "
                             'was specified.\n')
            sys.exit(0)
        else:
            sys.stderr.write("Use '--allow-empty-runs' to suppress this "
                             'error.\n')
            sys.exit(2)

    # When running multiple shards, don't include skipped tests in the xunit
    # output since merging the files will result in duplicates.
    tests_for_report = discovered_tests
    if opts.shard:
        (run, shards) = opts.shard
        selected_tests = filter_by_shard(selected_tests, run, shards, lit_config)
        tests_for_report = selected_tests
        if not selected_tests:
            sys.stderr.write('warning: shard does not contain any tests.  '
                             'Consider decreasing the number of shards.\n')
            sys.exit(0)

    selected_tests = selected_tests[:opts.max_tests]

    mark_xfail(discovered_tests, opts)

    mark_excluded(discovered_tests, selected_tests)

    start = time.time()
    run_tests(selected_tests, lit_config, opts, len(discovered_tests))
    elapsed = time.time() - start

    record_test_times(selected_tests, lit_config)

    if opts.time_tests:
        print_histogram(discovered_tests)

    print_results(discovered_tests, elapsed, opts)

    for report in opts.reports:
        report.write_results(tests_for_report, elapsed)

    if lit_config.numErrors:
        sys.stderr.write('\n%d error(s) in tests\n' % lit_config.numErrors)
        sys.exit(2)

    if lit_config.numWarnings:
        sys.stderr.write('\n%d warning(s) in tests\n' % lit_config.numWarnings)

    has_failure = any(t.isFailure() for t in discovered_tests)
    if has_failure:
        if opts.ignoreFail:
            sys.stderr.write("\nExiting with status 0 instead of 1 because "
                             "'--ignore-fail' was specified.\n")
        else:
            sys.exit(1)

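# Merge the builtin parameters with user-supplied key=value parameters.
# Example (illustrative): user_params=['build_mode=Release', 'use_gdb']
# yields {'build_mode': 'Release', 'use_gdb': ''}.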
def create_params(builtin_params, user_params):
    def parse(p):
        return p.split('=', 1) if '=' in p else (p, '')

    params = dict(builtin_params)
    params.update([parse(p) for p in user_params])
    return params


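# Implements --show-suites / --show-tests: list the discovered suites (with
# their roots, features and substitutions) and/or the individual test names.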
def print_discovered(tests, show_suites, show_tests):
    tests.sort(key=lit.reports.by_suite_and_test_path)

    if show_suites:
        tests_by_suite = itertools.groupby(tests, lambda t: t.suite)
        print('-- Test Suites --')
        for suite, test_iter in tests_by_suite:
            test_count = sum(1 for _ in test_iter)
            print('  %s - %d tests' % (suite.name, test_count))
            print('    Source Root: %s' % suite.source_root)
            print('    Exec Root  : %s' % suite.exec_root)
            features = ' '.join(sorted(suite.config.available_features))
            print('    Available Features: %s' % features)
            substitutions = sorted(suite.config.substitutions)
            substitutions = ('%s => %s' % (x, y) for (x, y) in substitutions)
            substitutions = '\n'.ljust(30).join(substitutions)
            print('    Available Substitutions: %s' % substitutions)

    if show_tests:
        print('-- Available Tests --')
        for t in tests:
            print('  %s' % t.getFullName())


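# Order the tests in place.  The default order runs previously failing tests
# first, then the remaining tests slowest-first (based on recorded times), so
# failures surface early and long tests overlap with the rest of the run.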
def determine_order(tests, order):
    from lit.cl_arguments import TestOrder
    if order == TestOrder.RANDOM:
        import random
        random.shuffle(tests)
    else:
        assert order == TestOrder.DEFAULT, 'Unknown TestOrder value'
        tests.sort(key=lambda t: (not t.previous_failure, -t.previous_elapsed, t.getFullName()))


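# Keep every shards-th test, starting at position `run` (1-based).
# Example (illustrative): with 10 tests, run=2 and shards=3, the selected
# 0-based indices are 1, 4 and 7, i.e. tests #2, #5 and #8.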
def filter_by_shard(tests, run, shards, lit_config):
    test_ixs = range(run - 1, len(tests), shards)
    selected_tests = [tests[i] for i in test_ixs]

    # For clarity, generate a preview of the first few test indices in the shard
    # to accompany the arithmetic expression.
    preview_len = 3
    preview = ', '.join([str(i + 1) for i in test_ixs[:preview_len]])
    if len(test_ixs) > preview_len:
        preview += ', ...'
    msg = f'Selecting shard {run}/{shards} = ' \
          f'size {len(selected_tests)}/{len(tests)} = ' \
          f'tests #({shards}*k)+{run} = [{preview}]'
    lit_config.note(msg)
    return selected_tests


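# Mark the tests named in opts.xfail as expected failures: appending the '*'
# wildcard to a test's XFAIL list makes it unconditionally XFAIL.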
def mark_xfail(selected_tests, opts):
    for t in selected_tests:
        if os.sep.join(t.path_in_suite) in opts.xfail:
            t.xfails += '*'

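# Give every discovered-but-not-selected test an EXCLUDED result so that
# summaries and reports still account for it.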
def mark_excluded(discovered_tests, selected_tests):
    excluded_tests = set(discovered_tests) - set(selected_tests)
    result = lit.Test.Result(lit.Test.EXCLUDED)
    for t in excluded_tests:
        t.setResult(result)


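# Execute the selected tests: set up the progress display and the Run object,
# run inside a scratch temp directory, and note early termination caused by a
# keyboard interrupt, the maximum-failure limit, or the overall timeout.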
def run_tests(tests, lit_config, opts, discovered_tests):
    workers = min(len(tests), opts.workers)
    display = lit.display.create_display(opts, tests, discovered_tests, workers)

    run = lit.run.Run(tests, lit_config, workers, display.update,
                      opts.max_failures, opts.timeout)

    display.print_header()

    interrupted = False
    error = None
    try:
        execute_in_tmp_dir(run, lit_config)
    except KeyboardInterrupt:
        interrupted = True
        error = '  interrupted by user'
    except lit.run.MaxFailuresError:
        error = 'warning: reached maximum number of test failures'
    except lit.run.TimeoutError:
        error = 'warning: reached timeout'

    display.clear(interrupted)
    if error:
        sys.stderr.write('%s, skipping remaining tests\n' % error)


def execute_in_tmp_dir(run, lit_config):
    # Create a temp directory inside the normal temp directory so that we can
    # try to avoid temporary test file leaks. The user can avoid this behavior
    # by setting LIT_PRESERVES_TMP in the environment, so they can easily use
    # their own temp directory to monitor temporary file leaks or handle them at
    # the buildbot level.
    tmp_dir = None
    if 'LIT_PRESERVES_TMP' not in os.environ:
        import tempfile
        tmp_dir = tempfile.mkdtemp(prefix="lit_tmp_")
        os.environ.update({
                'TMPDIR': tmp_dir,
                'TMP': tmp_dir,
                'TEMP': tmp_dir,
                'TEMPDIR': tmp_dir,
                })
    try:
        run.execute()
    finally:
        if tmp_dir:
            try:
                import shutil
                shutil.rmtree(tmp_dir)
            except Exception as e:
                lit_config.warning("Failed to delete temp directory '%s', try upgrading your version of Python to fix this" % tmp_dir)


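# Print a histogram of per-test wall times (only tests that recorded an
# elapsed time are included).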
def print_histogram(tests):
    test_times = [(t.getFullName(), t.result.elapsed)
                  for t in tests if t.result.elapsed]
    if test_times:
        lit.util.printHistogram(test_times, title='Tests')


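# Group the tests by result code, print each group that should be shown, then
# print the summary counts.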
def print_results(tests, elapsed, opts):
    tests_by_code = {code: [] for code in lit.Test.ResultCode.all_codes()}
    for test in tests:
        tests_by_code[test.result.code].append(test)

    for code in lit.Test.ResultCode.all_codes():
        print_group(sorted(tests_by_code[code], key=lambda t: t.getFullName()), code, opts.shown_codes)

    print_summary(tests_by_code, opts.quiet, elapsed)


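# List the tests in one result-code group.  Failures are always listed;
# non-failure codes are listed only if requested (shown_codes).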
def print_group(tests, code, shown_codes):
    if not tests:
        return
    if not code.isFailure and code not in shown_codes:
        return
    print('*' * 20)
    print('{} Tests ({}):'.format(code.label, len(tests)))
    for test in tests:
        print('  %s' % test.getFullName())
    sys.stdout.write('\n')


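# Print the total testing time and per-result-code counts, aligned in two
# columns.  Example output (illustrative):
#
#   Testing Time: 12.34s
#     Passed: 117
#     Failed:   2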
def print_summary(tests_by_code, quiet, elapsed):
    if not quiet:
        print('\nTesting Time: %.2fs' % elapsed)

    codes = [c for c in lit.Test.ResultCode.all_codes()
             if not quiet or c.isFailure]
    groups = [(c.label, len(tests_by_code[c])) for c in codes]
    groups = [(label, count) for label, count in groups if count]
    if not groups:
        return

    max_label_len = max(len(label) for label, _ in groups)
    max_count_len = max(len(str(count)) for _, count in groups)

    for (label, count) in groups:
        label = label.ljust(max_label_len)
        count = str(count).rjust(max_count_len)
        print('  %s: %s' % (label, count))
307