# xref: /llvm-project/llvm/utils/lit/lit/main.py (revision c63e83f49575c024cf89fce9bc95d64988f3177b)
1"""
2lit - LLVM Integrated Tester.
3
4See lit.pod for more information.
5"""
6
7import itertools
8import os
9import platform
10import sys
11import time
12
13import lit.cl_arguments
14import lit.discovery
15import lit.display
16import lit.LitConfig
17import lit.reports
18import lit.run
19import lit.Test
20import lit.util
21from lit.formats.googletest import GoogleTest
22from lit.TestTimes import record_test_times
23
24
25def main(builtin_params={}):
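    """Entry point for lit: parse command-line options, discover and filter
    tests, run them, and print/report the results. ``builtin_params`` are
    merged with any ``--param`` values given on the command line."""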
    opts = lit.cl_arguments.parse_args()
    params = create_params(builtin_params, opts.user_params)
    is_windows = platform.system() == "Windows"

    lit_config = lit.LitConfig.LitConfig(
        progname=os.path.basename(sys.argv[0]),
        path=opts.path,
        quiet=opts.quiet,
        useValgrind=opts.useValgrind,
        valgrindLeakCheck=opts.valgrindLeakCheck,
        valgrindArgs=opts.valgrindArgs,
        noExecute=opts.noExecute,
        debug=opts.debug,
        isWindows=is_windows,
        order=opts.order,
        params=params,
        config_prefix=opts.configPrefix,
        per_test_coverage=opts.per_test_coverage,
        gtest_sharding=opts.gtest_sharding,
    )

    discovered_tests = lit.discovery.find_tests_for_inputs(
        lit_config, opts.test_paths
    )
    if not discovered_tests:
        sys.stderr.write("error: did not discover any tests for provided path(s)\n")
        sys.exit(2)

    if opts.show_suites or opts.show_tests:
        print_discovered(discovered_tests, opts.show_suites, opts.show_tests)
        sys.exit(0)

    if opts.show_used_features:
        features = set(
            itertools.chain.from_iterable(
                t.getUsedFeatures()
                for t in discovered_tests
                if t.gtest_json_file is None
            )
        )
        print(" ".join(sorted(features)))
        sys.exit(0)

    # Command line overrides configuration for maxIndividualTestTime.
    if opts.maxIndividualTestTime is not None:  # `not None` is important (default: 0)
        if opts.maxIndividualTestTime != lit_config.maxIndividualTestTime:
            lit_config.note(
                (
                    "The test suite configuration requested an individual"
                    " test timeout of {0} seconds but a timeout of {1} seconds was"
                    " requested on the command line. Forcing timeout to be {1}"
                    " seconds."
                ).format(lit_config.maxIndividualTestTime, opts.maxIndividualTestTime)
            )
            lit_config.maxIndividualTestTime = opts.maxIndividualTestTime

    determine_order(discovered_tests, opts.order)

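    # Keep only the tests whose full names match the --filter regex and do
    # not match the --filter-out regex.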
    selected_tests = [
        t
        for t in discovered_tests
        if opts.filter.search(t.getFullName())
        and not opts.filter_out.search(t.getFullName())
    ]

    if not selected_tests:
        sys.stderr.write(
            "error: filter did not match any tests "
            "(of %d discovered).  " % len(discovered_tests)
        )
        if opts.allow_empty_runs:
            sys.stderr.write(
                "Suppressing error because '--allow-empty-runs' was specified.\n"
            )
            sys.exit(0)
        else:
            sys.stderr.write("Use '--allow-empty-runs' to suppress this error.\n")
            sys.exit(2)

    # When running multiple shards, don't include skipped tests in the xunit
    # output since merging the files will result in duplicates.
    if opts.shard:
        (run, shards) = opts.shard
        selected_tests = filter_by_shard(selected_tests, run, shards, lit_config)
        if not selected_tests:
            sys.stderr.write(
                "warning: shard does not contain any tests.  "
                "Consider decreasing the number of shards.\n"
            )
            sys.exit(0)

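    # Cap the number of tests that will actually run (--max-tests).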
    selected_tests = selected_tests[: opts.max_tests]

    mark_xfail(discovered_tests, opts)

    mark_excluded(discovered_tests, selected_tests)

    start = time.time()
    run_tests(selected_tests, lit_config, opts, len(discovered_tests))
    elapsed = time.time() - start

    if not opts.skip_test_time_recording:
        record_test_times(selected_tests, lit_config)

    selected_tests, discovered_tests = GoogleTest.post_process_shard_results(
        selected_tests, discovered_tests
    )

    if opts.time_tests:
        print_histogram(discovered_tests)

    print_results(discovered_tests, elapsed, opts)

    tests_for_report = selected_tests if opts.shard else discovered_tests
    if opts.report_failures_only:
        # Only report tests that failed.
        tests_for_report = [t for t in tests_for_report if t.isFailure()]

    for report in opts.reports:
        report.write_results(tests_for_report, elapsed)

    if lit_config.numErrors:
        sys.stderr.write("\n%d error(s) in tests\n" % lit_config.numErrors)
        sys.exit(2)

    if lit_config.numWarnings:
        sys.stderr.write("\n%d warning(s) in tests\n" % lit_config.numWarnings)

    has_failure = any(t.isFailure() for t in discovered_tests)
    if has_failure:
        if opts.ignoreFail:
            sys.stderr.write(
                "\nExiting with status 0 instead of 1 because "
                "'--ignore-fail' was specified.\n"
            )
        else:
            sys.exit(1)


def create_params(builtin_params, user_params):
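    """Merge ``builtin_params`` with the user-supplied ``--param`` values.
    Each entry has the form ``NAME=VALUE``; a bare ``NAME`` maps to the empty
    string. User values override builtin ones of the same name."""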
    def parse(p):
        return p.split("=", 1) if "=" in p else (p, "")

    params = dict(builtin_params)
    params.update([parse(p) for p in user_params])
    return params


def print_discovered(tests, show_suites, show_tests):
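    """Print the discovered test suites (source/exec roots, available
    features, and substitutions) and/or the individual test names."""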
    tests.sort(key=lit.reports.by_suite_and_test_path)

    if show_suites:
        tests_by_suite = itertools.groupby(tests, lambda t: t.suite)
        print("-- Test Suites --")
        for suite, test_iter in tests_by_suite:
            test_count = sum(1 for _ in test_iter)
            print("  %s - %d tests" % (suite.name, test_count))
            print("    Source Root: %s" % suite.source_root)
            print("    Exec Root  : %s" % suite.exec_root)
            features = " ".join(sorted(suite.config.available_features))
            print("    Available Features: %s" % features)
            substitutions = sorted(suite.config.substitutions)
            substitutions = ("%s => %s" % (x, y) for (x, y) in substitutions)
            substitutions = "\n".ljust(30).join(substitutions)
            print("    Available Substitutions: %s" % substitutions)

    if show_tests:
        print("-- Available Tests --")
        for t in tests:
            print("  %s" % t.getFullName())


def determine_order(tests, order):
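    """Reorder ``tests`` in place: shuffle randomly, sort lexically by full
    name, or sort "smartly" (previously failing tests first, then slower
    tests first, then by name)."""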
    from lit.cl_arguments import TestOrder

    enum_order = TestOrder(order)
    if enum_order == TestOrder.RANDOM:
        import random

        random.shuffle(tests)
    elif enum_order == TestOrder.LEXICAL:
        tests.sort(key=lambda t: t.getFullName())
    else:
        assert enum_order == TestOrder.SMART, "Unknown TestOrder value"
        tests.sort(
            key=lambda t: (not t.previous_failure, -t.previous_elapsed, t.getFullName())
        )


def filter_by_shard(tests, run, shards, lit_config):
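    """Return this shard's slice of ``tests``: every ``shards``-th test,
    starting at index ``run - 1``. For example, run=2 with shards=3 selects
    tests #2, #5, #8, ..."""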
    test_ixs = range(run - 1, len(tests), shards)
    selected_tests = [tests[i] for i in test_ixs]

    # For clarity, generate a preview of the first few test indices in the shard
    # to accompany the arithmetic expression.
    preview_len = 3
    preview = ", ".join([str(i + 1) for i in test_ixs[:preview_len]])
    if len(test_ixs) > preview_len:
        preview += ", ..."
    msg = (
        f"Selecting shard {run}/{shards} = "
        f"size {len(selected_tests)}/{len(tests)} = "
        f"tests #({shards}*k)+{run} = [{preview}]"
    )
    lit_config.note(msg)
    return selected_tests


def mark_xfail(selected_tests, opts):
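    """Mark tests listed in ``opts.xfail`` (by path within their suite or by
    full name) as expected to fail, and mark tests listed in
    ``opts.xfail_not`` as expected to pass despite any XFAIL directives."""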
    for t in selected_tests:
        test_file = os.sep.join(t.path_in_suite)
        test_full_name = t.getFullName()
        if test_file in opts.xfail or test_full_name in opts.xfail:
            t.xfails += "*"
        if test_file in opts.xfail_not or test_full_name in opts.xfail_not:
            t.xfail_not = True


def mark_excluded(discovered_tests, selected_tests):
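    """Give every discovered test that was not selected for this run an
    EXCLUDED result."""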
    excluded_tests = set(discovered_tests) - set(selected_tests)
    result = lit.Test.Result(lit.Test.EXCLUDED)
    for t in excluded_tests:
        t.setResult(result)


def run_tests(tests, lit_config, opts, discovered_tests):
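    """Run ``tests`` using up to ``opts.workers`` parallel workers, updating
    the progress display as results arrive. The run stops early on Ctrl-C,
    when ``opts.max_failures`` failures are reached, or when the overall
    ``opts.timeout`` expires."""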
    workers = min(len(tests), opts.workers)
    display = lit.display.create_display(opts, tests, discovered_tests, workers)

    run = lit.run.Run(
        tests, lit_config, workers, display.update, opts.max_failures, opts.timeout
    )

    display.print_header()

    interrupted = False
    error = None
    try:
        execute_in_tmp_dir(run, lit_config)
    except KeyboardInterrupt:
        interrupted = True
        error = "  interrupted by user"
    except lit.run.MaxFailuresError:
        error = "warning: reached maximum number of test failures"
    except lit.run.TimeoutError:
        error = "warning: reached timeout"

    display.clear(interrupted)
    if error:
        sys.stderr.write("%s, skipping remaining tests\n" % error)


def execute_in_tmp_dir(run, lit_config):
    # Create a temp directory inside the normal temp directory so that we can
    # try to avoid temporary test file leaks. The user can avoid this behavior
    # by setting LIT_PRESERVES_TMP in the environment, so they can easily use
    # their own temp directory to monitor temporary file leaks or handle them at
    # the buildbot level.
    tmp_dir = None
    if "LIT_PRESERVES_TMP" not in os.environ:
        import tempfile

        # z/OS linker does not support '_' in paths, so use '-'.
        tmp_dir = tempfile.mkdtemp(prefix="lit-tmp-")
        tmp_dir_envs = {k: tmp_dir for k in ["TMP", "TMPDIR", "TEMP", "TEMPDIR"]}
        os.environ.update(tmp_dir_envs)
        for cfg in {t.config for t in run.tests}:
            cfg.environment.update(tmp_dir_envs)
    try:
        run.execute()
    finally:
        if tmp_dir:
            try:
                import shutil

                shutil.rmtree(tmp_dir)
            except Exception as e:
                lit_config.warning(
                    "Failed to delete temp directory '%s', try upgrading your version of Python to fix this"
                    % tmp_dir
                )


def print_histogram(tests):
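    """Print a histogram of execution times for the tests that have a
    recorded elapsed time."""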
    test_times = [
        (t.getFullName(), t.result.elapsed) for t in tests if t.result.elapsed
    ]
    if test_times:
        lit.util.printHistogram(test_times, title="Tests")


def print_results(tests, elapsed, opts):
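    """Group tests by result code, print each group that should be shown,
    and then print the summary counts."""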
    tests_by_code = {code: [] for code in lit.Test.ResultCode.all_codes()}
    total_tests = len(tests)
    for test in tests:
        tests_by_code[test.result.code].append(test)

    for code in lit.Test.ResultCode.all_codes():
        print_group(
            sorted(tests_by_code[code], key=lambda t: t.getFullName()),
            code,
            opts.shown_codes,
        )

    print_summary(total_tests, tests_by_code, opts.quiet, elapsed)


def print_group(tests, code, shown_codes):
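    """Print the full names of all tests with the given result code. Failure
    codes are always printed; other codes only if listed in ``shown_codes``."""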
    if not tests:
        return
    if not code.isFailure and code not in shown_codes:
        return
    print("*" * 20)
    print("{} Tests ({}):".format(code.label, len(tests)))
    for test in tests:
        print("  %s" % test.getFullName())
    sys.stdout.write("\n")


def print_summary(total_tests, tests_by_code, quiet, elapsed):
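    """Print the total testing time, the number of discovered tests, and a
    per-result-code breakdown with percentages; in quiet mode, only failure
    codes are listed."""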
    if not quiet:
        print("\nTesting Time: %.2fs" % elapsed)

    print("\nTotal Discovered Tests: %s" % (total_tests))
    codes = [c for c in lit.Test.ResultCode.all_codes() if not quiet or c.isFailure]
    groups = [(c.label, len(tests_by_code[c])) for c in codes]
    groups = [(label, count) for label, count in groups if count]
    if not groups:
        return

    max_label_len = max(len(label) for label, _ in groups)
    max_count_len = max(len(str(count)) for _, count in groups)

    for (label, count) in groups:
        label = label.ljust(max_label_len)
        count = str(count).rjust(max_count_len)
        print("  %s: %s (%.2f%%)" % (label, count, float(count) / total_tests * 100))
363