xref: /llvm-project/llvm/utils/lit/lit/cl_arguments.py (revision c63e83f49575c024cf89fce9bc95d64988f3177b)
1import argparse
2import enum
3import os
4import shlex
5import sys
6
7import lit.reports
8import lit.util
9
10
@enum.unique
class TestOrder(enum.Enum):
    """Test execution ordering strategies; values match the --order choices."""

    LEXICAL = "lexical"  # sort tests by path name
    RANDOM = "random"  # shuffle tests (also selected by the deprecated --shuffle)
    SMART = "smart"  # default order; previously-failing tests run first
16
17
def parse_args():
    """Parse lit's command line into an options namespace.

    Arguments are read from sys.argv[1:] followed by anything in the
    LIT_OPTS environment variable; since later arguments win in argparse,
    LIT_OPTS overrides the command line.  Cross-option constraints
    (sharding) are validated here, and the derived attributes ``shard``
    (``(run, num)`` tuple or ``None``) and ``reports`` (list of report
    writers) are attached to the namespace before it is returned.

    Returns:
        argparse.Namespace: the parsed and validated options.
    """
    parser = argparse.ArgumentParser(prog="lit", fromfile_prefix_chars="@")
    parser.add_argument(
        "test_paths",
        nargs="+",
        metavar="TEST_PATH",
        help="File or path to include in the test suite",
    )

    parser.add_argument(
        "--version", action="version", version="%(prog)s " + lit.__version__
    )

    # argparse applies `type` only to string defaults, so a value coming from
    # LIT_MAX_WORKERS is validated by _positive_int while the computed core
    # count (already an int) is used as-is.
    parser.add_argument(
        "-j",
        "--threads",
        "--workers",
        dest="workers",
        metavar="N",
        help="Number of workers used for testing",
        type=_positive_int,
        default=os.getenv("LIT_MAX_WORKERS", lit.util.usable_core_count()),
    )
    parser.add_argument(
        "--config-prefix",
        dest="configPrefix",
        metavar="NAME",
        help="Prefix for 'lit' config files",
    )
    parser.add_argument(
        "-D",
        "--param",
        dest="user_params",
        metavar="NAME=VAL",
        help="Add 'NAME' = 'VAL' to the user defined parameters",
        action="append",
        default=[],
    )

    format_group = parser.add_argument_group("Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    format_group.add_argument(
        "-q", "--quiet", help="Suppress no error output", action="store_true"
    )
    format_group.add_argument(
        "-s",
        "--succinct",
        help="Reduce amount of output."
        " Additionally, show a progress bar,"
        " unless --no-progress-bar is specified.",
        action="store_true",
    )
    format_group.add_argument(
        "-v",
        "--verbose",
        dest="showOutput",
        help="For failed tests, show all output. For example, each command is"
        " printed before it is executed, so the last printed command is the one"
        " that failed.",
        action="store_true",
    )
    # -vv is kept only as a deprecated spelling of -v; both write showOutput.
    format_group.add_argument(
        "-vv",
        "--echo-all-commands",
        dest="showOutput",
        help="Deprecated alias for -v.",
        action="store_true",
    )
    format_group.add_argument(
        "-a",
        "--show-all",
        dest="showAllOutput",
        help="Enable -v, but for all tests not just failed tests.",
        action="store_true",
    )
    format_group.add_argument(
        "-o",
        "--output",
        type=lit.reports.JsonReport,
        help="Write test results to the provided path",
        metavar="PATH",
    )
    format_group.add_argument(
        "--no-progress-bar",
        dest="useProgressBar",
        help="Do not use curses based progress bar",
        action="store_false",
    )

    # Note: this does not generate flags for user-defined result codes.
    # Each generated --show-<code> flag appends its ResultCode to the shared
    # shown_codes list via append_const.
    success_codes = [c for c in lit.Test.ResultCode.all_codes() if not c.isFailure]
    for code in success_codes:
        format_group.add_argument(
            "--show-{}".format(code.name.lower()),
            dest="shown_codes",
            help="Show {} tests ({})".format(code.label.lower(), code.name),
            action="append_const",
            const=code,
            default=[],
        )

    execution_group = parser.add_argument_group("Test Execution")
    # --gtest-sharding and --no-gtest-sharding share the gtest_sharding dest;
    # sharding is on by default.
    execution_group.add_argument(
        "--gtest-sharding",
        help="Enable sharding for GoogleTest format",
        action="store_true",
        default=True,
    )
    execution_group.add_argument(
        "--no-gtest-sharding",
        dest="gtest_sharding",
        help="Disable sharding for GoogleTest format",
        action="store_false",
    )
    execution_group.add_argument(
        "--path",
        help="Additional paths to add to testing environment",
        action="append",
        default=[],
        type=os.path.abspath,
    )
    execution_group.add_argument(
        "--vg", dest="useValgrind", help="Run tests under valgrind", action="store_true"
    )
    execution_group.add_argument(
        "--vg-leak",
        dest="valgrindLeakCheck",
        help="Check for memory leaks under valgrind",
        action="store_true",
    )
    execution_group.add_argument(
        "--vg-arg",
        dest="valgrindArgs",
        metavar="ARG",
        help="Specify an extra argument for valgrind",
        action="append",
        default=[],
    )
    execution_group.add_argument(
        "--no-execute",
        dest="noExecute",
        help="Don't execute any tests (assume PASS)",
        action="store_true",
    )
    execution_group.add_argument(
        "--xunit-xml-output",
        type=lit.reports.XunitReport,
        help="Write XUnit-compatible XML test reports to the specified file",
    )
    execution_group.add_argument(
        "--report-failures-only",
        help="Only include unresolved, timed out, failed"
        " and unexpectedly passed tests in the report",
        action="store_true",
    )
    execution_group.add_argument(
        "--resultdb-output",
        type=lit.reports.ResultDBReport,
        help="Write LuCI ResultDB compatible JSON to the specified file",
    )
    execution_group.add_argument(
        "--time-trace-output",
        type=lit.reports.TimeTraceReport,
        help="Write Chrome tracing compatible JSON to the specified file",
    )
    # This option only exists for the benefit of LLVM's Buildkite CI pipelines.
    # As soon as it is not needed, it should be removed. Its help text would be:
    # When enabled, lit will add a unique element to the output file name,
    # before the extension. For example "results.xml" will become
    # "results.<something>.xml". The "<something>" is not ordered in any
    # way and is chosen so that existing files are not overwritten. [Default: Off]
    execution_group.add_argument(
        "--use-unique-output-file-name",
        help=argparse.SUPPRESS,
        action="store_true",
    )
    # NOTE: --timeout stores into maxIndividualTestTime (per-test limit);
    # the whole-run limit --max-time below owns the "timeout" dest.
    execution_group.add_argument(
        "--timeout",
        dest="maxIndividualTestTime",
        help="Maximum time to spend running a single test (in seconds). "
        "0 means no time limit. [Default: 0]",
        type=_non_negative_int,
    )
    execution_group.add_argument(
        "--max-failures",
        help="Stop execution after the given number of failures.",
        type=_positive_int,
    )
    execution_group.add_argument(
        "--allow-empty-runs",
        help="Do not fail the run if all tests are filtered out",
        action="store_true",
    )
    execution_group.add_argument(
        "--per-test-coverage",
        dest="per_test_coverage",
        action="store_true",
        help="Enable individual test case coverage",
    )
    execution_group.add_argument(
        "--ignore-fail",
        dest="ignoreFail",
        action="store_true",
        help="Exit with status zero even if some tests fail",
    )
    # Recording and histogram-printing of per-test wall time are mutually
    # exclusive flags.
    execution_test_time_group = execution_group.add_mutually_exclusive_group()
    execution_test_time_group.add_argument(
        "--skip-test-time-recording",
        help="Do not track elapsed wall time for each test",
        action="store_true",
    )
    execution_test_time_group.add_argument(
        "--time-tests",
        help="Track elapsed wall time for each test printed in a histogram",
        action="store_true",
    )

    selection_group = parser.add_argument_group("Test Selection")
    selection_group.add_argument(
        "--max-tests",
        metavar="N",
        help="Maximum number of tests to run",
        type=_positive_int,
    )
    selection_group.add_argument(
        "--max-time",
        dest="timeout",
        metavar="N",
        help="Maximum time to spend testing (in seconds)",
        type=_positive_int,
    )
    # NOTE(review): a value given on the command line leaves opts.order as a
    # plain str (one of the choices), while the default and --shuffle leave a
    # TestOrder enum member — consumers must accept both forms.
    selection_group.add_argument(
        "--order",
        choices=[x.value for x in TestOrder],
        default=TestOrder.SMART,
        help="Test order to use (default: smart)",
    )
    selection_group.add_argument(
        "--shuffle",
        dest="order",
        help="Run tests in random order (DEPRECATED: use --order=random)",
        action="store_const",
        const=TestOrder.RANDOM,
    )
    selection_group.add_argument(
        "-i",
        "--incremental",
        help="Run failed tests first (DEPRECATED: use --order=smart)",
        action="store_true",
    )
    # Filters default to env vars; string defaults are passed through `type`
    # by argparse, so LIT_FILTER / LIT_FILTER_OUT are compiled as regexes too.
    selection_group.add_argument(
        "--filter",
        metavar="REGEX",
        type=_case_insensitive_regex,
        help="Only run tests with paths matching the given regular expression",
        default=os.environ.get("LIT_FILTER", ".*"),
    )
    selection_group.add_argument(
        "--filter-out",
        metavar="REGEX",
        type=_case_insensitive_regex,
        help="Filter out tests with paths matching the given regular expression",
        default=os.environ.get("LIT_FILTER_OUT", "^$"),
    )
    selection_group.add_argument(
        "--xfail",
        metavar="LIST",
        type=_semicolon_list,
        help="XFAIL tests with paths in the semicolon separated list",
        default=os.environ.get("LIT_XFAIL", ""),
    )
    selection_group.add_argument(
        "--xfail-not",
        metavar="LIST",
        type=_semicolon_list,
        help="do not XFAIL tests with paths in the semicolon separated list",
        default=os.environ.get("LIT_XFAIL_NOT", ""),
    )
    selection_group.add_argument(
        "--num-shards",
        dest="numShards",
        metavar="M",
        help="Split testsuite into M pieces and only run one",
        type=_positive_int,
        default=os.environ.get("LIT_NUM_SHARDS"),
    )
    selection_group.add_argument(
        "--run-shard",
        dest="runShard",
        metavar="N",
        help="Run shard #N of the testsuite",
        type=_positive_int,
        default=os.environ.get("LIT_RUN_SHARD"),
    )

    debug_group = parser.add_argument_group("Debug and Experimental Options")
    debug_group.add_argument(
        "--debug", help="Enable debugging (for 'lit' development)", action="store_true"
    )
    debug_group.add_argument(
        "--show-suites",
        help="Show discovered test suites and exit",
        action="store_true",
    )
    debug_group.add_argument(
        "--show-tests", help="Show all discovered tests and exit", action="store_true"
    )
    debug_group.add_argument(
        "--show-used-features",
        help="Show all features used in the test suite (in XFAIL, UNSUPPORTED and REQUIRES) and exit",
        action="store_true",
    )

    # LIT is special: environment variables override command line arguments.
    env_args = shlex.split(os.environ.get("LIT_OPTS", ""))
    args = sys.argv[1:] + env_args
    opts = parser.parse_args(args)

    # Validate command line options
    if opts.incremental:
        print(
            "WARNING: --incremental is deprecated. Failing tests now always run first."
        )

    # Sharding options must be supplied together (either via flags or the
    # LIT_NUM_SHARDS / LIT_RUN_SHARD defaults) and be mutually consistent.
    if opts.numShards or opts.runShard:
        if not opts.numShards or not opts.runShard:
            parser.error("--num-shards and --run-shard must be used together")
        if opts.runShard > opts.numShards:
            parser.error("--run-shard must be between 1 and --num-shards (inclusive)")
        opts.shard = (opts.runShard, opts.numShards)
    else:
        opts.shard = None

    # Collect whichever report writers were requested, dropping unset ones.
    opts.reports = list(
        filter(
            None,
            [
                opts.output,
                opts.xunit_xml_output,
                opts.resultdb_output,
                opts.time_trace_output,
            ],
        )
    )

    # Propagate the CI-only unique-output-file-name flag to every report.
    for report in opts.reports:
        report.use_unique_output_file_name = opts.use_unique_output_file_name

    return opts
368
369
def _positive_int(arg):
    """Argparse `type` callback: parse *arg* as a strictly positive integer."""
    return _int(arg, "positive", lambda value: value > 0)
372
373
def _non_negative_int(arg):
    """Argparse `type` callback: parse *arg* as an integer >= 0."""
    return _int(arg, "non-negative", lambda value: value >= 0)
376
377
378def _int(arg, kind, pred):
379    desc = "requires {} integer, but found '{}'"
380    try:
381        i = int(arg)
382    except ValueError:
383        raise _error(desc, kind, arg)
384    if not pred(i):
385        raise _error(desc, kind, arg)
386    return i
387
388
389def _case_insensitive_regex(arg):
390    import re
391
392    try:
393        return re.compile(arg, re.IGNORECASE)
394    except re.error as reason:
395        raise _error("invalid regular expression: '{}', {}", arg, reason)
396
397
398def _semicolon_list(arg):
399    return arg.split(";")
400
401
402def _error(desc, *args):
403    msg = desc.format(*args)
404    return argparse.ArgumentTypeError(msg)
405