xref: /llvm-project/llvm/utils/lit/lit/formats/googletest.py (revision 9f89d31d5185015f8eea9c0f3d35e7ba9d353e67)
from __future__ import absolute_import
import json
import math
import os
import shlex
import subprocess
import sys

import lit.Test
import lit.TestRunner
import lit.util
from .base import TestFormat

kIsWindows = sys.platform in ["win32", "cygwin"]


class GoogleTest(TestFormat):
    def __init__(self, test_sub_dirs, test_suffix, run_under=[]):
        self.seen_executables = set()
        self.test_sub_dirs = str(test_sub_dirs).split(";")

        # On Windows, assume tests will also end in '.exe'.
        exe_suffix = str(test_suffix)
        if kIsWindows:
            exe_suffix += ".exe"

        # Also check for .py files for testing purposes.
        self.test_suffixes = {exe_suffix, test_suffix + ".py"}
        self.run_under = run_under

    def get_num_tests(self, path, litConfig, localConfig):
        list_test_cmd = self.prepareCmd(
            [path, "--gtest_list_tests", "--gtest_filter=-*DISABLED_*"]
        )
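        # --gtest_list_tests prints each suite flush-left ("SuiteName.") and each
        # test in it indented by two spaces, so counting the indented lines below
        # gives the number of tests.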
        try:
            out = subprocess.check_output(list_test_cmd, env=localConfig.environment)
        except subprocess.CalledProcessError as exc:
            litConfig.warning(
                "unable to discover google-tests in %r: %s. Process output: %s"
                % (path, sys.exc_info()[1], exc.output)
            )
            return None
        return sum(
            map(
                lambda line: lit.util.to_string(line).startswith("  "),
                out.splitlines(False),
            )
        )

    def getTestsInDirectory(self, testSuite, path_in_suite, litConfig, localConfig):
        init_shard_size = 512  # number of tests in a shard
        core_count = lit.util.usable_core_count()
        source_path = testSuite.getSourcePath(path_in_suite)
        for subdir in self.test_sub_dirs:
            dir_path = os.path.join(source_path, subdir)
            if not os.path.isdir(dir_path):
                continue
            for fn in lit.util.listdir_files(dir_path, suffixes=self.test_suffixes):
                # Discover the tests in this executable.
                execpath = os.path.join(source_path, subdir, fn)
                if execpath in self.seen_executables:
                    litConfig.warning(
                        "Skip adding %r since it has been added to the test pool"
                        % execpath
                    )
                    continue
                else:
                    self.seen_executables.add(execpath)
                num_tests = self.get_num_tests(execpath, litConfig, localConfig)
                if num_tests is not None:
                    if litConfig.gtest_sharding:
                        # Compute the number of shards.
                        shard_size = init_shard_size
                        nshard = int(math.ceil(num_tests / shard_size))
                        while nshard < core_count and shard_size > 1:
                            shard_size = shard_size // 2
                            nshard = int(math.ceil(num_tests / shard_size))
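                        # For example, with 600 tests and 8 usable cores the loop
                        # halves the shard size 512 -> 256 -> 128 -> 64, giving
                        # nshard = ceil(600 / 64) = 10 >= 8 shards.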

                        # Create one lit test for each shard.
                        for idx in range(nshard):
                            testPath = path_in_suite + (
                                subdir,
                                fn,
                                str(idx),
                                str(nshard),
                            )
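                            # The shard index and shard count become extra path
                            # components; execute() later peels them back off to
                            # set GTEST_SHARD_INDEX / GTEST_TOTAL_SHARDS.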
                            json_file = (
                                "-".join(
                                    [
                                        execpath,
                                        testSuite.config.name,
                                        str(os.getpid()),
                                        str(idx),
                                        str(nshard),
                                    ]
                                )
                                + ".json"
                            )
                            yield lit.Test.Test(
                                testSuite,
                                testPath,
                                localConfig,
                                file_path=execpath,
                                gtest_json_file=json_file,
                            )
                    else:
                        testPath = path_in_suite + (subdir, fn)
                        json_file = (
                            "-".join(
                                [
                                    execpath,
                                    testSuite.config.name,
                                    str(os.getpid()),
                                ]
                            )
                            + ".json"
                        )
                        yield lit.Test.Test(
                            testSuite,
                            testPath,
                            localConfig,
                            file_path=execpath,
                            gtest_json_file=json_file,
                        )
                else:
                    # This doesn't look like a valid gtest file.  This can
                    # have a number of causes, none of them good.  For
                    # instance, we could have created a broken executable.
                    # Alternatively, someone has cruft in their test
                    # directory.  If we don't return a test here, then no
                    # failures will get reported, so return a dummy test name
                    # so that the failure is reported later.
                    testPath = path_in_suite + (
                        subdir,
                        fn,
                        "failed_to_discover_tests_from_gtest",
                    )
                    yield lit.Test.Test(
                        testSuite, testPath, localConfig, file_path=execpath
                    )

    def execute(self, test, litConfig):
        if test.gtest_json_file is None:
            return lit.Test.FAIL, ""

        testPath = test.getSourcePath()
        from lit.cl_arguments import TestOrder

        use_shuffle = TestOrder(litConfig.order) == TestOrder.RANDOM
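        # gtest options are passed through the environment rather than on the
        # command line: GTEST_OUTPUT selects the JSON result file and
        # GTEST_SHUFFLE randomizes test order when lit itself runs in random order.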
        shard_env = {
            "GTEST_OUTPUT": "json:" + test.gtest_json_file,
            "GTEST_SHUFFLE": "1" if use_shuffle else "0",
        }
        if litConfig.gtest_sharding:
            testPath, testName = os.path.split(test.getSourcePath())
            while not os.path.exists(testPath):
                # Handle GTest parameterized and typed tests, whose name includes
                # some '/'s.
                testPath, namePrefix = os.path.split(testPath)
                testName = namePrefix + "/" + testName

            testName, total_shards = os.path.split(testName)
            testName, shard_idx = os.path.split(testName)
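            # A sharded source path looks like, e.g., .../FooTests/3/10; the splits
            # above recover the executable path plus shard_idx ("3") and
            # total_shards ("10"), which googletest's native sharding protocol uses
            # to run only this shard's slice of the tests. Values already present
            # in the environment take precedence, so an outer harness can control
            # sharding itself.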
            shard_env.update(
                {
                    "GTEST_TOTAL_SHARDS": os.environ.get(
                        "GTEST_TOTAL_SHARDS", total_shards
                    ),
                    "GTEST_SHARD_INDEX": os.environ.get("GTEST_SHARD_INDEX", shard_idx),
                }
            )
        test.config.environment.update(shard_env)

        cmd = [testPath]
        cmd = self.prepareCmd(cmd)
        if litConfig.useValgrind:
            cmd = litConfig.valgrindArgs + cmd

        if litConfig.noExecute:
            return lit.Test.PASS, ""

        def get_shard_header(shard_env):
            shard_envs = " ".join([k + "=" + v for k, v in shard_env.items()])
            return f"Script(shard):\n--\n%s %s\n--\n" % (shard_envs, " ".join(cmd))

        shard_header = get_shard_header(shard_env)

        try:
            out, _, exitCode = lit.util.executeCommand(
                cmd,
                env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime,
                redirect_stderr=True,
            )
        except lit.util.ExecuteCommandTimeoutException as e:
            stream_msg = f"\n{e.out}\n--\nexit: {e.exitCode}\n--\n"
            return (
                lit.Test.TIMEOUT,
                f"{shard_header}{stream_msg}Reached "
                f"timeout of {litConfig.maxIndividualTestTime} seconds",
            )

        if not os.path.exists(test.gtest_json_file):
            errmsg = f"shard JSON output does not exist: %s" % (test.gtest_json_file)
            stream_msg = f"\n{out}\n--\nexit: {exitCode}\n--\n"
            return lit.Test.FAIL, shard_header + stream_msg + errmsg

        if exitCode == 0:
            return lit.Test.PASS, ""

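        # Pull one test's captured output out of the combined shard output: the
        # lines between its "[ RUN      ] <name>" and "[  FAILED  ] <name>" markers.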
        def get_test_stdout(test_name):
            res = []
            header = f"[ RUN      ] " + test_name
            footer = f"[  FAILED  ] " + test_name
            in_range = False
            for l in out.splitlines():
                if l.startswith(header):
                    in_range = True
                elif l.startswith(footer):
                    return f"" if len(res) == 0 else "\n".join(res)
                elif in_range:
                    res.append(l)
            assert False, f"gtest did not report the result for " + test_name

        found_failed_test = False

        with open(test.gtest_json_file, encoding="utf-8") as f:
            jf = json.load(f)

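            # When shuffling, record the seed googletest actually used so the
            # reproduction header below replays the same order.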
            if use_shuffle:
                shard_env["GTEST_RANDOM_SEED"] = str(jf["random_seed"])
            output = get_shard_header(shard_env) + "\n"

            for testcase in jf["testsuites"]:
                for testinfo in testcase["testsuite"]:
                    result = testinfo["result"]
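                    # "SUPPRESSED" marks DISABLED_ tests and "SKIPPED" comes from
                    # GTEST_SKIP(); neither counts toward this shard's failures.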
                    if result == "SUPPRESSED" or result == "SKIPPED":
                        continue
                    testname = testcase["name"] + "." + testinfo["name"]
                    header = f"Script:\n--\n%s --gtest_filter=%s\n--\n" % (
                        " ".join(cmd),
                        testname,
                    )
                    if "failures" in testinfo:
                        found_failed_test = True
                        output += header
                        test_out = get_test_stdout(testname)
                        if test_out:
                            output += test_out + "\n\n"
                        for fail in testinfo["failures"]:
                            output += fail["failure"] + "\n"
                        output += "\n"
                    elif result != "COMPLETED":
                        output += header
                        output += "unresolved test result\n"

        # In some situations, like running tests with sanitizers, all tests pass
        # but the shard itself can still fail, e.g. due to memory issues.
        if not found_failed_test:
            output += f"\n{out}\n--\nexit: {exitCode}\n--\n"

        return lit.Test.FAIL, output

    def prepareCmd(self, cmd):
        """Insert an interpreter or wrapper command if needed.

        If cmd[0] ends in .py, the Python executable is prepended, because we
        cannot rely on the system to interpret shebang lines for us on Windows.
        If the caller specified run_under, that wrapper is prepended in front of
        the whole command.
        """
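        # For example (a sketch, assuming run_under=["valgrind", "-q"]):
        #   ["foo_test.py"] -> ["valgrind", "-q", sys.executable, "foo_test.py"]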
        if cmd[0].endswith(".py"):
            cmd = [sys.executable] + cmd
        if self.run_under:
            if isinstance(self.run_under, list):
                cmd = self.run_under + cmd
            else:
                cmd = shlex.split(self.run_under) + cmd
        return cmd

    @staticmethod
    def post_process_shard_results(selected_tests, discovered_tests):
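        # Once the shards have run, replace each shard's single lit result with
        # one result per individual gtest case, read back from the shard's JSON
        # output file.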
        def remove_gtest(tests):
            return [t for t in tests if t.gtest_json_file is None]

        discovered_tests = remove_gtest(discovered_tests)
        gtests = [t for t in selected_tests if t.gtest_json_file]
        selected_tests = remove_gtest(selected_tests)
        for test in gtests:
            # In case gtest has bugs such that no JSON file was emitted.
            if not os.path.exists(test.gtest_json_file):
                selected_tests.append(test)
                discovered_tests.append(test)
                continue

            start_time = test.result.start or 0.0

            has_failure_in_shard = False

            # Load json file to retrieve results.
            with open(test.gtest_json_file, encoding="utf-8") as f:
                try:
                    testsuites = json.load(f)["testsuites"]
                except json.JSONDecodeError as e:
                    raise RuntimeError(
                        "Failed to parse json file: "
                        + test.gtest_json_file
                        + "\n"
                        + e.doc
                    )
                for testcase in testsuites:
                    for testinfo in testcase["testsuite"]:
                        # Ignore disabled tests.
                        if testinfo["result"] == "SUPPRESSED":
                            continue

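                        # For sharded runs the last two path components are the
                        # shard index and count; replace them with the gtest suite
                        # and test names so each case reports as its own lit test.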
                        testPath = test.path_in_suite[:-2] + (
                            testcase["name"],
                            testinfo["name"],
                        )
                        subtest = lit.Test.Test(
                            test.suite, testPath, test.config, test.file_path
                        )

                        testname = testcase["name"] + "." + testinfo["name"]
                        header = f"Script:\n--\n%s --gtest_filter=%s\n--\n" % (
                            test.file_path,
                            testname,
                        )

                        output = ""
                        if testinfo["result"] == "SKIPPED":
                            returnCode = lit.Test.SKIPPED
                        elif "failures" in testinfo:
                            has_failure_in_shard = True
                            returnCode = (
                                lit.Test.XFAIL
                                if test.isExpectedToFail()
                                else lit.Test.FAIL
                            )
                            output = header
                            for fail in testinfo["failures"]:
                                output += fail["failure"] + "\n"
                        elif testinfo["result"] == "COMPLETED":
                            returnCode = lit.Test.PASS
                        else:
                            returnCode = lit.Test.UNRESOLVED
                            output = header + "unresolved test result\n"

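                        # gtest reports "time" as a string such as "0.125s"; drop
                        # the trailing unit before converting to seconds.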
                        elapsed_time = float(testinfo["time"][:-1])
                        res = lit.Test.Result(returnCode, output, elapsed_time)
                        res.pid = test.result.pid or 0
                        res.start = start_time
                        start_time = start_time + elapsed_time
                        subtest.setResult(res)

                        selected_tests.append(subtest)
                        discovered_tests.append(subtest)
            os.remove(test.gtest_json_file)

            if not has_failure_in_shard and test.isFailure():
                selected_tests.append(test)
                discovered_tests.append(test)

        return selected_tests, discovered_tests
367