# DExTer : Debugging Experience Tester
# ~~~~~~   ~         ~~         ~   ~~
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Test tool."""

import csv
import math
import os
import pickle
import platform
import shutil

from dex.command.ParseCommand import get_command_infos
from dex.debugger.Debuggers import run_debugger_subprocess
from dex.debugger.DebuggerControllers.DefaultController import DefaultController
from dex.debugger.DebuggerControllers.ConditionalController import ConditionalController
from dex.dextIR.DextIR import DextIR
from dex.heuristic import Heuristic
from dex.tools import TestToolBase
from dex.utils.Exceptions import (
    BuildScriptException,
    DebuggerException,
    HeuristicException,
)
from dex.utils.PrettyOutputBase import Stream
from dex.utils.ReturnCode import ReturnCode


class TestCase(object):
    def __init__(self, context, name, heuristic, error):
        self.context = context
        self.name = name
        self.heuristic = heuristic
        self.error = error

    # Failed tests have no heuristic; each property below falls back to nan
    # so that downstream reporting can still format a value.
    @property
    def penalty(self):
        try:
            return self.heuristic.penalty
        except AttributeError:
            return float("nan")

    @property
    def max_penalty(self):
        try:
            return self.heuristic.max_penalty
        except AttributeError:
            return float("nan")

    @property
    def score(self):
        try:
            return self.heuristic.score
        except AttributeError:
            return float("nan")

    def __str__(self):
        if self.error and self.context.options.verbose:
            verbose_error = str(self.error)
        else:
            verbose_error = ""

        if self.error:
            script_error = (
                " : {}".format(self.error.script_error.splitlines()[0])
                if getattr(self.error, "script_error", None)
                else ""
            )

            error = " [{}{}]".format(str(self.error).splitlines()[0], script_error)
        else:
            error = ""

        try:
            summary = self.heuristic.summary_string
        except AttributeError:
            summary = "<r>nan/nan (nan)</>"
        return "{}: {}{}\n{}".format(self.name, summary, error, verbose_error)

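# A rendered TestCase looks roughly like the following (values illustrative;
# the summary comes from Heuristic.summary_string, with the nan fallback above
# used when no heuristic was produced):
#
#   tests/nested/test.cpp: 2/20 (0.9000)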

class Tool(TestToolBase):
    """Run the specified DExTer test(s) with the specified compiler and linker
    options, produce a dextIR file, and print the debugging experience score
    calculated by the DExTer heuristic.
    """

    def __init__(self, *args, **kwargs):
        super(Tool, self).__init__(*args, **kwargs)
        self._test_cases = []

    @property
    def name(self):
        return "DExTer test"

    def add_tool_arguments(self, parser, defaults):
        parser.add_argument(
            "--fail-lt",
            type=float,
            default=0.0,  # By default TEST always succeeds.
            help="exit with status FAIL(2) if the test result"
            " is less than this value.",
            metavar="<float>",
        )
        parser.add_argument(
            "--calculate-average",
            action="store_true",
            help="calculate the average score of every test run",
        )
        super(Tool, self).add_tool_arguments(parser, defaults)

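    # Illustrative invocation exercising the flags above; the builder,
    # debugger, and compiler flags shown are assumptions that depend on the
    # local setup:
    #
    #   $ python3 dexter.py test --builder clang --debugger lldb \
    #       --cflags "-O0 -g" --fail-lt 0.5 --calculate-average tests/
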
    def _init_debugger_controller(self):
        step_collection = DextIR(
            executable_path=self.context.options.executable,
            source_paths=self.context.options.source_files,
            dexter_version=self.context.version,
        )

        step_collection.commands, new_source_files = get_command_infos(
            self.context.options.test_files, self.context.options.source_root_dir
        )

        self.context.options.source_files.extend(list(new_source_files))

        if "DexLimitSteps" in step_collection.commands:
            debugger_controller = ConditionalController(self.context, step_collection)
        else:
            debugger_controller = DefaultController(self.context, step_collection)

        return debugger_controller

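    # For reference, the ConditionalController branch above is taken when a
    # test source contains a DexLimitSteps command, e.g. (illustrative syntax;
    # see dexter's Commands documentation for the exact form):
    #
    #   // DexLimitSteps('i', '0', from_line=10, to_line=12)
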
    def _get_steps(self):
        """Generate a list of debugger steps from a test case."""
        debugger_controller = self._init_debugger_controller()
        debugger_controller = run_debugger_subprocess(
            debugger_controller, self.context.working_directory.path
        )
        steps = debugger_controller.step_collection
        return steps

    def _get_results_basename(self, test_name):
        def splitall(x):
            # Peel path components off one at a time. Note this assumes
            # test_name is a relative path: os.path.split("/") returns
            # ("/", ""), which would never terminate here.
            while len(x) > 0:
                x, y = os.path.split(x)
                yield y

        all_components = reversed(list(splitall(test_name)))
        return "_".join(all_components)

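    # For example (illustrative): a test named "nested/dir/test.cpp" yields
    # the results basename "nested_dir_test.cpp".
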
    def _get_results_path(self, test_name):
        """Returns the path to the test results directory for the test denoted
        by test_name.
        """
        assert self.context.options.results_directory is not None
        return os.path.join(
            self.context.options.results_directory,
            self._get_results_basename(test_name),
        )

    def _get_results_text_path(self, test_name):
        """Returns the path to the results .txt file for the test denoted by
        test_name.
        """
        test_results_path = self._get_results_path(test_name)
        return "{}.txt".format(test_results_path)

    def _get_results_pickle_path(self, test_name):
        """Returns the path to the results .dextIR file for the test denoted
        by test_name.
        """
        test_results_path = self._get_results_path(test_name)
        return "{}.dextIR".format(test_results_path)

    def _record_steps(self, test_name, steps):
        """Write the set of steps out to the test's .txt and .dextIR results
        files if a results directory has been specified.
        """
        if self.context.options.results_directory:
            output_text_path = self._get_results_text_path(test_name)
            with open(output_text_path, "w") as fp:
                self.context.o.auto(str(steps), stream=Stream(fp))

            output_dextIR_path = self._get_results_pickle_path(test_name)
            with open(output_dextIR_path, "wb") as fp:
                pickle.dump(steps, fp, protocol=pickle.HIGHEST_PROTOCOL)

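    # For reference, the .dextIR file written above is a plain pickle stream
    # and can be loaded back for offline inspection (path illustrative):
    #
    #   with open("nested_dir_test.cpp.dextIR", "rb") as fp:
    #       steps = pickle.load(fp)
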
    def _record_score(self, test_name, heuristic):
        """Write out the test's heuristic score to the results .txt file
        if a results directory has been specified.
        """
        if self.context.options.results_directory:
            output_text_path = self._get_results_text_path(test_name)
            with open(output_text_path, "a") as fp:
                self.context.o.auto(heuristic.verbose_output, stream=Stream(fp))

    def _record_test_and_display(self, test_case):
        """Output the test case to the output stream and record it internally
        for later handling.
        """
        self.context.o.auto(test_case)
        self._test_cases.append(test_case)

    def _record_failed_test(self, test_name, exception):
        """Instantiate a failed test case with the failure exception and store
        it internally.
        """
        test_case = TestCase(self.context, test_name, None, exception)
        self._record_test_and_display(test_case)

    def _record_successful_test(self, test_name, steps, heuristic):
        """Instantiate a successful test case and store it for later handling.
        Display verbose output for the test case if required.
        """
        test_case = TestCase(self.context, test_name, heuristic, None)
        self._record_test_and_display(test_case)
        if self.context.options.verbose:
            self.context.o.auto("\n{}\n".format(steps))
            self.context.o.auto(heuristic.verbose_output)

    def _run_test(self, test_name):
        """Attempt to run the test files specified in options.source_files.
        Store the result internally in self._test_cases.
        """
        try:
            if self.context.options.binary:
                if platform.system() == "Darwin" and os.path.exists(
                    self.context.options.binary + ".dSYM"
                ):
                    # On Darwin the debug info lives in a .dSYM bundle next to
                    # the binary, which lldb might not find once the binary is
                    # copied; copy the bundle into the tmp working directory
                    # alongside it.
                    shutil.copytree(
                        self.context.options.binary + ".dSYM",
                        self.context.options.executable + ".dSYM",
                    )
                # Copy the user's binary into the tmp working directory.
                shutil.copy(
                    self.context.options.binary, self.context.options.executable
                )
            steps = self._get_steps()
            self._record_steps(test_name, steps)
            heuristic_score = Heuristic(self.context, steps)
            self._record_score(test_name, heuristic_score)
        except (BuildScriptException, DebuggerException, HeuristicException) as e:
            self._record_failed_test(test_name, e)
            return

        self._record_successful_test(test_name, steps, heuristic_score)

    def _handle_results(self) -> ReturnCode:
        return_code = ReturnCode.OK
        options = self.context.options

        if not options.verbose:
            self.context.o.auto("\n")

        if options.calculate_average:
            # Calculate and print the average score over all tests that
            # produced one.
            score_sum = 0.0
            num_tests = 0
            for test_case in self._test_cases:
                score = test_case.score
                if not test_case.error and not math.isnan(score):
                    score_sum += score
                    num_tests += 1

            if num_tests != 0:
                print("@avg: ({:.4f})".format(score_sum / num_tests))

        # A nan score compares False against fail_lt, so tests that never
        # produced a score are reported as failures via test.error instead.
        has_failed = lambda test: test.score < options.fail_lt or test.error
        if any(map(has_failed, self._test_cases)):
            return_code = ReturnCode.FAIL

        if options.results_directory:
            summary_path = os.path.join(options.results_directory, "summary.csv")
            with open(summary_path, mode="w", newline="") as fp:
                writer = csv.writer(fp, delimiter=",")
                writer.writerow(["Test Case", "Score", "Error"])

                for test_case in self._test_cases:
                    writer.writerow(
                        [
                            test_case.name,
                            "{:.4f}".format(test_case.score),
                            test_case.error,
                        ]
                    )

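        # The resulting summary.csv looks roughly like this (values are
        # illustrative; csv.writer writes an empty field for a None error):
        #
        #   Test Case,Score,Error
        #   nested_dir_test.cpp,0.7500,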
        return return_code