xref: /llvm-project/llvm/utils/lit/lit/Test.py (revision b71edfaa4ec3c998aadb35255ce2f60bba2940b0)
import itertools
import os
from json import JSONEncoder

from lit.BooleanExpression import BooleanExpression
from lit.TestTimes import read_test_times

# Test result codes.


class ResultCode(object):
    """Test result codes."""

    # All result codes (including user-defined ones) in declaration order
    _all_codes = []

    @staticmethod
    def all_codes():
        return ResultCode._all_codes

    # We override __new__ and __getnewargs__ to ensure that pickling still
    # provides unique ResultCode objects in any particular instance.
    _instances = {}

    def __new__(cls, name, label, isFailure):
        res = cls._instances.get(name)
        if res is None:
            cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
        return res

    def __getnewargs__(self):
        return (self.name, self.label, self.isFailure)

    def __init__(self, name, label, isFailure):
        self.name = name
        self.label = label
        self.isFailure = isFailure
        ResultCode._all_codes.append(self)

    def __repr__(self):
        return "%s%r" % (self.__class__.__name__, (self.name, self.isFailure))


# Successes
EXCLUDED = ResultCode("EXCLUDED", "Excluded", False)
SKIPPED = ResultCode("SKIPPED", "Skipped", False)
UNSUPPORTED = ResultCode("UNSUPPORTED", "Unsupported", False)
PASS = ResultCode("PASS", "Passed", False)
FLAKYPASS = ResultCode("FLAKYPASS", "Passed With Retry", False)
XFAIL = ResultCode("XFAIL", "Expectedly Failed", False)
# Failures
UNRESOLVED = ResultCode("UNRESOLVED", "Unresolved", True)
TIMEOUT = ResultCode("TIMEOUT", "Timed Out", True)
FAIL = ResultCode("FAIL", "Failed", True)
XPASS = ResultCode("XPASS", "Unexpectedly Passed", True)

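# --- Editor's illustrative sketch (not part of the original module) ---
# ResultCode interns instances by name, so user-defined codes are registered
# in declaration order and pickling round-trips to the identical object. The
# helper name below is an editor-chosen illustration, not lit API.
def _example_result_code_identity():
    import pickle

    assert FAIL.isFailure and not PASS.isFailure
    assert PASS in ResultCode.all_codes()
    # __new__/__getnewargs__ make unpickling return the cached instance.
    assert pickle.loads(pickle.dumps(PASS)) is PASS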

# Test metric values.


class MetricValue(object):
    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of the
        console output.
        """
        raise RuntimeError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the JSON test
        output.
        """
        raise RuntimeError("abstract method")


class IntMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        return str(self.value)

    def todata(self):
        return self.value


class RealMetricValue(MetricValue):
    def __init__(self, value):
        self.value = value

    def format(self):
        return "%.4f" % self.value

    def todata(self):
        return self.value


class JSONMetricValue(MetricValue):
    """
    JSONMetricValue is used for types that are representable in the output
    but that are otherwise uninterpreted.
    """

    def __init__(self, value):
        # Ensure the value is serializable by trying to encode it.
        # WARNING: The value may change before it is encoded again, and may
        #          not be encodable after the change.
        try:
            e = JSONEncoder()
            e.encode(value)
        except TypeError:
            raise
        self.value = value

    def format(self):
        e = JSONEncoder(indent=2, sort_keys=True)
        return e.encode(self.value)

    def todata(self):
        return self.value

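# --- Editor's illustrative sketch (not part of the original module) ---
# JSONMetricValue accepts anything the stdlib JSONEncoder can encode and
# rejects everything else at construction time.
def _example_json_metric_value():
    metric = JSONMetricValue({"passes": 3, "failures": 0})
    assert metric.todata() == {"passes": 3, "failures": 0}
    try:
        JSONMetricValue(object())  # not JSON-serializable
    except TypeError:
        pass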

def toMetricValue(value):
    if isinstance(value, MetricValue):
        return value
    elif isinstance(value, int):
        return IntMetricValue(value)
    elif isinstance(value, float):
        return RealMetricValue(value)
    else:
        # 'long' is only present in python2
        try:
            if isinstance(value, long):
                return IntMetricValue(value)
        except NameError:
            pass

        # Try to create a JSONMetricValue and let the constructor throw
        # if value is not a valid type.
        return JSONMetricValue(value)

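# --- Editor's illustrative sketch (not part of the original module) ---
# toMetricValue wraps plain Python values in the matching MetricValue
# subclass; anything that is not numeric must be JSON-serializable.
def _example_to_metric_value():
    assert isinstance(toMetricValue(42), IntMetricValue)
    assert toMetricValue(0.25).format() == "0.2500"
    assert isinstance(toMetricValue({"k": [1, 2]}), JSONMetricValue)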

# Test results.


class Result(object):
    """Wrapper for the results of executing an individual test."""

    def __init__(self, code, output="", elapsed=None):
        # The result code.
        self.code = code
        # The test output.
        self.output = output
        # The wall time taken to execute the test, if timed.
        self.elapsed = elapsed
        self.start = None
        self.pid = None
        # The metrics reported by this test.
        self.metrics = {}
        # The micro-test results reported by this test.
        self.microResults = {}

    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and
        value. It is an error to attempt to attach a metric with the same
        name multiple times.

        The value must be an instance of a MetricValue subclass.
        """
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value

    def addMicroResult(self, name, microResult):
        """
        addMicroResult(name, microResult)

        Attach a micro-test result to the test result, with the given name and
        result.  It is an error to attempt to attach a micro-test with the
        same name multiple times.

        Each micro-test result must be an instance of the Result class.
        """
        if name in self.microResults:
            raise ValueError("Result already includes microResult for %r" % (name,))
        if not isinstance(microResult, Result):
            raise TypeError("unexpected MicroResult value %r" % (microResult,))
        self.microResults[name] = microResult

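# --- Editor's illustrative sketch (not part of the original module) ---
# A Result pairs a ResultCode with output, timing, metrics, and named
# micro-test results; duplicate metric or micro-test names are rejected.
def _example_result_usage():
    result = Result(PASS, output="ok", elapsed=0.01)
    result.addMetric("peak_rss_mb", IntMetricValue(128))
    result.addMicroResult("subtest.setup", Result(PASS))
    assert not result.code.isFailure
    assert result.metrics["peak_rss_mb"].todata() == 128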

# Test classes.


class TestSuite:
    """TestSuite - Information on a group of tests.

    A test suite groups together a set of logically related tests.
    """

    def __init__(self, name, source_root, exec_root, config):
        self.name = name
        self.source_root = source_root
        self.exec_root = exec_root
        # The test suite configuration.
        self.config = config

        self.test_times = read_test_times(self)

    def getSourcePath(self, components):
        return os.path.join(self.source_root, *components)

    def getExecPath(self, components):
        return os.path.join(self.exec_root, *components)

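# --- Editor's illustrative sketch (not part of the original module) ---
# A TestSuite simply joins path components below its source and exec roots.
# The bare object() stands in for a real lit configuration, which __init__
# only stores; this assumes read_test_times tolerates roots without a
# .lit_test_times.txt file.
def _example_test_suite_paths():
    import tempfile

    root = tempfile.mkdtemp()
    suite = TestSuite("demo", root, root, config=object())
    assert suite.getSourcePath(("sub", "a.test")) == os.path.join(root, "sub", "a.test")
    assert suite.getExecPath(("sub",)) == os.path.join(root, "sub")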

class Test:
    """Test - Information on a single test instance."""

    def __init__(
        self, suite, path_in_suite, config, file_path=None, gtest_json_file=None
    ):
        self.suite = suite
        self.path_in_suite = path_in_suite
        self.config = config
        self.file_path = file_path
        self.gtest_json_file = gtest_json_file

        # A list of conditions under which this test is expected to fail.
        # Each condition is a boolean expression of features, or '*'.
        # These can optionally be provided by test format handlers,
        # and will be honored when the test result is supplied.
        self.xfails = []

        # If true, ignore all items in self.xfails.
        self.xfail_not = False

        # A list of conditions that must be satisfied before running the test.
        # Each condition is a boolean expression of features. All of them
        # must be True for the test to run.
        self.requires = []

        # A list of conditions that prevent execution of the test.
        # Each condition is a boolean expression of features. All of them
        # must be False for the test to run.
        self.unsupported = []

        # An optional number of retries allowed before the test finally succeeds.
        # The test is run at most once plus the number of retries specified here.
        self.allowed_retries = getattr(config, "test_retry_attempts", 0)

        # The test result, once complete.
        self.result = None

        # The previous test failure state, if applicable.
        self.previous_failure = False

        # The previous test elapsed time, if applicable.
        self.previous_elapsed = 0.0

        if suite.test_times and "/".join(path_in_suite) in suite.test_times:
            time = suite.test_times["/".join(path_in_suite)]
            self.previous_elapsed = abs(time)
            self.previous_failure = time < 0

    def setResult(self, result):
        assert self.result is None, "result already set"
        assert isinstance(result, Result), "unexpected result type"
        try:
            expected_to_fail = self.isExpectedToFail()
        except ValueError as err:
            # Syntax error in an XFAIL line.
            result.code = UNRESOLVED
            result.output = str(err)
        else:
            if expected_to_fail:
                # pass -> unexpected pass
                if result.code is PASS:
                    result.code = XPASS
                # fail -> expected fail
                elif result.code is FAIL:
                    result.code = XFAIL
        self.result = result

    def isFailure(self):
        assert self.result
        return self.result.code.isFailure

    def getFullName(self):
        return self.suite.config.name + " :: " + "/".join(self.path_in_suite)

    def getFilePath(self):
        if self.file_path:
            return self.file_path
        return self.getSourcePath()

    def getSourcePath(self):
        return self.suite.getSourcePath(self.path_in_suite)

    def getExecPath(self):
        return self.suite.getExecPath(self.path_in_suite)

    def isExpectedToFail(self):
        """
        isExpectedToFail() -> bool

        Check whether this test is expected to fail in the current
        configuration. This check relies on the test's xfails property, which
        for some test formats may not be computed until the test has first
        been executed.
        Throws ValueError if an XFAIL line has a syntax error.
        """

        if self.xfail_not:
            return False

        features = self.config.available_features

        # Check if any of the xfails match an available feature.
        for item in self.xfails:
            # If this is the wildcard, it always fails.
            if item == "*":
                return True

            # If this boolean expression of features evaluates to True, the
            # test is expected to fail.
            try:
                if BooleanExpression.evaluate(item, features):
                    return True
            except ValueError as e:
                raise ValueError("Error in XFAIL list:\n%s" % str(e))

        return False

    def isWithinFeatureLimits(self):
        """
        isWithinFeatureLimits() -> bool

        A test is within the feature limits set by run_only_tests if
        1. the test's requirements ARE satisfied by the available features
        2. the test's requirements ARE NOT satisfied after the limiting
           features are removed from the available features

        Throws ValueError if a REQUIRES line has a syntax error.
        """

        if not self.config.limit_to_features:
            return True  # No limits. Run it.

        # Check the requirements as-is (#1)
        if self.getMissingRequiredFeatures():
            return False

        # Check the requirements after removing the limiting features (#2)
        featuresMinusLimits = [
            f
            for f in self.config.available_features
            if f not in self.config.limit_to_features
        ]
        if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):
            return False

        return True

    def getMissingRequiredFeaturesFromList(self, features):
        try:
            return [
                item
                for item in self.requires
                if not BooleanExpression.evaluate(item, features)
            ]
        except ValueError as e:
            raise ValueError("Error in REQUIRES list:\n%s" % str(e))

    def getMissingRequiredFeatures(self):
        """
        getMissingRequiredFeatures() -> list of strings

        Returns a list of features from REQUIRES that are not satisfied.
        Throws ValueError if a REQUIRES line has a syntax error.
        """

        features = self.config.available_features
        return self.getMissingRequiredFeaturesFromList(features)

    def getUnsupportedFeatures(self):
        """
        getUnsupportedFeatures() -> list of strings

        Returns a list of features from UNSUPPORTED that are present
        in the test configuration's features.
        Throws ValueError if an UNSUPPORTED line has a syntax error.
        """

        features = self.config.available_features

        try:
            return [
                item
                for item in self.unsupported
                if BooleanExpression.evaluate(item, features)
            ]
        except ValueError as e:
            raise ValueError("Error in UNSUPPORTED list:\n%s" % str(e))

    def getUsedFeatures(self):
        """
        getUsedFeatures() -> list of strings

        Returns a list of all features appearing in XFAIL, UNSUPPORTED and
        REQUIRES annotations for this test.
        """
        import lit.TestRunner

        parsed = lit.TestRunner._parseKeywords(
            self.getSourcePath(), require_script=False
        )
        feature_keywords = ("UNSUPPORTED:", "REQUIRES:", "XFAIL:")
        boolean_expressions = itertools.chain.from_iterable(
            parsed[k] or [] for k in feature_keywords
        )
        tokens = itertools.chain.from_iterable(
            BooleanExpression.tokenize(expr)
            for expr in boolean_expressions
            if expr != "*"
        )
        matchExpressions = set(filter(BooleanExpression.isMatchExpression, tokens))
        return matchExpressions
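

# --- Editor's illustrative sketches (not part of the original module) ---
# The helpers below show how Test applies its XFAIL, REQUIRES and UNSUPPORTED
# annotations. The types.SimpleNamespace objects are minimal stand-ins for
# real lit suite/config objects, and the function names are editor-chosen
# illustrations, not lit API.
def _example_xfail_rewriting():
    from types import SimpleNamespace

    suite = SimpleNamespace(test_times=None)
    config = SimpleNamespace(available_features={"linux", "x86_64"})
    test = Test(suite, ("a.test",), config)
    # A matching XFAIL expression turns FAIL into XFAIL (and PASS into XPASS).
    test.xfails = ["linux && x86_64"]
    test.setResult(Result(FAIL))
    assert test.result.code is XFAIL


def _example_requires_and_unsupported():
    from types import SimpleNamespace

    suite = SimpleNamespace(test_times=None)
    config = SimpleNamespace(available_features={"linux"})
    test = Test(suite, ("b.test",), config)
    # REQUIRES entries that do not evaluate to True are reported as missing;
    # UNSUPPORTED entries that do evaluate to True would disable the test.
    test.requires = ["linux", "asan"]
    test.unsupported = ["windows"]
    assert test.getMissingRequiredFeatures() == ["asan"]
    assert test.getUnsupportedFeatures() == []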
437