import itertools
import os
from json import JSONEncoder

from lit.BooleanExpression import BooleanExpression
from lit.TestTimes import read_test_times

# Test result codes.

class ResultCode(object):
    """Test result codes.

    Instances are interned by name: constructing a ResultCode with a name
    that already exists returns the existing instance.
    """

    # All result codes (including user-defined ones) in declaration order
    _all_codes = []

    @staticmethod
    def all_codes():
        return ResultCode._all_codes

    # We override __new__ and __getnewargs__ to ensure that pickling still
    # provides unique ResultCode objects in any particular instance.
    _instances = {}

    def __new__(cls, name, label, isFailure):
        res = cls._instances.get(name)
        if res is None:
            cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
        return res

    def __getnewargs__(self):
        return (self.name, self.label, self.isFailure)

    def __init__(self, name, label, isFailure):
        self.name = name
        self.label = label
        self.isFailure = isFailure
        # __new__ returns the cached instance when the name was already
        # registered, but __init__ still runs on it; without this guard a
        # repeated construction would append the same code to _all_codes
        # twice.
        if self not in ResultCode._all_codes:
            ResultCode._all_codes.append(self)

    def __repr__(self):
        return '%s%r' % (self.__class__.__name__,
                         (self.name, self.isFailure))


# Successes
EXCLUDED = ResultCode('EXCLUDED', 'Excluded', False)
SKIPPED = ResultCode('SKIPPED', 'Skipped', False)
UNSUPPORTED = ResultCode('UNSUPPORTED', 'Unsupported', False)
PASS = ResultCode('PASS', 'Passed', False)
FLAKYPASS = ResultCode('FLAKYPASS', 'Passed With Retry', False)
XFAIL = ResultCode('XFAIL', 'Expectedly Failed', False)
# Failures
UNRESOLVED = ResultCode('UNRESOLVED', 'Unresolved', True)
TIMEOUT = ResultCode('TIMEOUT', 'Timed Out', True)
FAIL = ResultCode('FAIL', 'Failed', True)
XPASS = ResultCode('XPASS', 'Unexpectedly Passed', True)


# Test metric values.

class MetricValue(object):
    """Abstract base class for values attached to a Result via addMetric()."""

    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of the
        console output.
        """
        raise RuntimeError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the JSON test
        output.
        """
        raise RuntimeError("abstract method")


class IntMetricValue(MetricValue):
    """Metric holding an integer value."""

    def __init__(self, value):
        self.value = value

    def format(self):
        return str(self.value)

    def todata(self):
        return self.value


class RealMetricValue(MetricValue):
    """Metric holding a floating point value, shown with 4 decimal places."""

    def __init__(self, value):
        self.value = value

    def format(self):
        return '%.4f' % self.value

    def todata(self):
        return self.value


class JSONMetricValue(MetricValue):
    """
    JSONMetricValue is used for types that are representable in the output
    but that are otherwise uninterpreted.
    """
    def __init__(self, value):
        # Ensure the value is serializable by trying to encode it; this
        # raises TypeError for unsupported types.  (The previous
        # try/except TypeError: raise wrapper was a no-op.)
        # WARNING: The value may change before it is encoded again, and may
        # not be encodable after the change.
        JSONEncoder().encode(value)
        self.value = value

    def format(self):
        e = JSONEncoder(indent=2, sort_keys=True)
        return e.encode(self.value)

    def todata(self):
        return self.value


def toMetricValue(value):
    """
    toMetricValue(value) -> MetricValue

    Wrap a raw value in the appropriate MetricValue subclass.  Raises
    TypeError (via JSONEncoder) if the value is not JSON-serializable.
    """
    if isinstance(value, MetricValue):
        return value
    elif isinstance(value, int):
        return IntMetricValue(value)
    elif isinstance(value, float):
        return RealMetricValue(value)
    else:
        # 'long' is only present in python2
        try:
            if isinstance(value, long):
                return IntMetricValue(value)
        except NameError:
            pass

        # Try to create a JSONMetricValue and let the constructor throw
        # if value is not a valid type.
        return JSONMetricValue(value)


# Test results.
class Result(object):
    """Wrapper for the results of executing an individual test."""

    def __init__(self, code, output='', elapsed=None):
        # The result code.
        self.code = code
        # The test output.
        self.output = output
        # The wall timing to execute the test, if timing.
        self.elapsed = elapsed
        # Start time of the run, if recorded by the runner.
        self.start = None
        # Process id of the test process, if recorded by the runner.
        self.pid = None
        # The metrics reported by this test.
        self.metrics = {}
        # The micro-test results reported by this test.
        self.microResults = {}

    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and list of
        values. It is an error to attempt to attach the metrics with the same
        name multiple times.

        Each value must be an instance of a MetricValue subclass.
        """
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (
                    name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value

    def addMicroResult(self, name, microResult):
        """
        addMicroResult(name, microResult)

        Attach a micro-test result to the test result, with the given name and
        result. It is an error to attempt to attach a micro-test with the
        same name multiple times.

        Each micro-test result must be an instance of the Result class.
        """
        if name in self.microResults:
            raise ValueError("Result already includes microResult for %r" % (
                   name,))
        if not isinstance(microResult, Result):
            raise TypeError("unexpected MicroResult value %r" % (microResult,))
        self.microResults[name] = microResult


# Test classes.

class TestSuite:
    """TestSuite - Information on a group of tests.

    A test suite groups together a set of logically related tests.
    """

    def __init__(self, name, source_root, exec_root, config):
        self.name = name
        self.source_root = source_root
        self.exec_root = exec_root
        # The test suite configuration.
        self.config = config

        # Mapping of test id -> elapsed time from the previous run (negative
        # values mark previous failures), loaded from disk.
        self.test_times = read_test_times(self)

    def getSourcePath(self, components):
        """Return the source path formed by joining components to source_root."""
        return os.path.join(self.source_root, *components)

    def getExecPath(self, components):
        """Return the execution path formed by joining components to exec_root."""
        return os.path.join(self.exec_root, *components)


class Test:
    """Test - Information on a single test instance."""

    def __init__(self, suite, path_in_suite, config, file_path=None):
        self.suite = suite
        self.path_in_suite = path_in_suite
        self.config = config
        self.file_path = file_path

        # A list of conditions under which this test is expected to fail.
        # Each condition is a boolean expression of features and target
        # triple parts. These can optionally be provided by test format
        # handlers, and will be honored when the test result is supplied.
        self.xfails = []

        # A list of conditions that must be satisfied before running the test.
        # Each condition is a boolean expression of features. All of them
        # must be True for the test to run.
        # FIXME should target triple parts count here too?
        self.requires = []

        # A list of conditions that prevent execution of the test.
        # Each condition is a boolean expression of features and target
        # triple parts. All of them must be False for the test to run.
        self.unsupported = []

        # An optional number of retries allowed before the test finally succeeds.
        # The test is run at most once plus the number of retries specified here.
        self.allowed_retries = getattr(config, 'test_retry_attempts', 0)

        # The test result, once complete.
        self.result = None

        # The previous test failure state, if applicable.
        self.previous_failure = False

        # The previous test elapsed time, if applicable.
        self.previous_elapsed = 0.0

        # Recorded times are keyed by the '/'-joined path within the suite;
        # a negative time marks a test that failed on its previous run.
        # (Join once instead of recomputing it for lookup and access.)
        test_id = '/'.join(path_in_suite)
        if test_id in suite.test_times:
            time = suite.test_times[test_id]
            self.previous_elapsed = abs(time)
            self.previous_failure = time < 0

    def setResult(self, result):
        assert self.result is None, "result already set"
        assert isinstance(result, Result), "unexpected result type"
        try:
            expected_to_fail = self.isExpectedToFail()
        except ValueError as err:
            # Syntax error in an XFAIL line.
            result.code = UNRESOLVED
            result.output = str(err)
        else:
            if expected_to_fail:
                # pass -> unexpected pass
                if result.code is PASS:
                    result.code = XPASS
                # fail -> expected fail
                elif result.code is FAIL:
                    result.code = XFAIL
        self.result = result

    def isFailure(self):
        assert self.result
        return self.result.code.isFailure

    def getFullName(self):
        return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)

    def getFilePath(self):
        # Prefer the explicit file path if the test format supplied one.
        if self.file_path:
            return self.file_path
        return self.getSourcePath()

    def getSourcePath(self):
        return self.suite.getSourcePath(self.path_in_suite)

    def getExecPath(self):
        return self.suite.getExecPath(self.path_in_suite)

    def isExpectedToFail(self):
        """
        isExpectedToFail() -> bool

        Check whether this test is expected to fail in the current
        configuration. This check relies on the test xfails property which by
        some test formats may not be computed until the test has first been
        executed.
        Throws ValueError if an XFAIL line has a syntax error.
        """

        features = self.config.available_features
        triple = getattr(self.suite.config, 'target_triple', "")

        # Check if any of the xfails match an available feature or the target.
        for item in self.xfails:
            # If this is the wildcard, it always fails.
            if item == '*':
                return True

            # If this is a True expression of features and target triple parts,
            # it fails.
            try:
                if BooleanExpression.evaluate(item, features, triple):
                    return True
            except ValueError as e:
                raise ValueError('Error in XFAIL list:\n%s' % str(e))

        return False

    def isWithinFeatureLimits(self):
        """
        isWithinFeatureLimits() -> bool

        A test is within the feature limits set by run_only_tests if
        1. the test's requirements ARE satisfied by the available features
        2. the test's requirements ARE NOT satisfied after the limiting
           features are removed from the available features

        Throws ValueError if a REQUIRES line has a syntax error.
        """

        if not self.config.limit_to_features:
            return True  # No limits. Run it.

        # Check the requirements as-is (#1)
        if self.getMissingRequiredFeatures():
            return False

        # Check the requirements after removing the limiting features (#2)
        featuresMinusLimits = [f for f in self.config.available_features
                               if f not in self.config.limit_to_features]
        if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):
            return False

        return True

    def getMissingRequiredFeaturesFromList(self, features):
        """Return the REQUIRES expressions not satisfied by `features`."""
        try:
            return [item for item in self.requires
                    if not BooleanExpression.evaluate(item, features)]
        except ValueError as e:
            raise ValueError('Error in REQUIRES list:\n%s' % str(e))

    def getMissingRequiredFeatures(self):
        """
        getMissingRequiredFeatures() -> list of strings

        Returns a list of features from REQUIRES that are not satisfied.
        Throws ValueError if a REQUIRES line has a syntax error.
        """

        features = self.config.available_features
        return self.getMissingRequiredFeaturesFromList(features)

    def getUnsupportedFeatures(self):
        """
        getUnsupportedFeatures() -> list of strings

        Returns a list of features from UNSUPPORTED that are present
        in the test configuration's features or target triple.
        Throws ValueError if an UNSUPPORTED line has a syntax error.
        """

        features = self.config.available_features
        triple = getattr(self.suite.config, 'target_triple', "")

        try:
            return [item for item in self.unsupported
                    if BooleanExpression.evaluate(item, features, triple)]
        except ValueError as e:
            raise ValueError('Error in UNSUPPORTED list:\n%s' % str(e))

    def getUsedFeatures(self):
        """
        getUsedFeatures() -> list of strings

        Returns a list of all features appearing in XFAIL, UNSUPPORTED and
        REQUIRES annotations for this test.
        """
        # Imported lazily to avoid a circular dependency with TestRunner.
        import lit.TestRunner
        parsed = lit.TestRunner._parseKeywords(self.getSourcePath(),
                                               require_script=False)
        feature_keywords = ('UNSUPPORTED:', 'REQUIRES:', 'XFAIL:')
        boolean_expressions = itertools.chain.from_iterable(
            parsed[k] or [] for k in feature_keywords
        )
        # '*' is the wildcard XFAIL marker, not a feature expression.
        tokens = itertools.chain.from_iterable(
            BooleanExpression.tokenize(expr) for expr in
            boolean_expressions if expr != '*'
        )
        identifiers = set(filter(BooleanExpression.isIdentifier, tokens))
        return identifiers