import itertools
import os
from json import JSONEncoder

from lit.BooleanExpression import BooleanExpression
from lit.TestTimes import read_test_times

# Test result codes.

class ResultCode(object):
    """Test result codes."""

    # All result codes (including user-defined ones) in declaration order.
    _all_codes = []

    @staticmethod
    def all_codes():
        return ResultCode._all_codes

    # We override __new__ and __getnewargs__ to ensure that pickling still
    # provides unique ResultCode objects in any particular instance.
    _instances = {}

    def __new__(cls, name, label, isFailure):
        # Intern instances by name: a second construction with the same
        # name yields the same object.
        res = cls._instances.get(name)
        if res is None:
            cls._instances[name] = res = super(ResultCode, cls).__new__(cls)
        return res

    def __getnewargs__(self):
        return (self.name, self.label, self.isFailure)

    def __init__(self, name, label, isFailure):
        self.name = name
        self.label = label
        self.isFailure = isFailure
        # __new__ may have returned an already-registered instance (they
        # are interned by name), and __init__ still runs on it; guard the
        # append so _all_codes records each code exactly once instead of
        # accumulating duplicates.
        if self not in ResultCode._all_codes:
            ResultCode._all_codes.append(self)

    def __repr__(self):
        return '%s%r' % (self.__class__.__name__,
                         (self.name, self.isFailure))


# Successes
EXCLUDED = ResultCode('EXCLUDED', 'Excluded', False)
SKIPPED = ResultCode('SKIPPED', 'Skipped', False)
UNSUPPORTED = ResultCode('UNSUPPORTED', 'Unsupported', False)
PASS = ResultCode('PASS', 'Passed', False)
FLAKYPASS = ResultCode('FLAKYPASS', 'Passed With Retry', False)
XFAIL = ResultCode('XFAIL', 'Expectedly Failed', False)
# Failures
UNRESOLVED = ResultCode('UNRESOLVED', 'Unresolved', True)
TIMEOUT = ResultCode('TIMEOUT', 'Timed Out', True)
FAIL = ResultCode('FAIL', 'Failed', True)
XPASS = ResultCode('XPASS', 'Unexpectedly Passed', True)


# Test metric values.

class MetricValue(object):
    def format(self):
        """
        format() -> str

        Convert this metric to a string suitable for displaying as part of the
        console output.
        """
        raise RuntimeError("abstract method")

    def todata(self):
        """
        todata() -> json-serializable data

        Convert this metric to content suitable for serializing in the JSON test
        output.
        """
        raise RuntimeError("abstract method")

class IntMetricValue(MetricValue):
    """A metric holding an integer value."""

    def __init__(self, value):
        self.value = value

    def format(self):
        return str(self.value)

    def todata(self):
        return self.value

class RealMetricValue(MetricValue):
    """A metric holding a floating-point value, displayed to 4 places."""

    def __init__(self, value):
        self.value = value

    def format(self):
        return '%.4f' % self.value

    def todata(self):
        return self.value

class JSONMetricValue(MetricValue):
    """
    JSONMetricValue is used for types that are representable in the output
    but that are otherwise uninterpreted.
    """
    def __init__(self, value):
        # Probe-encode the value so an unsupported type raises TypeError
        # here, at construction time, rather than later at serialization.
        # WARNING: The value may change before it is encoded again, and may
        # not be encodable after the change.
        JSONEncoder().encode(value)
        self.value = value

    def format(self):
        e = JSONEncoder(indent=2, sort_keys=True)
        return e.encode(self.value)

    def todata(self):
        return self.value

def toMetricValue(value):
    """
    toMetricValue(value) -> MetricValue

    Wrap a raw value in the appropriate MetricValue subclass. Values that
    are already MetricValue instances are returned unchanged; anything not
    numeric must be JSON-encodable or TypeError is raised.
    """
    if isinstance(value, MetricValue):
        return value
    elif isinstance(value, int):
        return IntMetricValue(value)
    elif isinstance(value, float):
        return RealMetricValue(value)
    else:
        # 'long' is only present in python2
        try:
            if isinstance(value, long):
                return IntMetricValue(value)
        except NameError:
            pass

        # Try to create a JSONMetricValue and let the constructor throw
        # if value is not a valid type.
        return JSONMetricValue(value)
class Result(object):
    """Wrapper for the results of executing an individual test."""

    def __init__(self, code, output='', elapsed=None):
        # The result code.
        self.code = code
        # The test output.
        self.output = output
        # The wall timing to execute the test, if timing.
        self.elapsed = elapsed
        # NOTE(review): start time and process id are initialized empty
        # here and presumably filled in by the runner — not set in this
        # file.
        self.start = None
        self.pid = None
        # The metrics reported by this test.
        self.metrics = {}
        # The micro-test results reported by this test.
        self.microResults = {}

    def addMetric(self, name, value):
        """
        addMetric(name, value)

        Attach a test metric to the test result, with the given name and list of
        values. It is an error to attempt to attach the metrics with the same
        name multiple times.

        Each value must be an instance of a MetricValue subclass.
        """
        if name in self.metrics:
            raise ValueError("result already includes metrics for %r" % (
                name,))
        if not isinstance(value, MetricValue):
            raise TypeError("unexpected metric value: %r" % (value,))
        self.metrics[name] = value

    def addMicroResult(self, name, microResult):
        """
        addMicroResult(microResult)

        Attach a micro-test result to the test result, with the given name and
        result. It is an error to attempt to attach a micro-test with the
        same name multiple times.

        Each micro-test result must be an instance of the Result class.
        """
        if name in self.microResults:
            raise ValueError("Result already includes microResult for %r" % (
                name,))
        if not isinstance(microResult, Result):
            raise TypeError("unexpected MicroResult value %r" % (microResult,))
        self.microResults[name] = microResult


# Test classes.

class TestSuite:
    """TestSuite - Information on a group of tests.

    A test suite groups together a set of logically related tests.
    """

    def __init__(self, name, source_root, exec_root, config):
        self.name = name
        self.source_root = source_root
        self.exec_root = exec_root
        # The test suite configuration.
        self.config = config

        # Per-test elapsed times from previous runs, keyed by the
        # '/'-joined path within the suite; negative values mark tests
        # that failed last time (see Test.__init__ below).
        self.test_times = read_test_times(self)

    def getSourcePath(self, components):
        return os.path.join(self.source_root, *components)

    def getExecPath(self, components):
        return os.path.join(self.exec_root, *components)

class Test:
    """Test - Information on a single test instance."""

    def __init__(self, suite, path_in_suite, config, file_path=None,
                 gtest_json_file=None):
        self.suite = suite
        self.path_in_suite = path_in_suite
        self.config = config
        self.file_path = file_path
        self.gtest_json_file = gtest_json_file

        # A list of conditions under which this test is expected to fail.
        # Each condition is a boolean expression of features, or '*'.
        # These can optionally be provided by test format handlers,
        # and will be honored when the test result is supplied.
        self.xfails = []

        # If true, ignore all items in self.xfails.
        self.xfail_not = False

        # A list of conditions that must be satisfied before running the test.
        # Each condition is a boolean expression of features. All of them
        # must be True for the test to run.
        self.requires = []

        # A list of conditions that prevent execution of the test.
        # Each condition is a boolean expression of features. All of them
        # must be False for the test to run.
        self.unsupported = []

        # An optional number of retries allowed before the test finally succeeds.
        # The test is run at most once plus the number of retries specified here.
        self.allowed_retries = getattr(config, 'test_retry_attempts', 0)

        # The test result, once complete.
        self.result = None

        # The previous test failure state, if applicable.
        self.previous_failure = False

        # The previous test elapsed time, if applicable.
        self.previous_elapsed = 0.0

        # Recorded times use a negative sign to encode "failed last run";
        # recover both the magnitude and the failure flag.
        if suite.test_times and '/'.join(path_in_suite) in suite.test_times:
            time = suite.test_times['/'.join(path_in_suite)]
            self.previous_elapsed = abs(time)
            self.previous_failure = time < 0


    def setResult(self, result):
        """Record the final result, rewriting PASS/FAIL to XPASS/XFAIL when
        the test is expected to fail. May only be called once."""
        assert self.result is None, "result already set"
        assert isinstance(result, Result), "unexpected result type"
        try:
            expected_to_fail = self.isExpectedToFail()
        except ValueError as err:
            # Syntax error in an XFAIL line.
            result.code = UNRESOLVED
            result.output = str(err)
        else:
            if expected_to_fail:
                # pass -> unexpected pass
                if result.code is PASS:
                    result.code = XPASS
                # fail -> expected fail
                elif result.code is FAIL:
                    result.code = XFAIL
        self.result = result

    def isFailure(self):
        assert self.result
        return self.result.code.isFailure

    def getFullName(self):
        return self.suite.config.name + ' :: ' + '/'.join(self.path_in_suite)

    def getFilePath(self):
        # An explicitly-provided file path wins; otherwise derive it from
        # the suite's source root.
        if self.file_path:
            return self.file_path
        return self.getSourcePath()

    def getSourcePath(self):
        return self.suite.getSourcePath(self.path_in_suite)

    def getExecPath(self):
        return self.suite.getExecPath(self.path_in_suite)

    def isExpectedToFail(self):
        """
        isExpectedToFail() -> bool

        Check whether this test is expected to fail in the current
        configuration. This check relies on the test xfails property which by
        some test formats may not be computed until the test has first been
        executed.
        Throws ValueError if an XFAIL line has a syntax error.
        """

        if self.xfail_not:
            return False

        features = self.config.available_features

        # Check if any of the xfails match an available feature.
        for item in self.xfails:
            # If this is the wildcard, it always fails.
            if item == '*':
                return True

            # If this is a True expression of features, it fails.
            try:
                if BooleanExpression.evaluate(item, features):
                    return True
            except ValueError as e:
                raise ValueError('Error in XFAIL list:\n%s' % str(e))

        return False

    def isWithinFeatureLimits(self):
        """
        isWithinFeatureLimits() -> bool

        A test is within the feature limits set by run_only_tests if
        1. the test's requirements ARE satisfied by the available features
        2. the test's requirements ARE NOT satisfied after the limiting
        features are removed from the available features

        Throws ValueError if a REQUIRES line has a syntax error.
        """

        if not self.config.limit_to_features:
            return True  # No limits. Run it.

        # Check the requirements as-is (#1)
        if self.getMissingRequiredFeatures():
            return False

        # Check the requirements after removing the limiting features (#2)
        featuresMinusLimits = [f for f in self.config.available_features
                               if f not in self.config.limit_to_features]
        if not self.getMissingRequiredFeaturesFromList(featuresMinusLimits):
            return False

        return True

    def getMissingRequiredFeaturesFromList(self, features):
        """Return the REQUIRES expressions not satisfied by *features*.
        Throws ValueError if a REQUIRES line has a syntax error."""
        try:
            return [item for item in self.requires
                    if not BooleanExpression.evaluate(item, features)]
        except ValueError as e:
            raise ValueError('Error in REQUIRES list:\n%s' % str(e))

    def getMissingRequiredFeatures(self):
        """
        getMissingRequiredFeatures() -> list of strings

        Returns a list of features from REQUIRES that are not satisfied.
        Throws ValueError if a REQUIRES line has a syntax error.
        """

        features = self.config.available_features
        return self.getMissingRequiredFeaturesFromList(features)

    def getUnsupportedFeatures(self):
        """
        getUnsupportedFeatures() -> list of strings

        Returns a list of features from UNSUPPORTED that are present
        in the test configuration's features.
        Throws ValueError if an UNSUPPORTED line has a syntax error.
        """

        features = self.config.available_features

        try:
            return [item for item in self.unsupported
                    if BooleanExpression.evaluate(item, features)]
        except ValueError as e:
            raise ValueError('Error in UNSUPPORTED list:\n%s' % str(e))

    def getUsedFeatures(self):
        """
        getUsedFeatures() -> list of strings

        Returns a list of all features appearing in XFAIL, UNSUPPORTED and
        REQUIRES annotations for this test.
        """
        # Imported here to avoid a circular import at module load time.
        import lit.TestRunner
        parsed = lit.TestRunner._parseKeywords(self.getSourcePath(),
                                               require_script=False)
        feature_keywords = ('UNSUPPORTED:', 'REQUIRES:', 'XFAIL:')
        boolean_expressions = itertools.chain.from_iterable(
            parsed[k] or [] for k in feature_keywords
        )
        tokens = itertools.chain.from_iterable(
            BooleanExpression.tokenize(expr) for expr in
            boolean_expressions if expr != '*'
        )
        matchExpressions = set(filter(BooleanExpression.isMatchExpression,
                                      tokens))
        return matchExpressions