xref: /netbsd-src/external/gpl3/gcc/dist/contrib/testsuite-management/validate_failures.py (revision b7b7574d3bf8eeb51a1fa3977b59142ec6434a55)
1#!/usr/bin/python
2
3# Script to compare testsuite failures against a list of known-to-fail
4# tests.
5#
6# NOTE: This script is used in installations that are running Python 2.4.
7#       Please stick to syntax features available in 2.4 and earlier
8#       versions.
9
10# Contributed by Diego Novillo <dnovillo@google.com>
11#
12# Copyright (C) 2011-2013 Free Software Foundation, Inc.
13#
14# This file is part of GCC.
15#
16# GCC is free software; you can redistribute it and/or modify
17# it under the terms of the GNU General Public License as published by
18# the Free Software Foundation; either version 3, or (at your option)
19# any later version.
20#
21# GCC is distributed in the hope that it will be useful,
22# but WITHOUT ANY WARRANTY; without even the implied warranty of
23# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
24# GNU General Public License for more details.
25#
26# You should have received a copy of the GNU General Public License
27# along with GCC; see the file COPYING.  If not, write to
28# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
29# Boston, MA 02110-1301, USA.
30
31"""This script provides a coarser XFAILing mechanism that requires no
32detailed DejaGNU markings.  This is useful in a variety of scenarios:
33
34- Development branches with many known failures waiting to be fixed.
35- Release branches with known failures that are not considered
36  important for the particular release criteria used in that branch.
37
38The script must be executed from the toplevel build directory.  When
39executed it will:
40
411- Determine the target built: TARGET
422- Determine the source directory: SRCDIR
433- Look for a failure manifest file in
44   <SRCDIR>/<MANIFEST_SUBDIR>/<MANIFEST_NAME>.xfail
454- Collect all the <tool>.sum files from the build tree.
465- Produce a report stating:
47   a- Failures expected in the manifest but not present in the build.
48   b- Failures in the build not expected in the manifest.
496- If all the build failures are expected in the manifest, it exits
50   with exit code 0.  Otherwise, it exits with error code 1.
51
52Manifest files contain expected DejaGNU results that are otherwise
53treated as failures.
54They may also contain additional text:
55
56# This is a comment.  - self explanatory
57@include file         - the file is a path relative to the includer
58@remove result text   - result text is removed from the expected set
59"""
60
61import datetime
62import optparse
63import os
64import re
65import sys
66
# Handled test results.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]
# Regex matching any of the states above.  Used with re.match(), so it
# only needs to match at the start of an (attribute-stripped) line.
_VALID_TEST_RESULTS_REX = re.compile("%s" % "|".join(_VALID_TEST_RESULTS))

# Subdirectory of srcdir in which to find the manifest file.
_MANIFEST_SUBDIR = 'contrib/testsuite-management'

# Pattern for naming manifest files.
# The first argument should be the toplevel GCC(/GNU tool) source directory.
# The second argument is the manifest subdir.
# The third argument is the manifest target, which defaults to the target
# triplet used during the build.
_MANIFEST_PATH_PATTERN = '%s/%s/%s.xfail'

# The options passed to the program.  Set once in Main() after parsing.
_OPTIONS = None
83
def Error(msg):
  """Report a fatal error on stderr and terminate with exit status 1."""
  formatted = 'error: %s' % msg
  print >>sys.stderr, formatted
  sys.exit(1)
87
88
class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrlist: A comma separated list of attributes.
      Valid values:
        flaky            Indicates that this test may not always fail.  These
                         tests are reported, but their presence does not affect
                         the results.

        expire=YYYYMMDD  After this date, this test will produce an error
                         whether it is in the manifest or not.

    state: One of UNRESOLVED, XPASS or FAIL.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
    ordinal: Monotonically increasing integer.
             It is used to keep results for one .exp file sorted
             by the order the tests were run.
  """

  def __init__(self, summary_line, ordinal=-1):
    """Parse summary_line into state/name/description/attrs fields.

    Calls Error() (which exits) if the line cannot be parsed or the
    state is not one of _VALID_TEST_RESULTS.
    """
    try:
      self.attrs = ''
      # Split off the optional 'attrlist |' prefix before parsing the
      # 'STATE: name description' remainder.
      if '|' in summary_line:
        (self.attrs, summary_line) = summary_line.split('|', 1)
      try:
        (self.state,
         self.name,
         self.description) = re.match(r' *([A-Z]+):\s*(\S+)\s+(.*)',
                                      summary_line).groups()
      except:
        # Bare except on purpose: report the offending line, then
        # re-raise so the outer handler (or the caller) sees the error.
        print 'Failed to parse summary line: "%s"' % summary_line
        raise
      self.attrs = self.attrs.strip()
      self.state = self.state.strip()
      self.description = self.description.strip()
      self.ordinal = ordinal
    except ValueError:
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
            self.state, summary_line, self))

  def __lt__(self, other):
    """Order results by test name first, then by run order (ordinal)."""
    return (self.name < other.name or
            (self.name == other.name and self.ordinal < other.ordinal))

  def __hash__(self):
    # Hash must agree with __eq__: attrs and ordinal are deliberately
    # excluded so manifest entries match build results regardless of
    # attributes or run order.
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    """Two results are equal if state, name and description all match."""
    return (self.state == other.state and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    """Reproduce the manifest/.sum representation of this result."""
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)

  def ExpirationDate(self):
    # Return a datetime.date object with the expiration date for this
    # test result.  Return None, if no expiration has been set.
    if re.search(r'expire=', self.attrs):
      expiration = re.search(r'expire=(\d\d\d\d)(\d\d)(\d\d)', self.attrs)
      if not expiration:
        Error('Invalid expire= format in "%s".  Must be of the form '
              '"expire=YYYYMMDD"' % self)
      return datetime.date(int(expiration.group(1)),
                           int(expiration.group(2)),
                           int(expiration.group(3)))
    return None

  def HasExpired(self):
    # Return True if the expiration date of this result has passed.
    # Note: implicitly returns None (falsy) when no expiration is set.
    expiration_date = self.ExpirationDate()
    if expiration_date:
      now = datetime.date.today()
      return now > expiration_date
184
185
def GetMakefileValue(makefile_name, value_name):
  """Return the value assigned to value_name in the given Makefile.

  Args:
    makefile_name: Path to the Makefile to scan.
    value_name: Prefix of the assignment line to look for, including
      everything up to (and excluding) the '=' separator, e.g. 'srcdir ='.

  Returns:
    The stripped right-hand side of the first matching assignment, or
    None if the file does not exist or no line matches.
  """
  if not os.path.exists(makefile_name):
    return None
  makefile = open(makefile_name)
  # try/finally (legal in Python 2.4) guarantees the file is closed
  # even if reading raises; the original leaked the handle in that case.
  try:
    for line in makefile:
      if line.startswith(value_name):
        (_, value) = line.split('=', 1)
        return value.strip()
  finally:
    makefile.close()
  return None
197
198
def ValidBuildDirectory(builddir, target):
  """Return True if builddir looks like a GCC toplevel build directory.

  A valid build directory exists, contains a Makefile, and has either a
  build-<target> or <target> subdirectory.
  """
  if not os.path.exists(builddir):
    return False
  if not os.path.exists('%s/Makefile' % builddir):
    return False
  has_target_subdir = (os.path.exists('%s/build-%s' % (builddir, target)) or
                       os.path.exists('%s/%s' % (builddir, target)))
  return has_target_subdir
206
207
def IsComment(line):
  """Return True if line is a comment (its first character is '#')."""
  return line[0:1] == '#'
211
212
def IsInterestingResult(line):
  """Return True if line is one of the summary lines we care about."""
  # Discard the optional 'attrlist |' prefix before matching the state.
  pipe_index = line.find('|')
  if pipe_index != -1:
    line = line[pipe_index + 1:].strip()
  return bool(_VALID_TEST_RESULTS_REX.match(line))
219
220
def IsInclude(line):
  """Return True if line is an '@include' directive for another file."""
  prefix = "@include "
  return line[:len(prefix)] == prefix
224
225
def GetIncludeFile(line, includer):
  """Resolve an '@include' directive relative to the including file."""
  prefix_len = len("@include ")
  include_file = line[prefix_len:].strip()
  return os.path.join(os.path.dirname(includer), include_file)
231
232
def IsNegativeResult(line):
  """Return True if line is an '@remove' directive (expected-set removal)."""
  prefix = "@remove "
  return line[:len(prefix)] == prefix
236
237
def GetNegativeResult(line):
  """Return the result text following an '@remove' directive, stripped."""
  return line[len("@remove "):].strip()
242
243
244def ParseManifestWorker(result_set, manifest_path):
245  """Read manifest_path, adding the contents to result_set."""
246  if _OPTIONS.verbosity >= 1:
247    print 'Parsing manifest file %s.' % manifest_path
248  manifest_file = open(manifest_path)
249  for line in manifest_file:
250    line = line.strip()
251    if line == "":
252      pass
253    elif IsComment(line):
254      pass
255    elif IsNegativeResult(line):
256      result_set.remove(TestResult(GetNegativeResult(line)))
257    elif IsInclude(line):
258      ParseManifestWorker(result_set, GetIncludeFile(line, manifest_path))
259    elif IsInterestingResult(line):
260      result_set.add(TestResult(line))
261    else:
262      Error('Unrecognized line in manifest file: %s' % line)
263  manifest_file.close()
264
265
def ParseManifest(manifest_path):
  """Return the set of TestResult instances described by manifest_path."""
  results = set()
  ParseManifestWorker(results, manifest_path)
  return results
271
272
273def ParseSummary(sum_fname):
274  """Create a set of TestResult instances from the given summary file."""
275  result_set = set()
276  # ordinal is used when sorting the results so that tests within each
277  # .exp file are kept sorted.
278  ordinal=0
279  sum_file = open(sum_fname)
280  for line in sum_file:
281    if IsInterestingResult(line):
282      result = TestResult(line, ordinal)
283      ordinal += 1
284      if result.HasExpired():
285        # Tests that have expired are not added to the set of expected
286        # results. If they are still present in the set of actual results,
287        # they will cause an error to be reported.
288        print 'WARNING: Expected failure "%s" has expired.' % line.strip()
289        continue
290      result_set.add(result)
291  sum_file.close()
292  return result_set
293
294
def GetManifest(manifest_path):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty set.
  """
  if not os.path.exists(manifest_path):
    return set()
  return ParseManifest(manifest_path)
307
308
def CollectSumFiles(builddir):
  """Return paths of all .sum files under builddir, skipping VCS dirs."""
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    # Prune version-control metadata from the traversal in place.
    for ignored in ('.svn', '.git'):
      if ignored in dirs:
        dirs.remove(ignored)
    sum_files.extend(os.path.join(root, name)
                     for name in files if name.endswith('.sum'))
  return sum_files
319
320
321def GetResults(sum_files):
322  """Collect all the test results from the given .sum files."""
323  build_results = set()
324  for sum_fname in sum_files:
325    print '\t%s' % sum_fname
326    build_results |= ParseSummary(sum_fname)
327  return build_results
328
329
def CompareResults(manifest, actual):
  """Compare sets of results and return two lists:
     - List of results present in ACTUAL but missing from MANIFEST.
     - List of results present in MANIFEST but missing from ACTUAL.
  """
  # Actual failures the manifest does not expect: reported as errors.
  actual_vs_manifest = set(result for result in actual
                           if result not in manifest)

  # Expected failures that did not occur: reported as warnings (they
  # are presumably fixed tests).  Entries marked 'flaky' are skipped
  # because their absence carries no information.
  manifest_vs_actual = set(expected for expected in manifest
                           if 'flaky' not in expected.attrs and
                              expected not in actual)

  return actual_vs_manifest, manifest_vs_actual
355
356
def GetManifestPath(srcdir, target, user_provided_must_exist):
  """Return the full path to the manifest file.

  Prefers the user-supplied --manifest path; otherwise derives the
  path from srcdir and target via _MANIFEST_PATH_PATTERN.
  """
  user_manifest = _OPTIONS.manifest
  if not user_manifest:
    assert srcdir and target
    return _MANIFEST_PATH_PATTERN % (srcdir, _MANIFEST_SUBDIR, target)
  if user_provided_must_exist and not os.path.exists(user_manifest):
    Error('Manifest does not exist: %s' % user_manifest)
  return user_manifest
367
368
369def GetBuildData():
370  srcdir = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'srcdir =')
371  target = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'target_alias=')
372  if not ValidBuildDirectory(_OPTIONS.build_dir, target):
373    # If we have been given a set of results to use, we may
374    # not be inside a valid GCC build directory.  In that case,
375    # the user must provide both a manifest file and a set
376    # of results to check against it.
377    if not _OPTIONS.results or not _OPTIONS.manifest:
378      Error('%s is not a valid GCC top level build directory. '
379            'You must use --manifest and --results to do the validation.' %
380            _OPTIONS.build_dir)
381    else:
382      return None, None
383  print 'Source directory: %s' % srcdir
384  print 'Build target:     %s' % target
385  return srcdir, target
386
387
388def PrintSummary(msg, summary):
389  print '\n\n%s' % msg
390  for result in sorted(summary):
391    print result
392
393
394def GetSumFiles(results, build_dir):
395  if not results:
396    print 'Getting actual results from build directory %s' % build_dir
397    sum_files = CollectSumFiles(build_dir)
398  else:
399    print 'Getting actual results from user-provided results'
400    sum_files = results.split()
401  return sum_files
402
403
404def PerformComparison(expected, actual, ignore_missing_failures):
405  actual_vs_expected, expected_vs_actual = CompareResults(expected, actual)
406
407  tests_ok = True
408  if len(actual_vs_expected) > 0:
409    PrintSummary('Unexpected results in this build (new failures)',
410                 actual_vs_expected)
411    tests_ok = False
412
413  if not ignore_missing_failures and len(expected_vs_actual) > 0:
414    PrintSummary('Expected results not present in this build (fixed tests)'
415                 '\n\nNOTE: This is not a failure.  It just means that these '
416                 'tests were expected\nto fail, but they worked in this '
417                 'configuration.\n', expected_vs_actual)
418
419  if tests_ok:
420    print '\nSUCCESS: No unexpected failures.'
421
422  return tests_ok
423
424
425def CheckExpectedResults():
426  srcdir, target = GetBuildData()
427  manifest_path = GetManifestPath(srcdir, target, True)
428  print 'Manifest:         %s' % manifest_path
429  manifest = GetManifest(manifest_path)
430  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
431  actual = GetResults(sum_files)
432
433  if _OPTIONS.verbosity >= 1:
434    PrintSummary('Tests expected to fail', manifest)
435    PrintSummary('\nActual test results', actual)
436
437  return PerformComparison(manifest, actual, _OPTIONS.ignore_missing_failures)
438
439
440def ProduceManifest():
441  (srcdir, target) = GetBuildData()
442  manifest_path = GetManifestPath(srcdir, target, False)
443  print 'Manifest:         %s' % manifest_path
444  if os.path.exists(manifest_path) and not _OPTIONS.force:
445    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
446          manifest_path)
447
448  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
449  actual = GetResults(sum_files)
450  manifest_file = open(manifest_path, 'w')
451  for result in sorted(actual):
452    print result
453    manifest_file.write('%s\n' % result)
454  manifest_file.close()
455
456  return True
457
458
def CompareBuilds():
  """Compare this build's results against those of a clean build."""
  # Called for its validation side effects; the returned srcdir and
  # target are not needed here.
  GetBuildData()

  actual = GetResults(GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir))

  clean = GetResults(GetSumFiles(_OPTIONS.results, _OPTIONS.clean_build))

  return PerformComparison(clean, actual, _OPTIONS.ignore_missing_failures)
469
470
def Main(argv):
  """Parse command-line options and run the requested action.

  Returns 0 (success) when the chosen action reports success, 1 otherwise.
  """
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--clean_build', action='store', type='string',
                    dest='clean_build', default=None,
                    help='Compare test results from this build against '
                    'those of another (clean) build.  Use this option '
                    'when comparing the test results of your patch versus '
                    'the test results of a clean build without your patch. '
                    'You must provide the path to the top directory of your '
                    'clean build.')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  parser.add_option('--ignore_missing_failures', action='store_true',
                    dest='ignore_missing_failures', default=False,
                    help='When a failure is expected in the manifest but '
                    'it is not found in the actual results, the script '
                    'produces a note alerting to this fact. This means '
                    'that the expected failure has been fixed, or '
                    'it did not run, or it may simply be flaky '
                    '(default = False)')
  # Fixed typo in the default-path help text: 'testsuite-managment' ->
  # 'testsuite-management', matching _MANIFEST_SUBDIR.
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from '
                    'contrib/testsuite-management/<target_alias>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  # Help now lists ERROR too, matching _VALID_TEST_RESULTS.
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, XPASS, UNRESOLVED or ERROR '
                    '(default = .sum files collected from the build '
                    'directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  global _OPTIONS
  (_OPTIONS, _) = parser.parse_args(argv[1:])

  if _OPTIONS.produce_manifest:
    retval = ProduceManifest()
  elif _OPTIONS.clean_build:
    retval = CompareBuilds()
  else:
    retval = CheckExpectedResults()

  if retval:
    return 0
  else:
    return 1
529
530
if __name__ == '__main__':
  # Propagate Main's return value as the process exit status.
  sys.exit(Main(sys.argv))
534