
[lit] Add SKIPPED test result category

Track and print the number of skipped tests.  Skipped tests are tests
that should have been executed but weren't due to:
  * user interrupt [Ctrl+C]
  * --max-time (overall lit timeout)
  * --max-failures

This is part of a larger effort to ensure that all discovered tests are
properly accounted for.

Add test for overall lit timeout feature (`--max-time` option) to
observe skipped tests.  Extend test for `--max-failures` option.

Reviewed By: jdenny

Differential Revision: https://reviews.llvm.org/D77819
Julian Lettner authored 2019-03-07 20:48:24 -08:00, committed by Julian Lettner
parent: f49a6d5d99
commit: 8de3a126b1
8 changed files with 48 additions and 19 deletions

utils/lit/lit/Test.py

@@ -36,6 +36,7 @@ XPASS = ResultCode('XPASS', True)
 UNRESOLVED = ResultCode('UNRESOLVED', True)
 UNSUPPORTED = ResultCode('UNSUPPORTED', False)
 TIMEOUT = ResultCode('TIMEOUT', True)
+SKIPPED = ResultCode('SKIPPED', False)

 # Test metric values.

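The second argument to ResultCode marks whether a code counts as a failure, which is why SKIPPED, like UNSUPPORTED, is created with False: skipped tests must not flip lit's exit status. A minimal sketch of that convention (simplified for illustration; lit's actual class carries more machinery):

    # Simplified model of lit's ResultCode(name, isFailure) convention.
    class ResultCode(object):
        def __init__(self, name, isFailure):
            self.name = name
            self.isFailure = isFailure  # does this code count as a test failure?

    TIMEOUT = ResultCode('TIMEOUT', True)    # failure: the test itself timed out
    SKIPPED = ResultCode('SKIPPED', False)   # not a failure: lit never ran the test

    # Skipped tests alone should leave the exit status clean.
    assert not SKIPPED.isFailure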
utils/lit/lit/main.py

@@ -89,12 +89,14 @@ def main(builtin_params={}):
     run_tests(filtered_tests, lit_config, opts, len(discovered_tests))
     elapsed = time.time() - start

-    executed_tests = [t for t in filtered_tests if t.result]
+    # TODO(yln): eventually, all functions below should act on discovered_tests
+    executed_tests = [
+        t for t in filtered_tests if t.result.code != lit.Test.SKIPPED]

     if opts.time_tests:
         print_histogram(executed_tests)

-    print_results(executed_tests, elapsed, opts)
+    print_results(filtered_tests, elapsed, opts)

     if opts.output_path:
         #TODO(yln): pass in discovered_tests
@@ -256,6 +258,7 @@ failure_codes = [
 ]

 all_codes = [
+    (lit.Test.SKIPPED, 'Skipped Tests', 'Skipped'),
     (lit.Test.UNSUPPORTED, 'Unsupported Tests', 'Unsupported'),
     (lit.Test.PASS, 'Expected Passes', ''),
     (lit.Test.FLAKYPASS, 'Passes With Retry', ''),
@@ -277,11 +280,11 @@ def print_results(tests, elapsed, opts):
 def print_group(code, label, tests, opts):
     if not tests:
         return
-    if code == lit.Test.PASS:
+    # TODO(yln): FLAKYPASS? Make this more consistent!
+    if code in {lit.Test.SKIPPED, lit.Test.PASS}:
         return
     if (lit.Test.XFAIL == code and not opts.show_xfail) or \
-       (lit.Test.UNSUPPORTED == code and not opts.show_unsupported) or \
-       (lit.Test.UNRESOLVED == code and (opts.max_failures is not None)):
+       (lit.Test.UNSUPPORTED == code and not opts.show_unsupported):
         return
     print('*' * 20)
     print('%s Tests (%d):' % (label, len(tests)))

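The all_codes table serves two consumers: the per-code groups that print_group lists verbosely, and the one-line totals in the final summary (the "Skipped Tests : 41" lines checked by the tests below). A hedged sketch of how such a table can be reduced to summary lines; the helper name and test shape are illustrative, not lit's exact API:

    from collections import Counter

    # Hypothetical reduction of finished tests to summary lines, assuming
    # each test carries a .result.code as in lit.Test.
    def print_summary(tests, all_codes):
        counts = Counter(test.result.code.name for test in tests)
        for code, label, _ in all_codes:
            if counts[code.name]:
                print('  %s: %d' % (label, counts[code.name]))

Note that print_group now suppresses the SKIPPED group the same way it suppresses PASS: the count still shows up in the summary totals, but pages of skipped test names are not listed.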
utils/lit/lit/run.py

@@ -42,7 +42,7 @@ class Run(object):

         Upon completion, each test in the run will have its result
         computed. Tests which were not actually executed (for any reason) will
-        be given an UNRESOLVED result.
+        be marked SKIPPED.
         """
         self.failures = 0
@@ -51,12 +51,13 @@

         timeout = self.timeout or one_week
         deadline = time.time() + timeout
-        self._execute(deadline)
-
-        # Mark any tests that weren't run as UNRESOLVED.
-        for test in self.tests:
-            if test.result is None:
-                test.setResult(lit.Test.Result(lit.Test.UNRESOLVED, '', 0.0))
+        try:
+            self._execute(deadline)
+        finally:
+            skipped = lit.Test.Result(lit.Test.SKIPPED)
+            for test in self.tests:
+                if test.result is None:
+                    test.setResult(skipped)

     def _execute(self, deadline):
         self._increase_process_limit()

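Wrapping _execute in try/finally is what makes the new accounting robust: whether the run finishes normally, hits --max-time, or is cut short by Ctrl+C, every test without a result is marked SKIPPED before execute() returns. A self-contained demonstration of the pattern using stand-in objects (not lit's classes):

    # Stand-in test object; lit's Test carries a richer Result.
    class FakeTest(object):
        def __init__(self, name):
            self.name = name
            self.result = None

    def run_all(tests):
        try:
            for i, test in enumerate(tests):
                if i == 1:
                    raise KeyboardInterrupt  # simulate Ctrl+C mid-run
                test.result = 'PASS'
        finally:
            # Runs even on interrupt: leftovers are accounted for as SKIPPED.
            for test in tests:
                if test.result is None:
                    test.result = 'SKIPPED'

    tests = [FakeTest('a'), FakeTest('b'), FakeTest('c')]
    try:
        run_all(tests)
    except KeyboardInterrupt:
        pass
    print([(t.name, t.result) for t in tests])
    # -> [('a', 'PASS'), ('b', 'SKIPPED'), ('c', 'SKIPPED')]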
utils/lit/tests/Inputs/max-time/fast.txt

@@ -0,0 +1 @@
+RUN: true

utils/lit/tests/Inputs/max-time/lit.cfg

@@ -0,0 +1,6 @@
+import lit.formats
+config.name = 'lit-time'
+config.suffixes = ['.txt']
+config.test_format = lit.formats.ShTest()
+config.test_source_root = None
+config.test_exec_root = None

utils/lit/tests/Inputs/max-time/slow.txt

@@ -0,0 +1 @@
+RUN: sleep 60

utils/lit/tests/max-failures.py

@@ -1,14 +1,23 @@
 # Check the behavior of --max-failures option.
 #
-# RUN: not %{lit} -j 1 -v %{inputs}/max-failures > %t.out
-# RUN: not %{lit} --max-failures=1 -j 1 -v %{inputs}/max-failures >> %t.out
-# RUN: not %{lit} --max-failures=2 -j 1 -v %{inputs}/max-failures >> %t.out
-# RUN: not %{lit} --max-failures=0 -j 1 -v %{inputs}/max-failures 2>> %t.out
+# RUN: not %{lit} -j 1 %{inputs}/max-failures > %t.out 2>&1
+# RUN: not %{lit} --max-failures=1 -j 1 %{inputs}/max-failures >> %t.out 2>&1
+# RUN: not %{lit} --max-failures=2 -j 1 %{inputs}/max-failures >> %t.out 2>&1
+# RUN: not %{lit} --max-failures=0 -j 1 %{inputs}/max-failures 2>> %t.out
 # RUN: FileCheck < %t.out %s
 #
 # END.

-# CHECK: Failing Tests (35)
-# CHECK: Failing Tests (1)
-# CHECK: Failing Tests (2)
-# CHECK-NOT: reached maximum number of test failures
+# CHECK-NOT: Skipped Tests
+# CHECK: Unexpected Failures: 35
+# CHECK: reached maximum number of test failures, skipping remaining tests
+# CHECK: Skipped Tests : 41
+# CHECK: Unexpected Failures: 1
+# CHECK: reached maximum number of test failures, skipping remaining tests
+# CHECK: Skipped Tests : 40
+# CHECK: Unexpected Failures: 2
 # CHECK: error: argument --max-failures: requires positive integer, but found '0'

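The updated counts are internally consistent: 1 failure plus 41 skipped, and 2 failures plus 40 skipped, both total the 42 tests in the input suite, so the run stops as soon as the failure cap is hit and the finally-block accounts for everything else. A hedged sketch of the early-stop side of that hand-off (the function and names are illustrative, not lit's exact code):

    # Hypothetical consumer loop honoring --max-failures; once it stops,
    # the try/finally in Run.execute() marks unfinished tests SKIPPED.
    def consume_results(finished_tests, max_failures):
        failures = 0
        for test in finished_tests:
            if test.result.code.isFailure:
                failures += 1
            if max_failures is not None and failures == max_failures:
                print('reached maximum number of test failures, '
                      'skipping remaining tests')
                break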
utils/lit/tests/max-time.py

@@ -0,0 +1,7 @@
+# Test overall lit timeout (--max-time).
+#
+# RUN: %{lit} %{inputs}/max-time --max-time=1 2>&1 | FileCheck %s
+
+# CHECK: reached timeout, skipping remaining tests
+# CHECK: Skipped Tests : 1
+# CHECK: Expected Passes: 1