[test] Drop starting message from indicators
Since test processors create tests dynamically, we cannot simply count
how many tests will be run. Instead, we count only the base tests that
we've loaded, before creating variants.

Bug: v8:6917
Change-Id: Ibc5b9a73f6afad423572afa575f477ca661a99d5
Cq-Include-Trybots: luci.v8.try:v8_linux64_fyi_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/868290
Commit-Queue: Michał Majewski <majeski@google.com>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50642}
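The core idea, sketched below as standalone Python 2 (hypothetical names, not the actual v8 classes): a counter that sits in the processor chain before variant creation sees each loaded base test exactly once, while anything counting after variant creation sees one test per variant, so only the former gives a number that is known up front.

    # Minimal sketch under the assumptions above; CounterSketch and
    # VARIANTS_SKETCH are illustrative stand-ins, not v8 identifiers.
    class CounterSketch(object):
      def __init__(self):
        self.total = 0

      def next_test(self, test):
        self.total += 1

    VARIANTS_SKETCH = ['default', 'stress']

    base_counter = CounterSketch()     # before variant creation
    variant_counter = CounterSketch()  # after variant creation

    for test in ['mjsunit/a', 'mjsunit/b', 'mjsunit/c']:
      base_counter.next_test(test)
      for variant in VARIANTS_SKETCH:
        variant_counter.next_test((test, variant))

    print '>>> Running %d base tests' % base_counter.total       # prints 3
    print '%d tests created by variants' % variant_counter.total  # prints 6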
parent 63a338a198
commit 00ac7641a6
@@ -30,7 +30,8 @@ from testrunner.testproc.execution import ExecutionProc
 from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
 from testrunner.testproc.loader import LoadProc
 from testrunner.testproc.progress import (VerboseProgressIndicator,
-                                          ResultsTracker)
+                                          ResultsTracker,
+                                          TestsCounter)
 from testrunner.testproc.rerun import RerunProc
 from testrunner.testproc.shard import ShardProc
 from testrunner.testproc.variant import VariantProc
@@ -586,19 +587,21 @@ class StandardTestRunner(base_runner.BaseTestRunner):
 
     print '>>> Running with test processors'
     loader = LoadProc()
-    results = ResultsTracker(count_subtests=False)
+    tests_counter = TestsCounter()
+    results = ResultsTracker()
     indicators = progress_indicator.ToProgressIndicatorProcs()
     execproc = ExecutionProc(jobs, context)
 
     procs = [
       loader,
-      NameFilterProc(args),
+      NameFilterProc(args) if args else None,
       StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
       self._create_shard_proc(options),
+      tests_counter,
       VariantProc(VARIANTS),
       StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
-      results,
     ] + indicators + [
+      results,
       self._create_rerun_proc(context),
       execproc,
     ]
@@ -612,18 +615,23 @@ class StandardTestRunner(base_runner.BaseTestRunner):
     tests.sort(key=lambda t: t.is_slow, reverse=True)
     loader.load_tests(tests)
 
-    for indicator in indicators:
-      indicator.starting()
+    print '>>> Running %d base tests' % tests_counter.total
+    tests_counter.remove_from_chain()
 
     execproc.start()
 
     for indicator in indicators:
       indicator.finished()
 
+    print '>>> %d tests ran' % results.total
+
     exit_code = 0
     if results.failed:
       exit_code = 1
     if results.remaining:
       exit_code = 2
 
     if exit_code == 1 and options.json_test_results:
       print("Force exit code 0 after failures. Json test results file "
             "generated with failure information.")
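For orientation, here is a hedged sketch of how a procs list like the one above could be linked into a chain; connect_chain is a hypothetical helper (the real wiring lives in the runner, not in this diff), and it assumes each processor exposes a connect_to method performing the _prev_proc/_next_proc assignments shown as context in the next hunk. Note that the None produced by NameFilterProc(args) if args else None has to be skipped.

    # Hypothetical helper, assuming each proc has a connect_to(next_proc)
    # method that sets the _prev_proc/_next_proc links shown below.
    def connect_chain(procs):
      procs = [proc for proc in procs if proc is not None]  # drop None entries
      for prev_proc, next_proc in zip(procs[:-1], procs[1:]):
        prev_proc.connect_to(next_proc)
      return procs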
@@ -42,6 +42,12 @@ class TestProc(object):
     next_proc._prev_proc = self
     self._next_proc = next_proc
 
+  def remove_from_chain(self):
+    if self._prev_proc:
+      self._prev_proc._next_proc = self._next_proc
+    if self._next_proc:
+      self._next_proc._prev_proc = self._prev_proc
+
   def next_test(self, test):
     """
     Method called by previous processor whenever it produces new test.
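The new remove_from_chain unlinks a processor from this doubly-linked chain by pointing its two neighbours at each other; the runner uses it to drop tests_counter once the base tests are counted, so the counter adds no per-test work during execution. A self-contained sketch of the same pointer surgery (ProcSketch is a simplified stand-in, not the real TestProc):

    class ProcSketch(object):
      def __init__(self):
        self._prev_proc = None
        self._next_proc = None

      def connect_to(self, next_proc):
        next_proc._prev_proc = self
        self._next_proc = next_proc

      def remove_from_chain(self):
        # Neighbours bypass self; self keeps its stale pointers, which is
        # harmless because it is never used again.
        if self._prev_proc:
          self._prev_proc._next_proc = self._next_proc
        if self._next_proc:
          self._next_proc._prev_proc = self._prev_proc

    a, b, c = ProcSketch(), ProcSketch(), ProcSketch()
    a.connect_to(b)
    b.connect_to(c)
    b.remove_from_chain()
    assert a._next_proc is c and c._prev_proc is a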
@@ -22,30 +22,33 @@ def print_failure_header(test):
   }
 
 
+class TestsCounter(base.TestProcObserver):
+  def __init__(self):
+    super(TestsCounter, self).__init__()
+    self.total = 0
+
+  def _on_next_test(self, test):
+    self.total += 1
+
+
 class ResultsTracker(base.TestProcObserver):
-  def __init__(self, count_subtests):
+  def __init__(self):
     super(ResultsTracker, self).__init__()
     self.failed = 0
     self.remaining = 0
     self.total = 0
-    self.count_subtests = count_subtests
 
   def _on_next_test(self, test):
     self.total += 1
     self.remaining += 1
 
   def _on_result_for(self, test, result):
-    # TODO(majeski): Count grouped results when count_subtests is set.
+    # TODO(majeski): Support for dummy/grouped results
     self.remaining -= 1
     if result.has_unexpected_output:
       self.failed += 1
 
 
 class ProgressIndicator(base.TestProcObserver):
-  def starting(self):
-    pass
-
   def finished(self):
     pass
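Both TestsCounter and ResultsTracker are passive observers: they subclass base.TestProcObserver and only override the _on_next_test/_on_result_for hooks. A rough sketch of the contract those hooks appear to assume (simplified stand-in; the real base class, which is not part of this diff, also forwards each test and result along the chain):

    class ObserverSketch(object):
      def next_test(self, test):
        self._on_next_test(test)  # subclass hook, e.g. counting
        # ...the real class then forwards the test downstream

      def result_for(self, test, result):
        self._on_result_for(test, result)  # subclass hook
        # ...the real class then forwards the result upstream

      def _on_next_test(self, test):
        pass

      def _on_result_for(self, test, result):
        pass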
@@ -65,9 +68,6 @@ class SimpleProgressIndicator(ProgressIndicator):
     if result.has_unexpected_output:
       self._failed.append((test, result.output))
 
-  def starting(self):
-    print 'Running %i tests' % self._total
-
   def finished(self):
     crashed = 0
     print
@@ -210,10 +210,11 @@ class SystemTest(unittest.TestCase):
         'sweet/strawberries',
         infra_staging=infra_staging,
     )
-    if infra_staging:
-      self.assertIn('Running 1 tests', result.stdout, result)
-    else:
+    if not infra_staging:
       self.assertIn('Running 2 tests', result.stdout, result)
+    else:
+      self.assertIn('Running 1 base tests', result.stdout, result)
+      self.assertIn('2 tests ran', result.stdout, result)
     self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
     self.assertEqual(1, result.returncode, result)
@@ -260,7 +261,11 @@ class SystemTest(unittest.TestCase):
         'sweet/strawberries',
         infra_staging=infra_staging,
     )
-    self.assertIn('Running 1 tests', result.stdout, result)
+    if not infra_staging:
+      self.assertIn('Running 1 tests', result.stdout, result)
+    else:
+      self.assertIn('Running 1 base tests', result.stdout, result)
+      self.assertIn('1 tests ran', result.stdout, result)
     self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
     if not infra_staging:
       # We run one test, which fails and gets re-run twice.
@@ -344,9 +349,8 @@ class SystemTest(unittest.TestCase):
     # TODO(machenbach): Test some more implications of the auto-detected
     # options, e.g. that the right env variables are set.
 
-  # TODO(majeski): Fix "running 0 tests" vs "Warning: no tests were run!"
-  # def testSkipsProc(self):
-  #   self.testSkips(infra_staging=True)
+  def testSkipsProc(self):
+    self.testSkips(infra_staging=True)
 
   def testSkips(self, infra_staging=False):
     """Test skipping tests in status file for a specific variant."""
@@ -359,12 +363,15 @@ class SystemTest(unittest.TestCase):
         'sweet/strawberries',
         infra_staging=infra_staging,
     )
-    self.assertIn('Running 0 tests', result.stdout, result)
+    if not infra_staging:
+      self.assertIn('Running 0 tests', result.stdout, result)
+    else:
+      self.assertIn('Running 1 base tests', result.stdout, result)
+      self.assertIn('0 tests ran', result.stdout, result)
     self.assertEqual(0, result.returncode, result)
 
-  # TODO(majeski): Fix "running 0 tests" vs "Warning: no tests were run!"
-  # def testDefaultProc(self):
-  #   self.testDefault(infra_staging=True)
+  def testDefaultProc(self):
+    self.testDefault(infra_staging=True)
 
   def testDefault(self, infra_staging=False):
     """Test using default test suites, though no tests are run since they don't
@@ -376,7 +383,11 @@ class SystemTest(unittest.TestCase):
         '--mode=Release',
         infra_staging=infra_staging,
     )
-    self.assertIn('Warning: no tests were run!', result.stdout, result)
+    if not infra_staging:
+      self.assertIn('Warning: no tests were run!', result.stdout, result)
+    else:
+      self.assertIn('Running 0 base tests', result.stdout, result)
+      self.assertIn('0 tests ran', result.stdout, result)
     self.assertEqual(0, result.returncode, result)
 
   def testNoBuildConfig(self):
@@ -489,7 +500,11 @@ class SystemTest(unittest.TestCase):
         'sweet/bananas',
         infra_staging=infra_staging,
     )
-    self.assertIn('Running 1 tests', result.stdout, result)
+    if not infra_staging:
+      self.assertIn('Running 1 tests', result.stdout, result)
+    else:
+      self.assertIn('Running 1 base tests', result.stdout, result)
+      self.assertIn('1 tests ran', result.stdout, result)
     self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
     self.assertIn('Test had no allocation output', result.stdout, result)
     self.assertIn('--predictable --verify_predictable', result.stdout, result)
@@ -585,7 +600,11 @@ class SystemTest(unittest.TestCase):
         '--no-sorting', '-j1',  # make results order deterministic
         infra_staging=infra_staging,
     )
-    self.assertIn('Running 2 tests', result.stdout, result)
+    if not infra_staging:
+      self.assertIn('Running 2 tests', result.stdout, result)
+    else:
+      self.assertIn('Running 2 base tests', result.stdout, result)
+      self.assertIn('2 tests ran', result.stdout, result)
     self.assertIn('F.', result.stdout, result)
     self.assertEqual(1, result.returncode, result)
@@ -622,6 +641,5 @@ class SystemTest(unittest.TestCase):
     self.assertIn('sweet/bananas', result.stdout)
     self.assertEqual(1, result.returncode, result)
 
-
 if __name__ == '__main__':
   unittest.main()