[test] Add special result for rerun and json indicator.

Bug: v8:6917
Change-Id: I5136f183bd1728a1ab90a9ebb2560d978e17ef28
Cq-Include-Trybots: luci.v8.try:v8_linux64_fyi_rel_ng
Reviewed-on: https://chromium-review.googlesource.com/863623
Commit-Queue: Michał Majewski <majeski@google.com>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50576}
Michal Majewski authored 2018-01-15 13:21:54 +01:00, committed by Commit Bot
parent 43ac9d5151
commit bddfee9822
5 changed files with 91 additions and 82 deletions


@@ -295,35 +295,42 @@ class JsonTestProgressIndicator(ProgressIndicator):
     self.tests = []

   def _on_result_for(self, test, result):
-    # TODO(majeski): Support for dummy/grouped results
-    output = result.output
-    # Buffer all tests for sorting the durations in the end.
-    self.tests.append((test, output.duration))
+    if result.is_rerun:
+      self.process_results(test, result.results)
+    else:
+      self.process_results(test, [result])

-    # TODO(majeski): Previously we included reruns here. If we still want this
-    # json progress indicator should be placed just before execution.
-    if not result.has_unexpected_output:
-      # Omit tests that run as expected.
-      return
+  def process_results(self, test, results):
+    for run, result in enumerate(results):
+      # TODO(majeski): Support for dummy/grouped results
+      output = result.output
+      # Buffer all tests for sorting the durations in the end.
+      self.tests.append((test, output.duration))

-    self.results.append({
-      "name": str(test),
-      "flags": test.cmd.args,
-      "command": test.cmd.to_string(relative=True),
-      "run": -100,  # TODO(majeski): do we need this?
-      "stdout": output.stdout,
-      "stderr": output.stderr,
-      "exit_code": output.exit_code,
-      "result": test.output_proc.get_outcome(output),
-      "expected": test.expected_outcomes,
-      "duration": output.duration,
+      # Omit tests that run as expected on the first try.
+      # Everything that happens after the first run is included in the output
+      # even if it flakily passes.
+      if not result.has_unexpected_output and run == 0:
+        continue

-      # TODO(machenbach): This stores only the global random seed from the
-      # context and not possible overrides when using random-seed stress.
-      "random_seed": self.random_seed,
-      "target_name": test.get_shell(),
-      "variant": test.variant,
-    })
+      self.results.append({
+        "name": str(test),
+        "flags": test.cmd.args,
+        "command": test.cmd.to_string(relative=True),
+        "run": run + 1,
+        "stdout": output.stdout,
+        "stderr": output.stderr,
+        "exit_code": output.exit_code,
+        "result": test.output_proc.get_outcome(output),
+        "expected": test.expected_outcomes,
+        "duration": output.duration,
+        # TODO(machenbach): This stores only the global random seed from the
+        # context and not possible overrides when using random-seed stress.
+        "random_seed": self.random_seed,
+        "target_name": test.get_shell(),
+        "variant": test.variant,
+      })

   def finished(self):
     complete_results = []

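To make the new JSON shape concrete, here is a minimal, self-contained sketch of what the process_results() loop above produces. The FakeResult class and the three-field record are stand-ins invented for illustration; only the skip condition and the 1-based "run" numbering mirror the diff.

import json

class FakeResult(object):
  def __init__(self, has_unexpected_output, outcome):
    self.has_unexpected_output = has_unexpected_output
    self.outcome = outcome

def records_for(test_name, results):
  """Mimics the process_results() loop above: skip runs that pass as
  expected on the first try, keep everything once a rerun happened, and
  number runs from 1 instead of the old -100 placeholder."""
  records = []
  for run, result in enumerate(results):
    if not result.has_unexpected_output and run == 0:
      continue
    records.append({
        "name": test_name,
        "run": run + 1,
        "result": result.outcome,
    })
  return records

# A test that fails once and passes on the rerun produces two records.
flaky = [FakeResult(True, "FAIL"), FakeResult(False, "PASS")]
print(json.dumps(records_for("sweet/strawberries", flaky), indent=2))

# A test that passes as expected on the first try produces none.
print(records_for("sweet/bananas", [FakeResult(False, "PASS")]))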

@@ -2,43 +2,56 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

+import collections
+
 from . import base
+from .result import RerunResult


 class RerunProc(base.TestProcProducer):
   def __init__(self, rerun_max, rerun_max_total=None):
     super(RerunProc, self).__init__('Rerun')
-    self._rerun = dict()
+    self._rerun = {}
+    self._results = collections.defaultdict(list)
     self._rerun_max = rerun_max
     self._rerun_total_left = rerun_max_total

   def _next_test(self, test):
+    self._init_test(test)
     self._send_next_subtest(test)

   def _result_for(self, test, subtest, result):
-    # First result
-    if subtest.procid[-2:] == '-1':
-      # Passed, no reruns
-      if not result.has_unexpected_output:
-        self._send_result(test, result)
-        return
-
-      self._rerun[test.procid] = 0
+    results = self._results[test.procid]
+    results.append(result)

     if self._needs_rerun(test, result):
       self._rerun[test.procid] += 1
       if self._rerun_total_left is not None:
         self._rerun_total_left -= 1
-      self._send_next_subtest(test)
+      self._send_next_subtest(test, self._rerun[test.procid])
     else:
+      result = RerunResult.create(results)
       self._finalize_test(test)
       self._send_result(test, result)

+  def _init_test(self, test):
+    self._rerun[test.procid] = 0
+
   def _needs_rerun(self, test, result):
     # TODO(majeski): Limit reruns count for slow tests.
     return ((self._rerun_total_left is None or self._rerun_total_left > 0) and
             self._rerun[test.procid] < self._rerun_max and
             result.has_unexpected_output)

-  def _send_next_subtest(self, test):
-    run = self._rerun[test.procid]
+  def _send_next_subtest(self, test, run=0):
     subtest = self._create_subtest(test, str(run + 1))
     self._send_test(subtest)

   def _finalize_test(self, test):
     del self._rerun[test.procid]
+    del self._results[test.procid]

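The rerun bookkeeping above can be reduced to a small stand-alone sketch: results are accumulated per test id, a failing test is rerun until either its per-test limit or the optional global budget is exhausted, and the whole run history is then handed over at once (the job RerunResult.create does in the real code). The class and method names below are invented for illustration.

import collections

class RerunBookkeeping(object):
  """Illustrative stand-in for the per-test state RerunProc keeps."""

  def __init__(self, rerun_max, rerun_max_total=None):
    self._rerun = {}                                 # reruns done per test id
    self._results = collections.defaultdict(list)    # result history per test id
    self._rerun_max = rerun_max
    self._rerun_total_left = rerun_max_total         # None means no global cap

  def on_result(self, test_id, failed):
    self._results[test_id].append(failed)
    budget_left = (self._rerun_total_left is None or self._rerun_total_left > 0)
    if failed and budget_left and self._rerun.get(test_id, 0) < self._rerun_max:
      self._rerun[test_id] = self._rerun.get(test_id, 0) + 1
      if self._rerun_total_left is not None:
        self._rerun_total_left -= 1
      return 'rerun'
    # Done: report the whole history at once, like RerunResult.create() does.
    self._rerun.pop(test_id, None)
    return self._results.pop(test_id)

keeper = RerunBookkeeping(rerun_max=2)
print(keeper.on_result('sweet/strawberries', failed=True))   # 'rerun'
print(keeper.on_result('sweet/strawberries', failed=True))   # 'rerun'
print(keeper.on_result('sweet/strawberries', failed=True))   # [True, True, True]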

@@ -12,6 +12,10 @@ class ResultBase(object):
   def is_grouped(self):
     return False

+  @property
+  def is_rerun(self):
+    return False
+

 class Result(ResultBase):
   """Result created by the output processor."""
@@ -58,3 +62,34 @@ class SkippedResult(ResultBase):
 SKIPPED = SkippedResult()
+
+
+class RerunResult(Result):
+  """Result generated from several reruns of the same test. It is a subclass
+  of Result because the result of a rerun is the result of its last run. In
+  addition to the normal result it also keeps the results of all reruns.
+  """
+  @staticmethod
+  def create(results):
+    """Creates a RerunResult from a non-empty list of results. If the list
+    has only one element, that element is returned unchanged.
+    """
+    assert results
+
+    if len(results) == 1:
+      return results[0]
+    return RerunResult(results)
+
+  def __init__(self, results):
+    """The has_unexpected_output flag and the output of the RerunResult are
+    taken from the last result in the passed list.
+    """
+    assert results
+
+    last = results[-1]
+    super(RerunResult, self).__init__(last.has_unexpected_output, last.output)
+    self.results = results
+
+  @property
+  def is_rerun(self):
+    return True

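A throwaway demonstration of the RerunResult.create contract, using a stub Result that keeps only the two fields the constructor above forwards: a single-element list is passed through unchanged, while several results are wrapped so that the flag and output mirror the last run and is_rerun becomes True.

# Stub mirroring only what RerunResult needs from the base class.
class Result(object):
  def __init__(self, has_unexpected_output, output):
    self.has_unexpected_output = has_unexpected_output
    self.output = output

  @property
  def is_rerun(self):
    return False


class RerunResult(Result):
  @staticmethod
  def create(results):
    assert results
    if len(results) == 1:
      return results[0]        # a single run stays a plain Result
    return RerunResult(results)

  def __init__(self, results):
    last = results[-1]
    # Flag and output mirror the last run; the full history stays available.
    super(RerunResult, self).__init__(last.has_unexpected_output, last.output)
    self.results = results

  @property
  def is_rerun(self):
    return True


single = RerunResult.create([Result(False, 'all good')])
print(single.is_rerun)       # False: returned unchanged
rerun = RerunResult.create([Result(True, 'crash'), Result(False, 'all good')])
print(rerun.is_rerun)        # True
print(rerun.output)          # 'all good' (taken from the last run)
print(len(rerun.results))    # 2: both runs are kept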

@@ -242,7 +242,7 @@ class SystemTest(unittest.TestCase):
       self.assertIn('3 tests failed', result.stdout, result)
     else:
       # With test processors we don't count reruns as separated failures.
-      # TODO(majeski): fix it.
+      # TODO(majeski): fix it?
       self.assertIn('1 tests failed', result.stdout, result)
     self.assertEqual(0, result.returncode, result)
@@ -264,10 +264,7 @@ class SystemTest(unittest.TestCase):
       replace_variable_data(data)
     json_output['duration_mean'] = 1

-    suffix = ''
-    if infra_staging:
-      suffix = '-proc'
-    expected_results_name = 'expected_test_results1%s.json' % suffix
+    expected_results_name = 'expected_test_results1.json'
     with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
       expected_test_results = json.load(f)

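With the '-proc' suffix gone, both the legacy and the infra_staging code paths are checked against the same golden file. A hedged sketch of that loading step follows; the value of TEST_DATA_ROOT is assumed here, the real one is defined elsewhere in the unit test module.

import json
import os

# Assumed location for illustration only.
TEST_DATA_ROOT = os.path.join(os.path.dirname(__file__), 'testdata')

def load_expected_results():
  # One golden file for both code paths now that the '-proc' suffix is gone.
  name = 'expected_test_results1.json'
  with open(os.path.join(TEST_DATA_ROOT, name)) as f:
    return json.load(f)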

@ -1,43 +0,0 @@
-{
-  "arch": "x64",
-  "duration_mean": 1,
-  "mode": "release",
-  "results": [
-    {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
-      "duration": 1,
-      "exit_code": 1,
-      "expected": [
-        "PASS"
-      ],
-      "flags": [
-        "--random-seed=123",
-        "strawberries",
-        "--nohard-abort"
-      ],
-      "name": "sweet/strawberries",
-      "random_seed": 123,
-      "result": "FAIL",
-      "run": -100,
-      "stderr": "",
-      "stdout": "--random-seed=123 strawberries --nohard-abort\n",
-      "target_name": "d8_mocked.py",
-      "variant": "default"
-    }
-  ],
-  "slowest_tests": [
-    {
-      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
-      "duration": 1,
-      "flags": [
-        "--random-seed=123",
-        "strawberries",
-        "--nohard-abort"
-      ],
-      "marked_slow": true,
-      "name": "sweet/strawberries"
-    }
-  ],
-  "test_total": 1
-}