[test] Remove duration from testcase.

Bug: v8:6917
Change-Id: Ifc3dac7a82fa67dda1f8b166fbd3f76a123ffba2
Reviewed-on: https://chromium-review.googlesource.com/824365
Commit-Queue: Michał Majewski <majeski@google.com>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Sergiy Byelozyorov <sergiyb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50122}
Authored by Michal Majewski on 2017-12-14 20:39:48 +01:00; committed by Commit Bot
commit 867c39f22c (parent ce2a9e1608)
9 changed files with 55 additions and 48 deletions
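
In short: the duration of a test run now lives on the Output object rather than on the TestCase. The command times its own execution and stores the elapsed wall-clock time on the Output it returns, and every consumer (perf data store, progress indicators, verbose duration reporting) reads output.duration instead of test.duration. Below is a minimal sketch of that flow, using simplified stand-in classes (not the real test-runner classes, which carry many more fields):

import time


class Output(object):
  # Simplified stand-in: after this change the output of a command carries
  # the measured duration alongside the process results.
  def __init__(self, exit_code, duration):
    self.exit_code = exit_code
    self.duration = duration


class Command(object):
  # Simplified stand-in: execute() times itself and hands the duration to
  # the Output it returns, so the test case no longer needs a duration field.
  def execute(self):
    start_time = time.time()
    exit_code = 0  # placeholder for actually running the subprocess
    duration = time.time() - start_time
    return Output(exit_code, duration)


if __name__ == '__main__':
  out = Command().execute()
  # Consumers (progress indicators, the perf-data store, verbose reporting)
  # read the duration from the output instead of from the test case.
  print('duration: %.6f s' % out.duration)

The diffs below apply this shape to the real classes.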

Changed file 1 of 9

@@ -8,6 +8,7 @@ import os
import subprocess
import sys
import threading
import time
from ..local import utils
from ..objects import output
@@ -79,7 +80,9 @@ class Command(object):
self.timeout, self._kill_process, [process, timeout_occured])
timer.start()
start_time = time.time()
stdout, stderr = process.communicate()
duration = time.time() - start_time
timer.cancel()
@@ -89,6 +92,7 @@
stdout.decode('utf-8', 'replace').encode('utf-8'),
stderr.decode('utf-8', 'replace').encode('utf-8'),
process.pid,
duration
)
def _get_popen_args(self):

Changed file 2 of 9

@@ -31,7 +31,6 @@ import os
import re
import shutil
import sys
import time
from . import command
from . import perfdata
@@ -51,9 +50,13 @@ ProcessContext = collections.namedtuple(
'process_context', ['sancov_dir'])
TestJobResult = collections.namedtuple(
'TestJobResult', ['id', 'output'])
def MakeProcessContext(sancov_dir):
return ProcessContext(sancov_dir)
# Global function for multiprocessing, because pickling a static method doesn't
# work on Windows.
def run_job(job, process_context):
@@ -102,11 +105,9 @@ class TestJob(Job):
os.rename(sancov_file, new_sancov_file)
def run(self, context):
start_time = time.time()
out = self.cmd.execute()
duration = time.time() - start_time
self._rename_coverage_data(out, context.sancov_dir)
return (self.test_id, out, duration)
return TestJobResult(self.test_id, out)
class Runner(object):
@@ -120,9 +121,10 @@ class Runner(object):
self.printed_allocations = False
self.tests = [t for s in suites for t in s.tests]
# TODO(majeski): Pass outputs dynamically instead of keeping them in the
# runner.
# TODO(majeski): Pass dynamically instead of keeping them in the runner.
# Maybe some observer?
self.outputs = {t: None for t in self.tests}
self.suite_names = [s.name for s in suites]
# Always pre-sort by status file, slowest tests first.
@@ -130,11 +132,10 @@
t.suite.GetStatusFileOutcomes(t.name, t.variant))
self.tests.sort(key=slow_key, reverse=True)
# Sort by stored duration of not opted out.
# Sort by stored duration if not opted out.
if not context.no_sorting:
for t in self.tests:
t.duration = self.perfdata.FetchPerfData(t) or 1.0
self.tests.sort(key=lambda t: t.duration, reverse=True)
self.tests.sort(key=lambda t: self.perfdata.FetchPerfData(t) or 1.0,
reverse=True)
self._CommonInit(suites, progress_indicator, context)
@@ -160,7 +161,7 @@ class Runner(object):
print("PerfData exception: %s" % e)
self.perf_failures = True
def _MaybeRerun(self, pool, test):
def _MaybeRerun(self, pool, test, job_result):
if test.run <= self.context.rerun_failures_count:
# Possibly rerun this test if its run count is below the maximum per
# test. <= as the flag controls reruns not including the first run.
@@ -172,26 +173,24 @@ class Runner(object):
# Don't rerun this if the overall number of rerun tests has been
# reached.
return
if test.run >= 2 and test.duration > self.context.timeout / 20.0:
if (test.run >= 2 and
job_result.output.duration > self.context.timeout / 20.0):
# Rerun slow tests at most once.
return
# Rerun this test.
test.duration = None
test.run += 1
pool.add([TestJob(test.id, test.cmd, test.run)])
self.remaining += 1
self.total += 1
def _ProcessTest(self, test, result, pool):
output = result[1]
self.outputs[test] = output
test.duration = result[2]
def _ProcessTest(self, test, job_result, pool):
self.outputs[test] = job_result.output
has_unexpected_output = test.suite.HasUnexpectedOutput(
test, output, self.context)
test, job_result.output, self.context)
if has_unexpected_output:
self.failed.append(test)
if output.HasCrashed():
if job_result.output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
@@ -199,11 +198,12 @@ class Runner(object):
# For the indicator, everything that happens after the first run is treated
# as unexpected even if it flakily passes in order to include it in the
# output.
self.indicator.HasRun(test, output, has_unexpected_output or test.run > 1)
self.indicator.HasRun(test, job_result.output,
has_unexpected_output or test.run > 1)
if has_unexpected_output:
# Rerun test failures after the indicator has processed the results.
self._VerbosePrint("Attempting to rerun test after failure.")
self._MaybeRerun(pool, test)
self._MaybeRerun(pool, test, job_result)
# Update the perf database if the test succeeded.
return not has_unexpected_output
@@ -243,10 +243,14 @@ class Runner(object):
if result.heartbeat:
self.indicator.Heartbeat()
continue
test = test_map[result.value[0]]
update_perf = self._ProcessTest(test, result.value, pool)
job_result = result.value
test = test_map[job_result.id]
update_perf = self._ProcessTest(test, job_result, pool)
if update_perf:
self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(
test, job_result.output.duration))
finally:
self._VerbosePrint("Closing process pool.")
pool.terminate()

Changed file 3 of 9

@@ -69,10 +69,10 @@ class PerfDataStore(object):
return self.database[key].avg
return None
def UpdatePerfData(self, test):
"""Updates the persisted value in the store with test.duration."""
def UpdatePerfData(self, test, duration):
"""Updates the persisted value in the store with duration."""
testkey = test.get_id()
self.RawUpdatePerfData(testkey, test.duration)
self.RawUpdatePerfData(testkey, duration)
def RawUpdatePerfData(self, testkey, duration):
with self.lock:
@@ -116,7 +116,7 @@ class PerfDataManager(object):
class NullPerfDataStore(object):
def UpdatePerfData(self, test):
def UpdatePerfData(self, test, duration):
pass
def FetchPerfData(self, test):

Changed file 4 of 9

@@ -287,7 +287,7 @@ class JUnitTestProgressIndicator(ProgressIndicator):
self.outputter.HasRunTest(
test_name=str(test),
test_cmd=test.cmd.to_string(relative=True),
test_duration=test.duration,
test_duration=output.duration,
test_failure=fail_text)
@@ -313,20 +313,20 @@ class JsonTestProgressIndicator(ProgressIndicator):
if self.tests:
# Get duration mean.
duration_mean = (
sum(t.duration for t in self.tests) / float(len(self.tests)))
sum(duration for (_, duration) in self.tests) /
float(len(self.tests)))
# Sort tests by duration.
timed_tests = [t for t in self.tests if t.duration is not None]
timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
self.tests.sort(key=lambda (_, duration): duration, reverse=True)
slowest_tests = [
{
"name": str(test),
"flags": test.cmd.args,
"command": test.cmd.to_string(relative=True),
"duration": test.duration,
"duration": duration,
"marked_slow": statusfile.IsSlow(
test.suite.GetStatusFileOutcomes(test.name, test.variant)),
} for test in timed_tests[:20]
} for (test, duration) in self.tests[:20]
]
complete_results.append({
@@ -343,7 +343,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
def HasRun(self, test, output, has_unexpected_output):
# Buffer all tests for sorting the durations in the end.
self.tests.append(test)
self.tests.append((test, output.duration))
if not has_unexpected_output:
# Omit tests that run as expected. Passing tests of reruns after failures
# will have unexpected_output to be reported here has well.
@@ -359,7 +359,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
"exit_code": output.exit_code,
"result": test.suite.GetOutcome(test, output),
"expected": test.suite.GetExpectedOutcomes(test),
"duration": test.duration,
"duration": output.duration,
# TODO(machenbach): This stores only the global random seed from the
# context and not possible overrides when using random-seed stress.
@@ -404,14 +404,14 @@ class FlakinessTestProgressIndicator(ProgressIndicator):
self.results[key] = {
"actual": outcome,
"expected": " ".join(expected_outcomes),
"times": [test.duration],
"times": [output.duration],
}
self.summary[outcome] = self.summary[outcome] + 1
else:
# This is a rerun and a previous result exists.
result = self.results[key]
result["actual"] = "%s %s" % (result["actual"], outcome)
result["times"].append(test.duration)
result["times"].append(output.duration)
PROGRESS_INDICATORS = {

Changed file 5 of 9

@@ -86,7 +86,6 @@ class TestSuite(object):
self.tests = None # list of TestCase objects
self.rules = None # {variant: {test name: [rule]}}
self.prefix_rules = None # {variant: {test name prefix: [rule]}}
self.total_duration = None # float, assigned on demand
self._outcomes_cache = dict()

Changed file 6 of 9

@@ -86,16 +86,16 @@ def FormatTime(d):
return time.strftime("%M:%S.", time.gmtime(d)) + ("%03i" % millis)
def PrintTestDurations(suites, overall_time):
def PrintTestDurations(suites, outputs, overall_time):
# Write the times to stderr to make it easy to separate from the
# test output.
print
sys.stderr.write("--- Total time: %s ---\n" % FormatTime(overall_time))
timed_tests = [ t for s in suites for t in s.tests
if t.duration is not None ]
timed_tests.sort(lambda a, b: cmp(b.duration, a.duration))
timed_tests = [(t, outputs[t].duration) for s in suites for t in s.tests
if t in outputs]
timed_tests.sort(key=lambda (_, duration): duration, reverse=True)
index = 1
for test in timed_tests[:20]:
t = FormatTime(test.duration)
for test, duration in timed_tests[:20]:
t = FormatTime(duration)
sys.stderr.write("%4i (%s) %s\n" % (index, t, test))
index += 1

Changed file 7 of 9

@@ -32,12 +32,13 @@ from ..local import utils
class Output(object):
def __init__(self, exit_code, timed_out, stdout, stderr, pid):
def __init__(self, exit_code, timed_out, stdout, stderr, pid, duration):
self.exit_code = exit_code
self.timed_out = timed_out
self.stdout = stdout
self.stderr = stderr
self.pid = pid
self.duration = duration
def HasCrashed(self):
if utils.IsWindows():

Changed file 8 of 9

@@ -46,7 +46,6 @@ class TestCase(object):
self.variant_flags = [] # list of strings, flags specific to this test
self.id = None # int, used to map result back to TestCase instance
self.duration = None # assigned during execution
self.run = 1 # The nth time this test is executed.
self.cmd = None

Changed file 9 of 9

@@ -492,7 +492,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
overall_duration = time.time() - start_time
if options.time:
verbose.PrintTestDurations(suites, overall_duration)
verbose.PrintTestDurations(suites, runner.outputs, overall_duration)
if num_tests == 0:
print("Warning: no tests were run!")