Add predictable mode to test driver.
BUG=391747
LOG=n
R=ishell@chromium.org

Review URL: https://codereview.chromium.org/371363003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22285 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent 6872ad5c46
commit e3e03d80ef
@@ -373,7 +373,8 @@ def Execute(arch, mode, args, options, suites, workspace):
                          options.random_seed,
                          True,  # No sorting of test cases.
                          0,  # Don't rerun failing tests.
-                         0)  # No use of a rerun-failing-tests maximum.
+                         0,  # No use of a rerun-failing-tests maximum.
+                         False)  # No predictable mode.

   # Find available test suites and read test cases from them.
   variables = {
@@ -171,6 +171,9 @@ def BuildOptions():
                     help="Comma-separated list of testing variants")
   result.add_option("--outdir", help="Base directory with compile output",
                     default="out")
+  result.add_option("--predictable",
+                    help="Compare output of several reruns of each test",
+                    default=False, action="store_true")
   result.add_option("-p", "--progress",
                     help=("The style of progress indicator"
                           " (verbose, dots, color, mono)"),
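As a quick sanity check of the new option: the driver builds its options with optparse (that is where the add_option calls above come from), so --predictable simply surfaces as a boolean on the parsed options object. A minimal sketch; the parser instance and suite names below are made up for illustration.

from optparse import OptionParser

parser = OptionParser()
parser.add_option("--predictable",
                  help="Compare output of several reruns of each test",
                  default=False, action="store_true")

# Hypothetical command line; any suite names would do.
options, args = parser.parse_args(["--predictable", "mjsunit", "cctest"])
assert options.predictable is True
assert args == ["mjsunit", "cctest"]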
@@ -301,6 +304,11 @@ def ProcessOptions(options):
     options.flaky_tests = "skip"
     options.slow_tests = "skip"
     options.pass_fail_tests = "skip"
+  if options.predictable:
+    VARIANTS = ["default"]
+    options.extra_flags.append("--predictable")
+    options.extra_flags.append("--verify_predictable")
+    options.extra_flags.append("--no-inline-new")

   if not options.shell_dir:
     if options.shell:
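In other words, --predictable collapses the variant set to the default variant and forwards three V8 flags to the shell under test. A sketch of the resulting flag list; the one-line flag descriptions are approximate summaries, not part of this change.

# Flags appended by ProcessOptions when --predictable is given.
PREDICTABLE_D8_FLAGS = [
    "--predictable",         # run V8 in its deterministic "predictable" mode
    "--verify_predictable",  # ask V8 to check predictability while running
    "--no-inline-new",       # disable inlined new-space allocation
]

extra_flags = ["--nohard-abort"]   # hypothetical flags already present
extra_flags.extend(PREDICTABLE_D8_FLAGS)
assert extra_flags[-3:] == PREDICTABLE_D8_FLAGS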
@@ -416,6 +424,11 @@ def Execute(arch, mode, args, options, suites, workspace):
     timeout = TIMEOUT_DEFAULT;

   timeout *= TIMEOUT_SCALEFACTOR[mode]
+
+  if options.predictable:
+    # Predictable mode is slower.
+    timeout *= 2
+
   ctx = context.Context(arch, mode, shell_dir,
                         mode_flags, options.verbose,
                         timeout, options.isolates,
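To put numbers on the doubled timeout, a rough sketch, assuming a default timeout of 60 seconds and a larger scale factor for debug builds (the real constants live in the driver and may differ):

# Assumed values for illustration only.
TIMEOUT_DEFAULT = 60
TIMEOUT_SCALEFACTOR = {"release": 1, "debug": 4}

def effective_timeout(mode, predictable):
  timeout = TIMEOUT_DEFAULT * TIMEOUT_SCALEFACTOR[mode]
  if predictable:
    # Predictable mode is slower, hence the extra factor of two.
    timeout *= 2
  return timeout

assert effective_timeout("release", predictable=False) == 60
assert effective_timeout("release", predictable=True) == 120
assert effective_timeout("debug", predictable=True) == 480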
@@ -425,7 +438,8 @@ def Execute(arch, mode, args, options, suites, workspace):
                         options.random_seed,
                         options.no_sorting,
                         options.rerun_failures_count,
-                        options.rerun_failures_max)
+                        options.rerun_failures_max,
+                        options.predictable)

   # TODO(all): Combine "simulator" and "simulator_run".
   simulator_run = not options.dont_skip_simulator_slow_tests and \
@@ -126,6 +126,63 @@ class Runner(object):
       pool.add([self._GetJob(test)])
       self.remaining += 1

+  def _ProcessTestNormal(self, test, result, pool):
+    self.indicator.AboutToRun(test)
+    test.output = result[1]
+    test.duration = result[2]
+    has_unexpected_output = test.suite.HasUnexpectedOutput(test)
+    if has_unexpected_output:
+      self.failed.append(test)
+      if test.output.HasCrashed():
+        self.crashed += 1
+    else:
+      self.succeeded += 1
+    self.remaining -= 1
+    self.indicator.HasRun(test, has_unexpected_output)
+    if has_unexpected_output:
+      # Rerun test failures after the indicator has processed the results.
+      self._MaybeRerun(pool, test)
+    # Update the perf database if the test succeeded.
+    return not has_unexpected_output
+
+  def _ProcessTestPredictable(self, test, result, pool):
+    # Always pass the test duration for the database update.
+    test.duration = result[2]
+    if test.run == 1 and result[1].HasTimedOut():
+      # If we get a timeout in the first run, we are already in an
+      # unpredictable state. Just report it as a failure and don't rerun.
+      self.indicator.AboutToRun(test)
+      test.output = result[1]
+      self.remaining -= 1
+      self.failed.append(test)
+      self.indicator.HasRun(test, True)
+    if test.run > 1 and test.output != result[1]:
+      # From the second run on, check for differences. If a difference is
+      # found, call the indicator twice to report both tests. All runs of each
+      # test are counted as one for the statistic.
+      self.indicator.AboutToRun(test)
+      self.remaining -= 1
+      self.failed.append(test)
+      self.indicator.HasRun(test, True)
+      self.indicator.AboutToRun(test)
+      test.output = result[1]
+      self.indicator.HasRun(test, True)
+    elif test.run >= 3:
+      # No difference on the third run -> report a success.
+      self.indicator.AboutToRun(test)
+      self.remaining -= 1
+      self.succeeded += 1
+      test.output = result[1]
+      self.indicator.HasRun(test, False)
+    else:
+      # No difference yet and less than three runs -> add another run and
+      # remember the output for comparison.
+      test.run += 1
+      test.output = result[1]
+      pool.add([self._GetJob(test)])
+    # Always update the perf database.
+    return True
+
   def Run(self, jobs):
     self.indicator.Starting()
     self._RunInternal(jobs)
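The gist of _ProcessTestPredictable: each test is run up to three times, any later run whose output differs from the remembered one marks the test as failed, a first-run timeout is reported as a failure right away because the state is already unpredictable, and only identical output on the third run counts as a pass. A stripped-down sketch of that compare-reruns idea, using made-up names (FakeOutput, run_predictably) that are not part of the driver:

class FakeOutput(object):
  """Stand-in for the driver's Output object; just what the comparison needs."""

  def __init__(self, exit_code, timed_out, stdout, stderr):
    self.exit_code = exit_code
    self.timed_out = timed_out
    self.stdout = stdout
    self.stderr = stderr

  def __ne__(self, other):
    return (self.exit_code != other.exit_code or
            self.timed_out != other.timed_out or
            self.stdout != other.stdout or
            self.stderr != other.stderr)


def run_predictably(run_once, max_runs=3):
  """Returns True only if every one of max_runs runs yields identical output."""
  previous = run_once()
  if previous.timed_out:
    return False  # Already unpredictable; report a failure right away.
  for _ in range(1, max_runs):
    current = run_once()
    if current != previous:
      return False  # Outputs diverged between runs.
    previous = current
  return True


# Three identical runs pass; a run with different stdout would fail.
same = iter([FakeOutput(0, False, "42\n", "")] * 3)
assert run_predictably(lambda: next(same)) is True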
@@ -156,22 +213,12 @@ class Runner(object):
         it = pool.imap_unordered(RunTest, queue)
         for result in it:
           test = test_map[result[0]]
-          self.indicator.AboutToRun(test)
-          test.output = result[1]
-          test.duration = result[2]
-          has_unexpected_output = test.suite.HasUnexpectedOutput(test)
-          if has_unexpected_output:
-            self.failed.append(test)
-            if test.output.HasCrashed():
-              self.crashed += 1
+          if self.context.predictable:
+            update_perf = self._ProcessTestPredictable(test, result, pool)
           else:
-            self.succeeded += 1
+            update_perf = self._ProcessTestNormal(test, result, pool)
+          if update_perf:
             self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
-          self.remaining -= 1
-          self.indicator.HasRun(test, has_unexpected_output)
-          if has_unexpected_output:
-            # Rerun test failures after the indicator has processed the results.
-            self._MaybeRerun(pool, test)
       finally:
         pool.terminate()
         self._RunPerfSafe(lambda: self.perf_data_manager.close())
@@ -29,7 +29,8 @@
 class Context():
   def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
                isolates, command_prefix, extra_flags, noi18n, random_seed,
-               no_sorting, rerun_failures_count, rerun_failures_max):
+               no_sorting, rerun_failures_count, rerun_failures_max,
+               predictable):
     self.arch = arch
     self.mode = mode
     self.shell_dir = shell_dir
@@ -44,16 +45,17 @@ class Context():
     self.no_sorting = no_sorting
     self.rerun_failures_count = rerun_failures_count
     self.rerun_failures_max = rerun_failures_max
+    self.predictable = predictable

   def Pack(self):
     return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
             self.command_prefix, self.extra_flags, self.noi18n,
             self.random_seed, self.no_sorting, self.rerun_failures_count,
-            self.rerun_failures_max]
+            self.rerun_failures_max, self.predictable]

   @staticmethod
   def Unpack(packed):
     # For the order of the fields, refer to Pack() above.
     return Context(packed[0], packed[1], None, packed[2], False,
                    packed[3], packed[4], packed[5], packed[6], packed[7],
                    packed[8], packed[9], packed[10], packed[11])
-                   packed[8], packed[9], packed[10], packed[11])
+                   packed[8], packed[9], packed[10], packed[11], packed[12])
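Pack() and Unpack() exist so a Context can be flattened into a plain list and rebuilt elsewhere, which is why every new field, predictable included, has to be appended in both places and picked up by index in Unpack(). A reduced sketch of the round trip; MiniContext is an illustrative stand-in, not the real class:

class MiniContext(object):
  """Illustrative stand-in; the real Context carries many more fields."""

  def __init__(self, arch, mode, rerun_failures_max, predictable):
    self.arch = arch
    self.mode = mode
    self.rerun_failures_max = rerun_failures_max
    self.predictable = predictable

  def Pack(self):
    # Only simple values; the order must match Unpack() below.
    return [self.arch, self.mode, self.rerun_failures_max, self.predictable]

  @staticmethod
  def Unpack(packed):
    return MiniContext(packed[0], packed[1], packed[2], packed[3])


ctx = MiniContext("x64", "release", 0, True)
clone = MiniContext.Unpack(ctx.Pack())
assert clone.predictable is True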
@@ -38,6 +38,12 @@ class Output(object):
     self.stdout = stdout
     self.stderr = stderr

+  def __ne__(self, other):
+    return (self.exit_code != other.exit_code or
+            self.timed_out != other.timed_out or
+            self.stdout != other.stdout or
+            self.stderr != other.stderr)
+
   def HasCrashed(self):
     if utils.IsWindows():
       return 0x80000000 & self.exit_code and not (0x3FFFFF00 & self.exit_code)
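The explicit __ne__ is what makes the test.output != result[1] check in _ProcessTestPredictable meaningful: without it, two distinct Output instances always compare as different regardless of content. A quick check, assuming Output is in scope and its constructor takes (exit_code, timed_out, stdout, stderr) in that order, which is an assumption matching the fields compared above:

# Hypothetical outputs from reruns of the same test.
first = Output(0, False, "ok\n", "")
second = Output(0, False, "ok\n", "")
third = Output(1, False, "ok\n", "")  # different exit code
assert not (first != second)  # field-wise identical -> predictable
assert first != third         # any difference -> treated as unpredictable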