# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import os
import shutil
import sys
import time

from pool import Pool
from . import commands
from . import perfdata
from . import statusfile
from . import utils


class Job(object):
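  """Bundles everything needed to run one test.

  Holds the command, an optional dependency command, the test id, the
  timeout and the verbosity flag, so a job can be handed off to the pool
  as plain data.
  """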

  def __init__(self, command, dep_command, test_id, timeout, verbose):
    self.command = command
    self.dep_command = dep_command
    self.id = test_id
    self.timeout = timeout
    self.verbose = verbose


def RunTest(job):
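  """Runs a single test job and returns (test id, output, elapsed seconds).

  If the job has a dependency command, that command is executed first; if it
  fails, its output is returned instead and the main command is skipped.
  """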
  start_time = time.time()
  if job.dep_command is not None:
    dep_output = commands.Execute(job.dep_command, job.verbose, job.timeout)
    # TODO(jkummerow): We approximate the test suite specific function
    # IsFailureOutput() by just checking the exit code here. Currently
    # only cctests define dependencies, for which this simplification is
    # correct.
    if dep_output.exit_code != 0:
      return (job.id, dep_output, time.time() - start_time)
  output = commands.Execute(job.command, job.verbose, job.timeout)
  return (job.id, output, time.time() - start_time)


class Runner(object):
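  """Schedules the tests over a pool of workers and collects their results.

  Uses recorded per-test durations to start the slowest tests first and
  keeps running totals (succeeded, failed, crashed, remaining) for the
  progress indicator.
  """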

  def __init__(self, suites, progress_indicator, context):
    self.datapath = os.path.join("out", "testrunner_data")
    self.perf_data_manager = perfdata.PerfDataManager(self.datapath)
    self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
    self.perf_failures = False
    self.printed_allocations = False
    self.tests = [ t for s in suites for t in s.tests ]
    if not context.no_sorting:
      for t in self.tests:
        t.duration = self.perfdata.FetchPerfData(t) or 1.0
      self.tests.sort(key=lambda t: t.duration, reverse=True)
    self._CommonInit(len(self.tests), progress_indicator, context)

  def _CommonInit(self, num_tests, progress_indicator, context):
    self.indicator = progress_indicator
    progress_indicator.runner = self
    self.context = context
    self.succeeded = 0
    self.total = num_tests
    self.remaining = num_tests
    self.failed = []
    self.crashed = 0
    self.reran_tests = 0

  def _RunPerfSafe(self, fun):
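    """Runs fun(), turning exceptions from the perf database into a warning.

    Failures are printed and remembered in self.perf_failures instead of
    aborting the test run.
    """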
    try:
      fun()
    except Exception, e:
      print("PerfData exception: %s" % e)
      self.perf_failures = True

  def _GetJob(self, test):
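    """Creates the Job for a test case.

    The timeout is multiplied by 4 for --stress-opt runs and by 2 for tests
    marked as slow in the status file.
    """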
    command = self.GetCommand(test)
    timeout = self.context.timeout
    if ("--stress-opt" in test.flags or
        "--stress-opt" in self.context.mode_flags or
        "--stress-opt" in self.context.extra_flags):
      timeout *= 4
    # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
    # the like.
    if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
      timeout *= 2
    if test.dependency is not None:
      dep_command = [ c.replace(test.path, test.dependency) for c in command ]
    else:
      dep_command = None
    return Job(command, dep_command, test.id, timeout, self.context.verbose)

  def _MaybeRerun(self, pool, test):
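    """Adds the test to the pool again if it may still be rerun.

    Reruns are limited per test (rerun_failures_count) and overall
    (rerun_failures_max), and slow tests are rerun at most once.
    """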
    if test.run <= self.context.rerun_failures_count:
      # Possibly rerun this test if its run count is below the maximum per
      # test. <= is used because the flag counts reruns, not including the
      # first run.
      if test.run == 1:
        # Count the overall number of rerun tests on the first rerun.
        if self.reran_tests < self.context.rerun_failures_max:
          self.reran_tests += 1
        else:
          # Don't rerun this test if the overall number of rerun tests has
          # been reached.
          return
      if test.run >= 2 and test.duration > self.context.timeout / 20.0:
        # Rerun slow tests at most once.
        return

      # Rerun this test.
      test.duration = None
      test.output = None
      test.run += 1
      pool.add([self._GetJob(test)])
      self.remaining += 1

  def _ProcessTestNormal(self, test, result, pool):
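    """Records the result of a finished test and schedules reruns of failures.

    Returns True if the perf database should be updated, i.e. if the test
    produced no unexpected output.
    """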
    self.indicator.AboutToRun(test)
    test.output = result[1]
    test.duration = result[2]
    has_unexpected_output = test.suite.HasUnexpectedOutput(test)
    if has_unexpected_output:
      self.failed.append(test)
      if test.output.HasCrashed():
        self.crashed += 1
    else:
      self.succeeded += 1
    self.remaining -= 1
    # For the indicator, everything that happens after the first run is
    # treated as unexpected, even a flaky pass, so that it still shows up
    # in the output.
    self.indicator.HasRun(test, has_unexpected_output or test.run > 1)
    if has_unexpected_output:
      # Rerun test failures after the indicator has processed the results.
      self._VerbosePrint("Attempting to rerun test after failure.")
      self._MaybeRerun(pool, test)
    # Update the perf database only if the test succeeded.
    return not has_unexpected_output

  def _ProcessTestPredictable(self, test, result, pool):
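    """Processes a result in predictable mode.

    Each test is run up to three times; if the "### Allocations = ..." line
    differs between consecutive runs the test is reported as failing,
    otherwise it is reported as passing after the third run. Always returns
    True so that the perf database records the duration.
    """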
    def HasDifferentAllocations(output1, output2):
      def AllocationStr(stdout):
        for line in reversed((stdout or "").splitlines()):
          if line.startswith("### Allocations = "):
            self.printed_allocations = True
            return line
        return ""
      return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout))

    # Always pass the test duration for the database update.
    test.duration = result[2]
    if test.run == 1 and result[1].HasTimedOut():
      # If we get a timeout in the first run, we are already in an
      # unpredictable state. Just report it as a failure and don't rerun.
      self.indicator.AboutToRun(test)
      test.output = result[1]
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
    if test.run > 1 and HasDifferentAllocations(test.output, result[1]):
      # From the second run on, check for different allocations. If a
      # difference is found, call the indicator twice to report both tests.
      # All runs of each test are counted as one for the statistic.
      self.indicator.AboutToRun(test)
      self.remaining -= 1
      self.failed.append(test)
      self.indicator.HasRun(test, True)
      self.indicator.AboutToRun(test)
      test.output = result[1]
      self.indicator.HasRun(test, True)
    elif test.run >= 3:
      # No difference on the third run -> report a success.
      self.indicator.AboutToRun(test)
      self.remaining -= 1
      self.succeeded += 1
      test.output = result[1]
      self.indicator.HasRun(test, False)
    else:
      # No difference yet and less than three runs -> add another run and
      # remember the output for comparison.
      test.run += 1
      test.output = result[1]
      pool.add([self._GetJob(test)])
    # Always update the perf database.
    return True

  def Run(self, jobs):
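    """Runs all tests and returns a process exit code (0 on success)."""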
    self.indicator.Starting()
    self._RunInternal(jobs)
    self.indicator.Done()
    if self.failed or self.remaining:
      return 1
    return 0

  def _RunInternal(self, jobs):
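    """Builds the job queue, drains the pool and cleans up afterwards.

    Results are dispatched to _ProcessTestPredictable or _ProcessTestNormal
    depending on the context; the pool and the perf database are shut down
    in the finally block even if result processing raises.
    """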
    pool = Pool(jobs)
    test_map = {}
    # TODO(machenbach): Instead of filling the queue completely before
    # pool.imap_unordered, make this a generator that already starts testing
    # while the queue is filled.
    queue = []
    queued_exception = None
    for test in self.tests:
      assert test.id >= 0
      test_map[test.id] = test
      try:
        queue.append([self._GetJob(test)])
      except Exception, e:
        # If this failed, save the exception and re-raise it later (after
        # all other tests have had a chance to run).
        queued_exception = e
        continue
    try:
      it = pool.imap_unordered(RunTest, queue)
      for result in it:
        if result.heartbeat:
          self.indicator.Heartbeat()
          continue
        test = test_map[result.value[0]]
        if self.context.predictable:
          update_perf = self._ProcessTestPredictable(test, result.value, pool)
        else:
          update_perf = self._ProcessTestNormal(test, result.value, pool)
        if update_perf:
          self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
    finally:
      self._VerbosePrint("Closing process pool.")
      pool.terminate()
      self._VerbosePrint("Closing database connection.")
      self._RunPerfSafe(lambda: self.perf_data_manager.close())
      if self.perf_failures:
        # Nuke the perf data in case of failures. This might not work on
        # Windows as some files might still be open.
        print "Deleting perf test data due to db corruption."
        shutil.rmtree(self.datapath)
    if queued_exception:
      raise queued_exception

    # Make sure that any allocations were printed in predictable mode (if we
    # ran any tests).
    assert (
        not self.total or
        not self.context.predictable or
        self.printed_allocations
    )

  def _VerbosePrint(self, text):
    if self.context.verbose:
      print text
      sys.stdout.flush()

  def GetCommand(self, test):
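    """Assembles the full command line for a test.

    The command consists of the command prefix, the shell binary, the --test
    flag for d8, the random seed, the suite-specific flags and the context's
    extra flags.
    """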
    d8testflag = []
    shell = test.suite.shell()
    if shell == "d8":
      d8testflag = ["--test"]
    if utils.IsWindows():
      shell += ".exe"
    cmd = (self.context.command_prefix +
           [os.path.abspath(os.path.join(self.context.shell_dir, shell))] +
           d8testflag +
           ["--random-seed=%s" % self.context.random_seed] +
           test.suite.GetFlagsForTestCase(test, self.context) +
           self.context.extra_flags)
    return cmd


class BreakNowException(Exception):
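  """Exception carrying a descriptive value; str() returns repr(value)."""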
  def __init__(self, value):
    self.value = value

  def __str__(self):
    return repr(self.value)