[test] Check output on the worker process.

I added additional exception logging in execution.py, since errors in
result processing were really difficult to debug.
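
For context: in Python 2, re-raising a stored exception object later loses
the original traceback, so failures inside result processing surfaced with
a useless stack. The new code captures the formatted traceback at raise
time. A minimal sketch of the pattern (run_jobs is a hypothetical helper):

    import traceback

    def run_jobs(jobs):
        queued_exception = [None]
        for job in jobs:
            try:
                job()
            except Exception, e:
                # Grab the traceback now; it is gone once we leave this block.
                queued_exception[0] = e, traceback.format_exc()
        if queued_exception[0]:
            e, stacktrace = queued_exception[0]
            print stacktrace  # points at the original failure site
            raise e           # re-raised; the old traceback is not attached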

There is a problem on Windows with serializing a class when it comes
from a dynamically loaded module. To fix it, I moved all output
processors to tools/testrunner/outproc/ and import them in
test/*/testcfg.py.
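
The underlying issue: pickle serializes instances by reference (module
name plus class name), and on Windows multiprocessing spawns fresh worker
processes that must re-import the defining module by name. A class defined
in a dynamically loaded testcfg.py has no importable module path, so its
instances cannot be sent to workers. A minimal demonstration of the
failure mode (the module name is hypothetical):

    import pickle
    import types

    # Simulate a class born in a dynamically loaded module: its
    # __module__ does not correspond to anything importable.
    mod = types.ModuleType('dynamically_loaded_testcfg')
    exec 'class OutProc(object): pass' in mod.__dict__

    try:
        pickle.dumps(mod.OutProc())
    except pickle.PicklingError, e:
        print 'cannot ship to a worker:', e

Classes living in tools/testrunner/outproc/ are ordinary package members
and pickle cleanly on both sides.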

Bug: v8:6917
Change-Id: Ida604641d659b006e91faf1d56a37769ec47f5f3
Reviewed-on: https://chromium-review.googlesource.com/842784
Commit-Queue: Michał Majewski <majeski@google.com>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50361}
Michal Majewski 2018-01-04 11:42:49 +01:00 committed by Commit Bot
parent 6328c56570
commit 9f7d440e98
21 changed files with 330 additions and 225 deletions

@@ -6,8 +6,8 @@ import os
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import outproc
from testrunner.objects import testcase
from testrunner.outproc import base as outproc
PROTOCOL_TEST_JS = "protocol-test.js"
EXPECTED_SUFFIX = "-expected.txt"

@@ -25,14 +25,13 @@
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import itertools
import os
import re
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import outproc
from testrunner.objects import testcase
from testrunner.outproc import message
INVALID_FLAGS = ["--enable-slow-asserts"]
@@ -103,56 +102,9 @@ class TestCase(testcase.TestCase):
@property
def output_proc(self):
return OutProc(self.expected_outcomes,
os.path.join(self.suite.root, self.path),
self._expected_fail())
class OutProc(outproc.OutProc):
def __init__(self, expected_outcomes, basepath, expected_fail):
super(OutProc, self).__init__(expected_outcomes)
self._basepath = basepath
self._expected_fail = expected_fail
def _is_failure_output(self, output):
fail = output.exit_code != 0
if fail != self._expected_fail:
return True
expected_lines = []
# Can't use utils.ReadLinesFrom() here because it strips whitespace.
with open(self._basepath + '.out') as f:
for line in f:
if line.startswith("#") or not line.strip():
continue
expected_lines.append(line)
raw_lines = output.stdout.splitlines()
actual_lines = [ s for s in raw_lines if not self._ignore_line(s) ]
if len(expected_lines) != len(actual_lines):
return True
env = {
'basename': os.path.basename(self._basepath + '.js'),
}
for (expected, actual) in itertools.izip_longest(
expected_lines, actual_lines, fillvalue=''):
pattern = re.escape(expected.rstrip() % env)
pattern = pattern.replace('\\*', '.*')
pattern = pattern.replace('\\{NUMBER\\}', '\d+(?:\.\d*)?')
pattern = '^%s$' % pattern
if not re.match(pattern, actual):
return True
return False
def _ignore_line(self, string):
"""Ignore empty lines, valgrind output, Android output."""
return (
not string or
not string.strip() or
string.startswith("==") or
string.startswith("**") or
string.startswith("ANDROID")
)
return message.OutProc(self.expected_outcomes,
os.path.join(self.suite.root, self.path),
self._expected_fail())
def GetSuite(name, root):

@@ -3,11 +3,10 @@
# found in the LICENSE file.
import os
import difflib
from testrunner.local import testsuite
from testrunner.objects import outproc
from testrunner.objects import testcase
from testrunner.outproc import mkgrokdump
SHELL = 'mkgrokdump'
@@ -42,31 +41,7 @@ class TestCase(testcase.TestCase):
@property
def output_proc(self):
return OutProc(self.expected_outcomes, self.suite.expected_path)
class OutProc(outproc.OutProc):
def __init__(self, expected_outcomes, expected_path):
super(OutProc, self).__init__(expected_outcomes)
self._expected_path = expected_path
def _is_failure_output(self, output):
with open(self._expected_path) as f:
expected = f.read()
expected_lines = expected.splitlines()
actual_lines = output.stdout.splitlines()
diff = difflib.unified_diff(expected_lines, actual_lines, lineterm="",
fromfile="expected_path")
diffstring = '\n'.join(diff)
if diffstring != "":
if "generated from a non-shipping build" in output.stdout:
return False
if not "generated from a shipping build" in output.stdout:
output.stdout = "Unexpected output:\n\n" + output.stdout
return True
output.stdout = diffstring
return True
return False
return mkgrokdump.OutProc(self.expected_outcomes, self.suite.expected_path)
def GetSuite(name, root):

@@ -29,8 +29,8 @@
import os
from testrunner.local import testsuite
from testrunner.objects import outproc
from testrunner.objects import testcase
from testrunner.outproc import mozilla
EXCLUDED = ["CVS", ".svn"]
@@ -110,37 +110,13 @@ class TestCase(testcase.TestCase):
def output_proc(self):
if not self.expected_outcomes:
if self.path.endswith('-n'):
return MOZILLA_PASS_NEGATIVE
return MOZILLA_PASS_DEFAULT
return mozilla.MOZILLA_PASS_NEGATIVE
return mozilla.MOZILLA_PASS_DEFAULT
if self.path.endswith('-n'):
return NegOutProc(self.expected_outcomes)
return OutProc(self.expected_outcomes)
return mozilla.NegOutProc(self.expected_outcomes)
return mozilla.OutProc(self.expected_outcomes)
def _is_failure_output(self, output):
return (
output.exit_code != 0 or
'FAILED!' in output.stdout
)
class OutProc(outproc.OutProc):
"""Optimized for positive tests."""
OutProc._is_failure_output = _is_failure_output
class PassOutProc(outproc.PassOutProc):
"""Optimized for positive tests expected to PASS."""
PassOutProc._is_failure_output = _is_failure_output
NegOutProc = outproc.negative(OutProc)
NegPassOutProc = outproc.negative(PassOutProc)
MOZILLA_PASS_DEFAULT = PassOutProc()
MOZILLA_PASS_NEGATIVE = NegPassOutProc()
def GetSuite(name, root):
return TestSuite(name, root)

@@ -33,12 +33,13 @@ import re
import sys
import tarfile
from testrunner.local import statusfile
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import outproc
from testrunner.objects import testcase
from testrunner.outproc import base as outproc
from testrunner.outproc import test262
# TODO(littledan): move the flag mapping into the status file
FEATURE_FLAGS = {
@@ -246,57 +247,13 @@ class TestCase(testcase.TestCase):
@property
def output_proc(self):
if self._expected_exception is not None:
return ExceptionOutProc(self.expected_outcomes, self._expected_exception)
return test262.ExceptionOutProc(self.expected_outcomes,
self._expected_exception)
if self.expected_outcomes == outproc.OUTCOMES_PASS:
return PASS_NO_EXCEPTION
return NoExceptionOutProc(self.expected_outcomes)
return test262.PASS_NO_EXCEPTION
return test262.NoExceptionOutProc(self.expected_outcomes)
class ExceptionOutProc(outproc.OutProc):
"""Output processor for tests with expected exception."""
def __init__(self, expected_outcomes, expected_exception=None):
super(ExceptionOutProc, self).__init__(expected_outcomes)
self._expected_exception = expected_exception
def _is_failure_output(self, output):
if output.exit_code != 0:
return True
if self._expected_exception != self._parse_exception(output.stdout):
return True
return 'FAILED!' in output.stdout
def _parse_exception(self, string):
# somefile:somelinenumber: someerror[: sometext]
# somefile might include an optional drive letter on windows e.g. "e:".
match = re.search(
'^(?:\w:)?[^:]*:[0-9]+: ([^: ]+?)($|: )', string, re.MULTILINE)
if match:
return match.group(1).strip()
else:
return None
def _is_failure_output(self, output):
return (
output.exit_code != 0 or
'FAILED!' in output.stdout
)
class NoExceptionOutProc(outproc.OutProc):
"""Output processor optimized for tests without expected exception."""
NoExceptionOutProc._is_failure_output = _is_failure_output
class PassNoExceptionOutProc(outproc.PassOutProc):
"""
Output processor optimized for tests expected to PASS without expected
exception.
"""
PassNoExceptionOutProc._is_failure_output = _is_failure_output
PASS_NO_EXCEPTION = PassNoExceptionOutProc()
def GetSuite(name, root):

@@ -29,8 +29,8 @@ import os
import re
from testrunner.local import testsuite
from testrunner.objects import outproc
from testrunner.objects import testcase
from testrunner.outproc import webkit
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
SELF_SCRIPT_PATTERN = re.compile(r"//\s+Env: TEST_FILE_NAME")
@@ -104,23 +104,10 @@ class TestCase(testcase.TestCase):
@property
def output_proc(self):
return OutProc(
return webkit.OutProc(
self.expected_outcomes,
os.path.join(self.suite.root, self.path) + '-expected.txt')
class OutProc(outproc.ExpectedOutProc):
def _is_failure_output(self, output):
if output.exit_code != 0:
return True
return super(OutProc, self)._is_failure_output(output)
def _ignore_expected_line(self, line):
return (
line.startswith('#') or
super(OutProc, self)._ignore_expected_line(line)
)
def GetSuite(name, root):
return TestSuite(name, root)

@@ -264,7 +264,6 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
True, # No sorting of test cases.
0, # Don't rerun failing tests.
0, # No use of a rerun-failing-tests maximum.
False, # No predictable mode.
False, # No no_harness mode.
False, # Don't use perf data.
False) # Coverage not supported.

@@ -179,7 +179,6 @@ class GCFuzzer(base_runner.BaseTestRunner):
True, # No sorting of test cases.
0, # Don't rerun failing tests.
0, # No use of a rerun-failing-tests maximum.
False, # No predictable mode.
False, # No no_harness mode.
False, # Don't use perf data.
False) # Coverage not supported.
@@ -213,7 +212,7 @@ class GCFuzzer(base_runner.BaseTestRunner):
for s in suites:
for t in s.tests:
# Skip failed tests.
if s.HasUnexpectedOutput(t, runner.outputs[t]):
if t.output_proc.has_unexpected_output(runner.outputs[t]):
print '%s failed, skipping' % t.path
continue
max_limit = self._get_max_limit_reached(runner.outputs[t])

@@ -31,6 +31,7 @@ import os
import re
import shutil
import sys
import traceback
from . import command
from . import perfdata
@@ -51,7 +52,7 @@ ProcessContext = collections.namedtuple(
TestJobResult = collections.namedtuple(
'TestJobResult', ['id', 'output'])
'TestJobResult', ['id', 'outproc_result'])
def MakeProcessContext(sancov_dir):
return ProcessContext(sancov_dir)
@@ -74,9 +75,10 @@ class Job(object):
class TestJob(Job):
def __init__(self, test_id, cmd, run_num):
def __init__(self, test_id, cmd, outproc, run_num):
self.test_id = test_id
self.cmd = cmd
self.outproc = outproc
self.run_num = run_num
def _rename_coverage_data(self, out, sancov_dir):
@@ -105,20 +107,21 @@ class TestJob(Job):
os.rename(sancov_file, new_sancov_file)
def run(self, context):
out = self.cmd.execute()
self._rename_coverage_data(out, context.sancov_dir)
return TestJobResult(self.test_id, out)
output = self.cmd.execute()
self._rename_coverage_data(output, context.sancov_dir)
return TestJobResult(self.test_id, self.outproc.process(output))
class Runner(object):
def __init__(self, suites, progress_indicator, context):
def __init__(self, suites, progress_indicator, context, outproc_factory=None):
self.datapath = os.path.join("out", "testrunner_data")
self.perf_data_manager = perfdata.GetPerfDataManager(
context, self.datapath)
self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
self.perf_failures = False
self.printed_allocations = False
self.outproc_factory = outproc_factory or (lambda test: test.output_proc)
self.tests = [t for s in suites for t in s.tests]
# TODO(majeski): Pass dynamically instead of keeping them in the runner.
@@ -159,7 +162,7 @@ class Runner(object):
print("PerfData exception: %s" % e)
self.perf_failures = True
def _MaybeRerun(self, pool, test, job_result):
def _MaybeRerun(self, pool, test, result):
if test.run <= self.context.rerun_failures_count:
# Possibly rerun this test if its run count is below the maximum per
# test. <= as the flag controls reruns not including the first run.
@@ -172,23 +175,24 @@
# reached.
return
if (test.run >= 2 and
job_result.output.duration > self.context.timeout / 20.0):
result.output.duration > self.context.timeout / 20.0):
# Rerun slow tests at most once.
return
# Rerun this test.
test.run += 1
pool.add([TestJob(test.id, test.cmd, test.run)])
pool.add([
TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
])
self.remaining += 1
self.total += 1
def _ProcessTest(self, test, job_result, pool):
self.outputs[test] = job_result.output
has_unexpected_output = test.suite.HasUnexpectedOutput(
test, job_result.output, self.context)
def _ProcessTest(self, test, result, pool):
self.outputs[test] = result.output
has_unexpected_output = result.has_unexpected_output
if has_unexpected_output:
self.failed.append(test)
if job_result.output.HasCrashed():
if result.output.HasCrashed():
self.crashed += 1
else:
self.succeeded += 1
@@ -196,12 +200,12 @@ class Runner(object):
# For the indicator, everything that happens after the first run is treated
# as unexpected even if it flakily passes in order to include it in the
# output.
self.indicator.HasRun(test, job_result.output,
self.indicator.HasRun(test, result.output,
has_unexpected_output or test.run > 1)
if has_unexpected_output:
# Rerun test failures after the indicator has processed the results.
self._VerbosePrint("Attempting to rerun test after failure.")
self._MaybeRerun(pool, test, job_result)
self._MaybeRerun(pool, test, result)
# Update the perf database if the test succeeded.
return not has_unexpected_output
@@ -224,11 +228,13 @@ class Runner(object):
assert test.id >= 0
test_map[test.id] = test
try:
yield [TestJob(test.id, test.cmd, test.run)]
yield [
TestJob(test.id, test.cmd, self.outproc_factory(test), test.run)
]
except Exception, e:
# If this failed, save the exception and re-raise it later (after
# all other tests have had a chance to run).
queued_exception[0] = e
queued_exception[0] = e, traceback.format_exc()
continue
try:
it = pool.imap_unordered(
@@ -243,12 +249,19 @@
continue
job_result = result.value
test_id = job_result.id
outproc_result = job_result.outproc_result
test = test_map[job_result.id]
update_perf = self._ProcessTest(test, job_result, pool)
test = test_map[test_id]
update_perf = self._ProcessTest(test, outproc_result, pool)
if update_perf:
self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(
test, job_result.output.duration))
test, outproc_result.output.duration))
except KeyboardInterrupt:
raise
except:
traceback.print_exc()
raise
finally:
self._VerbosePrint("Closing process pool.")
pool.terminate()
@@ -260,7 +273,9 @@ class Runner(object):
print "Deleting perf test data due to db corruption."
shutil.rmtree(self.datapath)
if queued_exception[0]:
raise queued_exception[0]
e, stacktrace = queued_exception[0]
print stacktrace
raise e
def _VerbosePrint(self, text):
if self.context.verbose:

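The net effect of the execution.py changes above: the output comparison
now runs on the worker inside TestJob.run(), and only a small verdict
travels back. A reduced sketch of the contract (Output/EchoOutProc are
stand-ins; Result mirrors the namedtuple added to outproc/base.py below):

    import collections

    Result = collections.namedtuple(
        'Result', ['has_unexpected_output', 'output'])
    Output = collections.namedtuple('Output', ['exit_code', 'stdout'])

    class EchoOutProc(object):
        def process(self, output):  # executed on the worker
            return Result(output.stdout != 'hello\n', output)

    result = EchoOutProc().process(Output(0, 'hello\n'))
    print result.has_unexpected_output  # main process reads a boolean
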
@@ -171,20 +171,6 @@ class TestSuite(object):
break
self.tests = filtered
def HasUnexpectedOutput(self, test, output, ctx=None):
if ctx and ctx.predictable:
# Only check the exit code of the predictable_wrapper in
# verify-predictable mode. Negative tests are not supported as they
# usually also don't print allocation hashes. There are two versions of
# negative tests: one specified by the test, the other specified through
# the status file (e.g. known bugs).
return (
output.exit_code != 0 and
not test.output_proc.negative and
statusfile.FAIL not in test.expected_outcomes
)
return test.output_proc.has_unexpected_output(output)
def _create_test(self, path, **kwargs):
test = self._test_class()(self, path, self._path_to_name(path), **kwargs)
return test

@@ -29,8 +29,8 @@
class Context():
def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
isolates, command_prefix, extra_flags, noi18n, random_seed,
no_sorting, rerun_failures_count, rerun_failures_max,
predictable, no_harness, use_perf_data, sancov_dir):
no_sorting, rerun_failures_count, rerun_failures_max, no_harness,
use_perf_data, sancov_dir):
self.arch = arch
self.mode = mode
self.shell_dir = shell_dir
@@ -45,7 +45,6 @@ class Context():
self.no_sorting = no_sorting
self.rerun_failures_count = rerun_failures_count
self.rerun_failures_max = rerun_failures_max
self.predictable = predictable
self.no_harness = no_harness
self.use_perf_data = use_perf_data
self.sancov_dir = sancov_dir

@@ -0,0 +1,56 @@
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from ..local import statusfile
from ..outproc import base as outproc_base
# Only check the exit code of the predictable_wrapper in
# verify-predictable mode. Negative tests are not supported as they
# usually also don't print allocation hashes. There are two versions of
# negative tests: one specified by the test, the other specified through
# the status file (e.g. known bugs).
def get_outproc(test):
output_proc = test.output_proc
if output_proc.negative or statusfile.FAIL in test.expected_outcomes:
# TODO(majeski): Skip these tests instead of having special outproc.
return NeverUnexpectedOutputOutProc(output_proc)
return OutProc(output_proc)
class OutProc(outproc_base.BaseOutProc):
"""Output processor wrapper for predictable mode. It has custom
has_unexpected_output implementation, but for all other methods it simply
calls wrapped output processor.
"""
def __init__(self, _outproc):
super(OutProc, self).__init__()
self._outproc = _outproc
def process(self, output):
return self._outproc.process(output)
def has_unexpected_output(self, output):
return output.exit_code != 0
def get_outcome(self, output):
return self._outproc.get_outcome(output)
@property
def negative(self):
return self._outproc.negative
@property
def expected_outcomes(self):
return self._outproc.expected_outcomes
class NeverUnexpectedOutputOutProc(OutProc):
"""Output processor wrapper for tests that we will return False for
has_unexpected_output in the predictable mode.
"""
def has_unexpected_output(self, output):
return False

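In predictable mode, only the exit code of the predictable_wrapper decides
pass/fail, regardless of what the wrapped processor would report. A small
sketch of the wrapper above with stand-in objects (FakeOutProc, FakeTest
and FakeOutput are hypothetical; run with tools/ on sys.path):

    from testrunner.objects import predictable

    class FakeOutProc(object):
        negative = False
        expected_outcomes = ['PASS']

    class FakeTest(object):
        output_proc = FakeOutProc()
        expected_outcomes = ['PASS']

    class FakeOutput(object):
        def __init__(self, exit_code):
            self.exit_code = exit_code

    wrapper = predictable.get_outproc(FakeTest())
    print wrapper.has_unexpected_output(FakeOutput(0))  # False
    print wrapper.has_unexpected_output(FakeOutput(1))  # True
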
@@ -30,7 +30,7 @@ import os
import re
import shlex
from . import outproc
from ..outproc import base as outproc
from ..local import command
from ..local import statusfile
from ..local import utils

@@ -0,0 +1,3 @@
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

@@ -2,6 +2,7 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import itertools
from ..local import statusfile
@@ -10,8 +11,13 @@ from ..local import statusfile
OUTCOMES_PASS = [statusfile.PASS]
OUTCOMES_FAIL = [statusfile.FAIL]
Result = collections.namedtuple('Result', ['has_unexpected_output', 'output'])
class BaseOutProc(object):
def process(self, output):
return Result(self.has_unexpected_output(output), output)
def has_unexpected_output(self, output):
return self.get_outcome(output) not in self.expected_outcomes
@@ -43,12 +49,10 @@
raise NotImplementedError()
def negative(cls):
class Neg(cls):
@property
def negative(self):
return True
return Neg
class Negative(object):
@property
def negative(self):
return True
class PassOutProc(BaseOutProc):

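Replacing the negative() class factory with a Negative mixin is likely
motivated by the same pickling constraint: a class created inside a
function is not reachable as <module>.<name>, so its instances cannot be
pickled by reference, while a module-level mixin subclass can. A sketch
(all names hypothetical):

    import pickle

    def negative(cls):
        class Neg(cls):  # defined at call time, not importable by name
            negative = True
        return Neg

    class Base(object):
        negative = False

    try:
        pickle.dumps(negative(Base)())
    except pickle.PicklingError, e:
        print 'factory-made class:', e

    class Negative(object):  # module-level mixin: pickles fine
        negative = True

    class NegBase(Negative, Base):
        pass

    pickle.dumps(NegBase())  # works
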
@@ -0,0 +1,56 @@
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import itertools
import os
import re
from . import base
class OutProc(base.OutProc):
def __init__(self, expected_outcomes, basepath, expected_fail):
super(OutProc, self).__init__(expected_outcomes)
self._basepath = basepath
self._expected_fail = expected_fail
def _is_failure_output(self, output):
fail = output.exit_code != 0
if fail != self._expected_fail:
return True
expected_lines = []
# Can't use utils.ReadLinesFrom() here because it strips whitespace.
with open(self._basepath + '.out') as f:
for line in f:
if line.startswith("#") or not line.strip():
continue
expected_lines.append(line)
raw_lines = output.stdout.splitlines()
actual_lines = [ s for s in raw_lines if not self._ignore_line(s) ]
if len(expected_lines) != len(actual_lines):
return True
env = {
'basename': os.path.basename(self._basepath + '.js'),
}
for (expected, actual) in itertools.izip_longest(
expected_lines, actual_lines, fillvalue=''):
pattern = re.escape(expected.rstrip() % env)
pattern = pattern.replace('\\*', '.*')
pattern = pattern.replace('\\{NUMBER\\}', '\d+(?:\.\d*)?')
pattern = '^%s$' % pattern
if not re.match(pattern, actual):
return True
return False
def _ignore_line(self, string):
"""Ignore empty lines, valgrind output, Android output."""
return (
not string or
not string.strip() or
string.startswith("==") or
string.startswith("**") or
string.startswith("ANDROID")
)

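The expected-output files for these tests support a small pattern
language, visible in the code above: %(basename)s is substituted, * turns
into a wildcard, and {NUMBER} matches a numeric literal. A worked example
with hypothetical values:

    import re

    expected = 'Error in %(basename)s at line {NUMBER}: *'
    env = {'basename': 'regress-123.js'}
    pattern = re.escape(expected.rstrip() % env)
    pattern = pattern.replace('\\*', '.*')
    pattern = pattern.replace('\\{NUMBER\\}', '\d+(?:\.\d*)?')
    print re.match('^%s$' % pattern,
                   'Error in regress-123.js at line 42: boom')  # matches
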
@@ -0,0 +1,31 @@
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import difflib
from . import base
class OutProc(base.OutProc):
def __init__(self, expected_outcomes, expected_path):
super(OutProc, self).__init__(expected_outcomes)
self._expected_path = expected_path
def _is_failure_output(self, output):
with open(self._expected_path) as f:
expected = f.read()
expected_lines = expected.splitlines()
actual_lines = output.stdout.splitlines()
diff = difflib.unified_diff(expected_lines, actual_lines, lineterm="",
fromfile="expected_path")
diffstring = '\n'.join(diff)
if diffstring != "":
if "generated from a non-shipping build" in output.stdout:
return False
if not "generated from a shipping build" in output.stdout:
output.stdout = "Unexpected output:\n\n" + output.stdout
return True
output.stdout = diffstring
return True
return False

@@ -0,0 +1,33 @@
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from . import base
def _is_failure_output(self, output):
return (
output.exit_code != 0 or
'FAILED!' in output.stdout
)
class OutProc(base.OutProc):
"""Optimized for positive tests."""
OutProc._is_failure_output = _is_failure_output
class PassOutProc(base.PassOutProc):
"""Optimized for positive tests expected to PASS."""
PassOutProc._is_failure_output = _is_failure_output
class NegOutProc(base.Negative, OutProc):
pass
class NegPassOutProc(base.Negative, PassOutProc):
pass
MOZILLA_PASS_DEFAULT = PassOutProc()
MOZILLA_PASS_NEGATIVE = NegPassOutProc()

@@ -0,0 +1,54 @@
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from . import base
class ExceptionOutProc(base.OutProc):
"""Output processor for tests with expected exception."""
def __init__(self, expected_outcomes, expected_exception=None):
super(ExceptionOutProc, self).__init__(expected_outcomes)
self._expected_exception = expected_exception
def _is_failure_output(self, output):
if output.exit_code != 0:
return True
if self._expected_exception != self._parse_exception(output.stdout):
return True
return 'FAILED!' in output.stdout
def _parse_exception(self, string):
# somefile:somelinenumber: someerror[: sometext]
# somefile might include an optional drive letter on windows e.g. "e:".
match = re.search(
'^(?:\w:)?[^:]*:[0-9]+: ([^: ]+?)($|: )', string, re.MULTILINE)
if match:
return match.group(1).strip()
else:
return None
def _is_failure_output(self, output):
return (
output.exit_code != 0 or
'FAILED!' in output.stdout
)
class NoExceptionOutProc(base.OutProc):
"""Output processor optimized for tests without expected exception."""
NoExceptionOutProc._is_failure_output = _is_failure_output
class PassNoExceptionOutProc(base.PassOutProc):
"""
Output processor optimized for tests expected to PASS without expected
exception.
"""
PassNoExceptionOutProc._is_failure_output = _is_failure_output
PASS_NO_EXCEPTION = PassNoExceptionOutProc()

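For reference, _parse_exception above extracts the exception name from
d8-style output; a worked example with a hypothetical line:

    import re

    line = 'test/language/foo.js:10: SyntaxError: Unexpected token'
    m = re.search('^(?:\w:)?[^:]*:[0-9]+: ([^: ]+?)($|: )',
                  line, re.MULTILINE)
    print m.group(1)  # SyntaxError
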
@@ -0,0 +1,18 @@
# Copyright 2018 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from . import base
class OutProc(base.ExpectedOutProc):
def _is_failure_output(self, output):
if output.exit_code != 0:
return True
return super(OutProc, self)._is_failure_output(output)
def _ignore_expected_line(self, line):
return (
line.startswith('#') or
super(OutProc, self)._ignore_expected_line(line)
)

@@ -25,6 +25,7 @@ from testrunner.local import utils
from testrunner.local import verbose
from testrunner.local.variants import ALL_VARIANTS
from testrunner.objects import context
from testrunner.objects import predictable
TIMEOUT_DEFAULT = 60
@@ -377,7 +378,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
options.no_sorting,
options.rerun_failures_count,
options.rerun_failures_max,
self.build_config.predictable,
options.no_harness,
use_perf_data=not options.swarming,
sancov_dir=self.sancov_dir)
@@ -495,7 +495,12 @@ class StandardTestRunner(base_runner.BaseTestRunner):
progress_indicator.Register(progress.FlakinessTestProgressIndicator(
options.flakiness_results))
runner = execution.Runner(suites, progress_indicator, ctx)
if self.build_config.predictable:
outproc_factory = predictable.get_outproc
else:
outproc_factory = None
runner = execution.Runner(suites, progress_indicator, ctx,
outproc_factory)
exit_code = runner.Run(options.j)
overall_duration = time.time() - start_time