[test] Add basic test-runner system tests
This tests some high-level scenarios of the V8 test runner, with a fake
executable, a test-suite extension and fake build configs. The runners are
slightly modified to make them easier to test: args are now passed in by the
tests, and the V8 root folder can be faked by the tests. Coverage is recorded
if Python coverage 4.0 (or newer) is installed; otherwise the tests run
without it.

Bug: v8:6917
Change-Id: Ib149fd88027cbdc3382bcaea2d82020582f79d2d
Reviewed-on: https://chromium-review.googlesource.com/831506
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Sergiy Byelozyorov <sergiyb@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50310}
Parent: b82125c033
Commit: 13485a6991
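The new system tests drive the runners in-process rather than through run-tests.py. As a minimal sketch of that usage, assuming a prepared fake test root laid out like tools/unittests/testdata/testroot1 below (the helper name run_in_fake_root is illustrative, not part of this change):

    import sys
    from testrunner import standard_runner

    def run_in_fake_root(fake_root, *args):
      # Illustrative helper: basedir replaces the real V8 root, and execute()
      # now takes argv explicitly instead of reading sys.argv.
      sys_args = ['--command-prefix', sys.executable] + list(args)
      return standard_runner.StandardTestRunner(basedir=fake_root).execute(sys_args)

    # Example: run_in_fake_root('/tmp/v8_test_xyz', '--mode=Release', 'sweet/bananas')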
tools/testrunner/PRESUBMIT.py (new file, 8 lines)
@@ -0,0 +1,8 @@
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

def CheckChangeOnCommit(input_api, output_api):
  tests = input_api.canned_checks.GetUnitTestsInDirectory(
      input_api, output_api, '../unittests', whitelist=['run_tests_test.py$'])
  return input_api.RunTests(tests)
@@ -177,16 +177,19 @@ class BuildConfig(object):


class BaseTestRunner(object):
-  def __init__(self):
+  def __init__(self, basedir=None):
+    self.basedir = basedir or BASE_DIR
    self.outdir = None
    self.build_config = None
    self.mode_name = None
    self.mode_options = None

-  def execute(self):
+  def execute(self, sys_args=None):
+    if sys_args is None:  # pragma: no cover
+      sys_args = sys.argv[1:]
    try:
      parser = self._create_parser()
-      options, args = self._parse_args(parser)
+      options, args = self._parse_args(parser, sys_args)

      self._load_build_config(options)

@@ -231,11 +234,11 @@ class BaseTestRunner(object):
  def _add_parser_options(self, parser):
    pass

-  def _parse_args(self, parser):
-    options, args = parser.parse_args()
+  def _parse_args(self, parser, sys_args):
+    options, args = parser.parse_args(sys_args)

    if any(map(lambda v: v and ',' in v,
-               [options.arch, options.mode])):
+               [options.arch, options.mode])):  # pragma: no cover
      print 'Multiple arch/mode are deprecated'
      raise TestRunnerError()

@@ -248,7 +251,7 @@ class BaseTestRunner(object):
      except TestRunnerError:
        pass

-    if not self.build_config:
+    if not self.build_config:  # pragma: no cover
      print 'Failed to load build config'
      raise TestRunnerError

@@ -274,14 +277,14 @@ class BaseTestRunner(object):
                           '%s.%s' % (options.arch, options.mode))

    for outdir in outdirs():
-      yield os.path.join(BASE_DIR, outdir)
+      yield os.path.join(self.basedir, outdir)

      # buildbot option
      if options.mode:
-        yield os.path.join(BASE_DIR, outdir, options.mode)
+        yield os.path.join(self.basedir, outdir, options.mode)

  def _get_gn_outdir(self):
-    gn_out_dir = os.path.join(BASE_DIR, DEFAULT_OUT_GN)
+    gn_out_dir = os.path.join(self.basedir, DEFAULT_OUT_GN)
    latest_timestamp = -1
    latest_config = None
    for gn_config in os.listdir(gn_out_dir):
@@ -305,7 +308,7 @@ class BaseTestRunner(object):
    with open(build_config_path) as f:
      try:
        build_config_json = json.load(f)
-      except Exception:
+      except Exception:  # pragma: no cover
        print("%s exists but contains invalid json. Is your build up-to-date?"
              % build_config_path)
        raise TestRunnerError()
@@ -324,7 +327,7 @@ class BaseTestRunner(object):

    build_config_mode = 'debug' if self.build_config.is_debug else 'release'
    if options.mode:
-      if options.mode not in MODES:
+      if options.mode not in MODES:  # pragma: no cover
        print '%s mode is invalid' % options.mode
        raise TestRunnerError()
      if MODES[options.mode].execution_mode != build_config_mode:
@@ -346,7 +349,7 @@ class BaseTestRunner(object):
             options.arch, self.build_config.arch))
      raise TestRunnerError()

-    if options.shell_dir:
+    if options.shell_dir:  # pragma: no cover
      print('Warning: --shell-dir is deprecated. Searching for executables in '
            'build directory (%s) instead.' % self.outdir)

@@ -364,7 +367,7 @@ class BaseTestRunner(object):

  def _setup_env(self):
    # Use the v8 root as cwd as some test cases use "load" with relative paths.
-    os.chdir(BASE_DIR)
+    os.chdir(self.basedir)

    # Many tests assume an English interface.
    os.environ['LANG'] = 'en_US.UTF-8'
@@ -403,7 +406,7 @@ class BaseTestRunner(object):

    if self.build_config.tsan:
      suppressions_file = os.path.join(
-          BASE_DIR,
+          self.basedir,
          'tools',
          'sanitizers',
          'tsan_suppressions.txt')
@@ -418,7 +421,7 @@ class BaseTestRunner(object):

  def _get_external_symbolizer_option(self):
    external_symbolizer_path = os.path.join(
-        BASE_DIR,
+        self.basedir,
        'third_party',
        'llvm-build',
        'Release+Asserts',
@@ -37,8 +37,8 @@ DISTRIBUTION_MODES = ["smooth", "random"]


class DeoptFuzzer(base_runner.BaseTestRunner):
-  def __init__(self):
-    super(DeoptFuzzer, self).__init__()
+  def __init__(self, *args, **kwargs):
+    super(DeoptFuzzer, self).__init__(*args, **kwargs)

  class RandomDistribution:
    def __init__(self, seed=None):
@@ -200,7 +200,7 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
    return shard

  def _do_execute(self, options, args):
-    suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
+    suite_paths = utils.GetSuitePaths(join(self.basedir, "test"))

    if len(args) == 0:
      suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
@@ -215,7 +215,7 @@ class DeoptFuzzer(base_runner.BaseTestRunner):
    suites = []
    for root in suite_paths:
      suite = testsuite.TestSuite.LoadTestSuite(
-          os.path.join(base_runner.BASE_DIR, "test", root))
+          os.path.join(self.basedir, "test", root))
      if suite:
        suites.append(suite)

@@ -36,8 +36,8 @@ SLOW_ARCHS = ["arm",


class GCFuzzer(base_runner.BaseTestRunner):
-  def __init__(self):
-    super(GCFuzzer, self).__init__()
+  def __init__(self, *args, **kwargs):
+    super(GCFuzzer, self).__init__(*args, **kwargs)

    self.fuzzer_rng = None

@@ -118,7 +118,7 @@ class GCFuzzer(base_runner.BaseTestRunner):
    return shard

  def _do_execute(self, options, args):
-    suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
+    suite_paths = utils.GetSuitePaths(join(self.basedir, "test"))

    if len(args) == 0:
      suite_paths = [ s for s in suite_paths if s in DEFAULT_TESTS ]
@@ -133,7 +133,7 @@ class GCFuzzer(base_runner.BaseTestRunner):
    suites = []
    for root in suite_paths:
      suite = testsuite.TestSuite.LoadTestSuite(
-          os.path.join(base_runner.BASE_DIR, "test", root))
+          os.path.join(self.basedir, "test", root))
      if suite:
        suites.append(suite)

@@ -71,8 +71,8 @@ PREDICTABLE_WRAPPER = os.path.join(


class StandardTestRunner(base_runner.BaseTestRunner):
-  def __init__(self):
-    super(StandardTestRunner, self).__init__()
+  def __init__(self, *args, **kwargs):
+    super(StandardTestRunner, self).__init__(*args, **kwargs)

    self.sancov_dir = None

@@ -92,7 +92,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
        except Exception:
          pass

-    suite_paths = utils.GetSuitePaths(join(base_runner.BASE_DIR, "test"))
+    suite_paths = utils.GetSuitePaths(join(self.basedir, "test"))

    # Use default tests if no test configuration was provided at the cmd line.
    if len(args) == 0:
@@ -119,7 +119,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
      if options.verbose:
        print '>>> Loading test suite: %s' % root
      suite = testsuite.TestSuite.LoadTestSuite(
-          os.path.join(base_runner.BASE_DIR, "test", root))
+          os.path.join(self.basedir, "test", root))
      if suite:
        suites.append(suite)

@@ -257,13 +257,13 @@ class StandardTestRunner(base_runner.BaseTestRunner):
    if options.novfp3:
      options.extra_flags.append("--noenable-vfp3")

-    if options.no_variants:
+    if options.no_variants:  # pragma: no cover
      print ("Option --no-variants is deprecated. "
             "Pass --variants=default instead.")
      assert not options.variants
      options.variants = "default"

-    if options.exhaustive_variants:
+    if options.exhaustive_variants:  # pragma: no cover
      # TODO(machenbach): Switch infra to --variants=exhaustive after M65.
      print ("Option --exhaustive-variants is deprecated. "
             "Pass --variants=exhaustive instead.")
@@ -319,7 +319,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
      print "All variants must be in %s" % str(ALL_VARIANTS)
      raise base_runner.TestRunnerError()

-    def CheckTestMode(name, option):
+    def CheckTestMode(name, option):  # pragma: no cover
      if not option in ["run", "skip", "dontcare"]:
        print "Unknown %s mode %s" % (name, option)
        raise base_runner.TestRunnerError()
@@ -482,7 +482,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
    progress_indicator = progress.IndicatorNotifier()
    progress_indicator.Register(
        progress.PROGRESS_INDICATORS[options.progress]())
-    if options.junitout:
+    if options.junitout:  # pragma: no cover
      progress_indicator.Register(progress.JUnitTestProgressIndicator(
          options.junitout, options.junittestsuite))
    if options.json_test_results:
@@ -491,7 +491,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
          self.build_config.arch,
          self.mode_options.execution_mode,
          ctx.random_seed))
-    if options.flakiness_results:
+    if options.flakiness_results:  # pragma: no cover
      progress_indicator.Register(progress.FlakinessTestProgressIndicator(
          options.flakiness_results))

@@ -516,8 +516,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
        print "Merging sancov files."
        subprocess.check_call([
          sys.executable,
-          join(
-              base_runner.BASE_DIR, "tools", "sanitizers", "sancov_merger.py"),
+          join(self.basedir, "tools", "sanitizers", "sancov_merger.py"),
          "--coverage-dir=%s" % self.sancov_dir])
      except:
        print >> sys.stderr, "Error: Merging sancov files failed."
@@ -540,16 +539,20 @@ class StandardTestRunner(base_runner.BaseTestRunner):
    if options.shard_count > 1:
      # Log if a value was passed on the cmd line and it differs from the
      # environment variables.
-      if options.shard_count != shard_count:
+      if options.shard_count != shard_count:  # pragma: no cover
        print("shard_count from cmd line differs from environment variable "
              "GTEST_TOTAL_SHARDS")
-      if options.shard_run > 1 and options.shard_run != shard_run:
+      if (options.shard_run > 1 and
+          options.shard_run != shard_run):  # pragma: no cover
        print("shard_run from cmd line differs from environment variable "
              "GTEST_SHARD_INDEX")

    if shard_count < 2:
      return tests
    if shard_run < 1 or shard_run > shard_count:
      # TODO(machenbach): Turn this into an assert. If that's wrong on the
      # bots, printing will be quite useless. Or refactor this code to make
      # sure we get a return code != 0 after testing if we got here.
      print "shard-run not a valid number, should be in [1:shard-count]"
      print "defaulting back to running all tests"
      return tests

tools/unittests/run_tests_test.py (new executable file, 506 lines)
@@ -0,0 +1,506 @@
#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Global system tests for V8 test runners and fuzzers.

This hooks up the framework under tools/testrunner testing high-level scenarios
with different test suite extensions and build configurations.
"""

# TODO(machenbach): Mock out util.GuessOS to make these tests really platform
# independent.
# TODO(machenbach): Move coverage recording to a global test entry point to
# include other unittest suites in the coverage report.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.

import collections
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest

from cStringIO import StringIO

TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
RUN_TESTS_PY = os.path.join(TOOLS_ROOT, 'run-tests.py')

Result = collections.namedtuple(
    'Result', ['stdout', 'stderr', 'returncode'])

Result.__str__ = lambda self: (
    '\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n' %
    (self.returncode, self.stdout, self.stderr))


@contextlib.contextmanager
def temp_dir():
  """Wrapper making a temporary directory available."""
  path = None
  try:
    path = tempfile.mkdtemp('v8_test_')
    yield path
  finally:
    if path:
      shutil.rmtree(path)


@contextlib.contextmanager
def temp_base(baseroot='testroot1'):
  """Wrapper that sets up a temporary V8 test root.

  Args:
    baseroot: The folder with the test root blueprint. Relevant files will be
        copied to the temporary test root, to guarantee a fresh setup with no
        dirty state.
  """
  basedir = os.path.join(TEST_DATA_ROOT, baseroot)
  with temp_dir() as tempbase:
    builddir = os.path.join(tempbase, 'out', 'Release')
    testroot = os.path.join(tempbase, 'test')
    os.makedirs(builddir)
    shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
    shutil.copy(os.path.join(basedir, 'd8_mocked.py'), builddir)

    for suite in os.listdir(os.path.join(basedir, 'test')):
      os.makedirs(os.path.join(testroot, suite))
      for entry in os.listdir(os.path.join(basedir, 'test', suite)):
        shutil.copy(
            os.path.join(basedir, 'test', suite, entry),
            os.path.join(testroot, suite))
    yield tempbase


@contextlib.contextmanager
def capture():
  """Wrapper that replaces system stdout/stderr and provides the streams."""
  oldout = sys.stdout
  olderr = sys.stderr
  try:
    stdout=StringIO()
    stderr=StringIO()
    sys.stdout = stdout
    sys.stderr = stderr
    yield stdout, stderr
  finally:
    sys.stdout = oldout
    sys.stderr = olderr


def run_tests(basedir, *args):
  """Executes the test runner with captured output."""
  with capture() as (stdout, stderr):
    sys_args = ['--command-prefix', sys.executable] + list(args)
    code = standard_runner.StandardTestRunner(
        basedir=basedir).execute(sys_args)
    return Result(stdout.getvalue(), stderr.getvalue(), code)


def override_build_config(basedir, **kwargs):
  """Override the build config with new values provided as kwargs."""
  path = os.path.join(basedir, 'out', 'Release', 'v8_build_config.json')
  with open(path) as f:
    config = json.load(f)
  config.update(kwargs)
  with open(path, 'w') as f:
    json.dump(config, f)


class SystemTest(unittest.TestCase):
  @classmethod
  def setUpClass(cls):
    # Try to set up python coverage and run without it if not available.
    cls._cov = None
    try:
      import coverage
      if int(coverage.__version__.split('.')[0]) < 4:
        # First coverage 4.0 can deal with multiprocessing.
        cls._cov = None
        print 'Python coverage version >= 4 required.'
        raise ImportError()
      cls._cov = coverage.Coverage(
          source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
          omit=['*unittest*', '*__init__.py'],
          concurrency='multiprocessing',
      )
      cls._cov.exclude('raise NotImplementedError')
      cls._cov.exclude('if __name__ == .__main__.:')
      cls._cov.exclude('except TestRunnerError:')
      cls._cov.exclude('except KeyboardInterrupt:')
      cls._cov.exclude('if options.verbose:')
      cls._cov.exclude('if verbose:')
      cls._cov.exclude('pass')
      cls._cov.exclude('assert False')
      cls._cov.start()
    except ImportError:
      print 'Running without python coverage.'
    sys.path.append(TOOLS_ROOT)
    global standard_runner
    from testrunner import standard_runner

  @classmethod
  def tearDownClass(cls):
    if cls._cov:
      cls._cov.stop()
      cls._cov.combine()
      print ''
      print cls._cov.report(show_missing=True)

  def testPass(self):
    """Test running only passing tests in two variants.

    Also test printing durations.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default,stress',
          '--time',
          'sweet/bananas',
          'sweet/raspberries',
      )
      self.assertIn('Running 4 tests', result.stdout, result)
      self.assertIn('Done running sweet/bananas: pass', result.stdout, result)
      self.assertIn('Total time:', result.stderr, result)
      self.assertIn('sweet/bananas', result.stderr, result)
      self.assertEqual(0, result.returncode, result)

  def testSharded(self):
    """Test running a particular shard."""
    with temp_base() as basedir:
      for shard in [1, 2]:
        result = run_tests(
            basedir,
            '--mode=Release',
            '--progress=verbose',
            '--variants=default,stress',
            '--shard-count=2',
            '--shard-run=%d' % shard,
            'sweet/bananas',
            'sweet/raspberries',
        )
        # One of the shards gets one variant of each test.
        self.assertIn('Running 2 tests', result.stdout, result)
        self.assertIn('Done running sweet/bananas', result.stdout, result)
        self.assertIn('Done running sweet/raspberries', result.stdout, result)
        self.assertEqual(0, result.returncode, result)

  def testFail(self):
    """Test running only failing tests in two variants."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default,stress',
          'sweet/strawberries',
      )
      self.assertIn('Running 2 tests', result.stdout, result)
      self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testFailWithRerunAndJSON(self):
    """Test re-running a failing test and output to json."""
    with temp_base() as basedir:
      json_path = os.path.join(basedir, 'out.json')
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          '--rerun-failures-count=2',
          '--random-seed=123',
          '--json-test-results', json_path,
          'sweet/strawberries',
      )
      self.assertIn('Running 1 tests', result.stdout, result)
      self.assertIn('Done running sweet/strawberries: FAIL', result.stdout, result)
      # We run one test, which fails and gets re-run twice.
      self.assertIn('3 tests failed', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

      # Check relevant properties of the json output.
      with open(json_path) as f:
        json_output = json.load(f)[0]
        pretty_json = json.dumps(json_output, indent=2, sort_keys=True)

      # Replace duration in actual output as it's non-deterministic. Also
      # replace the python executable prefix as it has a different absolute
      # path dependent on where this runs.
      def replace_variable_data(data):
        data['duration'] = 1
        data['command'] = ' '.join(
            ['/usr/bin/python'] + data['command'].split()[1:])
      for data in json_output['slowest_tests']:
        replace_variable_data(data)
      for data in json_output['results']:
        replace_variable_data(data)
      json_output['duration_mean'] = 1

      with open(os.path.join(TEST_DATA_ROOT, 'expected_test_results1.json')) as f:
        expected_test_results = json.load(f)

      # TODO(majeski): Previously we only reported the variant flags in the
      # flags field of the test result.
      # After recent changes we report all flags, including the file names.
      # This is redundant to the command. Needs investigation.
      self.assertEqual(json_output, expected_test_results, pretty_json)

  def testAutoDetect(self):
    """Fake a build with several auto-detected options.

    Using all those options at once doesn't really make much sense. This is
    merely for getting coverage.
    """
    with temp_base() as basedir:
      override_build_config(
          basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
          is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
          v8_enable_i18n_support=False, v8_target_cpu='x86',
          v8_use_snapshot=False)
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
      )
      expect_text = (
          '>>> Autodetected:\n'
          'asan\n'
          'cfi_vptr\n'
          'dcheck_always_on\n'
          'msan\n'
          'no_i18n\n'
          'no_snap\n'
          'tsan\n'
          'ubsan_vptr\n'
          '>>> Running tests for ia32.release')
      self.assertIn(expect_text, result.stdout, result)
      self.assertEqual(0, result.returncode, result)
      # TODO(machenbach): Test some more implications of the auto-detected
      # options, e.g. that the right env variables are set.

  def testSkips(self):
    """Test skipping tests in status file for a specific variant."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=nooptimization',
          'sweet/strawberries',
      )
      self.assertIn('Running 0 tests', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testDefault(self):
    """Test using default test suites, though no tests are run since they don't
    exist in a test setting.
    """
    with temp_base() as basedir:
      result = run_tests(basedir, '--mode=Release')
      self.assertIn('Warning: no tests were run!', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testNoBuildConfig(self):
    """Test failing run when build config is not found."""
    with temp_base() as basedir:
      result = run_tests(basedir)
      self.assertIn('Failed to load build config', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testGNOption(self):
    """Test using gn option, but no gn build folder is found."""
    with temp_base() as basedir:
      # TODO(machenbach): This should fail gracefully.
      with self.assertRaises(OSError):
        run_tests(basedir, '--gn')

  def testInconsistentMode(self):
    """Test failing run when attempting to wrongly override the mode."""
    with temp_base() as basedir:
      override_build_config(basedir, is_debug=True)
      result = run_tests(basedir, '--mode=Release')
      self.assertIn('execution mode (release) for release is inconsistent '
                    'with build config (debug)', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testInconsistentArch(self):
    """Test failing run when attempting to wrongly override the arch."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--mode=Release', '--arch=ia32')
      self.assertIn(
          '--arch value (ia32) inconsistent with build config (x64).',
          result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testWrongVariant(self):
    """Test using a bogus variant."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--mode=Release', '--variants=meh')
      self.assertEqual(1, result.returncode, result)

  def testModeFromBuildConfig(self):
    """Test auto-detection of mode from build config."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--outdir=out/Release', 'sweet/bananas')
      self.assertIn('Running tests for x64.release', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testReport(self):
    """Test the report feature.

    This also exercises various paths in statusfile logic.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--variants=default',
          'sweet',
          '--report',
      )
      self.assertIn(
          '3 tests are expected to fail that we should fix',
          result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testWarnUnusedRules(self):
    """Test the unused-rules feature."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--variants=default,nooptimization',
          'sweet',
          '--warn-unused',
      )
      self.assertIn('Unused rule: carrots', result.stdout, result)
      self.assertIn('Unused rule: regress/', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testCatNoSources(self):
    """Test printing sources, but the suite's tests have none available."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--variants=default',
          'sweet/bananas',
          '--cat',
      )
      self.assertIn('begin source: sweet/bananas', result.stdout, result)
      self.assertIn('(no source available)', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testPredictable(self):
    """Test running a test in verify-predictable mode.

    The test will fail because of missing allocation output. We verify that and
    that the predictable flags are passed and printed after failure.
    """
    with temp_base() as basedir:
      override_build_config(basedir, v8_enable_verify_predictable=True)
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
      )
      self.assertIn('Running 1 tests', result.stdout, result)
      self.assertIn('Done running sweet/bananas: FAIL', result.stdout, result)
      self.assertIn('Test had no allocation output', result.stdout, result)
      self.assertIn('--predictable --verify_predictable', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testSlowArch(self):
    """Test timeout factor manipulation on slow architecture."""
    with temp_base() as basedir:
      override_build_config(basedir, v8_target_cpu='arm64')
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
      )
      # TODO(machenbach): We don't have a way for testing if the correct
      # timeout was used.
      self.assertEqual(0, result.returncode, result)

  def testRandomSeedStressWithDefault(self):
    """Test using random-seed-stress feature has the right number of tests."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          '--random-seed-stress-count=2',
          'sweet/bananas',
      )
      self.assertIn('Running 2 tests', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testRandomSeedStressWithSeed(self):
    """Test using random-seed-stress feature passing a random seed."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default',
          '--random-seed-stress-count=2',
          '--random-seed=123',
          'sweet/strawberries',
      )
      self.assertIn('Running 2 tests', result.stdout, result)
      # We use a failing test so that the command is printed and we can verify
      # that the right random seed was passed.
      self.assertIn('--random-seed=123', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testSpecificVariants(self):
    """Test that NO_VARIANTS and FAST_VARIANTS modifiers in status files skip
    the desired tests.

    The test runner cmd line configures 4 tests to run (2 tests * 2 variants).
    But the status file applies a modifier to each, skipping one of the
    variants.
    """
    with temp_base() as basedir:
      override_build_config(basedir, v8_use_snapshot=False)
      result = run_tests(
          basedir,
          '--mode=Release',
          '--progress=verbose',
          '--variants=default,stress',
          'sweet/bananas',
          'sweet/raspberries',
      )
      # Both tests are either marked as running in only default or only
      # slow variant.
      self.assertIn('Running 2 tests', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testStatusFilePresubmit(self):
    """Test that the fake status file is well-formed."""
    with temp_base() as basedir:
      from testrunner.local import statusfile
      self.assertTrue(statusfile.PresubmitCheck(
          os.path.join(basedir, 'test', 'sweet', 'sweet.status')))


if __name__ == '__main__':
  unittest.main()
tools/unittests/testdata/expected_test_results1.json (new vendored file, 107 lines)
@@ -0,0 +1,107 @@
{
  "arch": "x64",
  "duration_mean": 1,
  "mode": "release",
  "results": [
    {
      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
      "duration": 1,
      "exit_code": 1,
      "expected": [
        "PASS"
      ],
      "flags": [
        "--random-seed=123",
        "strawberries",
        "--nohard-abort"
      ],
      "name": "sweet/strawberries",
      "random_seed": 123,
      "result": "FAIL",
      "run": 1,
      "stderr": "",
      "stdout": "--random-seed=123 strawberries --nohard-abort\n",
      "target_name": "d8_mocked.py",
      "variant": "default"
    },
    {
      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
      "duration": 1,
      "exit_code": 1,
      "expected": [
        "PASS"
      ],
      "flags": [
        "--random-seed=123",
        "strawberries",
        "--nohard-abort"
      ],
      "name": "sweet/strawberries",
      "random_seed": 123,
      "result": "FAIL",
      "run": 2,
      "stderr": "",
      "stdout": "--random-seed=123 strawberries --nohard-abort\n",
      "target_name": "d8_mocked.py",
      "variant": "default"
    },
    {
      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
      "duration": 1,
      "exit_code": 1,
      "expected": [
        "PASS"
      ],
      "flags": [
        "--random-seed=123",
        "strawberries",
        "--nohard-abort"
      ],
      "name": "sweet/strawberries",
      "random_seed": 123,
      "result": "FAIL",
      "run": 3,
      "stderr": "",
      "stdout": "--random-seed=123 strawberries --nohard-abort\n",
      "target_name": "d8_mocked.py",
      "variant": "default"
    }
  ],
  "slowest_tests": [
    {
      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
      "duration": 1,
      "flags": [
        "--random-seed=123",
        "strawberries",
        "--nohard-abort"
      ],
      "marked_slow": true,
      "name": "sweet/strawberries"
    },
    {
      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
      "duration": 1,
      "flags": [
        "--random-seed=123",
        "strawberries",
        "--nohard-abort"
      ],
      "marked_slow": true,
      "name": "sweet/strawberries"
    },
    {
      "command": "/usr/bin/python out/Release/d8_mocked.py --random-seed=123 strawberries --nohard-abort",
      "duration": 1,
      "flags": [
        "--random-seed=123",
        "strawberries",
        "--nohard-abort"
      ],
      "marked_slow": true,
      "name": "sweet/strawberries"
    }
  ],
  "test_total": 3
}
tools/unittests/testdata/testroot1/d8_mocked.py (new vendored file, 16 lines)
@@ -0,0 +1,16 @@
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Dummy d8 replacement. Just passes all tests, except if 'berries' is in args.
"""

import sys

args = ' '.join(sys.argv[1:])
print args
# Let all berries fail.
if 'berries' in args:
  sys.exit(1)
sys.exit(0)
tools/unittests/testdata/testroot1/test/sweet/sweet.status (new vendored file, 35 lines)
@@ -0,0 +1,35 @@
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

[
  [ALWAYS, {
    'raspberries': FAIL,
    'strawberries': [PASS, ['mode == release', SLOW], ['mode == debug', NO_VARIANTS]],

    # Both cherries and apples are to test how PASS and FAIL from different
    # sections are merged.
    'cherries': [PASS, SLOW],
    'apples': [FAIL],

    # Unused rule.
    'carrots': [PASS, FAIL],
  }],

  ['variant == nooptimization', {
    'strawberries': [SKIP],
  }],

  ['arch == x64', {
    'cherries': [FAIL],
    'apples': [PASS, SLOW],

    # Unused rule.
    'regress/*': [CRASH],
  }],

  ['no_snap', {
    'bananas': [PASS, NO_VARIANTS],
    'raspberries': [FAIL, FAST_VARIANTS],
  }],
]
tools/unittests/testdata/testroot1/test/sweet/testcfg.py (new vendored file, 31 lines)
@@ -0,0 +1,31 @@
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Dummy test suite extension with some fruity tests.
"""

from testrunner.local import testsuite
from testrunner.objects import testcase

class TestSuite(testsuite.TestSuite):
  def ListTests(self, context):
    return map(
        self._create_test,
        ['bananas', 'apples', 'cherries', 'strawberries', 'raspberries'],
    )

  def _test_class(self):
    return TestCase


class TestCase(testcase.TestCase):
  def get_shell(self):
    return 'd8_mocked.py'

  def _get_files_params(self, ctx):
    return [self.name]

def GetSuite(name, root):
  return TestSuite(name, root)
tools/unittests/testdata/testroot1/v8_build_config.json (new vendored file, 18 lines)
@@ -0,0 +1,18 @@
{
  "current_cpu": "x64",
  "dcheck_always_on": false,
  "is_asan": false,
  "is_cfi": false,
  "is_component_build": false,
  "is_debug": false,
  "is_gcov_coverage": false,
  "is_ubsan_vptr": false,
  "is_msan": false,
  "is_tsan": false,
  "target_cpu": "x64",
  "v8_current_cpu": "x64",
  "v8_enable_i18n_support": true,
  "v8_enable_verify_predictable": false,
  "v8_target_cpu": "x64",
  "v8_use_snapshot": true
}