#!/usr/bin/env python
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Global system tests for V8 test runners and fuzzers.

This hooks up the framework under tools/testrunner, testing high-level
scenarios with different test suite extensions and build configurations.
"""

# TODO(machenbach): Mock out util.GuessOS to make these tests really platform
# independent.
# TODO(machenbach): Move coverage recording to a global test entry point to
# include other unittest suites in the coverage report.
# TODO(machenbach): Coverage data from multiprocessing doesn't work.
# TODO(majeski): Add some tests for the fuzzers.

# for py2/py3 compatibility
from __future__ import print_function

import collections
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import unittest

try:
  from cStringIO import StringIO  # Python 2
except ImportError:
  from io import StringIO  # Python 3

TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
TEST_DATA_ROOT = os.path.join(TOOLS_ROOT, 'unittests', 'testdata')
RUN_TESTS_PY = os.path.join(TOOLS_ROOT, 'run-tests.py')

Result = collections.namedtuple(
    'Result', ['stdout', 'stderr', 'returncode'])

Result.__str__ = lambda self: (
    '\nReturncode: %s\nStdout:\n%s\nStderr:\n%s\n' %
    (self.returncode, self.stdout, self.stderr))
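
# Giving Result a readable __str__ means that when a result is passed as the
# assertion message in the tests below, e.g.
#   self.assertEqual(0, result.returncode, result)
# a failure prints the full return code, stdout and stderr of the runner.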


@contextlib.contextmanager
def temp_dir():
  """Wrapper making a temporary directory available."""
  path = None
  try:
    path = tempfile.mkdtemp('v8_test_')
    yield path
  finally:
    if path:
      shutil.rmtree(path)


@contextlib.contextmanager
def temp_base(baseroot='testroot1'):
  """Wrapper that sets up a temporary V8 test root.

  Args:
    baseroot: The folder with the test root blueprint. Relevant files will be
        copied to the temporary test root, to guarantee a fresh setup with no
        dirty state.
  """
  basedir = os.path.join(TEST_DATA_ROOT, baseroot)
  with temp_dir() as tempbase:
    builddir = os.path.join(tempbase, 'out', 'build')
    testroot = os.path.join(tempbase, 'test')
    os.makedirs(builddir)
    shutil.copy(os.path.join(basedir, 'v8_build_config.json'), builddir)
    shutil.copy(os.path.join(basedir, 'd8_mocked.py'), builddir)

    for suite in os.listdir(os.path.join(basedir, 'test')):
      os.makedirs(os.path.join(testroot, suite))
      for entry in os.listdir(os.path.join(basedir, 'test', suite)):
        shutil.copy(
            os.path.join(basedir, 'test', suite, entry),
            os.path.join(testroot, suite))
    yield tempbase
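
# temp_base() yields a fresh test root copied from the blueprint under
# TEST_DATA_ROOT/<baseroot>, laid out as:
#   <tempbase>/out/build/v8_build_config.json
#   <tempbase>/out/build/d8_mocked.py
#   <tempbase>/test/<suite>/<files copied from the blueprint suite>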


@contextlib.contextmanager
def capture():
  """Wrapper that replaces system stdout/stderr and provides the streams."""
  oldout = sys.stdout
  olderr = sys.stderr
  try:
    stdout = StringIO()
    stderr = StringIO()
    sys.stdout = stdout
    sys.stderr = stderr
    yield stdout, stderr
  finally:
    sys.stdout = oldout
    sys.stderr = olderr
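
# Example: everything printed inside the block ends up in the two StringIO
# objects instead of on the real console:
#   with capture() as (stdout, stderr):
#     print('hello')
#   assert 'hello' in stdout.getvalue()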


def run_tests(basedir, *args, **kwargs):
  """Executes the test runner with captured output."""
  with capture() as (stdout, stderr):
    sys_args = ['--command-prefix', sys.executable] + list(args)
    if kwargs.get('infra_staging', False):
      sys_args.append('--infra-staging')
    else:
      sys_args.append('--no-infra-staging')
    code = standard_runner.StandardTestRunner(basedir=basedir).execute(sys_args)
    return Result(stdout.getvalue(), stderr.getvalue(), code)
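
# The '--command-prefix sys.executable' argument prefixes each test command
# with the current Python interpreter, so the d8_mocked.py script copied by
# temp_base() can stand in for a real d8 binary. Typical usage in the tests
# below:
#   result = run_tests(basedir, '--progress=verbose', 'sweet/bananas')
#   self.assertEqual(0, result.returncode, result)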


def override_build_config(basedir, **kwargs):
  """Override the build config with new values provided as kwargs."""
  path = os.path.join(basedir, 'out', 'build', 'v8_build_config.json')
  with open(path) as f:
    config = json.load(f)
    config.update(kwargs)
  with open(path, 'w') as f:
    json.dump(config, f)
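
# Example from the tests below, flipping a single flag in the fake build
# config before starting the runner:
#   override_build_config(basedir, v8_enable_verify_predictable=True)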


class SystemTest(unittest.TestCase):
  @classmethod
  def setUpClass(cls):
    # Try to set up python coverage and run without it if not available.
    cls._cov = None
    try:
      import coverage
      if int(coverage.__version__.split('.')[0]) < 4:
        cls._cov = None
        print('Python coverage version >= 4 required.')
        raise ImportError()
      cls._cov = coverage.Coverage(
          source=([os.path.join(TOOLS_ROOT, 'testrunner')]),
          omit=['*unittest*', '*__init__.py'],
      )
      cls._cov.exclude('raise NotImplementedError')
      cls._cov.exclude('if __name__ == .__main__.:')
      cls._cov.exclude('except TestRunnerError:')
      cls._cov.exclude('except KeyboardInterrupt:')
      cls._cov.exclude('if options.verbose:')
      cls._cov.exclude('if verbose:')
      cls._cov.exclude('pass')
      cls._cov.exclude('assert False')
      cls._cov.start()
    except ImportError:
      print('Running without python coverage.')
    sys.path.append(TOOLS_ROOT)
    global standard_runner
    from testrunner import standard_runner
    global num_fuzzer
    from testrunner import num_fuzzer
    from testrunner.local import command
    from testrunner.local import pool
    command.setup_testing()
    pool.setup_testing()

  @classmethod
  def tearDownClass(cls):
    if cls._cov:
      cls._cov.stop()
      print('')
      print(cls._cov.report(show_missing=True))

  def testPass(self):
    """Test running only passing tests in two variants.

    Also test printing durations.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default,stress',
          '--time',
          'sweet/bananas',
          'sweet/raspberries',
      )
      self.assertIn('sweet/bananas default: PASS', result.stdout, result)
      # TODO(majeski): Implement for test processors
      # self.assertIn('Total time:', result.stderr, result)
      # self.assertIn('sweet/bananas', result.stderr, result)
      self.assertEqual(0, result.returncode, result)

  def testPassHeavy(self):
    """Test running with some tests marked heavy."""
    with temp_base(baseroot='testroot3') as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=nooptimization',
          '-j2',
          'sweet',
      )
      self.assertIn('7 tests ran', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testShardedProc(self):
    with temp_base() as basedir:
      for shard in [1, 2]:
        result = run_tests(
            basedir,
            '--progress=verbose',
            '--variants=default,stress',
            '--shard-count=2',
            '--shard-run=%d' % shard,
            'sweet/blackberries',
            'sweet/raspberries',
            infra_staging=False,
        )
        # One of the shards gets one variant of each test.
        self.assertIn('2 tests ran', result.stdout, result)
        if shard == 1:
          self.assertIn('sweet/raspberries default', result.stdout, result)
          self.assertIn('sweet/raspberries stress', result.stdout, result)
          self.assertEqual(0, result.returncode, result)
        else:
          self.assertIn(
              'sweet/blackberries default: FAIL', result.stdout, result)
          self.assertIn(
              'sweet/blackberries stress: FAIL', result.stdout, result)
          self.assertEqual(1, result.returncode, result)

  @unittest.skip("incompatible with test processors")
  def testSharded(self):
    """Test running a particular shard."""
    with temp_base() as basedir:
      for shard in [1, 2]:
        result = run_tests(
            basedir,
            '--progress=verbose',
            '--variants=default,stress',
            '--shard-count=2',
            '--shard-run=%d' % shard,
            'sweet/bananas',
            'sweet/raspberries',
        )
        # One of the shards gets one variant of each test.
        self.assertIn('Running 2 tests', result.stdout, result)
        self.assertIn('sweet/bananas', result.stdout, result)
        self.assertIn('sweet/raspberries', result.stdout, result)
        self.assertEqual(0, result.returncode, result)

  def testFail(self):
    """Test running only failing tests in two variants."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default,stress',
          'sweet/strawberries',
          infra_staging=False,
      )
      self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def check_cleaned_json_output(
      self, expected_results_name, actual_json, basedir):
    # Check relevant properties of the json output.
    with open(actual_json) as f:
      json_output = json.load(f)

    # Replace duration in actual output as it's non-deterministic. Also
    # replace the python executable prefix as it has a different absolute
    # path dependent on where this runs.
    def replace_variable_data(data):
      data['duration'] = 1
      data['command'] = ' '.join(
          ['/usr/bin/python'] + data['command'].split()[1:])
      data['command'] = data['command'].replace(basedir + '/', '')
    for data in json_output['slowest_tests']:
      replace_variable_data(data)
    for data in json_output['results']:
      replace_variable_data(data)
    json_output['duration_mean'] = 1
    # We need lexicographic sorting here to avoid non-deterministic behaviour.
    # The original sorting key is duration, but in our fake test we have
    # non-deterministic durations before we reset them to 1.
    json_output['slowest_tests'].sort(key=lambda x: str(x))

    with open(os.path.join(TEST_DATA_ROOT, expected_results_name)) as f:
      expected_test_results = json.load(f)

    pretty_json = json.dumps(json_output, indent=2, sort_keys=True)
    msg = None  # Set to pretty_json for bootstrapping.
    self.assertDictEqual(json_output, expected_test_results, msg)

  def testFailWithRerunAndJSON(self):
    """Test re-running a failing test and output to json."""
    with temp_base() as basedir:
      json_path = os.path.join(basedir, 'out.json')
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          '--rerun-failures-count=2',
          '--random-seed=123',
          '--json-test-results', json_path,
          'sweet/strawberries',
          infra_staging=False,
      )
      self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
      # With test processors we don't count reruns as separate failures.
      # TODO(majeski): fix it?
      self.assertIn('1 tests failed', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

      # TODO(majeski): Previously we only reported the variant flags in the
      # flags field of the test result.
      # After recent changes we report all flags, including the file names.
      # This is redundant to the command. Needs investigation.
      self.maxDiff = None
      self.check_cleaned_json_output(
          'expected_test_results1.json', json_path, basedir)

  def testFlakeWithRerunAndJSON(self):
    """Test re-running a flaky test and output to json."""
    with temp_base(baseroot='testroot2') as basedir:
      json_path = os.path.join(basedir, 'out.json')
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          '--rerun-failures-count=2',
          '--random-seed=123',
          '--json-test-results', json_path,
          'sweet',
          infra_staging=False,
      )
      self.assertIn('sweet/bananaflakes default: FAIL PASS', result.stdout, result)
      self.assertIn('=== sweet/bananaflakes (flaky) ===', result.stdout, result)
      self.assertIn('1 tests failed', result.stdout, result)
      self.assertIn('1 tests were flaky', result.stdout, result)
      self.assertEqual(0, result.returncode, result)
      self.maxDiff = None
      self.check_cleaned_json_output(
          'expected_test_results2.json', json_path, basedir)

  def testAutoDetect(self):
    """Fake a build with several auto-detected options.

    Using all those options at once doesn't really make much sense. This is
    merely for getting coverage.
    """
    with temp_base() as basedir:
      override_build_config(
          basedir, dcheck_always_on=True, is_asan=True, is_cfi=True,
          is_msan=True, is_tsan=True, is_ubsan_vptr=True, target_cpu='x86',
          v8_enable_i18n_support=False, v8_target_cpu='x86',
          v8_enable_verify_csa=False, v8_enable_lite_mode=False,
          v8_enable_pointer_compression=False,
          v8_enable_pointer_compression_shared_cage=False)
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
      )
      expect_text = (
          '>>> Autodetected:\n'
          'asan\n'
          'cfi_vptr\n'
          'dcheck_always_on\n'
          'msan\n'
          'no_i18n\n'
          'tsan\n'
          'ubsan_vptr\n'
          'webassembly\n'
          '>>> Running tests for ia32.release')
      self.assertIn(expect_text, result.stdout, result)
      self.assertEqual(0, result.returncode, result)
      # TODO(machenbach): Test some more implications of the auto-detected
      # options, e.g. that the right env variables are set.

  def testSkips(self):
    """Test skipping tests in status file for a specific variant."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=nooptimization',
          'sweet/strawberries',
          infra_staging=False,
      )
      self.assertIn('0 tests ran', result.stdout, result)
      self.assertEqual(2, result.returncode, result)

  def testRunSkips(self):
    """Inverse of the above. Test the parameter to keep running skipped tests."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=nooptimization',
          '--run-skipped',
          'sweet/strawberries',
      )
      self.assertIn('1 tests failed', result.stdout, result)
      self.assertIn('1 tests ran', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testDefault(self):
    """Test using default test suites, though no tests are run since they don't
    exist in a test setting.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          infra_staging=False,
      )
      self.assertIn('0 tests ran', result.stdout, result)
      self.assertEqual(2, result.returncode, result)

  def testNoBuildConfig(self):
    """Test failing run when build config is not found."""
    with temp_dir() as basedir:
      result = run_tests(basedir)
      self.assertIn('Failed to load build config', result.stdout, result)
      self.assertEqual(5, result.returncode, result)

  def testInconsistentArch(self):
    """Test failing run when attempting to wrongly override the arch."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--arch=ia32')
      self.assertIn(
          '--arch value (ia32) inconsistent with build config (x64).',
          result.stdout, result)
      self.assertEqual(5, result.returncode, result)

  def testWrongVariant(self):
    """Test using a bogus variant."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--variants=meh')
      self.assertEqual(5, result.returncode, result)

  def testModeFromBuildConfig(self):
    """Test auto-detection of mode from build config."""
    with temp_base() as basedir:
      result = run_tests(basedir, '--outdir=out/build', 'sweet/bananas')
      self.assertIn('Running tests for x64.release', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  @unittest.skip("not available with test processors")
  def testReport(self):
    """Test the report feature.

    This also exercises various paths in statusfile logic.
    """
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--variants=default',
          'sweet',
          '--report',
      )
      self.assertIn(
          '3 tests are expected to fail that we should fix',
          result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  @unittest.skip("not available with test processors")
  def testWarnUnusedRules(self):
    """Test the unused-rules feature."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--variants=default,nooptimization',
          'sweet',
          '--warn-unused',
      )
      self.assertIn('Unused rule: carrots', result.stdout, result)
      self.assertIn('Unused rule: regress/', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  @unittest.skip("not available with test processors")
  def testCatNoSources(self):
    """Test printing sources, but the suite's tests have none available."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--variants=default',
          'sweet/bananas',
          '--cat',
      )
      self.assertIn('begin source: sweet/bananas', result.stdout, result)
      self.assertIn('(no source available)', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testPredictable(self):
    """Test running a test in verify-predictable mode.

    The test will fail because of missing allocation output. We verify that and
    that the predictable flags are passed and printed after failure.
    """
    with temp_base() as basedir:
      override_build_config(basedir, v8_enable_verify_predictable=True)
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
          infra_staging=False,
      )
      self.assertIn('1 tests ran', result.stdout, result)
      self.assertIn('sweet/bananas default: FAIL', result.stdout, result)
      self.assertIn('Test had no allocation output', result.stdout, result)
      self.assertIn('--predictable --verify-predictable', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testSlowArch(self):
    """Test timeout factor manipulation on slow architecture."""
    with temp_base() as basedir:
      override_build_config(basedir, v8_target_cpu='arm64')
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          'sweet/bananas',
      )
      # TODO(machenbach): We don't have a way for testing if the correct
      # timeout was used.
      self.assertEqual(0, result.returncode, result)

  def testRandomSeedStressWithDefault(self):
    """Test that the random-seed-stress feature runs the right number of tests."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          '--random-seed-stress-count=2',
          'sweet/bananas',
          infra_staging=False,
      )
      self.assertIn('2 tests ran', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testRandomSeedStressWithSeed(self):
    """Test using random-seed-stress feature passing a random seed."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          '--random-seed-stress-count=2',
          '--random-seed=123',
          'sweet/strawberries',
      )
      self.assertIn('2 tests ran', result.stdout, result)
      # We use a failing test so that the command is printed and we can verify
      # that the right random seed was passed.
      self.assertIn('--random-seed=123', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testSpecificVariants(self):
    """Test that NO_VARIANTS modifiers in status files skip the desired tests.

    The test runner cmd line configures 4 tests to run (2 tests * 2 variants).
    But the status file applies a modifier to each, skipping one of the
    variants.
    """
    with temp_base() as basedir:
      override_build_config(basedir, is_asan=True)
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default,stress',
          'sweet/bananas',
          'sweet/raspberries',
      )
      # Both tests are either marked as running in only default or only
      # slow variant.
      self.assertIn('2 tests ran', result.stdout, result)
      self.assertEqual(0, result.returncode, result)

  def testStatusFilePresubmit(self):
    """Test that the fake status file is well-formed."""
    with temp_base() as basedir:
      from testrunner.local import statusfile
      self.assertTrue(statusfile.PresubmitCheck(
          os.path.join(basedir, 'test', 'sweet', 'sweet.status')))

  def testDotsProgress(self):
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=dots',
          'sweet/cherries',
          'sweet/bananas',
          '--no-sorting', '-j1', # make results order deterministic
          infra_staging=False,
      )
      self.assertIn('2 tests ran', result.stdout, result)
      self.assertIn('F.', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testMonoProgress(self):
    self._testCompactProgress('mono')

  def testColorProgress(self):
    self._testCompactProgress('color')

  def _testCompactProgress(self, name):
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=%s' % name,
          'sweet/cherries',
          'sweet/bananas',
          infra_staging=False,
      )
      if name == 'color':
        expected = ('\033[34m% 28\033[0m|'
                    '\033[32m+ 1\033[0m|'
                    '\033[31m- 1\033[0m]: Done')
      else:
        expected = '% 28|+ 1|- 1]: Done'
      self.assertIn(expected, result.stdout)
      self.assertIn('sweet/cherries', result.stdout)
      self.assertIn('sweet/bananas', result.stdout)
      self.assertEqual(1, result.returncode, result)

  def testExitAfterNFailures(self):
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--exit-after-n-failures=2',
          '-j1',
          'sweet/mangoes',       # PASS
          'sweet/strawberries',  # FAIL
          'sweet/blackberries',  # FAIL
          'sweet/raspberries',   # should not run
      )
      self.assertIn('sweet/mangoes default: PASS', result.stdout, result)
      self.assertIn('sweet/strawberries default: FAIL', result.stdout, result)
      self.assertIn('Too many failures, exiting...', result.stdout, result)
      self.assertIn('sweet/blackberries default: FAIL', result.stdout, result)
      self.assertNotIn('sweet/raspberries', result.stdout, result)
      self.assertIn('2 tests failed', result.stdout, result)
      self.assertIn('3 tests ran', result.stdout, result)
      self.assertEqual(1, result.returncode, result)

  def testNumFuzzer(self):
    sys_args = ['--command-prefix', sys.executable, '--outdir', 'out/build']

    with temp_base() as basedir:
      with capture() as (stdout, stderr):
        code = num_fuzzer.NumFuzzer(basedir=basedir).execute(sys_args)
        result = Result(stdout.getvalue(), stderr.getvalue(), code)

      self.assertEqual(0, result.returncode, result)

  def testRunnerFlags(self):
    """Test that runner-specific flags are passed to tests."""
    with temp_base() as basedir:
      result = run_tests(
          basedir,
          '--progress=verbose',
          '--variants=default',
          '--random-seed=42',
          'sweet/bananas',
          '-v',
      )

      self.assertIn(
          '--test bananas --random-seed=42 --nohard-abort --testing-d8-test-runner',
          result.stdout, result)
      self.assertEqual(0, result.returncode, result)
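
# The unittest.main() entry point below supports the standard unittest CLI,
# e.g. selecting a single case (the file name here is illustrative):
#   python run_tests_test.py SystemTest.testPass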


if __name__ == '__main__':
  unittest.main()