[test] Move options to test config

Bug: v8:6917
Cq-Include-Trybots: luci.v8.try:v8_linux_noi18n_rel_ng
Change-Id: I254d2e545709029346f585b02a9edf91d3f27893
Reviewed-on: https://chromium-review.googlesource.com/893321
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michał Majewski <majeski@google.com>
Cr-Commit-Position: refs/heads/master@{#50996}
This commit is contained in:
parent 0cfbaff422
commit d3083b46ee
@@ -109,7 +109,7 @@ class TestSuite(testsuite.TestSuite):


 class TestCase(testcase.TestCase):
-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     path = self.path
     testroot = self.suite.testroot
     files = []

@@ -38,13 +38,13 @@ SHELL = 'cctest'

 class TestSuite(testsuite.TestSuite):
   def ListTests(self, context):
-    shell = os.path.abspath(os.path.join(context.shell_dir, SHELL))
+    shell = os.path.abspath(os.path.join(self.test_config.shell_dir, SHELL))
     if utils.IsWindows():
       shell += ".exe"
     cmd = command.Command(
-        cmd_prefix=context.command_prefix,
+        cmd_prefix=self.test_config.command_prefix,
         shell=shell,
-        args=["--list"] + context.extra_flags)
+        args=["--list"] + self.test_config.extra_flags)
     output = cmd.execute()
     if output.exit_code != 0:
       print cmd
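
The hunk above shows the pattern applied to each suite's ListTests: the shell path, command prefix and extra flags are now read from the suite's own test config rather than from the context argument. A minimal, hedged sketch of that pattern outside the real class hierarchy; the class name MiniSuite and the shell name are placeholders, and the config object only needs the three attributes read here:

    import os

    class MiniSuite(object):
      def __init__(self, test_config):
        # The runner hands the suite a test config; the context object is
        # no longer consulted for these settings.
        self.test_config = test_config

      def list_tests_command(self):
        shell = os.path.abspath(
            os.path.join(self.test_config.shell_dir, 'cctest'))
        # command_prefix and extra_flags are lists, as produced by
        # shlex.split in the base runner.
        return (list(self.test_config.command_prefix) +
                [shell, '--list'] +
                list(self.test_config.extra_flags))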
@@ -63,7 +63,7 @@ class TestCase(testcase.TestCase):
   def get_shell(self):
     return SHELL

-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return [self.path]


@@ -62,16 +62,16 @@ class TestCase(testcase.TestCase):
     files.append(os.path.join(self.suite.root, self.path + self._get_suffix()))
     return files

-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     files = self._source_files
-    if ctx.isolates:
+    if self._test_config.isolates:
       files = files + ['--isolate'] + files
     return files

   def _get_source_flags(self):
     return self._source_flags

-  def _get_suite_flags(self, ctx):
+  def _get_suite_flags(self):
     return ['--enable-inspector', '--allow-natives-syntax']

   def _get_source_path(self):

@@ -42,7 +42,7 @@ class TestSuite(testsuite.TestSuite):


 class TestCase(testcase.TestCase):
-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     suite, name = self.path.split('/')
     return [os.path.join(self.suite.root, suite, name)]

@@ -52,7 +52,7 @@ class TestCase(testcase.TestCase):
   def _get_statusfile_flags(self):
     return []

-  def _get_mode_flags(self, ctx):
+  def _get_mode_flags(self):
     return []

   def get_shell(self):

@@ -43,7 +43,7 @@ class TestCase(testcase.TestCase):

     self._source_flags = self._parse_source_flags()

-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return [
       os.path.join(self.suite.root, PROTOCOL_TEST_JS),
       os.path.join(self.suite.root, self.path + self._get_suffix()),

@@ -59,7 +59,7 @@ class TestCase(testcase.TestCase):

     self._source_flags = self._parse_source_flags()

-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     files = map(lambda f: os.path.join(self.suite.root, f), [
       'assert.js',
       'utils.js',

@@ -68,14 +68,14 @@ class TestCase(testcase.TestCase):
       'regexp-assert.js',
     ])

-    if ctx.isolates:
+    if self._test_config.isolates:
       files += ['--isolate'] + files
     return files

   def _get_source_flags(self):
     return self._source_flags

-  def _get_suite_flags(self, ctx):
+  def _get_suite_flags(self):
     return ['--allow-natives-syntax']

   def _get_source_path(self):

@@ -83,11 +83,11 @@ class TestCase(testcase.TestCase):
       path = head
     return False

-  def _get_cmd_params(self, ctx):
-    params = super(TestCase, self)._get_cmd_params(ctx)
+  def _get_cmd_params(self):
+    params = super(TestCase, self)._get_cmd_params()
     return [p for p in params if p not in INVALID_FLAGS]

-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return self._source_files

   def _get_source_flags(self):

@@ -121,12 +121,12 @@ class TestCase(testcase.TestCase):
   def _get_source_flags(self):
     return self._source_flags

-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     files = list(self._source_files)
-    if not ctx.no_harness:
+    if not self._test_config.no_harness:
       files += self._mjsunit_files
     files += self._files_suffix
-    if ctx.isolates:
+    if self._test_config.isolates:
       files += ['--isolate'] + files

     return files
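
On the test-case side the same ctx argument disappears from _get_files_params and friends; the values come from the _test_config stored on the test object. A small self-contained sketch of that shape, with made-up file lists (only the attribute names mirror the diff):

    class ExampleTest(object):
      def __init__(self, test_config, source_files, mjsunit_files, files_suffix):
        self._test_config = test_config
        self._source_files = source_files
        self._mjsunit_files = mjsunit_files
        self._files_suffix = files_suffix

      def _get_files_params(self):
        files = list(self._source_files)
        if not self._test_config.no_harness:
          # The harness files are skipped when --no-harness is given.
          files += self._mjsunit_files
        files += self._files_suffix
        if self._test_config.isolates:
          # Isolate runs repeat the whole file list after '--isolate'.
          files += ['--isolate'] + files
        return files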
@@ -187,7 +187,7 @@ class CombinedTest(testcase.TestCase):
     self._statusfile_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
     self.expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT

-  def _get_shell_with_flags(self, ctx):
+  def _get_shell_with_flags(self):
     """In addition to standard set of shell flags it appends:
       --disable-abortjs: %AbortJS can abort the test even inside
         trycatch-wrapper, so we disable it.

@@ -198,9 +198,9 @@ class CombinedTest(testcase.TestCase):
     shell_flags = ['--test', '--disable-abortjs', '--quiet-load']
     return shell, shell_flags

-  def _get_cmd_params(self, ctx):
+  def _get_cmd_params(self):
     return (
-        super(CombinedTest, self)._get_cmd_params(ctx) +
+        super(CombinedTest, self)._get_cmd_params() +
         self._tests[0]._mjsunit_files +
         ['tools/testrunner/trycatch_loader.js', '--'] +
         [t._files_suffix[0] for t in self._tests]

@@ -33,7 +33,7 @@ class TestCase(testcase.TestCase):
   def _get_statusfile_flags(self):
     return []

-  def _get_mode_flags(self, ctx):
+  def _get_mode_flags(self):
     return []

   def get_shell(self):

@@ -86,7 +86,7 @@ class TestSuite(testsuite.TestSuite):


 class TestCase(testcase.TestCase):
-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     files = [os.path.join(self.suite.root, "mozilla-shell-emulation.js")]
     testfilename = self.path + ".js"
     testfilepath = testfilename.split("/")

@@ -100,7 +100,7 @@ class TestCase(testcase.TestCase):
     files.append(os.path.join(self.suite.testroot, testfilename))
     return files

-  def _get_suite_flags(self, ctx):
+  def _get_suite_flags(self):
     return ['--expose-gc']

   def _get_source_path(self):

@@ -91,19 +91,19 @@ class TestCase(testcase.TestCase):
     self._source = source
     self._template_flags = template_flags

-  def _get_cmd_params(self, ctx):
+  def _get_cmd_params(self):
     return (
-        self._get_files_params(ctx) +
-        self._get_extra_flags(ctx) +
+        self._get_files_params() +
+        self._get_extra_flags() +
         ['-e', self._source] +
         self._template_flags +
         self._get_variant_flags() +
         self._get_statusfile_flags() +
-        self._get_mode_flags(ctx) +
+        self._get_mode_flags() +
         self._get_source_flags()
     )

-  def _get_mode_flags(self, ctx):
+  def _get_mode_flags(self):
     return []

   def is_source_available(self):

@@ -164,7 +164,7 @@ class TestSuite(testsuite.TestSuite):
                                    os.walk(self.localtestroot)):
       for dotted in [x for x in dirs if x.startswith(".")]:
         dirs.remove(dotted)
-      if context.noi18n and "intl402" in dirs:
+      if self.test_config.noi18n and "intl402" in dirs:
         dirs.remove("intl402")
       dirs.sort()
       files.sort()

@@ -203,7 +203,7 @@ class TestCase(testcase.TestCase):
         .get('type', None)
     )

-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return (
         list(self.suite.harness) +
         ([os.path.join(self.suite.root, "harness-agent.js")]

@@ -213,7 +213,7 @@ class TestCase(testcase.TestCase):
         [self._get_source_path()]
     )

-  def _get_suite_flags(self, ctx):
+  def _get_suite_flags(self):
     return (
         (["--throws"] if "negative" in self.test_record else []) +
         (["--allow-natives-syntax"]

@@ -17,16 +17,16 @@ class VariantsGenerator(testsuite.VariantsGenerator):

 class TestSuite(testsuite.TestSuite):
   def ListTests(self, context):
-    shell = os.path.abspath(os.path.join(context.shell_dir, self.name))
+    shell = os.path.abspath(os.path.join(self.test_config.shell_dir, self.name))
     if utils.IsWindows():
       shell += ".exe"

     output = None
     for i in xrange(3): # Try 3 times in case of errors.
       cmd = command.Command(
-          cmd_prefix=context.command_prefix,
+          cmd_prefix=self.test_config.command_prefix,
           shell=shell,
-          args=['--gtest_list_tests'] + context.extra_flags)
+          args=['--gtest_list_tests'] + self.test_config.extra_flags)
       output = cmd.execute()
       if output.exit_code == 0:
         break

@@ -63,7 +63,7 @@ class TestSuite(testsuite.TestSuite):


 class TestCase(testcase.TestCase):
-  def _get_suite_flags(self, ctx):
+  def _get_suite_flags(self):
     return (
         ["--gtest_filter=" + self.path] +
         ["--gtest_random_seed=%s" % self.random_seed] +

@@ -27,7 +27,7 @@ class TestSuite(testsuite.TestSuite):


 class TestCase(testcase.TestCase):
-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return [os.path.join(self.suite.root, self.path + self._get_suffix())]


@@ -90,9 +90,9 @@ class TestCase(testcase.TestCase):
       files.append(os.path.join(self.suite.root, "resources/standalone-post.js"))
     return files

-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     files = self._source_files
-    if ctx.isolates:
+    if self._test_config.isolates:
       files = files + ['--isolate'] + files
     return files

@@ -7,6 +7,7 @@ from collections import OrderedDict
 import json
 import optparse
 import os
+import shlex
 import sys


@@ -92,6 +93,16 @@ TEST_MAP = {
   ],
 }

+# Double the timeout for these:
+SLOW_ARCHS = ["arm",
+              "mips",
+              "mipsel",
+              "mips64",
+              "mips64el",
+              "s390",
+              "s390x",
+              "arm64"]
+

 class ModeConfig(object):
   def __init__(self, flags, timeout_scalefactor, status_mode, execution_mode):

@@ -253,8 +264,6 @@ class BaseTestRunner(object):
                             " and buildbot builds): %s" % MODES.keys())
     parser.add_option("--shell-dir", help="DEPRECATED! Executables from build "
                       "directory will be used")
-    parser.add_option("-v", "--verbose", help="Verbose output",
-                      default=False, action="store_true")
     parser.add_option("--shard-count",
                       help="Split tests into this number of shards",
                       default=1, type="int")

@@ -264,6 +273,20 @@ class BaseTestRunner(object):
     parser.add_option("--total-timeout-sec", default=0, type="int",
                       help="How long should fuzzer run")

+    parser.add_option("--command-prefix", default="",
+                      help="Prepended to each shell command used to run a test")
+    parser.add_option("--extra-flags", action="append", default=[],
+                      help="Additional flags to pass to each test command")
+    parser.add_option("--isolates", action="store_true", default=False,
+                      help="Whether to test isolates")
+    parser.add_option("--no-harness", "--noharness",
+                      default=False, action="store_true",
+                      help="Run without test harness of a given suite")
+    parser.add_option("-t", "--timeout", default=60, type=int,
+                      help="Timeout for single test in seconds")
+    parser.add_option("-v", "--verbose", default=False, action="store_true",
+                      help="Verbose output")
+
     # TODO(machenbach): Temporary options for rolling out new test runner
     # features.
     parser.add_option("--mastername", default='',

@@ -395,6 +418,9 @@ class BaseTestRunner(object):
       print('Warning: --shell-dir is deprecated. Searching for executables in '
             'build directory (%s) instead.' % self.outdir)

+    options.command_prefix = shlex.split(options.command_prefix)
+    options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
+
   def _buildbot_to_v8_mode(self, config):
     """Convert buildbot build configs to configs understood by the v8 runner.

@@ -515,7 +541,32 @@ class BaseTestRunner(object):
     return map(load_suite, names)

   def _create_test_config(self, options):
-    return TestConfig(options.random_seed)
+    timeout = options.timeout * self._timeout_scalefactor(options)
+    return TestConfig(
+        command_prefix=options.command_prefix,
+        extra_flags=options.extra_flags,
+        isolates=options.isolates,
+        mode_flags=self.mode_options.flags,
+        no_harness=options.no_harness,
+        noi18n=self.build_config.no_i18n,
+        random_seed=options.random_seed,
+        shell_dir=self.outdir,
+        timeout=timeout,
+        verbose=options.verbose,
+    )
+
+  def _timeout_scalefactor(self, options):
+    factor = self.mode_options.timeout_scalefactor
+
+    # Simulators are slow, therefore allow a longer timeout.
+    if self.build_config.arch in SLOW_ARCHS:
+      factor *= 2
+
+    # Predictable mode is slower.
+    if self.build_config.predictable:
+      factor *= 2
+
+    return factor
+
   # TODO(majeski): remove options & args parameters
   def _do_execute(self, suites, args, options):
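
_create_test_config above is where the shared command-line options end up in the TestConfig, and _timeout_scalefactor is now the single place the per-test timeout is scaled. A hedged, standalone restatement of that arithmetic (the values are just an example, not runner defaults):

    def example_timeout(base_timeout, mode_factor, slow_arch, predictable):
      # Mirrors _timeout_scalefactor: start from the mode's factor, then
      # double once for simulated (slow) architectures and once more for
      # predictable mode.
      factor = mode_factor
      if slow_arch:
        factor *= 2
      if predictable:
        factor *= 2
      return base_timeout * factor

    # e.g. a 60s --timeout on a simulator build whose mode factor is 1:
    print(example_timeout(60, 1, True, False))  # 120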
@@ -7,7 +7,6 @@

 import multiprocessing
 import random
-import shlex
 import sys

 # Adds testrunner to the path hence it has to be imported at the beggining.

@@ -29,11 +28,6 @@ from testrunner.utils import random_utils


 DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
-TIMEOUT_DEFAULT = 60
-
-# Double the timeout for these:
-SLOW_ARCHS = ["arm",
-              "mipsel"]


 class NumFuzzer(base_runner.BaseTestRunner):

@@ -41,15 +35,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
     super(NumFuzzer, self).__init__(*args, **kwargs)

   def _add_parser_options(self, parser):
-    parser.add_option("--command-prefix",
-                      help="Prepended to each shell command used to run a test",
-                      default="")
     parser.add_option("--dump-results-file", help="Dump maximum limit reached")
-    parser.add_option("--extra-flags",
-                      help="Additional flags to pass to each test command",
-                      default="")
-    parser.add_option("--isolates", help="Whether to test isolates",
-                      default=False, action="store_true")
     parser.add_option("-j", help="The number of parallel tasks to run",
                       default=0, type="int")
     parser.add_option("--json-test-results",

@@ -59,8 +45,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
                             " (verbose, dots, color, mono)"),
                       choices=progress.PROGRESS_INDICATORS.keys(),
                       default="mono")
-    parser.add_option("-t", "--timeout", help="Timeout in seconds",
-                      default= -1, type="int")
     parser.add_option("--random-seed", default=0, type=int,
                       help="Default seed for initializing random generator")
     parser.add_option("--fuzzer-random-seed", default=0,

@@ -123,8 +107,6 @@ class NumFuzzer(base_runner.BaseTestRunner):

   def _process_options(self, options):
-    options.command_prefix = shlex.split(options.command_prefix)
-    options.extra_flags = shlex.split(options.extra_flags)
     if options.j == 0:
       options.j = multiprocessing.cpu_count()
     if not options.fuzzer_random_seed:

@@ -144,6 +126,14 @@ class NumFuzzer(base_runner.BaseTestRunner):
   def _get_default_suite_names(self):
     return DEFAULT_SUITES

+  def _timeout_scalefactor(self, options):
+    factor = super(NumFuzzer, self)._timeout_scalefactor(options)
+    if options.stress_interrupt_budget:
+      # TODO(machenbach): This should be moved to a more generic config.
+      # Fuzzers have too much timeout in debug mode.
+      factor = max(int(factor * 0.25), 1)
+    return factor
+
   def _do_execute(self, suites, args, options):
     print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                            self.mode_name))
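
The fuzzer keeps its own tweak on top of the shared scale factor: with --stress-interrupt-budget the inherited factor is cut to a quarter, clamped at 1. Restated in isolation with purely illustrative inputs:

    def fuzzer_factor(inherited_factor, stress_interrupt_budget):
      factor = inherited_factor
      if stress_interrupt_budget:
        # Fuzzers have too much timeout in debug mode.
        factor = max(int(factor * 0.25), 1)
      return factor

    print(fuzzer_factor(2, True))  # 1
    print(fuzzer_factor(8, True))  # 2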
@@ -206,24 +196,12 @@ class NumFuzzer(base_runner.BaseTestRunner):

   def _create_context(self, options):
     # Populate context object.
-    timeout = options.timeout
-    if timeout == -1:
-      # Simulators are slow, therefore allow a longer default timeout.
-      if self.build_config.arch in SLOW_ARCHS:
-        timeout = 2 * TIMEOUT_DEFAULT;
-      else:
-        timeout = TIMEOUT_DEFAULT;
-
-    timeout *= self.mode_options.timeout_scalefactor
-    if options.stress_interrupt_budget:
-      # TODO(machenbach): This should be moved to a more generic config.
-      # Fuzzers have too much timeout in debug mode.
-      timeout = int(timeout * 0.5)
     ctx = context.Context(self.build_config.arch,
                           self.mode_options.execution_mode,
                           self.outdir,
                           self.mode_options.flags, options.verbose,
-                          timeout, options.isolates,
+                          options.timeout * self._timeout_scalefactor(options),
+                          options.isolates,
                           options.command_prefix,
                           options.extra_flags,
                           False, # Keep i18n on by default.

@@ -60,7 +60,6 @@ class TestCase(object):
     self.keep_output = False # Can output of this test be dropped

     # Test config contains information needed to build the command.
-    # TODO(majeski): right now it contains only random seed.
     self._test_config = test_config
     # Overrides default random seed from test_config if specified.
     self._random_seed = None

@@ -175,14 +174,14 @@ class TestCase(object):
   def only_standard_variant(self):
     return statusfile.NO_VARIANTS in self._statusfile_outcomes

-  def get_command(self, context):
-    params = self._get_cmd_params(context)
+  def get_command(self):
+    params = self._get_cmd_params()
     env = self._get_cmd_env()
-    shell, shell_flags = self._get_shell_with_flags(context)
-    timeout = self._get_timeout(params, context.timeout)
-    return self._create_cmd(shell, shell_flags + params, env, timeout, context)
+    shell, shell_flags = self._get_shell_with_flags()
+    timeout = self._get_timeout(params)
+    return self._create_cmd(shell, shell_flags + params, env, timeout)

-  def _get_cmd_params(self, ctx):
+  def _get_cmd_params(self):
     """Gets command parameters and combines them in the following order:
       - files [empty by default]
       - random seed

@@ -196,20 +195,20 @@ class TestCase(object):
     methods for getting partial parameters.
     """
     return (
-        self._get_files_params(ctx) +
+        self._get_files_params() +
         self._get_random_seed_flags() +
-        self._get_extra_flags(ctx) +
+        self._get_extra_flags() +
         self._get_variant_flags() +
         self._get_statusfile_flags() +
-        self._get_mode_flags(ctx) +
+        self._get_mode_flags() +
         self._get_source_flags() +
-        self._get_suite_flags(ctx)
+        self._get_suite_flags()
     )

   def _get_cmd_env(self):
     return {}

-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return []

   def _get_random_seed_flags(self):
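
The docstring above fixes the order in which the parameter groups are concatenated; only the source of the extra, mode and suite flags changes (test config instead of ctx). A sketch of that composition with placeholder flag values (none of the concrete flags below are taken from the commit):

    def build_params(files, random_seed_flags, extra_flags, variant_flags,
                     statusfile_flags, mode_flags, source_flags, suite_flags):
      # Same concatenation order as TestCase._get_cmd_params.
      return (files + random_seed_flags + extra_flags + variant_flags +
              statusfile_flags + mode_flags + source_flags + suite_flags)

    params = build_params(
        files=['test/mjsunit/example.js'],       # _get_files_params()
        random_seed_flags=['--random-seed=42'],
        extra_flags=['--example-extra-flag'],    # test_config.extra_flags
        variant_flags=['--stress-opt'],
        statusfile_flags=[],
        mode_flags=['--example-mode-flag'],      # test_config.mode_flags
        source_flags=[],
        suite_flags=[])
    print(params)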
@@ -219,8 +218,8 @@ class TestCase(object):
   def random_seed(self):
     return self._random_seed or self._test_config.random_seed

-  def _get_extra_flags(self, ctx):
-    return ctx.extra_flags
+  def _get_extra_flags(self):
+    return self._test_config.extra_flags

   def _get_variant_flags(self):
     return self.variant_flags

@@ -232,16 +231,16 @@ class TestCase(object):
     """
     return self._statusfile_flags

-  def _get_mode_flags(self, ctx):
-    return ctx.mode_flags
+  def _get_mode_flags(self):
+    return self._test_config.mode_flags

   def _get_source_flags(self):
     return []

-  def _get_suite_flags(self, ctx):
+  def _get_suite_flags(self):
     return []

-  def _get_shell_with_flags(self, ctx):
+  def _get_shell_with_flags(self):
     shell = self.get_shell()
     shell_flags = []
     if shell == 'd8':

@@ -250,7 +249,8 @@ class TestCase(object):
       shell += '.exe'
     return shell, shell_flags

-  def _get_timeout(self, params, timeout):
+  def _get_timeout(self, params):
+    timeout = self._test_config.timeout
     if "--stress-opt" in params:
       timeout *= 4
     if "--noenable-vfp3" in params:
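
_get_timeout now starts from the timeout stored in the test config and scales it up for particular flags. Only the --stress-opt multiplier is visible in this hunk, so the hedged sketch below reproduces just that one (other flag-specific multipliers are cut off above):

    def example_get_timeout(config_timeout, params):
      timeout = config_timeout
      if "--stress-opt" in params:
        timeout *= 4
      return timeout

    print(example_get_timeout(60, ["--stress-opt"]))  # 240
    print(example_get_timeout(60, []))                # 60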
@@ -266,14 +266,14 @@ class TestCase(object):
   def _get_suffix(self):
     return '.js'

-  def _create_cmd(self, shell, params, env, timeout, ctx):
+  def _create_cmd(self, shell, params, env, timeout):
     return command.Command(
-      cmd_prefix=ctx.command_prefix,
-      shell=os.path.abspath(os.path.join(ctx.shell_dir, shell)),
+      cmd_prefix=self._test_config.command_prefix,
+      shell=os.path.abspath(os.path.join(self._test_config.shell_dir, shell)),
       args=params,
       env=env,
       timeout=timeout,
-      verbose=ctx.verbose
+      verbose=self._test_config.verbose
     )

   def _parse_source_flags(self, source=None):

@@ -11,7 +11,6 @@ import multiprocessing
 import os
 import random
 import re
-import shlex
 import subprocess
 import sys
 import time

@@ -39,8 +38,6 @@ from testrunner.testproc.variant import VariantProc
 from testrunner.utils import random_utils


-TIMEOUT_DEFAULT = 60
-
 VARIANTS = ["default"]

 MORE_VARIANTS = [

@@ -70,15 +67,6 @@ GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
 RANDOM_GC_STRESS_FLAGS = ["--random-gc-interval=5000",
                           "--stress-compaction-random"]

-# Double the timeout for these:
-SLOW_ARCHS = ["arm",
-              "mips",
-              "mipsel",
-              "mips64",
-              "mips64el",
-              "s390",
-              "s390x",
-              "arm64"]

 PREDICTABLE_WRAPPER = os.path.join(
     base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')

@@ -139,13 +127,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
     parser.add_option("--random-gc-stress",
                       help="Switch on random GC stress mode",
                       default=False, action="store_true")
-    parser.add_option("--command-prefix",
-                      help="Prepended to each shell command used to run a"
-                           " test",
-                      default="")
-    parser.add_option("--extra-flags",
-                      help="Additional flags to pass to each test command",
-                      action="append", default=[])
     parser.add_option("--infra-staging", help="Use new test runner features",
                       dest='infra_staging', default=None,
                       action="store_true")

@@ -153,13 +134,8 @@ class StandardTestRunner(base_runner.BaseTestRunner):
                       help="Opt out of new test runner features",
                       dest='infra_staging', default=None,
                       action="store_false")
-    parser.add_option("--isolates", help="Whether to test isolates",
-                      default=False, action="store_true")
     parser.add_option("-j", help="The number of parallel tasks to run",
                       default=0, type="int")
-    parser.add_option("--no-harness", "--noharness",
-                      help="Run without test harness of a given suite",
-                      default=False, action="store_true")
     parser.add_option("--no-presubmit", "--nopresubmit",
                       help='Skip presubmit checks (deprecated)',
                       default=False, dest="no_presubmit", action="store_true")

@@ -209,8 +185,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
                       default=False, action="store_true")
     parser.add_option("--time", help="Print timing information after running",
                       default=False, action="store_true")
-    parser.add_option("-t", "--timeout", help="Timeout in seconds",
-                      default=TIMEOUT_DEFAULT, type="int")
     parser.add_option("--warn-unused", help="Report unused rules",
                       default=False, action="store_true")
     parser.add_option("--junitout", help="File name of the JUnit output")

@@ -251,9 +225,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
         print("sancov-dir %s doesn't exist" % self.sancov_dir)
         raise base_runner.TestRunnerError()

-    options.command_prefix = shlex.split(options.command_prefix)
-    options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
-
     if options.gc_stress:
       options.extra_flags += GC_STRESS_FLAGS

@@ -361,23 +332,13 @@ class StandardTestRunner(base_runner.BaseTestRunner):
     print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                            self.mode_name))
     # Populate context object.
-
-    # Simulators are slow, therefore allow a longer timeout.
-    if self.build_config.arch in SLOW_ARCHS:
-      options.timeout *= 2
-
-    options.timeout *= self.mode_options.timeout_scalefactor
-
-    if self.build_config.predictable:
-      # Predictable mode is slower.
-      options.timeout *= 2
-
     ctx = context.Context(self.build_config.arch,
                           self.mode_options.execution_mode,
                           self.outdir,
                           self.mode_options.flags,
                           options.verbose,
-                          options.timeout,
+                          options.timeout *
+                          self._timeout_scalefactor(options),
                           options.isolates,
                           options.command_prefix,
                           options.extra_flags,

@@ -503,7 +464,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
       s.tests = self._shard_tests(s.tests, options)

       for t in s.tests:
-        t.cmd = t.get_command(ctx)
+        t.cmd = t.get_command()

       num_tests += len(s.tests)

@@ -4,15 +4,29 @@

-import random
+from .utils import random_utils


 # TODO(majeski): Move the rest of stuff from context
 class TestConfig(object):
-  def __init__(self, random_seed):
+  def __init__(self,
+               command_prefix,
+               extra_flags,
+               isolates,
+               mode_flags,
+               no_harness,
+               noi18n,
+               random_seed,
+               shell_dir,
+               timeout,
+               verbose):
+    self.command_prefix = command_prefix
+    self.extra_flags = extra_flags
+    self.isolates = isolates
+    self.mode_flags = mode_flags
+    self.no_harness = no_harness
+    self.noi18n = noi18n
     # random_seed is always not None.
-    self.random_seed = random_seed or self._gen_random_seed()
-
-  def _gen_random_seed(self):
-    seed = None
-    while not seed:
-      seed = random.SystemRandom().randint(-2147483648, 2147483647)
-    return seed
+    self.random_seed = random_seed or random_utils.random_seed()
+    self.shell_dir = shell_dir
+    self.timeout = timeout
+    self.verbose = verbose
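
With the expanded constructor above, a TestConfig can be built in one place and handed to both the suites and the test cases. A hedged usage sketch; the import path is assumed from the relative `from .utils import random_utils` import (the module lives in V8's testrunner package), and all values are placeholders rather than runner defaults:

    from testrunner.test_config import TestConfig  # module path assumed

    config = TestConfig(
        command_prefix=[],
        extra_flags=['--expose-gc'],
        isolates=False,
        mode_flags=[],
        no_harness=False,
        noi18n=False,
        random_seed=0,        # falsy: falls back to random_utils.random_seed()
        shell_dir='out/x64.release',
        timeout=60,
        verbose=False,
    )
    print(config.random_seed)  # never None; generated when not supplied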
@@ -71,7 +71,7 @@ class ExecutionProc(base.TestProc):
       return

     test_id = test.procid
-    cmd = test.get_command(self._context)
+    cmd = test.get_command()
     self._tests[test_id] = test, cmd

     outproc = self._outproc_factory(test)

@@ -24,7 +24,7 @@ class TestCase(testcase.TestCase):
   def get_shell(self):
     return 'd8_mocked.py'

-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return [self.name]

 def GetSuite(*args, **kwargs):

@@ -24,7 +24,7 @@ class TestCase(testcase.TestCase):
   def get_shell(self):
     return 'd8_mocked.py'

-  def _get_files_params(self, ctx):
+  def _get_files_params(self):
     return [self.name]

 def GetSuite(*args, **kwargs):