[test] Organize flags into sections and share more code

Bug: v8:6917
Change-Id: I4267900b6beed44eeae3df9b3b7a3e58402d6e6e
Reviewed-on: https://chromium-review.googlesource.com/899366
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51084}
Author: Michal Majewski, 2018-02-02 23:46:27 +01:00 (committed by Commit Bot)
Parent: d47d5f903a
Commit: 3a0372f9f1
3 changed files with 90 additions and 110 deletions


@@ -5,6 +5,7 @@
from collections import OrderedDict
import json
import multiprocessing
import optparse
import os
import shlex
@@ -227,6 +228,10 @@ class BaseTestRunner(object):
try:
parser = self._create_parser()
options, args = self._parse_args(parser, sys_args)
if options.swarming:
# Swarming doesn't print how isolated commands are called. Let's make
# this less cryptic by printing it ourselves.
print ' '.join(sys.argv)
self._load_build_config(options)
@@ -239,10 +244,14 @@ class BaseTestRunner(object):
args = self._parse_test_args(args)
suites = self._get_suites(args, options)
self._load_status_files(suites, options)
self._prepare_suites(suites, options)
self._setup_env()
return self._do_execute(suites, args, options)
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
tests = [t for s in suites for t in s.tests]
return self._do_execute(tests, args, options)
except TestRunnerError:
return 1
except KeyboardInterrupt:
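The hunk above folds the per-runner driver loops into the base class: suites are prepared once, the environment is set up, the suites' test cases are flattened into a single list, and only then is the subclass hook _do_execute invoked with tests rather than suites. A minimal sketch of that flow, with stubbed helpers and an assumed wrapper name (only the identifiers that appear in the hunks are taken from the diff):

# Sketch only: assumed shape of the consolidated driver flow, not the actual
# V8 sources. Helper bodies are stubbed so the class stands on its own.
class BaseTestRunner(object):
  def _run_tests(self, suites, args, options):     # hypothetical wrapper name
    self._prepare_suites(suites, options)
    self._setup_env()
    tests = [t for s in suites for t in s.tests]   # flatten once, up front
    return self._do_execute(tests, args, options)  # subclasses now get tests

  def _prepare_suites(self, suites, options):
    self._load_status_files(suites, options)
    for s in suites:
      s.ReadTestCases()

  def _load_status_files(self, suites, options):   # stub for the real loader
    pass

  def _setup_env(self):                            # stub
    pass

  def _do_execute(self, tests, args, options):     # per-runner hook
    raise NotImplementedError()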
@@ -273,6 +282,11 @@ class BaseTestRunner(object):
"directory will be used")
parser.add_option("--total-timeout-sec", default=0, type="int",
help="How long should fuzzer run")
parser.add_option("--swarming", default=False, action="store_true",
help="Indicates running test driver on swarming.")
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type=int)
# Shard
parser.add_option("--shard-count", default=1, type=int,
@@ -446,6 +460,9 @@ class BaseTestRunner(object):
print('Warning: --shell-dir is deprecated. Searching for executables in '
'build directory (%s) instead.' % self.outdir)
if options.j == 0:
options.j = multiprocessing.cpu_count()
options.command_prefix = shlex.split(options.command_prefix)
options.extra_flags = sum(map(shlex.split, options.extra_flags), [])
@@ -565,6 +582,11 @@ class BaseTestRunner(object):
test_config)
return map(load_suite, names)
def _prepare_suites(self, suites, options):
self._load_status_files(suites, options)
for s in suites:
s.ReadTestCases()
def _load_status_files(self, suites, options):
# simd_mips is true if SIMD is fully supported on MIPS
variables = self._get_statusfile_variables(options)
@@ -638,6 +660,12 @@ class BaseTestRunner(object):
def _do_execute(self, suites, args, options):
raise NotImplementedError()
def _prepare_procs(self, procs):
procs = filter(None, procs)
for i in xrange(0, len(procs) - 1):
procs[i].connect_to(procs[i + 1])
procs[0].setup()
def _create_shard_proc(self, options):
myid, count = self._get_shard_info(options)
if count == 1:
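The new _prepare_procs helper above replaces the identical connect-and-setup loops that both subclass runners carried (the removed copies are visible further down in this diff). A small self-contained illustration of the chaining pattern, using made-up processor objects that implement only the two methods the helper relies on (Python 2, like the code above):

# Illustration of the processor chaining done by _prepare_procs; ExampleProc
# is invented for this sketch, and only connect_to()/setup() mirror the
# interface the helper expects.
class ExampleProc(object):
  def __init__(self, name):
    self.name = name
    self._next = None

  def connect_to(self, next_proc):
    self._next = next_proc

  def setup(self):
    print('%s ready, feeding into %s' % (
        self.name, self._next.name if self._next else 'nothing'))

def prepare_procs(procs):
  # Same shape as the helper above: drop disabled (None) entries, wire each
  # processor to its successor, then set up the head of the chain.
  procs = filter(None, procs)
  for i in xrange(0, len(procs) - 1):
    procs[i].connect_to(procs[i + 1])
  procs[0].setup()

prepare_procs([ExampleProc('loader'), None, ExampleProc('executor')])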


@@ -5,7 +5,6 @@
# found in the LICENSE file.
import multiprocessing
import random
import sys
@@ -32,15 +31,9 @@ class NumFuzzer(base_runner.BaseTestRunner):
super(NumFuzzer, self).__init__(*args, **kwargs)
def _add_parser_options(self, parser):
parser.add_option("--dump-results-file", help="Dump maximum limit reached")
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
parser.add_option("--fuzzer-random-seed", default=0,
help="Default seed for initializing fuzzer random "
"generator")
parser.add_option("--swarming",
help="Indicates running test driver on swarming.",
default=False, action="store_true")
parser.add_option("--tests-count", default=5, type="int",
help="Number of tests to generate from each base test. "
"Can be combined with --total-timeout-sec with "
@@ -88,8 +81,6 @@ class NumFuzzer(base_runner.BaseTestRunner):
def _process_options(self, options):
if options.j == 0:
options.j = multiprocessing.cpu_count()
if not options.fuzzer_random_seed:
options.fuzzer_random_seed = random_utils.random_seed()
@@ -129,13 +120,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
})
return variables
def _do_execute(self, suites, args, options):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
self._setup_suites(options, suites)
tests = self._load_tests(options, suites)
def _do_execute(self, tests, args, options):
loader = LoadProc()
fuzzer_rng = random.Random(options.fuzzer_random_seed)
@@ -178,31 +163,21 @@ class NumFuzzer(base_runner.BaseTestRunner):
return 1
return 0
def _setup_suites(self, options, suites):
def _load_suites(self, names, options):
suites = super(NumFuzzer, self)._load_suites(names, options)
if options.combine_tests:
suites = [s for s in suites if s.test_combiner_available()]
return suites
def _prepare_suites(self, suites, options):
"""Sets additional configurations on test suites based on options."""
super(NumFuzzer, self)._prepare_suites(suites, options)
if options.stress_interrupt_budget:
# Changing interrupt budget forces us to suppress certain test assertions.
for suite in suites:
suite.do_suppress_internals()
def _load_tests(self, options, suites):
if options.combine_tests:
suites = [s for s in suites if s.test_combiner_available()]
# Find available test suites and read test cases from them.
tests = []
for s in suites:
s.ReadTestCases()
tests += s.tests
return tests
def _prepare_procs(self, procs):
procs = filter(None, procs)
for i in xrange(0, len(procs) - 1):
procs[i].connect_to(procs[i + 1])
procs[0].setup()
def _create_combiner(self, rng, options):
if not options.combine_tests:
return None
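With the shared pieces hoisted into the base class, NumFuzzer keeps only its fuzzer-specific behaviour as overrides of the new hooks: _load_suites drops suites that cannot combine tests when --combine-tests is set, and _prepare_suites additionally suppresses internal assertions when the interrupt budget is stressed. A compact sketch of that override pattern with a stubbed base class (everything not named in the hunks above is a stand-in):

# Sketch of the subclass-hook pattern; BaseTestRunner is stubbed here, and the
# option attributes mirror the flags visible in the hunks above.
class BaseTestRunner(object):
  def _load_suites(self, names, options):
    return []                       # the real loader builds suite objects

  def _prepare_suites(self, suites, options):
    for s in suites:
      s.ReadTestCases()

class NumFuzzer(BaseTestRunner):
  def _load_suites(self, names, options):
    suites = super(NumFuzzer, self)._load_suites(names, options)
    if options.combine_tests:
      # Only suites that know how to combine tests are useful in this mode.
      suites = [s for s in suites if s.test_combiner_available()]
    return suites

  def _prepare_suites(self, suites, options):
    super(NumFuzzer, self)._prepare_suites(suites, options)
    if options.stress_interrupt_budget:
      # A changed interrupt budget forces suppressing some test assertions.
      for suite in suites:
        suite.do_suppress_internals()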


@@ -5,7 +5,6 @@
# found in the LICENSE file.
import multiprocessing
import os
import re
import sys
@@ -71,53 +70,13 @@ class StandardTestRunner(base_runner.BaseTestRunner):
def _get_default_suite_names(self):
return ['default']
def _do_execute(self, suites, args, options):
if options.swarming:
# Swarming doesn't print how isolated commands are called. Let's make
# this less cryptic by printing it ourselves.
print ' '.join(sys.argv)
return self._execute(args, options, suites)
def _add_parser_options(self, parser):
parser.add_option("--sancov-dir",
help="Directory where to collect coverage data")
parser.add_option("--cfi-vptr",
help="Run tests with UBSAN cfi_vptr option.",
default=False, action="store_true")
parser.add_option("--novfp3",
help="Indicates that V8 was compiled without VFP3"
" support",
default=False, action="store_true")
parser.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
parser.add_option("--slow-tests",
help="Regard slow tests (run|skip|dontcare)",
default="dontcare")
parser.add_option("--pass-fail-tests",
help="Regard pass|fail tests (run|skip|dontcare)",
default="dontcare")
parser.add_option("--gc-stress",
help="Switch on GC stress mode",
default=False, action="store_true")
parser.add_option("--random-gc-stress",
help="Switch on random GC stress mode",
default=False, action="store_true")
parser.add_option("--infra-staging", help="Use new test runner features",
dest='infra_staging', default=None,
action="store_true")
parser.add_option("--no-infra-staging",
help="Opt out of new test runner features",
dest='infra_staging', default=None,
action="store_false")
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type="int")
parser.add_option("--no-presubmit", "--nopresubmit",
help='Skip presubmit checks (deprecated)',
default=False, dest="no_presubmit", action="store_true")
parser.add_option("--no-sorting", "--nosorting",
help="Don't sort tests according to duration of last"
" run.",
default=False, dest="no_sorting", action="store_true")
# Variants
parser.add_option("--no-variants", "--novariants",
help="Deprecated. "
"Equivalent to passing --variants=default",
@@ -129,24 +88,26 @@ class StandardTestRunner(base_runner.BaseTestRunner):
default=False, action="store_true",
help="Deprecated. "
"Equivalent to passing --variants=exhaustive")
# Filters
parser.add_option("--slow-tests", default="dontcare",
help="Regard slow tests (run|skip|dontcare)")
parser.add_option("--pass-fail-tests", default="dontcare",
help="Regard pass|fail tests (run|skip|dontcare)")
parser.add_option("--quickcheck", default=False, action="store_true",
help=("Quick check mode (skip slow tests)"))
parser.add_option("--report", help="Print a summary of the tests to be"
" run",
default=False, action="store_true")
parser.add_option("--flakiness-results",
help="Path to a file for storing flakiness json.")
parser.add_option("--dont-skip-slow-simulator-tests",
help="Don't skip more slow tests when using a"
" simulator.",
default=False, action="store_true",
dest="dont_skip_simulator_slow_tests")
parser.add_option("--swarming",
help="Indicates running test driver on swarming.",
# Stress modes
parser.add_option("--gc-stress",
help="Switch on GC stress mode",
default=False, action="store_true")
parser.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
parser.add_option("--warn-unused", help="Report unused rules",
parser.add_option("--random-gc-stress",
help="Switch on random GC stress mode",
default=False, action="store_true")
parser.add_option("--random-seed-stress-count", default=1, type="int",
dest="random_seed_stress_count",
@@ -154,6 +115,40 @@ class StandardTestRunner(base_runner.BaseTestRunner):
"with test processors: 0 means infinite "
"generation.")
# Noop
parser.add_option("--cfi-vptr",
help="Run tests with UBSAN cfi_vptr option.",
default=False, action="store_true")
parser.add_option("--infra-staging", help="Use new test runner features",
dest='infra_staging', default=None,
action="store_true")
parser.add_option("--no-infra-staging",
help="Opt out of new test runner features",
dest='infra_staging', default=None,
action="store_false")
parser.add_option("--no-sorting", "--nosorting",
help="Don't sort tests according to duration of last"
" run.",
default=False, dest="no_sorting", action="store_true")
parser.add_option("--no-presubmit", "--nopresubmit",
help='Skip presubmit checks (deprecated)',
default=False, dest="no_presubmit", action="store_true")
# Unimplemented for test processors
parser.add_option("--sancov-dir",
help="Directory where to collect coverage data")
parser.add_option("--cat", help="Print the source of the tests",
default=False, action="store_true")
parser.add_option("--flakiness-results",
help="Path to a file for storing flakiness json.")
parser.add_option("--time", help="Print timing information after running",
default=False, action="store_true")
parser.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
parser.add_option("--report", default=False, action="store_true",
help="Print a summary of the tests to be run")
def _process_options(self, options):
if options.sancov_dir:
self.sancov_dir = options.sancov_dir
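The option definitions above are now grouped under plain comment headers (# Filters, # Stress modes, # Noop, # Unimplemented for test processors) instead of sitting in one undifferentiated block. A trimmed-down, standalone illustration of the same optparse layout; the flags are a subset copied from the hunks, and reading "Noop" as accepted-but-ignored is an interpretation, not something the diff states:

import optparse

def add_parser_options(parser):
  # Filters
  parser.add_option("--slow-tests", default="dontcare",
                    help="Regard slow tests (run|skip|dontcare)")
  parser.add_option("--pass-fail-tests", default="dontcare",
                    help="Regard pass|fail tests (run|skip|dontcare)")
  # Stress modes
  parser.add_option("--gc-stress", default=False, action="store_true",
                    help="Switch on GC stress mode")
  # Noop (accepted for compatibility; assumed to have no effect here)
  parser.add_option("--no-presubmit", "--nopresubmit",
                    help="Skip presubmit checks (deprecated)",
                    default=False, dest="no_presubmit", action="store_true")

parser = optparse.OptionParser()
add_parser_options(parser)
options, args = parser.parse_args(["--gc-stress"])
print(options.gc_stress)   # True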
@@ -210,9 +205,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
if self.build_config.msan:
options.variants = "default"
if options.j == 0:
options.j = multiprocessing.cpu_count()
if options.variants == "infra_staging":
options.variants = "exhaustive"
@@ -279,16 +271,7 @@ class StandardTestRunner(base_runner.BaseTestRunner):
})
return variables
def _execute(self, args, options, suites):
print(">>> Running tests for %s.%s" % (self.build_config.arch,
self.mode_name))
for s in suites:
s.ReadTestCases()
return self._run_test_procs(suites, args, options)
def _run_test_procs(self, suites, args, options):
def _do_execute(self, tests, args, options):
jobs = options.j
print '>>> Running with test processors'
@@ -320,15 +303,9 @@ class StandardTestRunner(base_runner.BaseTestRunner):
execproc,
]
procs = filter(None, procs)
for i in xrange(0, len(procs) - 1):
procs[i].connect_to(procs[i + 1])
tests = [t for s in suites for t in s.tests]
self._prepare_procs(procs)
tests.sort(key=lambda t: t.is_slow, reverse=True)
loader.setup()
loader.load_tests(tests)
print '>>> Running %d base tests' % tests_counter.total
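The final hunk shows the standard runner's _do_execute after the clean-up: the duplicated connect/setup loop gives way to self._prepare_procs(procs), and the flat tests list it now receives is sorted slowest-first before being handed to the loader. A minimal sketch of that scheduling idea in isolation (the test objects are invented; only the is_slow attribute and the reverse sort come from the hunk):

# Slowest-first scheduling: with reverse=True a boolean sort key puts
# is_slow=True tests first, so long tests start early and overlap short ones.
class FakeTest(object):
  def __init__(self, name, is_slow):
    self.name = name
    self.is_slow = is_slow

tests = [FakeTest('fast-1', False), FakeTest('slow-1', True),
         FakeTest('fast-2', False)]
tests.sort(key=lambda t: t.is_slow, reverse=True)
print([t.name for t in tests])   # ['slow-1', 'fast-1', 'fast-2']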