#!/usr/bin/env python
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


from collections import OrderedDict
from os.path import join
import multiprocessing
import os
import random
import shlex
import subprocess
import sys
import time

# Adds testrunner to the path, hence it has to be imported at the beginning.
import base_runner

from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.local.variants import ALL_VARIANTS
from testrunner.objects import context
from testrunner.objects import predictable
from testrunner.testproc.execution import ExecutionProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
from testrunner.testproc.progress import (VerboseProgressIndicator,
                                          ResultsTracker,
                                          TestsCounter)
from testrunner.testproc.rerun import RerunProc
from testrunner.testproc.variant import VariantProc


TIMEOUT_DEFAULT = 60

# Variants ordered by expected runtime (slowest first).
VARIANTS = ["default"]

MORE_VARIANTS = [
  "stress",
  "stress_incremental_marking",
  "nooptimization",
  "stress_background_compile",
  "wasm_traps",
]

VARIANT_ALIASES = {
  # The default for developer workstations.
  "dev": VARIANTS,
  # Additional variants, run on all bots.
  "more": MORE_VARIANTS,
  # Shortcut for the two above ("more" first - it has the longer running tests).
  "exhaustive": MORE_VARIANTS + VARIANTS,
  # Additional variants, run on a subset of bots.
  "extra": ["future", "liftoff", "trusted"],
}

GC_STRESS_FLAGS = ["--gc-interval=500", "--stress-compaction",
                   "--concurrent-recompilation-queue-length=64",
                   "--concurrent-recompilation-delay=500",
                   "--concurrent-recompilation"]

# Double the timeout for these:
SLOW_ARCHS = ["arm",
              "mips",
              "mipsel",
              "mips64",
              "mips64el",
              "s390",
              "s390x",
              "arm64"]

PREDICTABLE_WRAPPER = os.path.join(
    base_runner.BASE_DIR, 'tools', 'predictable_wrapper.py')


class StandardTestRunner(base_runner.BaseTestRunner):
  def __init__(self, *args, **kwargs):
    super(StandardTestRunner, self).__init__(*args, **kwargs)

    self.sancov_dir = None

  def _get_default_suite_names(self):
    return ['default']

  def _do_execute(self, suites, args, options):
    if options.swarming:
      # Swarming doesn't print how isolated commands are called. Let's make
      # this less cryptic by printing it ourselves.
      print ' '.join(sys.argv)

      if utils.GuessOS() == "macos":
        # TODO(machenbach): Temporary output for investigating hanging test
        # driver on mac.
        print "V8 related processes running on this host:"
        try:
          print subprocess.check_output(
            "ps -e | egrep 'd8|cctest|unittests'", shell=True)
        except Exception:
          pass

    return self._execute(args, options, suites)

  def _add_parser_options(self, parser):
    parser.add_option("--sancov-dir",
                      help="Directory where to collect coverage data")
    parser.add_option("--cfi-vptr",
                      help="Run tests with UBSAN cfi_vptr option.",
                      default=False, action="store_true")
    parser.add_option("--novfp3",
                      help="Indicates that V8 was compiled without VFP3"
                      " support",
                      default=False, action="store_true")
    parser.add_option("--cat", help="Print the source of the tests",
                      default=False, action="store_true")
    parser.add_option("--slow-tests",
                      help="Regard slow tests (run|skip|dontcare)",
                      default="dontcare")
    parser.add_option("--pass-fail-tests",
                      help="Regard pass|fail tests (run|skip|dontcare)",
                      default="dontcare")
    parser.add_option("--gc-stress",
                      help="Switch on GC stress mode",
                      default=False, action="store_true")
    parser.add_option("--command-prefix",
                      help="Prepended to each shell command used to run a"
                      " test",
                      default="")
    parser.add_option("--extra-flags",
                      help="Additional flags to pass to each test command",
                      action="append", default=[])
    parser.add_option("--infra-staging", help="Use new test runner features",
                      default=False, action="store_true")
    parser.add_option("--isolates", help="Whether to test isolates",
                      default=False, action="store_true")
    parser.add_option("-j", help="The number of parallel tasks to run",
                      default=0, type="int")
    parser.add_option("--no-harness", "--noharness",
                      help="Run without test harness of a given suite",
                      default=False, action="store_true")
    parser.add_option("--no-presubmit", "--nopresubmit",
                      help='Skip presubmit checks (deprecated)',
                      default=False, dest="no_presubmit", action="store_true")
    parser.add_option("--no-sorting", "--nosorting",
                      help="Don't sort tests according to duration of last"
                      " run.",
                      default=False, dest="no_sorting", action="store_true")
    parser.add_option("--no-variants", "--novariants",
                      help="Deprecated. "
                           "Equivalent to passing --variants=default",
                      default=False, dest="no_variants", action="store_true")
    parser.add_option("--variants",
                      help="Comma-separated list of testing variants;"
                      " default: \"%s\"" % ",".join(VARIANTS))
    parser.add_option("--exhaustive-variants",
                      default=False, action="store_true",
                      help="Deprecated. "
                           "Equivalent to passing --variants=exhaustive")
    parser.add_option("-p", "--progress",
                      help=("The style of progress indicator"
                            " (verbose, dots, color, mono)"),
                      choices=progress.PROGRESS_INDICATORS.keys(),
                      default="mono")
    parser.add_option("--quickcheck", default=False, action="store_true",
                      help=("Quick check mode (skip slow tests)"))
    parser.add_option("--report", help="Print a summary of the tests to be"
                      " run",
                      default=False, action="store_true")
    parser.add_option("--json-test-results",
                      help="Path to a file for storing json results.")
    parser.add_option("--flakiness-results",
                      help="Path to a file for storing flakiness json.")
    parser.add_option("--rerun-failures-count",
                      help=("Number of times to rerun each failing test case."
                            " Very slow tests will be rerun only once."),
                      default=0, type="int")
    parser.add_option("--rerun-failures-max",
                      help="Maximum number of failing test cases to rerun.",
                      default=100, type="int")
    parser.add_option("--dont-skip-slow-simulator-tests",
                      help="Don't skip more slow tests when using a"
                      " simulator.",
                      default=False, action="store_true",
                      dest="dont_skip_simulator_slow_tests")
    parser.add_option("--swarming",
                      help="Indicates running test driver on swarming.",
                      default=False, action="store_true")
    parser.add_option("--time", help="Print timing information after running",
                      default=False, action="store_true")
    parser.add_option("-t", "--timeout", help="Timeout in seconds",
                      default=TIMEOUT_DEFAULT, type="int")
    parser.add_option("--warn-unused", help="Report unused rules",
                      default=False, action="store_true")
    parser.add_option("--junitout", help="File name of the JUnit output")
    parser.add_option("--junittestsuite",
                      help="The testsuite name in the JUnit output file",
                      default="v8tests")
    parser.add_option("--random-seed", default=0, dest="random_seed",
                      help="Default seed for initializing random generator",
                      type=int)
    parser.add_option("--random-seed-stress-count", default=1, type="int",
                      dest="random_seed_stress_count",
                      help="Number of runs with different random seeds")

  def _process_options(self, options):
    global VARIANTS

    if options.sancov_dir:
      self.sancov_dir = options.sancov_dir
      if not os.path.exists(self.sancov_dir):
        print("sancov-dir %s doesn't exist" % self.sancov_dir)
        raise base_runner.TestRunnerError()

    options.command_prefix = shlex.split(options.command_prefix)
    options.extra_flags = sum(map(shlex.split, options.extra_flags), [])

    if options.gc_stress:
      options.extra_flags += GC_STRESS_FLAGS

    if self.build_config.asan:
      options.extra_flags.append("--invoke-weak-callbacks")
      options.extra_flags.append("--omit-quit")

    if options.novfp3:
      options.extra_flags.append("--noenable-vfp3")

    if options.no_variants:  # pragma: no cover
      print ("Option --no-variants is deprecated. "
             "Pass --variants=default instead.")
      assert not options.variants
      options.variants = "default"

    if options.exhaustive_variants:  # pragma: no cover
      # TODO(machenbach): Switch infra to --variants=exhaustive after M65.
      print ("Option --exhaustive-variants is deprecated. "
             "Pass --variants=exhaustive instead.")
      # This is used on many bots. It includes a larger set of default
      # variants.
      # Other options for manipulating variants still apply afterwards.
      assert not options.variants
      options.variants = "exhaustive"

    if options.quickcheck:
      assert not options.variants
      options.variants = "stress,default"
      options.slow_tests = "skip"
      options.pass_fail_tests = "skip"

    if self.build_config.predictable:
      options.variants = "default"
      options.extra_flags.append("--predictable")
      options.extra_flags.append("--verify_predictable")
      options.extra_flags.append("--no-inline-new")
      # Add predictable wrapper to command prefix.
      options.command_prefix = (
          [sys.executable, PREDICTABLE_WRAPPER] + options.command_prefix)

    # TODO(machenbach): Figure out how to test a bigger subset of variants on
    # msan.
    if self.build_config.msan:
      options.variants = "default"

    if options.j == 0:
      options.j = multiprocessing.cpu_count()

    if options.random_seed_stress_count <= 1 and options.random_seed == 0:
      options.random_seed = self._random_seed()

    # Use developer defaults if no variant was specified.
    options.variants = options.variants or "dev"

    if options.variants == "infra_staging":
      options.variants = "exhaustive"
      options.infra_staging = True

    # Resolve variant aliases and dedupe.
    # TODO(machenbach): Don't mutate global variable. Rather pass mutated
    # version as local variable.
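    # For example, --variants=dev,extra resolves to the union of the "dev"
    # and "extra" aliases above, i.e. default, future, liftoff and trusted
    # (in no particular order, since the result is deduped through a set).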
    VARIANTS = list(set(reduce(
        list.__add__,
        (VARIANT_ALIASES.get(v, [v]) for v in options.variants.split(",")),
        [],
    )))

    if not set(VARIANTS).issubset(ALL_VARIANTS):
      print "All variants must be in %s" % str(ALL_VARIANTS)
      raise base_runner.TestRunnerError()

    def CheckTestMode(name, option):  # pragma: no cover
      if not option in ["run", "skip", "dontcare"]:
        print "Unknown %s mode %s" % (name, option)
        raise base_runner.TestRunnerError()
    CheckTestMode("slow test", options.slow_tests)
    CheckTestMode("pass|fail test", options.pass_fail_tests)
    if self.build_config.no_i18n:
      base_runner.TEST_MAP["bot_default"].remove("intl")
      base_runner.TEST_MAP["default"].remove("intl")
      # TODO(machenbach): uncomment after infra side lands.
      # base_runner.TEST_MAP["d8_default"].remove("intl")

  def _setup_env(self):
    super(StandardTestRunner, self)._setup_env()

    symbolizer_option = self._get_external_symbolizer_option()

    if self.sancov_dir:
      os.environ['ASAN_OPTIONS'] = ":".join([
        'coverage=1',
        'coverage_dir=%s' % self.sancov_dir,
        symbolizer_option,
        "allow_user_segv_handler=1",
      ])

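  # Picks a non-zero seed from the signed 32-bit range; zero is excluded
  # because the runner treats a zero --random-seed as "not specified".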
  def _random_seed(self):
    seed = 0
    while not seed:
      seed = random.SystemRandom().randint(-2147483648, 2147483647)
    return seed

  def _execute(self, args, options, suites):
    print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                           self.mode_name))
    # Populate context object.

    # Simulators are slow, therefore allow a longer timeout.
    if self.build_config.arch in SLOW_ARCHS:
      options.timeout *= 2

    options.timeout *= self.mode_options.timeout_scalefactor

    if self.build_config.predictable:
      # Predictable mode is slower.
      options.timeout *= 2

    ctx = context.Context(self.build_config.arch,
                          self.mode_options.execution_mode,
                          self.outdir,
                          self.mode_options.flags,
                          options.verbose,
                          options.timeout,
                          options.isolates,
                          options.command_prefix,
                          options.extra_flags,
                          self.build_config.no_i18n,
                          options.random_seed,
                          options.no_sorting,
                          options.rerun_failures_count,
                          options.rerun_failures_max,
                          options.no_harness,
                          use_perf_data=not options.swarming,
                          sancov_dir=self.sancov_dir,
                          infra_staging=options.infra_staging)

    # TODO(all): Combine "simulator" and "simulator_run".
    # TODO(machenbach): In GN we can derive simulator run from
    # target_arch != v8_target_arch in the dumped build config.
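    # True when the build targets a simulated architecture (the target arch
    # differs from the guessed host arch) and slow simulator tests were not
    # explicitly requested; status files can key off this variable.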
    simulator_run = (
      not options.dont_skip_simulator_slow_tests and
      self.build_config.arch in [
        'arm64', 'arm', 'mipsel', 'mips', 'mips64', 'mips64el', 'ppc',
        'ppc64', 's390', 's390x'] and
      bool(base_runner.ARCH_GUESS) and
      self.build_config.arch != base_runner.ARCH_GUESS)
    # Find available test suites and read test cases from them.
    variables = {
      "arch": self.build_config.arch,
      "asan": self.build_config.asan,
      "byteorder": sys.byteorder,
      "dcheck_always_on": self.build_config.dcheck_always_on,
      "deopt_fuzzer": False,
      "gc_fuzzer": False,
      "gc_stress": options.gc_stress,
      "gcov_coverage": self.build_config.gcov_coverage,
      "isolates": options.isolates,
      "mode": self.mode_options.status_mode,
      "msan": self.build_config.msan,
      "no_harness": options.no_harness,
      "no_i18n": self.build_config.no_i18n,
      "no_snap": self.build_config.no_snap,
      "novfp3": options.novfp3,
      "predictable": self.build_config.predictable,
      "simulator": utils.UseSimulator(self.build_config.arch),
      "simulator_run": simulator_run,
      "system": utils.GuessOS(),
      "tsan": self.build_config.tsan,
      "ubsan_vptr": self.build_config.ubsan_vptr,
    }

    progress_indicator = progress.IndicatorNotifier()
    progress_indicator.Register(
      progress.PROGRESS_INDICATORS[options.progress]())
    if options.junitout:  # pragma: no cover
      progress_indicator.Register(progress.JUnitTestProgressIndicator(
          options.junitout, options.junittestsuite))
    if options.json_test_results:
      progress_indicator.Register(progress.JsonTestProgressIndicator(
        options.json_test_results,
        self.build_config.arch,
        self.mode_options.execution_mode,
        ctx.random_seed))
    if options.flakiness_results:  # pragma: no cover
      progress_indicator.Register(progress.FlakinessTestProgressIndicator(
          options.flakiness_results))

    if options.infra_staging:
      for s in suites:
        s.ReadStatusFile(variables)
        s.ReadTestCases(ctx)

      return self._run_test_procs(suites, args, options, progress_indicator,
                                  ctx)

    all_tests = []
    num_tests = 0
    for s in suites:
      s.ReadStatusFile(variables)
      s.ReadTestCases(ctx)
      if len(args) > 0:
        s.FilterTestCasesByArgs(args)
      all_tests += s.tests

      # First filtering by status applying the generic rules (tests without
      # variants)
      if options.warn_unused:
        tests = [(t.name, t.variant) for t in s.tests]
        s.statusfile.warn_unused_rules(tests, check_variant_rules=False)
      s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)

      if options.cat:
        verbose.PrintTestSource(s.tests)
        continue
      variant_gen = s.CreateLegacyVariantsGenerator(VARIANTS)
      variant_tests = [ t.create_variant(v, flags)
                        for t in s.tests
                        for v in variant_gen.FilterVariantsByTest(t)
                        for flags in variant_gen.GetFlagSets(t, v) ]

      if options.random_seed_stress_count > 1:
        # Duplicate test for random seed stress mode.
        def iter_seed_flags():
          for _ in range(0, options.random_seed_stress_count):
            # Use given random seed for all runs (set by default in
            # execution.py) or a new random seed if none is specified.
            if options.random_seed:
              yield []
            else:
              yield ["--random-seed=%d" % self._random_seed()]
        s.tests = [
          t.create_variant(t.variant, flags, 'seed-stress-%d' % n)
          for t in variant_tests
          for n, flags in enumerate(iter_seed_flags())
        ]
      else:
        s.tests = variant_tests

      # Second filtering by status applying also the variant-dependent rules.
      if options.warn_unused:
        tests = [(t.name, t.variant) for t in s.tests]
        s.statusfile.warn_unused_rules(tests, check_variant_rules=True)

      s.FilterTestCasesByStatus(options.slow_tests, options.pass_fail_tests)
      s.tests = self._shard_tests(s.tests, options)

      for t in s.tests:
        t.cmd = t.get_command(ctx)

      num_tests += len(s.tests)

    if options.cat:
      return 0  # We're done here.

    if options.report:
      verbose.PrintReport(all_tests)

    # Run the tests.
    start_time = time.time()

    if self.build_config.predictable:
      outproc_factory = predictable.get_outproc
    else:
      outproc_factory = None

    runner = execution.Runner(suites, progress_indicator, ctx,
                              outproc_factory)
    exit_code = runner.Run(options.j)
    overall_duration = time.time() - start_time

    if options.time:
      verbose.PrintTestDurations(suites, runner.outputs, overall_duration)

    if num_tests == 0:
      print("Warning: no tests were run!")

    if exit_code == 1 and options.json_test_results:
      print("Force exit code 0 after failures. Json test results file "
            "generated with failure information.")
      exit_code = 0

    if self.sancov_dir:
      # If tests ran with sanitizer coverage, merge coverage files in the end.
      try:
        print "Merging sancov files."
        subprocess.check_call([
          sys.executable,
          join(self.basedir, "tools", "sanitizers", "sancov_merger.py"),
          "--coverage-dir=%s" % self.sancov_dir])
      except:
        print >> sys.stderr, "Error: Merging sancov files failed."
        exit_code = 1

    return exit_code

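  # Distributes tests across shards round-robin: a test is kept when its
  # index modulo shard_count equals shard_run - 1 (shard_run is 1-based).
  # The shard parameters are presumably resolved by the base runner's
  # _get_shard_info() from command-line or environment settings.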
  def _shard_tests(self, tests, options):
    shard_run, shard_count = self._get_shard_info(options)

    if shard_count < 2:
      return tests
    count = 0
    shard = []
    for test in tests:
      if count % shard_count == shard_run - 1:
        shard.append(test)
      count += 1
    return shard

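  # Staging path: builds a chain of test processors (loader -> name filter ->
  # status filter -> sharding -> counter -> variants -> status filter ->
  # progress indicators -> results tracker -> rerun -> execution) and streams
  # the tests through it instead of using the legacy execution.Runner.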
  def _run_test_procs(self, suites, args, options, progress_indicator,
                      context):
    jobs = options.j

    print '>>> Running with test processors'
    loader = LoadProc()
    tests_counter = TestsCounter()
    results = ResultsTracker()
    indicators = progress_indicator.ToProgressIndicatorProcs()
    execproc = ExecutionProc(jobs, context)

    procs = [
      loader,
      NameFilterProc(args) if args else None,
      StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
      self._create_shard_proc(options),
      tests_counter,
      VariantProc(VARIANTS),
      StatusFileFilterProc(options.slow_tests, options.pass_fail_tests),
    ] + indicators + [
      results,
      self._create_rerun_proc(context),
      execproc,
    ]

    procs = filter(None, procs)

    for i in xrange(0, len(procs) - 1):
      procs[i].connect_to(procs[i + 1])

    tests = [t for s in suites for t in s.tests]
    tests.sort(key=lambda t: t.is_slow, reverse=True)

    loader.setup()
    loader.load_tests(tests)

    print '>>> Running %d base tests' % tests_counter.total
    tests_counter.remove_from_chain()

    execproc.start()

    for indicator in indicators:
      indicator.finished()

    print '>>> %d tests ran' % results.total

    exit_code = 0
    if results.failed:
      exit_code = 1
    if results.remaining:
      exit_code = 2

    if exit_code == 1 and options.json_test_results:
      print("Force exit code 0 after failures. Json test results file "
            "generated with failure information.")
      exit_code = 0
    return exit_code

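  # Returns a RerunProc that re-runs failing tests up to rerun_failures_count
  # times (bounded by rerun_failures_max failing cases, per the option help),
  # or None when rerunning is disabled so the proc is dropped from the chain.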
  def _create_rerun_proc(self, ctx):
    if not ctx.rerun_failures_count:
      return None
    return RerunProc(ctx.rerun_failures_count,
                     ctx.rerun_failures_max)


if __name__ == '__main__':
  sys.exit(StandardTestRunner().execute())