[test] Remove old gc and deopt fuzzers

They have been replaced with num_fuzzer.

Bug: v8:6917
Change-Id: I2f78df308cec0a58f0d665bce82503dee68fcebc
Reviewed-on: https://chromium-review.googlesource.com/888641
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Michał Majewski <majeski@google.com>
Cr-Commit-Position: refs/heads/master@{#50903}
Michal Majewski 2018-01-26 15:01:39 +01:00 committed by Commit Bot
parent 8e7527aa2f
commit 1e3a8c156b
5 changed files with 1 addition and 695 deletions

@@ -1,14 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys

from testrunner import deopt_fuzzer


if __name__ == "__main__":
  sys.exit(deopt_fuzzer.DeoptFuzzer().execute())

@@ -1,14 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import sys

from testrunner import gc_fuzzer


if __name__ == "__main__":
  sys.exit(gc_fuzzer.GCFuzzer().execute())

@@ -4,11 +4,9 @@
 {
   'variables': {
     'command': [
-      'run-deopt-fuzzer.py',
+      'run-num-fuzzer.py',
     ],
     'files': [
-      'run-deopt-fuzzer.py',
-      'run-gc-fuzzer.py',
       'run-num-fuzzer.py',
     ],
   },

@@ -1,336 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


from os.path import join
import json
import math
import multiprocessing
import os
import random
import shlex
import sys
import time

# Adds testrunner to the path, hence it has to be imported at the beginning.
import base_runner

from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.objects import context


DEFAULT_SUITES = ["mjsunit", "webkit"]
TIMEOUT_DEFAULT = 60

# Double the timeout for these:
SLOW_ARCHS = ["arm",
              "mipsel"]

MAX_DEOPT = 1000000000
DISTRIBUTION_MODES = ["smooth", "random"]


class DeoptFuzzer(base_runner.BaseTestRunner):
  def __init__(self, *args, **kwargs):
    super(DeoptFuzzer, self).__init__(*args, **kwargs)

  class RandomDistribution:
    def __init__(self, seed=None):
      seed = seed or random.randint(1, sys.maxint)
      print "Using random distribution with seed %d" % seed
      self._random = random.Random(seed)

    def Distribute(self, n, m):
      if n > m:
        n = m
      return self._random.sample(xrange(1, m + 1), n)

  class SmoothDistribution:
    """Distribute n numbers into the interval [1:m].

    F1: Factor of the first derivative of the distribution function.
    F2: Factor of the second derivative of the distribution function.
    With F1 and F2 set to 0, the distribution will be equal.
    """
    def __init__(self, factor1=2.0, factor2=0.2):
      self._factor1 = factor1
      self._factor2 = factor2

    def Distribute(self, n, m):
      if n > m:
        n = m
      if n <= 1:
        return [ 1 ]

      result = []
      x = 0.0
      dx = 1.0
      ddx = self._factor1
      dddx = self._factor2
      for i in range(0, n):
        result += [ x ]
        x += dx
        dx += ddx
        ddx += dddx

      # Project the distribution into the interval [0:m].
      result = [ x * m / result[-1] for x in result ]

      # Equalize by n. The closer n is to m, the more equal the distribution
      # will be.
      for (i, x) in enumerate(result):
        # The value of x if it was equally distributed.
        equal_x = i / float(n - 1) * float(m - 1) + 1
        # Difference factor between actual and equal distribution.
        diff = 1 - (x / equal_x)
        # Equalize x dependent on the number of values to distribute.
        result[i] = int(x + (i + 1) * diff)
      return result
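
  # For illustration (values worked out by hand from the code above, not
  # part of the original file): with the default factors (2.0, 0.2),
  # Distribute(3, 10) accumulates [0, 1, 4], projects that onto
  # [0, 2.5, 10], and equalizes to [1, 3, 10] - the gaps between the chosen
  # deopt counters grow toward m.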

  def _distribution(self, options):
    if options.distribution_mode == "random":
      return self.RandomDistribution(options.seed)
    if options.distribution_mode == "smooth":
      return self.SmoothDistribution(options.distribution_factor1,
                                     options.distribution_factor2)

  def _add_parser_options(self, parser):
    parser.add_option("--command-prefix",
                      help="Prepended to each shell command used to run a test",
                      default="")
    parser.add_option("--coverage", help=("Exponential test coverage "
                      "(range 0.0, 1.0) - 0.0: one test, 1.0: all tests (slow)"),
                      default=0.4, type="float")
    parser.add_option("--coverage-lift", help=("Lifts test coverage for tests "
                      "with a small number of deopt points (range 0, inf)"),
                      default=20, type="int")
    parser.add_option("--distribution-factor1", help=("Factor of the first "
                      "derivative of the distribution function"), default=2.0,
                      type="float")
    parser.add_option("--distribution-factor2", help=("Factor of the second "
                      "derivative of the distribution function"), default=0.7,
                      type="float")
    parser.add_option("--distribution-mode", help=("How to select deopt points "
                      "for a given test (smooth|random)"),
                      default="smooth")
    parser.add_option("--dump-results-file", help=("Dump maximum number of "
                      "deopt points per test to a file"))
    parser.add_option("--extra-flags",
                      help="Additional flags to pass to each test command",
                      default="")
    parser.add_option("--isolates", help="Whether to test isolates",
                      default=False, action="store_true")
    parser.add_option("-j", help="The number of parallel tasks to run",
                      default=0, type="int")
    parser.add_option("-p", "--progress",
                      help=("The style of progress indicator"
                            " (verbose, dots, color, mono)"),
                      choices=progress.PROGRESS_INDICATORS.keys(),
                      default="mono")
    parser.add_option("--seed", help="The seed for the random distribution",
                      type="int")
    parser.add_option("-t", "--timeout", help="Timeout in seconds",
                      default=-1, type="int")
    parser.add_option("--random-seed", default=0, dest="random_seed",
                      help="Default seed for initializing random generator")
    parser.add_option("--fuzzer-random-seed", default=0,
                      help="Default seed for initializing fuzzer random "
                           "generator")
    return parser

  def _process_options(self, options):
    # Special processing of other options, sorted alphabetically.
    options.command_prefix = shlex.split(options.command_prefix)
    options.extra_flags = shlex.split(options.extra_flags)
    if options.j == 0:
      options.j = multiprocessing.cpu_count()
    while options.random_seed == 0:
      options.random_seed = random.SystemRandom().randint(-2147483648,
                                                          2147483647)
    if not options.distribution_mode in DISTRIBUTION_MODES:
      print "Unknown distribution mode %s" % options.distribution_mode
      return False
    if options.distribution_factor1 < 0.0:
      print ("Distribution factor1 %s is out of range. Defaulting to 0.0"
             % options.distribution_factor1)
      options.distribution_factor1 = 0.0
    if options.distribution_factor2 < 0.0:
      print ("Distribution factor2 %s is out of range. Defaulting to 0.0"
             % options.distribution_factor2)
      options.distribution_factor2 = 0.0
    if options.coverage < 0.0 or options.coverage > 1.0:
      print ("Coverage %s is out of range. Defaulting to 0.4"
             % options.coverage)
      options.coverage = 0.4
    if options.coverage_lift < 0:
      print ("Coverage lift %s is out of range. Defaulting to 0"
             % options.coverage_lift)
      options.coverage_lift = 0
    return True

  def _calculate_n_tests(self, m, options):
    """Calculates the number of tests from m deopt points with exponential
    coverage.

    The coverage is expected to be between 0.0 and 1.0.
    The 'coverage lift' lifts the coverage for tests with smaller m values.
    """
    c = float(options.coverage)
    l = float(options.coverage_lift)
    return int(math.pow(m, (m * c + l) / (m + l)))
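
  # For illustration (hand-computed, not part of the original file), with
  # the defaults c=0.4 and l=20:
  #   m=4:   int(4 ** ((4*0.4 + 20) / (4 + 20)))  = int(4 ** 0.9)   = 3
  #   m=100: int(100 ** ((100*0.4 + 20) / 120))   = int(100 ** 0.5) = 10
  # The lift keeps coverage near-complete for tests with few deopt points,
  # while coverage decays toward m ** c as m grows.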

  def _get_default_suite_names(self):
    return DEFAULT_SUITES

  def _do_execute(self, suites, args, options):
    print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                           self.mode_name))

    dist = self._distribution(options)

    # Populate context object.
    timeout = options.timeout
    if timeout == -1:
      # Simulators are slow, therefore allow a longer default timeout.
      if self.build_config.arch in SLOW_ARCHS:
        timeout = 2 * TIMEOUT_DEFAULT
      else:
        timeout = TIMEOUT_DEFAULT

    timeout *= self.mode_options.timeout_scalefactor
    ctx = context.Context(self.build_config.arch,
                          self.mode_options.execution_mode,
                          self.outdir,
                          self.mode_options.flags, options.verbose,
                          timeout, options.isolates,
                          options.command_prefix,
                          options.extra_flags,
                          False,  # Keep i18n on by default.
                          options.random_seed,
                          True,   # No sorting of test cases.
                          0,      # Don't rerun failing tests.
                          0,      # No use of a rerun-failing-tests maximum.
                          False,  # No no_harness mode.
                          False,  # Don't use perf data.
                          False)  # Coverage not supported.

    # Find available test suites and read test cases from them.
    variables = {
      "arch": self.build_config.arch,
      "asan": self.build_config.asan,
      "byteorder": sys.byteorder,
      "dcheck_always_on": self.build_config.dcheck_always_on,
      "deopt_fuzzer": True,
      "gc_fuzzer": False,
      "gc_stress": False,
      "gcov_coverage": self.build_config.gcov_coverage,
      "isolates": options.isolates,
      "mode": self.mode_options.status_mode,
      "msan": self.build_config.msan,
      "no_harness": False,
      "no_i18n": self.build_config.no_i18n,
      "no_snap": self.build_config.no_snap,
      "novfp3": False,
      "predictable": self.build_config.predictable,
      "simulator": utils.UseSimulator(self.build_config.arch),
      "simulator_run": False,
      "system": utils.GuessOS(),
      "tsan": self.build_config.tsan,
      "ubsan_vptr": self.build_config.ubsan_vptr,
    }
    num_tests = 0
    test_id = 0

    # Remember test case prototypes for the fuzzing phase.
    test_backup = dict((s, []) for s in suites)

    for s in suites:
      s.ReadStatusFile(variables)
      s.ReadTestCases(ctx)
      if len(args) > 0:
        s.FilterTestCasesByArgs(args)
      s.FilterTestCasesByStatus(False)

      test_backup[s] = s.tests
      analysis_flags = ["--deopt-every-n-times", "%d" % MAX_DEOPT,
                        "--print-deopt-stress"]
      s.tests = [t.create_variant(t.variant, analysis_flags, 'analysis')
                 for t in s.tests]

      num_tests += len(s.tests)
      for t in s.tests:
        t.id = test_id
        t.cmd = t.get_command(ctx)
        test_id += 1

    if num_tests == 0:
      print "No tests to run."
      return 0

    print(">>> Collection phase")
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
    runner = execution.Runner(suites, progress_indicator, ctx)

    exit_code = runner.Run(options.j)

    print(">>> Analysis phase")
    num_tests = 0
    test_id = 0
    for s in suites:
      test_results = {}
      for t in s.tests:
        for line in runner.outputs[t].stdout.splitlines():
          if line.startswith("=== Stress deopt counter: "):
            test_results[t.path] = MAX_DEOPT - int(line.split(" ")[-1])
      for t in s.tests:
        if t.path not in test_results:
          print "Missing results for %s" % t.path
      if options.dump_results_file:
        results_dict = dict((t.path, n) for (t, n) in test_results.iteritems())
        with file("%s.%d.txt" % (options.dump_results_file, time.time()),
                  "w") as f:
          f.write(json.dumps(results_dict))

      # Reset tests and redistribute the prototypes from the collection phase.
      s.tests = []
      if options.verbose:
        print "Test distributions:"
      for t in test_backup[s]:
        max_deopt = test_results.get(t.path, 0)
        if max_deopt == 0:
          continue
        n_deopt = self._calculate_n_tests(max_deopt, options)
        distribution = dist.Distribute(n_deopt, max_deopt)
        if options.verbose:
          print "%s %s" % (t.path, distribution)
        for n, d in enumerate(distribution):
          fuzzing_flags = ["--deopt-every-n-times", "%d" % d]
          s.tests.append(t.create_variant(t.variant, fuzzing_flags, n))

      num_tests += len(s.tests)
      for t in s.tests:
        t.id = test_id
        t.cmd = t.get_command(ctx)
        test_id += 1

    if num_tests == 0:
      print "No tests to run."
      return exit_code

    print(">>> Deopt fuzzing phase (%d test cases)" % num_tests)
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
    runner = execution.Runner(suites, progress_indicator, ctx)

    code = runner.Run(options.j)
    return exit_code or code


if __name__ == '__main__':
  sys.exit(DeoptFuzzer().execute())
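
For reference, the analysis phase above recovers the number of deopt points per test by running it with --deopt-every-n-times set to the huge MAX_DEOPT value and subtracting the counter echoed by --print-deopt-stress. A minimal standalone sketch of that arithmetic, where the sample stdout line (and its counter value) is hypothetical:

MAX_DEOPT = 1000000000
sample_stdout = "=== Stress deopt counter: 999999958"
for line in sample_stdout.splitlines():
  if line.startswith("=== Stress deopt counter: "):
    print(MAX_DEOPT - int(line.split(" ")[-1]))  # -> 42 deopt points were hit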

@@ -1,328 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2017 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


from os.path import join
import itertools
import json
import math
import multiprocessing
import os
import random
import shlex
import sys
import time

# Adds testrunner to the path, hence it has to be imported at the beginning.
import base_runner

from testrunner.local import execution
from testrunner.local import progress
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.local import verbose
from testrunner.objects import context


DEFAULT_SUITES = ["mjsunit", "webkit", "benchmarks"]
TIMEOUT_DEFAULT = 60

# Double the timeout for these:
SLOW_ARCHS = ["arm",
              "mipsel"]


class GCFuzzer(base_runner.BaseTestRunner):
  def __init__(self, *args, **kwargs):
    super(GCFuzzer, self).__init__(*args, **kwargs)

    self.fuzzer_rng = None

  def _add_parser_options(self, parser):
    parser.add_option("--command-prefix",
                      help="Prepended to each shell command used to run a test",
                      default="")
    parser.add_option("--coverage", help=("Exponential test coverage "
                      "(range 0.0, 1.0) - 0.0: one test, 1.0: all tests (slow)"),
                      default=0.4, type="float")
    parser.add_option("--coverage-lift", help=("Lifts test coverage for tests "
                      "with a low memory size reached (range 0, inf)"),
                      default=20, type="int")
    parser.add_option("--dump-results-file", help="Dump maximum limit reached")
    parser.add_option("--extra-flags",
                      help="Additional flags to pass to each test command",
                      default="")
    parser.add_option("--isolates", help="Whether to test isolates",
                      default=False, action="store_true")
    parser.add_option("-j", help="The number of parallel tasks to run",
                      default=0, type="int")
    parser.add_option("-p", "--progress",
                      help=("The style of progress indicator"
                            " (verbose, dots, color, mono)"),
                      choices=progress.PROGRESS_INDICATORS.keys(),
                      default="mono")
    parser.add_option("-t", "--timeout", help="Timeout in seconds",
                      default=-1, type="int")
    parser.add_option("--random-seed", default=0,
                      help="Default seed for initializing random generator")
    parser.add_option("--fuzzer-random-seed", default=0,
                      help="Default seed for initializing fuzzer random "
                           "generator")
    parser.add_option("--stress-compaction", default=False, action="store_true",
                      help="Enable stress_compaction_random flag")
    parser.add_option("--stress-gc", default=False, action="store_true",
                      help="Enable stress-gc-interval flag")
    parser.add_option("--stress-marking", default=False, action="store_true",
                      help="Enable stress-marking flag")
    parser.add_option("--stress-scavenge", default=False, action="store_true",
                      help="Enable stress-scavenge flag")
    parser.add_option("--distribution-factor1", help="DEPRECATED")
    parser.add_option("--distribution-factor2", help="DEPRECATED")
    parser.add_option("--distribution-mode", help="DEPRECATED")
    parser.add_option("--seed", help="DEPRECATED")
    return parser

  def _process_options(self, options):
    # Special processing of other options, sorted alphabetically.
    options.command_prefix = shlex.split(options.command_prefix)
    options.extra_flags = shlex.split(options.extra_flags)
    if options.j == 0:
      options.j = multiprocessing.cpu_count()
    while options.random_seed == 0:
      options.random_seed = random.SystemRandom().randint(-2147483648,
                                                          2147483647)
    while options.fuzzer_random_seed == 0:
      options.fuzzer_random_seed = random.SystemRandom().randint(-2147483648,
                                                                 2147483647)
    self.fuzzer_rng = random.Random(options.fuzzer_random_seed)
    return True

  def _calculate_n_tests(self, m, options):
    """Calculates the number of tests from m points with exponential coverage.

    The coverage is expected to be between 0.0 and 1.0.
    The 'coverage lift' lifts the coverage for tests with smaller m values.
    """
    c = float(options.coverage)
    l = float(options.coverage_lift)
    return int(math.pow(m, (m * c + l) / (m + l)))

  def _get_default_suite_names(self):
    return DEFAULT_SUITES

  def _do_execute(self, suites, args, options):
    print(">>> Running tests for %s.%s" % (self.build_config.arch,
                                           self.mode_name))

    # Populate context object.
    timeout = options.timeout
    if timeout == -1:
      # Simulators are slow, therefore allow a longer default timeout.
      if self.build_config.arch in SLOW_ARCHS:
        timeout = 2 * TIMEOUT_DEFAULT
      else:
        timeout = TIMEOUT_DEFAULT

    timeout *= self.mode_options.timeout_scalefactor
    ctx = context.Context(self.build_config.arch,
                          self.mode_options.execution_mode,
                          self.outdir,
                          self.mode_options.flags, options.verbose,
                          timeout, options.isolates,
                          options.command_prefix,
                          options.extra_flags,
                          False,  # Keep i18n on by default.
                          options.random_seed,
                          True,   # No sorting of test cases.
                          0,      # Don't rerun failing tests.
                          0,      # No use of a rerun-failing-tests maximum.
                          False,  # No no_harness mode.
                          False,  # Don't use perf data.
                          False)  # Coverage not supported.

    num_tests = self._load_tests(args, options, suites, ctx)
    if num_tests == 0:
      print "No tests to run."
      return 0

    test_backup = dict(map(lambda s: (s, s.tests), suites))

    print('>>> Collection phase')
    for s in suites:
      analysis_flags = ['--fuzzer-gc-analysis']
      s.tests = map(lambda t: t.create_variant(t.variant, analysis_flags,
                                               'analysis'),
                    s.tests)
      for t in s.tests:
        t.cmd = t.get_command(ctx)

    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
    runner = execution.Runner(suites, progress_indicator, ctx)
    exit_code = runner.Run(options.j)

    print('>>> Analysis phase')
    test_results = dict()
    for s in suites:
      for t in s.tests:
        # Skip failed tests.
        if t.output_proc.has_unexpected_output(runner.outputs[t]):
          print '%s failed, skipping' % t.path
          continue
        max_limits = self._get_max_limits_reached(runner.outputs[t])
        if max_limits:
          test_results[t.path] = max_limits

    runner = None

    if options.dump_results_file:
      with file("%s.%d.txt" % (options.dump_results_file, time.time()),
                "w") as f:
        f.write(json.dumps(test_results))

    num_tests = 0
    for s in suites:
      s.tests = []
      for t in test_backup[s]:
        results = test_results.get(t.path)
        if not results:
          continue
        max_marking, max_new_space, max_allocations = results

        # Only when combining the flags, make sure each has a minimum of 1
        # if the other is >= 1. Otherwise we might skip it below.
        if options.stress_marking and options.stress_scavenge:
          if max_new_space:
            max_marking = max(max_marking, 1)
          if max_marking:
            max_new_space = max(max_new_space, 1)

        # Make as many subtests as determined by the most dominant factor
        # (one of marking or new space).
        subtests_count = 1
        base_flags = []
        if options.stress_marking:
          if not max_marking:
            # Skip 0 as it switches off the flag.
            continue
          subtests_count = max(
              subtests_count, self._calculate_n_tests(max_marking, options))
          base_flags += ['--stress_marking', str(max_marking)]
        if options.stress_scavenge:
          if not max_new_space:
            # Skip 0 as it switches off the flag.
            continue
          # Divide by 5, since new space is more dominant than marking.
          subtests_count = max(
              subtests_count, self._calculate_n_tests(
                  max(1, max_new_space / 5), options))
          base_flags += ['--stress_scavenge', str(max_new_space)]
        if options.stress_gc:
          # Only makes sense in combination with other flags, since we always
          # reach our upper limit of 5000.
          base_flags += ['--random-gc-interval', str(max_allocations)]
        if options.stress_compaction:
          base_flags.append('--stress_compaction_random')

        for i in xrange(0, subtests_count):
          fuzzing_flags = [
            '--fuzzer_random_seed', str(self._next_fuzzer_seed()),
          ] + base_flags
          s.tests.append(t.create_variant(t.variant, fuzzing_flags, i))
      for t in s.tests:
        t.cmd = t.get_command(ctx)
      num_tests += len(s.tests)

    if num_tests == 0:
      print "No tests to run."
      return exit_code

    print(">>> Fuzzing phase (%d test cases)" % num_tests)
    progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
    runner = execution.Runner(suites, progress_indicator, ctx)
    return runner.Run(options.j) or exit_code
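
  # For illustration (hand-computed; the analysis numbers are hypothetical):
  # with --stress-marking and --stress-gc, and analysis results
  # (max_marking, max_new_space, max_allocations) = (60, 40, 1200), the loop
  # above produces
  #   subtests_count = int(60 ** ((60*0.4 + 20) / (60 + 20)))
  #                  = int(60 ** 0.55) = 9
  #   base_flags = ['--stress_marking', '60', '--random-gc-interval', '1200']
  # and each of the 9 subtests gets its own '--fuzzer_random_seed' value.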

  def _load_tests(self, args, options, suites, ctx):
    # Find available test suites and read test cases from them.
    variables = {
      "arch": self.build_config.arch,
      "asan": self.build_config.asan,
      "byteorder": sys.byteorder,
      "dcheck_always_on": self.build_config.dcheck_always_on,
      "deopt_fuzzer": False,
      "gc_fuzzer": True,
      "gc_stress": True,
      "gcov_coverage": self.build_config.gcov_coverage,
      "isolates": options.isolates,
      "mode": self.mode_options.status_mode,
      "msan": self.build_config.msan,
      "no_harness": False,
      "no_i18n": self.build_config.no_i18n,
      "no_snap": self.build_config.no_snap,
      "novfp3": False,
      "predictable": self.build_config.predictable,
      "simulator": utils.UseSimulator(self.build_config.arch),
      "simulator_run": False,
      "system": utils.GuessOS(),
      "tsan": self.build_config.tsan,
      "ubsan_vptr": self.build_config.ubsan_vptr,
    }

    num_tests = 0
    test_id = 0
    for s in suites:
      s.ReadStatusFile(variables)
      s.ReadTestCases(ctx)
      if len(args) > 0:
        s.FilterTestCasesByArgs(args)
      s.FilterTestCasesByStatus(False)

      num_tests += len(s.tests)
      for t in s.tests:
        t.id = test_id
        test_id += 1

    return num_tests

  # Parses test stdout and returns the highest reached percent of the
  # incremental marking limit (0-100), new space size (0-100) and allocations
  # (6-5000).
  @staticmethod
  def _get_max_limits_reached(output):
    """Returns: tuple (max marking, max new space, allocations)"""
    if not output.stdout:
      return None

    results = [0, 0, 0]
    for l in reversed(output.stdout.splitlines()):
      if l.startswith('### Maximum marking limit reached ='):
        results[0] = float(l.split()[6])
      elif l.startswith('### Maximum new space size reached ='):
        results[1] = float(l.split()[7])
      elif l.startswith('### Allocations ='):
        # Also strip the trailing comma after split.
        results[2] = int(l.split()[3][:-1])

      if all(results):
        break

    if any(results):
      return (
        max(0, int(results[0])),
        max(0, int(results[1])),
        min(5000, max(6, results[2])),
      )
    return None
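
  # Example of the parsing above on hypothetical stdout lines:
  #   '### Maximum marking limit reached = 54.5'  -> results[0] = 54.5
  #   '### Maximum new space size reached = 81.2' -> results[1] = 81.2
  #   '### Allocations = 1234,'                   -> results[2] = 1234
  # which the int/clamp step returns as (54, 81, 1234).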

  def _next_fuzzer_seed(self):
    fuzzer_seed = None
    while not fuzzer_seed:
      fuzzer_seed = self.fuzzer_rng.randint(-2147483648, 2147483647)
    return fuzzer_seed


if __name__ == '__main__':
  sys.exit(GCFuzzer().execute())