Refactoring: Make gtest testsuite the default.

BUG=v8:3489
R=bmeurer@chromium.org
LOG=n

Review URL: https://codereview.chromium.org/526133003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@23583 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
machenbach@chromium.org committed 2014-09-02 09:11:13 +00:00
parent de59be7cad, commit a050734c35
9 changed files with 43 additions and 1051 deletions


@@ -3,7 +3,7 @@
   "main": "run.js",
   "run_count": 2,
   "results_regexp": "^%s: (.+)$",
-  "tests": [
+  "benchmarks": [
     {"name": "Richards"},
     {"name": "DeltaBlue"},
     {"name": "Crypto"},


@@ -1,52 +0,0 @@
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
from testrunner.local import commands
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
class BaseUnitTestsSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(BaseUnitTestsSuite, self).__init__(name, root)
def ListTests(self, context):
shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
if utils.IsWindows():
shell += ".exe"
output = commands.Execute(context.command_prefix +
[shell, "--gtest_list_tests"] +
context.extra_flags)
if output.exit_code != 0:
print output.stdout
print output.stderr
return []
tests = []
test_case = ''
for line in output.stdout.splitlines():
test_desc = line.strip().split()[0]
if test_desc.endswith('.'):
test_case = test_desc
elif test_case and test_desc:
test = testcase.TestCase(self, test_case + test_desc, dependency=None)
tests.append(test)
tests.sort()
return tests
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + ["--gtest_filter=" + testcase.path] +
["--gtest_random_seed=%s" % context.random_seed] +
["--gtest_print_time=0"] +
context.mode_flags)
def shell(self):
return "base-unittests"
def GetSuite(name, root):
return BaseUnitTestsSuite(name, root)
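
The three deleted files that follow (for the compiler-, heap-, and libplatform-unittests suites) repeat this testcfg.py verbatim except for the class name and the binary returned by shell(); that duplication is exactly what the new GoogleTestSuite default in testsuite.py replaces.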


@@ -1,52 +0,0 @@
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
from testrunner.local import commands
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
class CompilerUnitTestsSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(CompilerUnitTestsSuite, self).__init__(name, root)
def ListTests(self, context):
shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
if utils.IsWindows():
shell += ".exe"
output = commands.Execute(context.command_prefix +
[shell, "--gtest_list_tests"] +
context.extra_flags)
if output.exit_code != 0:
print output.stdout
print output.stderr
return []
tests = []
test_case = ''
for line in output.stdout.splitlines():
test_desc = line.strip().split()[0]
if test_desc.endswith('.'):
test_case = test_desc
elif test_case and test_desc:
test = testcase.TestCase(self, test_case + test_desc, dependency=None)
tests.append(test)
tests.sort()
return tests
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + ["--gtest_filter=" + testcase.path] +
["--gtest_random_seed=%s" % context.random_seed] +
["--gtest_print_time=0"] +
context.mode_flags)
def shell(self):
return "compiler-unittests"
def GetSuite(name, root):
return CompilerUnitTestsSuite(name, root)


@@ -1,52 +0,0 @@
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
from testrunner.local import commands
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
class HeapUnitTestsSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(HeapUnitTestsSuite, self).__init__(name, root)
def ListTests(self, context):
shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
if utils.IsWindows():
shell += ".exe"
output = commands.Execute(context.command_prefix +
[shell, "--gtest_list_tests"] +
context.extra_flags)
if output.exit_code != 0:
print output.stdout
print output.stderr
return []
tests = []
test_case = ''
for line in output.stdout.splitlines():
test_desc = line.strip().split()[0]
if test_desc.endswith('.'):
test_case = test_desc
elif test_case and test_desc:
test = testcase.TestCase(self, test_case + test_desc, dependency=None)
tests.append(test)
tests.sort()
return tests
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + ["--gtest_filter=" + testcase.path] +
["--gtest_random_seed=%s" % context.random_seed] +
["--gtest_print_time=0"] +
context.mode_flags)
def shell(self):
return "heap-unittests"
def GetSuite(name, root):
return HeapUnitTestsSuite(name, root)


@@ -1,52 +0,0 @@
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import shutil
from testrunner.local import commands
from testrunner.local import testsuite
from testrunner.local import utils
from testrunner.objects import testcase
class LibplatformUnitTestsSuite(testsuite.TestSuite):
def __init__(self, name, root):
super(LibplatformUnitTestsSuite, self).__init__(name, root)
def ListTests(self, context):
shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
if utils.IsWindows():
shell += ".exe"
output = commands.Execute(context.command_prefix +
[shell, "--gtest_list_tests"] +
context.extra_flags)
if output.exit_code != 0:
print output.stdout
print output.stderr
return []
tests = []
test_case = ''
for line in output.stdout.splitlines():
test_desc = line.strip().split()[0]
if test_desc.endswith('.'):
test_case = test_desc
elif test_case and test_desc:
test = testcase.TestCase(self, test_case + test_desc, dependency=None)
tests.append(test)
tests.sort()
return tests
def GetFlagsForTestCase(self, testcase, context):
return (testcase.flags + ["--gtest_filter=" + testcase.path] +
["--gtest_random_seed=%s" % context.random_seed] +
["--gtest_print_time=0"] +
context.mode_flags)
def shell(self):
return "libplatform-unittests"
def GetSuite(name, root):
return LibplatformUnitTestsSuite(name, root)


@@ -1,488 +0,0 @@
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Performance runner for d8.
Call e.g. with tools/run-perf.py --arch ia32 some_suite.json
The suite json format is expected to be:
{
"path": <relative path chunks to perf resources and main file>,
"name": <optional suite name, file name is default>,
"archs": [<architecture name for which this suite is run>, ...],
"binary": <name of binary to run, default "d8">,
"flags": [<flag to d8>, ...],
"run_count": <how often will this suite run (optional)>,
"run_count_XXX": <how often will this suite run for arch XXX (optional)>,
"resources": [<js file to be loaded before main>, ...]
"main": <main js perf runner file>,
"results_regexp": <optional regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
"tests": [
{
"name": <name of the trace>,
"results_regexp": <optional more specific regexp>,
"results_processor": <optional python results processor script>,
"units": <the unit specification for the performance dashboard>,
}, ...
]
}
The tests field can also nest other suites in arbitrary depth. A suite
with a "main" file is a leaf suite that can contain one more level of
tests.
A suite's results_regexp is expected to have one string place holder
"%s" for the trace name. A trace's results_regexp overwrites suite
defaults.
A suite's results_processor may point to an optional python script. If
specified, it is called after running the tests like this (with a path
relative to the suite level's path):
<results_processor file> <same flags as for d8> <suite level name> <output>
The <output> is a temporary file containing d8 output. The results_regexp will
be applied to the output of this script.
A suite without "tests" is considered a performance test itself.
Full example (suite with one runner):
{
"path": ["."],
"flags": ["--expose-gc"],
"archs": ["ia32", "x64"],
"run_count": 5,
"run_count_ia32": 3,
"main": "run.js",
"results_regexp": "^%s: (.+)$",
"units": "score",
"tests": [
{"name": "Richards"},
{"name": "DeltaBlue"},
{"name": "NavierStokes",
"results_regexp": "^NavierStokes: (.+)$"}
]
}
Full example (suite with several runners):
{
"path": ["."],
"flags": ["--expose-gc"],
"archs": ["ia32", "x64"],
"run_count": 5,
"units": "score",
"tests": [
{"name": "Richards",
"path": ["richards"],
"main": "run.js",
"run_count": 3,
"results_regexp": "^Richards: (.+)$"},
{"name": "NavierStokes",
"path": ["navier_stokes"],
"main": "run.js",
"results_regexp": "^NavierStokes: (.+)$"}
]
}
Path pieces are concatenated. D8 is always run with the suite's path as cwd.
"""
import json
import math
import optparse
import os
import re
import sys
from testrunner.local import commands
from testrunner.local import utils
ARCH_GUESS = utils.DefaultArch()
SUPPORTED_ARCHS = ["android_arm",
"android_arm64",
"android_ia32",
"arm",
"ia32",
"mips",
"mipsel",
"nacl_ia32",
"nacl_x64",
"x64",
"arm64"]
GENERIC_RESULTS_RE = re.compile(
r"^Trace\(([^\)]+)\), Result\(([^\)]+)\), StdDev\(([^\)]+)\)$")
def GeometricMean(values):
"""Returns the geometric mean of a list of values.
The mean is calculated using log to avoid overflow.
"""
values = map(float, values)
return str(math.exp(sum(map(math.log, values)) / len(values)))
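# Illustration (hypothetical values, not part of the original file):
# GeometricMean([2, 8]) evaluates exp((log(2) + log(8)) / 2) == exp(log(4)),
# i.e. "4.0" -- the same result as the direct form (2 * 8) ** 0.5, but
# summing logs instead of multiplying keeps intermediate values small for
# long result lists.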
class Results(object):
"""Place holder for result traces."""
def __init__(self, traces=None, errors=None):
self.traces = traces or []
self.errors = errors or []
def ToDict(self):
return {"traces": self.traces, "errors": self.errors}
def WriteToFile(self, file_name):
with open(file_name, "w") as f:
f.write(json.dumps(self.ToDict()))
def __add__(self, other):
self.traces += other.traces
self.errors += other.errors
return self
def __str__(self): # pragma: no cover
return str(self.ToDict())
class Node(object):
"""Represents a node in the suite tree structure."""
def __init__(self, *args):
self._children = []
def AppendChild(self, child):
self._children.append(child)
class DefaultSentinel(Node):
"""Fake parent node with all default values."""
def __init__(self):
super(DefaultSentinel, self).__init__()
self.binary = "d8"
self.run_count = 10
self.path = []
self.graphs = []
self.flags = []
self.resources = []
self.results_regexp = None
self.stddev_regexp = None
self.units = "score"
self.total = False
class Graph(Node):
"""Represents a suite definition.
Can either be a leaf or an inner node that provides default values.
"""
def __init__(self, suite, parent, arch):
super(Graph, self).__init__()
self._suite = suite
assert isinstance(suite.get("path", []), list)
assert isinstance(suite["name"], basestring)
assert isinstance(suite.get("flags", []), list)
assert isinstance(suite.get("resources", []), list)
# Accumulated values.
self.path = parent.path[:] + suite.get("path", [])
self.graphs = parent.graphs[:] + [suite["name"]]
self.flags = parent.flags[:] + suite.get("flags", [])
self.resources = parent.resources[:] + suite.get("resources", [])
# Discrete values (with parent defaults).
self.binary = suite.get("binary", parent.binary)
self.run_count = suite.get("run_count", parent.run_count)
self.run_count = suite.get("run_count_%s" % arch, self.run_count)
self.units = suite.get("units", parent.units)
self.total = suite.get("total", parent.total)
# A regular expression for results. If the parent graph provides a
# regexp and the current suite has none, a string place holder for the
# suite name is expected.
# TODO(machenbach): Currently that makes only sense for the leaf level.
# Multiple place holders for multiple levels are not supported.
if parent.results_regexp:
regexp_default = parent.results_regexp % re.escape(suite["name"])
else:
regexp_default = None
self.results_regexp = suite.get("results_regexp", regexp_default)
# A similar regular expression for the standard deviation (optional).
if parent.stddev_regexp:
stddev_default = parent.stddev_regexp % re.escape(suite["name"])
else:
stddev_default = None
self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
class Trace(Graph):
"""Represents a leaf in the suite tree structure.
Handles collection of measurements.
"""
def __init__(self, suite, parent, arch):
super(Trace, self).__init__(suite, parent, arch)
assert self.results_regexp
self.results = []
self.errors = []
self.stddev = ""
def ConsumeOutput(self, stdout):
try:
self.results.append(
re.search(self.results_regexp, stdout, re.M).group(1))
except:
self.errors.append("Regexp \"%s\" didn't match for test %s."
% (self.results_regexp, self.graphs[-1]))
try:
if self.stddev_regexp and self.stddev:
self.errors.append("Test %s should only run once since a stddev "
"is provided by the test." % self.graphs[-1])
if self.stddev_regexp:
self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
except:
self.errors.append("Regexp \"%s\" didn't match for test %s."
% (self.stddev_regexp, self.graphs[-1]))
def GetResults(self):
return Results([{
"graphs": self.graphs,
"units": self.units,
"results": self.results,
"stddev": self.stddev,
}], self.errors)
class Runnable(Graph):
"""Represents a runnable suite definition (i.e. has a main file).
"""
@property
def main(self):
return self._suite.get("main", "")
def ChangeCWD(self, suite_path):
"""Changes the cwd to to path defined in the current graph.
The tests are supposed to be relative to the suite configuration.
"""
suite_dir = os.path.abspath(os.path.dirname(suite_path))
bench_dir = os.path.normpath(os.path.join(*self.path))
os.chdir(os.path.join(suite_dir, bench_dir))
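# Illustration (hypothetical paths, not part of the original file): for a
# suite file /ws/test/perf.json with "path": ["richards"], suite_dir is
# /ws/test and the runner chdirs to /ws/test/richards before starting d8.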
def GetCommand(self, shell_dir):
# TODO(machenbach): This requires +.exe if run on windows.
return (
[os.path.join(shell_dir, self.binary)] +
self.flags +
self.resources +
[self.main]
)
def Run(self, runner):
"""Iterates over several runs and handles the output for all traces."""
for stdout in runner():
for trace in self._children:
trace.ConsumeOutput(stdout)
res = reduce(lambda r, t: r + t.GetResults(), self._children, Results())
if not res.traces or not self.total:
return res
# Assume all traces have the same structure.
if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
res.errors.append("Not all traces have the same number of results.")
return res
# Calculate the geometric means for all traces. Above we made sure that
# there is at least one trace and that the number of results is the same
# for each trace.
n_results = len(res.traces[0]["results"])
total_results = [GeometricMean(t["results"][i] for t in res.traces)
for i in range(0, n_results)]
res.traces.append({
"graphs": self.graphs + ["Total"],
"units": res.traces[0]["units"],
"results": total_results,
"stddev": "",
})
return res
class RunnableTrace(Trace, Runnable):
"""Represents a runnable suite definition that is a leaf."""
def __init__(self, suite, parent, arch):
super(RunnableTrace, self).__init__(suite, parent, arch)
def Run(self, runner):
"""Iterates over several runs and handles the output."""
for stdout in runner():
self.ConsumeOutput(stdout)
return self.GetResults()
class RunnableGeneric(Runnable):
"""Represents a runnable suite definition with generic traces."""
def __init__(self, suite, parent, arch):
super(RunnableGeneric, self).__init__(suite, parent, arch)
def Run(self, runner):
"""Iterates over several runs and handles the output."""
traces = {}
for stdout in runner():
for line in stdout.strip().splitlines():
match = GENERIC_RESULTS_RE.match(line)
if match:
trace = match.group(1)
result = match.group(2)
stddev = match.group(3)
trace_result = traces.setdefault(trace, Results([{
"graphs": self.graphs + [trace],
"units": self.units,
"results": [],
"stddev": "",
}], []))
trace_result.traces[0]["results"].append(result)
trace_result.traces[0]["stddev"] = stddev
return reduce(lambda r, t: r + t, traces.itervalues(), Results())
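# Illustration (made-up values, not part of the original file): a d8 line
# "Trace(Test1), Result(1.234), StdDev(0.23)" matches GENERIC_RESULTS_RE and
# becomes the trace {"graphs": [..., "Test1"], "results": ["1.234"],
# "stddev": "0.23"}; further runs append to "results" and overwrite "stddev".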
def MakeGraph(suite, arch, parent):
"""Factory method for making graph objects."""
if isinstance(parent, Runnable):
# Below a runnable can only be traces.
return Trace(suite, parent, arch)
elif suite.get("main"):
# A main file makes this graph runnable.
if suite.get("tests"):
# This graph has subgraphs (traces).
return Runnable(suite, parent, arch)
else:
# This graph has no subgraphs, it's a leaf.
return RunnableTrace(suite, parent, arch)
elif suite.get("generic"):
# This is a generic suite definition. It is either a runnable executable
# or has a main js file.
return RunnableGeneric(suite, parent, arch)
elif suite.get("tests"):
# This is neither a leaf nor a runnable.
return Graph(suite, parent, arch)
else: # pragma: no cover
raise Exception("Invalid suite configuration.")
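# Illustration (not part of the original file): the dispatch above in brief --
#   parent is a Runnable        -> Trace (leaf measurement)
#   has "main" and "tests"      -> Runnable (runs; children are traces)
#   has "main" but no "tests"   -> RunnableTrace (runs and measures itself)
#   has "generic"               -> RunnableGeneric (parses Trace(...) lines)
#   has only "tests"            -> Graph (pure grouping node)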
def BuildGraphs(suite, arch, parent=None):
"""Builds a tree structure of graph objects that corresponds to the suite
configuration.
"""
parent = parent or DefaultSentinel()
# TODO(machenbach): Implement notion of cpu type?
if arch not in suite.get("archs", ["ia32", "x64"]):
return None
graph = MakeGraph(suite, arch, parent)
for subsuite in suite.get("tests", []):
BuildGraphs(subsuite, arch, graph)
parent.AppendChild(graph)
return graph
def FlattenRunnables(node):
"""Generator that traverses the tree structure and iterates over all
runnables.
"""
if isinstance(node, Runnable):
yield node
elif isinstance(node, Node):
for child in node._children:
for result in FlattenRunnables(child):
yield result
else: # pragma: no cover
raise Exception("Invalid suite configuration.")
# TODO: Implement results_processor.
def Main(args):
parser = optparse.OptionParser()
parser.add_option("--arch",
help=("The architecture to run tests for, "
"'auto' or 'native' for auto-detect"),
default="x64")
parser.add_option("--buildbot",
help="Adapt to path structure used on buildbots",
default=False, action="store_true")
parser.add_option("--json-test-results",
help="Path to a file for storing json results.")
parser.add_option("--outdir", help="Base directory with compile output",
default="out")
(options, args) = parser.parse_args(args)
if len(args) == 0: # pragma: no cover
parser.print_help()
return 1
if options.arch in ["auto", "native"]: # pragma: no cover
options.arch = ARCH_GUESS
if not options.arch in SUPPORTED_ARCHS: # pragma: no cover
print "Unknown architecture %s" % options.arch
return 1
workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
if options.buildbot:
shell_dir = os.path.join(workspace, options.outdir, "Release")
else:
shell_dir = os.path.join(workspace, options.outdir,
"%s.release" % options.arch)
results = Results()
for path in args:
path = os.path.abspath(path)
if not os.path.exists(path): # pragma: no cover
results.errors.append("Configuration file %s does not exist." % path)
continue
with open(path) as f:
suite = json.loads(f.read())
# If no name is given, default to the file name without .json.
suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
for runnable in FlattenRunnables(BuildGraphs(suite, options.arch)):
print ">>> Running suite: %s" % "/".join(runnable.graphs)
runnable.ChangeCWD(path)
def Runner():
"""Output generator that reruns several times."""
for i in xrange(0, max(1, runnable.run_count)):
# TODO(machenbach): Make timeout configurable in the suite definition.
# Allow timeout per arch like with run_count per arch.
output = commands.Execute(runnable.GetCommand(shell_dir), timeout=60)
print ">>> Stdout (#%d):" % (i + 1)
print output.stdout
if output.stderr: # pragma: no cover
# Print stderr for debugging.
print ">>> Stderr (#%d):" % (i + 1)
print output.stderr
yield output.stdout
# Let runnable iterate over all runs and handle output.
results += runnable.Run(Runner)
if options.json_test_results:
results.WriteToFile(options.json_test_results)
else: # pragma: no cover
print results
return min(1, len(results.errors))
if __name__ == "__main__": # pragma: no cover
sys.exit(Main(sys.argv[1:]))


@@ -41,11 +41,13 @@ class TestSuite(object):
     try:
       (f, pathname, description) = imp.find_module("testcfg", [root])
       module = imp.load_module("testcfg", f, pathname, description)
-      suite = module.GetSuite(name, root)
+      return module.GetSuite(name, root)
+    except:
+      # Use default if no testcfg is present.
+      return GoogleTestSuite(name, root)
     finally:
       if f:
         f.close()
-    return suite
 
   def __init__(self, name, root):
     self.name = name  # string
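
In effect, a suite directory that ships no testcfg.py now resolves to the gtest default: imp.find_module raises, and the except branch returns a GoogleTestSuite (added below) whose shell binary is simply the suite's name.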
@@ -214,3 +216,40 @@ class TestSuite(object):
     for t in self.tests:
       self.total_duration += t.duration
     return self.total_duration
+
+
+class GoogleTestSuite(TestSuite):
+  def __init__(self, name, root):
+    super(GoogleTestSuite, self).__init__(name, root)
+
+  def ListTests(self, context):
+    shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
+    if utils.IsWindows():
+      shell += ".exe"
+    output = commands.Execute(context.command_prefix +
+                              [shell, "--gtest_list_tests"] +
+                              context.extra_flags)
+    if output.exit_code != 0:
+      print output.stdout
+      print output.stderr
+      return []
+    tests = []
+    test_case = ''
+    for line in output.stdout.splitlines():
+      test_desc = line.strip().split()[0]
+      if test_desc.endswith('.'):
+        test_case = test_desc
+      elif test_case and test_desc:
+        test = testcase.TestCase(self, test_case + test_desc, dependency=None)
+        tests.append(test)
+    tests.sort()
+    return tests
+
+  def GetFlagsForTestCase(self, testcase, context):
+    return (testcase.flags + ["--gtest_filter=" + testcase.path] +
+            ["--gtest_random_seed=%s" % context.random_seed] +
+            ["--gtest_print_time=0"] +
+            context.mode_flags)
+
+  def shell(self):
+    return self.name
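
For reference, --gtest_list_tests groups tests under their test case: a line ending in "." names the case, and the indented lines beneath it name its tests. A minimal sketch of the parsing loop above, using made-up test names (Python 2, like the surrounding code):

listing = ("BitsTest.\n"
           "  CountPopulation32\n"
           "  CountTrailingZeros32\n"
           "LoggerTest.\n"
           "  Basic\n")
tests = []
test_case = ''
for line in listing.splitlines():
  test_desc = line.strip().split()[0]
  if test_desc.endswith('.'):    # "BitsTest." opens a new test case.
    test_case = test_desc
  elif test_case and test_desc:  # Indented entry: a test in the current case.
    tests.append(test_case + test_desc)
print tests  # ['BitsTest.CountPopulation32', 'BitsTest.CountTrailingZeros32',
             #  'LoggerTest.Basic']

Each such name is later fed back through --gtest_filter in GetFlagsForTestCase to run exactly that test.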


@@ -36,9 +36,7 @@ import urllib2
 def GetSuitePaths(test_root):
-  def IsSuite(path):
-    return isdir(path) and exists(join(path, 'testcfg.py'))
-  return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
+  return [ f for f in os.listdir(test_root) if isdir(join(test_root, f)) ]
 
 # Reads a file into an array of strings
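
Together with the testsuite.py change above, this closes the loop: every directory under the test root is now a suite candidate, and any candidate without a testcfg.py (such as the four unittests suites whose configs this commit deletes) is loaded as a GoogleTestSuite by default.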


@@ -1,349 +0,0 @@
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import namedtuple
import coverage
import json
from mock import DEFAULT
from mock import MagicMock
import os
from os import path, sys
import shutil
import tempfile
import unittest
# Requires python-coverage and python-mock. Native python coverage
# version >= 3.7.1 should be installed to get the best speed.
TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-perf")
V8_JSON = {
"path": ["."],
"binary": "d7",
"flags": ["--flag"],
"main": "run.js",
"run_count": 1,
"results_regexp": "^%s: (.+)$",
"tests": [
{"name": "Richards"},
{"name": "DeltaBlue"},
]
}
V8_NESTED_SUITES_JSON = {
"path": ["."],
"flags": ["--flag"],
"run_count": 1,
"units": "score",
"tests": [
{"name": "Richards",
"path": ["richards"],
"binary": "d7",
"main": "run.js",
"resources": ["file1.js", "file2.js"],
"run_count": 2,
"results_regexp": "^Richards: (.+)$"},
{"name": "Sub",
"path": ["sub"],
"tests": [
{"name": "Leaf",
"path": ["leaf"],
"run_count_x64": 3,
"units": "ms",
"main": "run.js",
"results_regexp": "^Simple: (.+) ms.$"},
]
},
{"name": "DeltaBlue",
"path": ["delta_blue"],
"main": "run.js",
"flags": ["--flag2"],
"results_regexp": "^DeltaBlue: (.+)$"},
{"name": "ShouldntRun",
"path": ["."],
"archs": ["arm"],
"main": "run.js"},
]
}
V8_GENERIC_JSON = {
"path": ["."],
"binary": "cc",
"flags": ["--flag"],
"generic": True,
"run_count": 1,
"units": "ms",
}
Output = namedtuple("Output", "stdout, stderr")
class PerfTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.base = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(cls.base)
cls._cov = coverage.coverage(
include=([os.path.join(cls.base, "run_perf.py")]))
cls._cov.start()
import run_perf
from testrunner.local import commands
global commands
global run_perf
@classmethod
def tearDownClass(cls):
cls._cov.stop()
print ""
print cls._cov.report()
def setUp(self):
self.maxDiff = None
if path.exists(TEST_WORKSPACE):
shutil.rmtree(TEST_WORKSPACE)
os.makedirs(TEST_WORKSPACE)
def tearDown(self):
if path.exists(TEST_WORKSPACE):
shutil.rmtree(TEST_WORKSPACE)
def _WriteTestInput(self, json_content):
self._test_input = path.join(TEST_WORKSPACE, "test.json")
with open(self._test_input, "w") as f:
f.write(json.dumps(json_content))
def _MockCommand(self, *args):
# Fake output for each test run.
test_outputs = [Output(stdout=arg, stderr=None) for arg in args[1]]
def execute(*args, **kwargs):
return test_outputs.pop()
commands.Execute = MagicMock(side_effect=execute)
# Check that d8 is called from the correct cwd for each test run.
dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
def chdir(*args, **kwargs):
self.assertEquals(dirs.pop(), args[0])
os.chdir = MagicMock(side_effect=chdir)
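# Note (not part of the original file): both mocks pop() from the end of
# their lists, so outputs and working directories are consumed in reverse;
# that is why the tests below pass dirs/outputs in reverse run order and
# expect per-run results reversed (e.g. ["50", "100"] for mocked outputs
# "Richards: 100" then "Richards: 50").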
def _CallMain(self, *args):
self._test_output = path.join(TEST_WORKSPACE, "results.json")
all_args=[
"--json-test-results",
self._test_output,
self._test_input,
]
all_args += args
return run_perf.Main(all_args)
def _LoadResults(self):
with open(self._test_output) as f:
return json.load(f)
def _VerifyResults(self, suite, units, traces):
self.assertEquals([
{"units": units,
"graphs": [suite, trace["name"]],
"results": trace["results"],
"stddev": trace["stddev"]} for trace in traces],
self._LoadResults()["traces"])
def _VerifyErrors(self, errors):
self.assertEquals(errors, self._LoadResults()["errors"])
def _VerifyMock(self, binary, *args):
arg = [path.join(path.dirname(self.base), binary)]
arg += args
commands.Execute.assert_called_with(arg, timeout=60)
def _VerifyMockMultiple(self, *args):
expected = []
for arg in args:
a = [path.join(path.dirname(self.base), arg[0])]
a += arg[1:]
expected.append(((a,), {"timeout": 60}))
self.assertEquals(expected, commands.Execute.call_args_list)
def testOneRun(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testTwoRuns_Units_SuiteName(self):
test_input = dict(V8_JSON)
test_input["run_count"] = 2
test_input["name"] = "v8"
test_input["units"] = "ms"
self._WriteTestInput(test_input)
self._MockCommand([".", "."],
["Richards: 100\nDeltaBlue: 200\n",
"Richards: 50\nDeltaBlue: 300\n"])
self.assertEquals(0, self._CallMain())
self._VerifyResults("v8", "ms", [
{"name": "Richards", "results": ["50", "100"], "stddev": ""},
{"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testTwoRuns_SubRegexp(self):
test_input = dict(V8_JSON)
test_input["run_count"] = 2
del test_input["results_regexp"]
test_input["tests"][0]["results_regexp"] = "^Richards: (.+)$"
test_input["tests"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
self._WriteTestInput(test_input)
self._MockCommand([".", "."],
["Richards: 100\nDeltaBlue: 200\n",
"Richards: 50\nDeltaBlue: 300\n"])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["50", "100"], "stddev": ""},
{"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testNestedSuite(self):
self._WriteTestInput(V8_NESTED_SUITES_JSON)
self._MockCommand(["delta_blue", "sub/leaf", "richards"],
["DeltaBlue: 200\n",
"Simple: 1 ms.\n",
"Simple: 2 ms.\n",
"Simple: 3 ms.\n",
"Richards: 100\n",
"Richards: 50\n"])
self.assertEquals(0, self._CallMain())
self.assertEquals([
{"units": "score",
"graphs": ["test", "Richards"],
"results": ["50", "100"],
"stddev": ""},
{"units": "ms",
"graphs": ["test", "Sub", "Leaf"],
"results": ["3", "2", "1"],
"stddev": ""},
{"units": "score",
"graphs": ["test", "DeltaBlue"],
"results": ["200"],
"stddev": ""},
], self._LoadResults()["traces"])
self._VerifyErrors([])
self._VerifyMockMultiple(
(path.join("out", "x64.release", "d7"), "--flag", "file1.js",
"file2.js", "run.js"),
(path.join("out", "x64.release", "d7"), "--flag", "file1.js",
"file2.js", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "run.js"),
(path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))
def testOneRunStdDevRegExp(self):
test_input = dict(V8_JSON)
test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
self._WriteTestInput(test_input)
self._MockCommand(["."], ["Richards: 1.234\nRichards-stddev: 0.23\n"
"DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n"])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": "0.23"},
{"name": "DeltaBlue", "results": ["10657567"], "stddev": "106"},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testTwoRunsStdDevRegExp(self):
test_input = dict(V8_JSON)
test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
test_input["run_count"] = 2
self._WriteTestInput(test_input)
self._MockCommand(["."], ["Richards: 3\nRichards-stddev: 0.7\n"
"DeltaBlue: 6\nDeltaBlue-boom: 0.9\n",
"Richards: 2\nRichards-stddev: 0.5\n"
"DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"])
self.assertEquals(1, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["2", "3"], "stddev": "0.7"},
{"name": "DeltaBlue", "results": ["5", "6"], "stddev": "0.8"},
])
self._VerifyErrors(
["Test Richards should only run once since a stddev is provided "
"by the test.",
"Test DeltaBlue should only run once since a stddev is provided "
"by the test.",
"Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for test "
"DeltaBlue."])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testBuildbot(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
self.assertEquals(0, self._CallMain("--buildbot"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
def testBuildbotWithTotal(self):
test_input = dict(V8_JSON)
test_input["total"] = True
self._WriteTestInput(test_input)
self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
self.assertEquals(0, self._CallMain("--buildbot"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": ["1.234"], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
{"name": "Total", "results": ["3626.49109719"], "stddev": ""},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
def testBuildbotWithTotalAndErrors(self):
test_input = dict(V8_JSON)
test_input["total"] = True
self._WriteTestInput(test_input)
self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
self.assertEquals(1, self._CallMain("--buildbot"))
self._VerifyResults("test", "score", [
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
])
self._VerifyErrors(
["Regexp \"^Richards: (.+)$\" didn't match for test Richards.",
"Not all traces have the same number of results."])
self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
def testRegexpNoMatch(self):
self._WriteTestInput(V8_JSON)
self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
self.assertEquals(1, self._CallMain())
self._VerifyResults("test", "score", [
{"name": "Richards", "results": [], "stddev": ""},
{"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
])
self._VerifyErrors(
["Regexp \"^Richards: (.+)$\" didn't match for test Richards."])
self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
def testOneRunGeneric(self):
test_input = dict(V8_GENERIC_JSON)
self._WriteTestInput(test_input)
self._MockCommand(["."], [
"Trace(Test1), Result(1.234), StdDev(0.23)\n"
"Trace(Test2), Result(10657567), StdDev(106)\n"])
self.assertEquals(0, self._CallMain())
self._VerifyResults("test", "ms", [
{"name": "Test1", "results": ["1.234"], "stddev": "0.23"},
{"name": "Test2", "results": ["10657567"], "stddev": "106"},
])
self._VerifyErrors([])
self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")