Make benchmark runner understand chromium perf output.

BUG=406405
LOG=n
TEST=python -m unittest run_benchmarks_test
R=bmeurer@chromium.org

Review URL: https://codereview.chromium.org/498163002

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24509 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent c493ac26be
commit bb117b4dde
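For context: the Chromium perf framework prints one line per measurement in the
form "RESULT <graph>: <trace>= <value> <units>", where <value> is either a plain
scalar, a "[v1,v2,...]" list, or a "{mean,stddev}" pair. Below is a minimal
standalone sketch of the parsing this patch introduces; the regexes and sample
lines are taken verbatim from the diff and test below, and the branching mirrors
RunnableGeneric.Run (with list comprehensions standing in for the patch's
Python 2 map calls):

import re

GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")

SAMPLE = [
    "RESULT Infra: Constant1= 11 count",            # plain scalar
    "RESULT Infra: Constant2= [10,5,10,15] count",  # list of values
    "RESULT Infra: Constant3= {12,1.2} count",      # {mean,stddev} pair
]

for line in SAMPLE:
    graph, trace, body, units = GENERIC_RESULTS_RE.match(line).groups()
    stddev = ""
    match_stddev = RESULT_STDDEV_RE.match(body)
    match_list = RESULT_LIST_RE.match(body)
    if match_stddev:
        # "{mean,stddev}": one result plus an explicit standard deviation.
        result, stddev = [s.strip() for s in match_stddev.group(1).split(",")]
        results = [result]
    elif match_list:
        # "[v1,v2,...]": several results, no standard deviation.
        results = [s.strip() for s in match_list.group(1).split(",")]
    else:
        # Plain scalar.
        results = [body.strip()]
    print(graph, trace, results, stddev, units)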
tools/run_benchmarks.py

@@ -91,6 +91,7 @@ Full example (suite with several runners):
 Path pieces are concatenated. D8 is always run with the suite's path as cwd.
 """
 
+from collections import OrderedDict
 import json
 import math
 import optparse
@@ -114,8 +115,10 @@ SUPPORTED_ARCHS = ["android_arm",
                    "x64",
                    "arm64"]
 
-GENERIC_RESULTS_RE = re.compile(
-    r"^Trace\(([^\)]+)\), Result\(([^\)]+)\), StdDev\(([^\)]+)\)$")
+GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")
+RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
+RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
+
 
 
 def GeometricMean(values):
@@ -334,21 +337,33 @@ class RunnableGeneric(Runnable):
 
   def Run(self, runner):
     """Iterates over several runs and handles the output."""
-    traces = {}
+    traces = OrderedDict()
     for stdout in runner():
       for line in stdout.strip().splitlines():
         match = GENERIC_RESULTS_RE.match(line)
         if match:
-          trace = match.group(1)
-          result = match.group(2)
-          stddev = match.group(3)
+          stddev = ""
+          graph = match.group(1)
+          trace = match.group(2)
+          body = match.group(3)
+          units = match.group(4)
+          match_stddev = RESULT_STDDEV_RE.match(body)
+          match_list = RESULT_LIST_RE.match(body)
+          if match_stddev:
+            result, stddev = map(str.strip, match_stddev.group(1).split(","))
+            results = [result]
+          elif match_list:
+            results = map(str.strip, match_list.group(1).split(","))
+          else:
+            results = [body.strip()]
+
           trace_result = traces.setdefault(trace, Results([{
-            "graphs": self.graphs + [trace],
-            "units": self.units,
+            "graphs": self.graphs + [graph, trace],
+            "units": (units or self.units).strip(),
             "results": [],
             "stddev": "",
           }], []))
-          trace_result.traces[0]["results"].append(result)
+          trace_result.traces[0]["results"].extend(results)
           trace_result.traces[0]["stddev"] = stddev
 
     return reduce(lambda r, t: r + t, traces.itervalues(), Results())
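One detail worth noting in Run() above: traces is keyed by trace name, so when
the same trace shows up in the stdout of several runs, setdefault hands back
the existing entry and extend appends the new values to it; switching from a
plain dict to OrderedDict keeps the emitted traces in first-seen order. A toy
illustration of that accumulation, with a bare dict standing in for the real
Results objects (the stand-in and the sample lines are illustrative, not from
the patch):

import re
from collections import OrderedDict

GENERIC_RESULTS_RE = re.compile(r"^RESULT ([^:]+): ([^=]+)= ([^ ]+) ([^ ]*)$")

# Two runs of the same benchmark, each reporting the same trace (hypothetical
# output; the real runner processes one stdout string per run).
run_outputs = [
    "RESULT Infra: Constant1= 11 count",
    "RESULT Infra: Constant1= 12 count",
]

traces = OrderedDict()
for line in run_outputs:
    graph, trace, body, units = GENERIC_RESULTS_RE.match(line).groups()
    # The first run creates the entry; the second run gets the same one back.
    entry = traces.setdefault(trace, {"graphs": [graph, trace],
                                      "units": units,
                                      "results": []})
    # extend (rather than append) also covers the multi-value "[...]" case.
    entry["results"].extend([body.strip()])

print(list(traces.values()))
# [{'graphs': ['Infra', 'Constant1'], 'units': 'count',
#   'results': ['11', '12']}]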
tools/unittests/run_benchmarks_test.py

@@ -342,13 +342,24 @@ class PerfTest(unittest.TestCase):
     test_input = dict(V8_GENERIC_JSON)
     self._WriteTestInput(test_input)
     self._MockCommand(["."], [
-        "Trace(Test1), Result(1.234), StdDev(0.23)\n"
-        "Trace(Test2), Result(10657567), StdDev(106)\n"])
+        "RESULT Infra: Constant1= 11 count\n"
+        "RESULT Infra: Constant2= [10,5,10,15] count\n"
+        "RESULT Infra: Constant3= {12,1.2} count\n"])
     self.assertEquals(0, self._CallMain())
-    self._VerifyResults("test", "ms", [
-      {"name": "Test1", "results": ["1.234"], "stddev": "0.23"},
-      {"name": "Test2", "results": ["10657567"], "stddev": "106"},
-    ])
+    self.assertEquals([
+      {"units": "count",
+       "graphs": ["test", "Infra", "Constant1"],
+       "results": ["11"],
+       "stddev": ""},
+      {"units": "count",
+       "graphs": ["test", "Infra", "Constant2"],
+       "results": ["10", "5", "10", "15"],
+       "stddev": ""},
+      {"units": "count",
+       "graphs": ["test", "Infra", "Constant3"],
+       "results": ["12"],
+       "stddev": "1.2"},
+    ], self._LoadResults()["traces"])
     self._VerifyErrors([])
     self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")