[test] Switch to flattened json output

This flattens the json output to one result record as a dict. In
the past, several records with different arch/mode combinations
could be run in one invocation, but this has been deprecated for
several releases.

We also drop storing the arch/mode information in the record, as it
isn't used for anything on the infra side.
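
For illustration, a minimal sketch of the consumer-side difference (the
file name here is hypothetical; the record fields follow the diffs below):

  import json

  # Before: a list of per-arch/mode records; consumers took the first entry.
  with open("results.json") as f:
    output = json.loads(f.read())[0]

  # After: one flattened record as a dict.
  with open("results.json") as f:
    output = json.load(f)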

This was prepared on the infra side by:
https://crrev.com/c/2453562

Bug: chromium:1132088
Change-Id: I944514dc00a671e7671bcdbcaa3a72407476d7ad
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2456987
Reviewed-by: Liviu Rau <liviurau@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70402}
Michael Achenbach authored on 2020-10-07 18:43:41 +02:00; committed by Commit Bot
parent 0ce4c51ce7
commit 373a9a8cfc
6 changed files with 8 additions and 33 deletions

tools/testrunner/base_runner.py

@@ -117,7 +117,7 @@ SLOW_ARCHS = [
 ]

 ModeConfig = namedtuple(
-    'ModeConfig', 'label flags timeout_scalefactor status_mode execution_mode')
+    'ModeConfig', 'label flags timeout_scalefactor status_mode')

 DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
 RELEASE_FLAGS = ["--nohard-abort"]
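
To make the namedtuple change concrete, a minimal sketch of a mode built
from the slimmed-down tuple (field values are illustrative, taken from
the RELEASE_MODE hunk below; this is not code from the commit):

  from collections import namedtuple

  ModeConfig = namedtuple(
      'ModeConfig', 'label flags timeout_scalefactor status_mode')

  # execution_mode is gone; a mode carries only what the runner still uses.
  RELEASE_MODE = ModeConfig(
      label="release",
      flags=["--nohard-abort"],
      timeout_scalefactor=1,
      status_mode="release",
  )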
@@ -127,7 +127,6 @@ DEBUG_MODE = ModeConfig(
     flags=DEBUG_FLAGS,
     timeout_scalefactor=4,
     status_mode="debug",
-    execution_mode="debug",
 )

 RELEASE_MODE = ModeConfig(
@@ -135,7 +134,6 @@ RELEASE_MODE = ModeConfig(
     flags=RELEASE_FLAGS,
     timeout_scalefactor=1,
     status_mode="release",
-    execution_mode="release",
 )

 # Normal trybot release configuration. There, dchecks are always on which
@@ -146,7 +144,6 @@ TRY_RELEASE_MODE = ModeConfig(
     flags=RELEASE_FLAGS,
     timeout_scalefactor=4,
     status_mode="debug",
-    execution_mode="release",
 )

 PROGRESS_INDICATORS = {
@@ -761,13 +758,7 @@ class BaseTestRunner(object):
   def _create_progress_indicators(self, test_count, options):
     procs = [PROGRESS_INDICATORS[options.progress]()]
     if options.json_test_results:
-      # TODO(machenbach): Deprecate the execution mode. Previously it was meant
-      # to differentiate several records in the json output. But there is now
-      # only one record and the mode information is redundant.
-      procs.append(progress.JsonTestProgressIndicator(
-          self.framework_name,
-          self.build_config.arch,
-          self.mode_options.execution_mode))
+      procs.append(progress.JsonTestProgressIndicator(self.framework_name))

     for proc in procs:
       proc.configure(options)
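
A minimal usage sketch of the slimmed constructor ('standard_runner'
stands in for self.framework_name and may not match the real value):

  from testrunner.testproc import progress

  indicator = progress.JsonTestProgressIndicator('standard_runner')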

tools/testrunner/standard_runner.py

@@ -364,10 +364,8 @@ class StandardTestRunner(base_runner.BaseTestRunner):
     ]
     assert os.path.exists(options.json_test_results)
-    complete_results = []
     with open(options.json_test_results, "r") as f:
-      complete_results = json.loads(f.read())
-    output = complete_results[0]
+      output = json.load(f)

     lines = []
     for test in output['slowest_tests']:
       suffix = ''

tools/testrunner/testproc/progress.py

@@ -349,7 +349,7 @@ class MonochromeProgressIndicator(CompactProgressIndicator):

 class JsonTestProgressIndicator(ProgressIndicator):

-  def __init__(self, framework_name, arch, mode):
+  def __init__(self, framework_name):
     super(JsonTestProgressIndicator, self).__init__()
     # We want to drop stdout/err for all passed tests on the first try, but we
     # need to get outputs for all runs after the first one. To accommodate that,
@@ -358,8 +358,6 @@ class JsonTestProgressIndicator(ProgressIndicator):
     self._requirement = base.DROP_PASS_STDOUT

     self.framework_name = framework_name
-    self.arch = arch
-    self.mode = mode
     self.results = []
     self.duration_sum = 0
     self.test_count = 0
@@ -429,24 +427,16 @@ class JsonTestProgressIndicator(ProgressIndicator):
     }

   def finished(self):
-    complete_results = []
-    if os.path.exists(self.options.json_test_results):
-      with open(self.options.json_test_results, "r") as f:
-        # On bots we might start out with an empty file.
-        complete_results = json.loads(f.read() or "[]")
-
     duration_mean = None
     if self.test_count:
       duration_mean = self.duration_sum / self.test_count

-    complete_results.append({
-      "arch": self.arch,
-      "mode": self.mode,
+    result = {
       "results": self.results,
       "slowest_tests": self.tests.as_list(),
       "duration_mean": duration_mean,
       "test_total": self.test_count,
-    })
+    }

     with open(self.options.json_test_results, "w") as f:
-      f.write(json.dumps(complete_results))
+      json.dump(result, f)
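
For reference, a self-contained sketch of the single record finished()
now writes (values are placeholders, not real runner output):

  import json

  result = {
    "results": [],          # one entry per executed test
    "slowest_tests": [],    # slowest-test summaries
    "duration_mean": None,  # None when no tests ran
    "test_total": 0,
  }
  with open("results.json", "w") as f:
    json.dump(result, f)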

tools/unittests/run_tests_test.py

@@ -246,7 +246,7 @@ class SystemTest(unittest.TestCase):
       self, expected_results_name, actual_json, basedir):
     # Check relevant properties of the json output.
     with open(actual_json) as f:
-      json_output = json.load(f)[0]
+      json_output = json.load(f)

     # Replace duration in actual output as it's non-deterministic. Also
     # replace the python executable prefix as it has a different absolute
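
Correspondingly, a hedged sketch of how a check on the flattened file
might look now (the file name and assertions are illustrative, not the
test's actual logic):

  import json

  with open("actual.json") as f:
    json_output = json.load(f)  # a dict now, not a one-element list

  assert isinstance(json_output, dict)
  assert "results" in json_output and "slowest_tests" in json_output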

tools/unittests/testdata/expected_test_results1.json

@@ -1,7 +1,5 @@
 {
-  "arch": "x64",
   "duration_mean": 1,
-  "mode": "release",
   "results": [
     {
       "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",

tools/unittests/testdata/expected_test_results2.json

@@ -1,7 +1,5 @@
 {
-  "arch": "x64",
   "duration_mean": 1,
-  "mode": "release",
   "results": [
     {
       "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",