diff --git a/tools/testrunner/base_runner.py b/tools/testrunner/base_runner.py
index d287750c84..54a9e61b16 100644
--- a/tools/testrunner/base_runner.py
+++ b/tools/testrunner/base_runner.py
@@ -117,7 +117,7 @@ SLOW_ARCHS = [
 
 
 ModeConfig = namedtuple(
-    'ModeConfig', 'label flags timeout_scalefactor status_mode execution_mode')
+    'ModeConfig', 'label flags timeout_scalefactor status_mode')
 
 DEBUG_FLAGS = ["--nohard-abort", "--enable-slow-asserts", "--verify-heap"]
 RELEASE_FLAGS = ["--nohard-abort"]
@@ -127,7 +127,6 @@ DEBUG_MODE = ModeConfig(
     flags=DEBUG_FLAGS,
     timeout_scalefactor=4,
     status_mode="debug",
-    execution_mode="debug",
 )
 
 RELEASE_MODE = ModeConfig(
@@ -135,7 +134,6 @@ RELEASE_MODE = ModeConfig(
     flags=RELEASE_FLAGS,
     timeout_scalefactor=1,
     status_mode="release",
-    execution_mode="release",
 )
 
 # Normal trybot release configuration. There, dchecks are always on which
@@ -146,7 +144,6 @@ TRY_RELEASE_MODE = ModeConfig(
     flags=RELEASE_FLAGS,
     timeout_scalefactor=4,
     status_mode="debug",
-    execution_mode="release",
 )
 
 PROGRESS_INDICATORS = {
@@ -761,13 +758,7 @@ class BaseTestRunner(object):
   def _create_progress_indicators(self, test_count, options):
     procs = [PROGRESS_INDICATORS[options.progress]()]
     if options.json_test_results:
-      # TODO(machenbach): Deprecate the execution mode. Previously it was meant
-      # to differentiate several records in the json output. But there is now
-      # only one record and the mode information is redundant.
-      procs.append(progress.JsonTestProgressIndicator(
-        self.framework_name,
-        self.build_config.arch,
-        self.mode_options.execution_mode))
+      procs.append(progress.JsonTestProgressIndicator(self.framework_name))
 
     for proc in procs:
       proc.configure(options)
diff --git a/tools/testrunner/standard_runner.py b/tools/testrunner/standard_runner.py
index fc828c7e0e..ff58391110 100755
--- a/tools/testrunner/standard_runner.py
+++ b/tools/testrunner/standard_runner.py
@@ -364,10 +364,8 @@ class StandardTestRunner(base_runner.BaseTestRunner):
       ]
 
     assert os.path.exists(options.json_test_results)
-    complete_results = []
     with open(options.json_test_results, "r") as f:
-      complete_results = json.loads(f.read())
-    output = complete_results[0]
+      output = json.load(f)
     lines = []
     for test in output['slowest_tests']:
       suffix = ''
diff --git a/tools/testrunner/testproc/progress.py b/tools/testrunner/testproc/progress.py
index eb96e5c354..9ff943a5c2 100644
--- a/tools/testrunner/testproc/progress.py
+++ b/tools/testrunner/testproc/progress.py
@@ -349,7 +349,7 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
 
 class JsonTestProgressIndicator(ProgressIndicator):
 
-  def __init__(self, framework_name, arch, mode):
+  def __init__(self, framework_name):
     super(JsonTestProgressIndicator, self).__init__()
     # We want to drop stdout/err for all passed tests on the first try, but we
     # need to get outputs for all runs after the first one. To accommodate that,
@@ -358,8 +358,6 @@ class JsonTestProgressIndicator(ProgressIndicator):
     self._requirement = base.DROP_PASS_STDOUT
 
     self.framework_name = framework_name
-    self.arch = arch
-    self.mode = mode
     self.results = []
     self.duration_sum = 0
     self.test_count = 0
@@ -429,24 +427,16 @@
     }
 
   def finished(self):
-    complete_results = []
-    if os.path.exists(self.options.json_test_results):
-      with open(self.options.json_test_results, "r") as f:
-        # On bots we might start out with an empty file.
-        complete_results = json.loads(f.read() or "[]")
-
     duration_mean = None
     if self.test_count:
       duration_mean = self.duration_sum / self.test_count
 
-    complete_results.append({
-      "arch": self.arch,
-      "mode": self.mode,
+    result = {
       "results": self.results,
       "slowest_tests": self.tests.as_list(),
       "duration_mean": duration_mean,
       "test_total": self.test_count,
-    })
+    }
 
     with open(self.options.json_test_results, "w") as f:
-      f.write(json.dumps(complete_results))
+      json.dump(result, f)
diff --git a/tools/unittests/run_tests_test.py b/tools/unittests/run_tests_test.py
index 75fe2d15f4..4cd2bdefd5 100755
--- a/tools/unittests/run_tests_test.py
+++ b/tools/unittests/run_tests_test.py
@@ -246,7 +246,7 @@ class SystemTest(unittest.TestCase):
       self, expected_results_name, actual_json, basedir):
     # Check relevant properties of the json output.
     with open(actual_json) as f:
-      json_output = json.load(f)[0]
+      json_output = json.load(f)
 
     # Replace duration in actual output as it's non-deterministic. Also
     # replace the python executable prefix as it has a different absolute
diff --git a/tools/unittests/testdata/expected_test_results1.json b/tools/unittests/testdata/expected_test_results1.json
index 02d2d6c2c1..08ac623cd7 100644
--- a/tools/unittests/testdata/expected_test_results1.json
+++ b/tools/unittests/testdata/expected_test_results1.json
@@ -1,7 +1,5 @@
 {
-  "arch": "x64",
   "duration_mean": 1,
-  "mode": "release",
   "results": [
     {
       "command": "/usr/bin/python out/build/d8_mocked.py --test strawberries --random-seed=123 --nohard-abort --testing-d8-test-runner",
diff --git a/tools/unittests/testdata/expected_test_results2.json b/tools/unittests/testdata/expected_test_results2.json
index fb57aad48d..dc353f6875 100644
--- a/tools/unittests/testdata/expected_test_results2.json
+++ b/tools/unittests/testdata/expected_test_results2.json
@@ -1,7 +1,5 @@
 {
-  "arch": "x64",
   "duration_mean": 1,
-  "mode": "release",
   "results": [
     {
       "command": "/usr/bin/python out/build/d8_mocked.py bananaflakes --random-seed=123 --nohard-abort --testing-d8-test-runner",
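Note on the new output format: after this patch, the file passed via --json-test-results holds a single JSON object instead of a one-element list of per-arch/mode records. Below is a minimal consumer sketch in Python. The key names ("results", "slowest_tests", "duration_mean", "test_total") come from the result dict in the patch above; the file name and the fallback for old list-shaped files are illustrative assumptions, not part of the patch.

import json

def load_test_results(path):
  with open(path, "r") as f:
    data = json.load(f)
  # Pre-change runners wrote a one-element list of records;
  # post-change runners write a single object.
  if isinstance(data, list):
    data = data[0]
  return data

output = load_test_results("test_results.json")  # assumed path
print("%d tests, mean duration %s" %
      (output["test_total"], output["duration_mean"]))
for test in output["slowest_tests"]:
  print(test)

This mirrors the simplification in standard_runner.py above, where indexing into complete_results[0] is replaced by a plain json.load(f).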