Revert "[resultdb] Add ResultDB indicator"

This reverts commit 237de893e1.

Reason for revert: breaks flake bisect:
https://ci.chromium.org/ui/p/v8/builders/try.triggered/v8_flako/b8800423657665797553/overview

The change implicitly required running the tests with
vpython3, which is not guaranteed everywhere.
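
For context, the implicit requirement comes from the reverted indicator importing requests, a wheel that is only provisioned through .vpython3 (first diff below); under plain python3 the import fails before any test runs. Below is a minimal sketch of a guard that would have kept the dependency optional. It is entirely hypothetical and not part of this CL or the original one; the helper name is invented.

    # Hypothetical guard, not part of this CL: only wire up the ResultDB
    # indicator when its third-party dependency is importable, so the test
    # runner still works when invoked with plain python3 instead of vpython3.
    try:
      import requests  # provided only via the .vpython3 wheel list
      HAVE_REQUESTS = True
    except ImportError:
      HAVE_REQUESTS = False


    def maybe_resultdb_indicator(context, options, test_count):
      """Returns [indicator] when requests is available, else []."""
      if not HAVE_REQUESTS:
        return []
      from testrunner.testproc.resultdb import ResultDBIndicator
      return [ResultDBIndicator(context, options, test_count)]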

Original change's description:
> [resultdb] Add ResultDB indicator
>
> Adds a new indicator that will send every result to ResultDB (and ultimately to a BigQuery table; to be configured later).
>
> If we are not running in a ResultDB context, we introduce only minimal overhead by exiting early from the indicator.
>
> To test these changes in a LUCI context with ResultDB, we activated the resultdb feature flag via V8-Recipe-Flags. This feature was implemented in https://crrev.com/c/3925576.
>
>
> V8-Recipe-Flags: resultdb
> Bug: v8:13316
> Change-Id: I5d98e8f27531b536686a8d63b993313b9d6f62c5
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3905385
> Commit-Queue: Liviu Rau <liviurau@google.com>
> Reviewed-by: Alexander Schulze <alexschulze@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#83672}

Bug: v8:13316
Change-Id: I7e55668e365475298ed46d2fc8ee0fe1282c3e8e
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3952131
Auto-Submit: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/main@{#83677}
Michael Achenbach 2022-10-13 11:28:41 +00:00 committed by V8 LUCI CQ
parent be223c37dc
commit f09dde9fbb
7 changed files with 46 additions and 191 deletions
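
For reference on the early-exit mechanism described in the original change: the reverted indicator (resultdb.py, diffed in full further below) discovers its endpoint by reading the JSON file named by the LUCI_CONTEXT environment variable. The sketch below shows the shape it expects; the key names ('result_sink', 'address', 'auth_token') come from the code below, while the values are invented placeholders.

    import json
    import os

    # Illustrative only: the slice of LUCI_CONTEXT that resultdb.py reads.
    example_luci_context = {
        'result_sink': {
            'address': 'localhost:12345',       # ResultSink HTTP endpoint
            'auth_token': 'placeholder-token',  # used in the Authorization header
        }
    }

    # Outside a LUCI environment the variable is unset and the indicator
    # exits early - the "minimal overhead" path described above.
    print(os.environ.get('LUCI_CONTEXT'))  # -> None on a developer machine
    print(json.dumps(example_luci_context, indent=2))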

View File

@@ -74,8 +74,3 @@ wheel: <
   name: "infra/python/wheels/protobuf-py3"
   version: "version:3.19.3"
 >
-wheel: <
-  name: "infra/python/wheels/requests-py2_py3"
-  version: "version:2.13.0"
->

View File

@@ -447,12 +447,8 @@ class TestCase(object):
         (other.suite.name, other.name, other.variant)
     )
 
-  @property
-  def full_name(self):
-    return self.suite.name + '/' + self.name
-
   def __str__(self):
-    return self.full_name
+    return self.suite.name + '/' + self.name
 
 
 class D8TestCase(TestCase):

View File

@@ -14,7 +14,7 @@ from . import util
 
 
 def print_failure_header(test, is_flaky=False):
-  text = [test.full_name]
+  text = [str(test)]
   if test.output_proc.negative:
     text.append('[negative]')
   if is_flaky:
@@ -24,23 +24,6 @@ def print_failure_header(test, is_flaky=False):
   print(output.encode(encoding, errors='replace').decode(encoding))
 
 
-def formatted_result_output(result):
-  lines = []
-  if result.output.stderr:
-    lines.append("--- stderr ---")
-    lines.append(result.output.stderr.strip())
-  if result.output.stdout:
-    lines.append("--- stdout ---")
-    lines.append(result.output.stdout.strip())
-  lines.append("Command: %s" % result.cmd.to_string())
-  if result.output.HasCrashed():
-    lines.append("exit code: %s" % result.output.exit_code_string)
-    lines.append("--- CRASHED ---")
-  if result.output.HasTimedOut():
-    lines.append("--- TIMEOUT ---")
-  return '\n'.join(lines)
-
-
 class ProgressIndicator():
 
   def __init__(self, context, options, test_count):
@@ -85,7 +68,19 @@ class SimpleProgressIndicator(ProgressIndicator):
     for test, result, is_flaky in self._failed:
       flaky += int(is_flaky)
       print_failure_header(test, is_flaky=is_flaky)
-      print(formatted_result_output(result))
+      if result.output.stderr:
+        print("--- stderr ---")
+        print(result.output.stderr.strip())
+      if result.output.stdout:
+        print("--- stdout ---")
+        print(result.output.stdout.strip())
+      print("Command: %s" % result.cmd.to_string())
+      if result.output.HasCrashed():
+        print("exit code: %s" % result.output.exit_code_string)
+        print("--- CRASHED ---")
+        crashed += 1
+      if result.output.HasTimedOut():
+        print("--- TIMEOUT ---")
     if len(self._failed) == 0:
       print("===")
       print("=== All tests succeeded")
@@ -235,7 +230,7 @@ class CompactProgressIndicator(ProgressIndicator):
     else:
       self._passed += 1
-    self._print_progress(test.full_name)
+    self._print_progress(str(test))
     if result.has_unexpected_output:
       output = result.output
       stdout = output.stdout.strip()
@@ -363,7 +358,10 @@ class JsonTestProgressIndicator(ProgressIndicator):
     self.test_count = 0
 
   def on_test_result(self, test, result):
-    self.process_results(test, result.as_list)
+    if result.is_rerun:
+      self.process_results(test, result.results)
+    else:
+      self.process_results(test, [result])
 
   def process_results(self, test, results):
     for run, result in enumerate(results):
@@ -378,7 +376,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
       if not result.has_unexpected_output and run == 0:
         continue
 
-      record = self._test_record(test, result, run)
+      record = self._test_record(test, result, output, run)
       record.update({
          "result": test.output_proc.get_outcome(output),
          "stdout": output.stdout,
@@ -394,22 +392,30 @@ class JsonTestProgressIndicator(ProgressIndicator):
         return ""
       return test.output_proc.get_outcome(output)
 
-    record = self._test_record(test, result, run)
-    record.update(
-        result=result_value(test, result, output),
-        marked_slow=test.is_slow,
-    )
+    record = self._test_record(test, result, output, run)
+    record.update({
+        "result": result_value(test, result, output),
+        "marked_slow": test.is_slow,
+    })
     self.tests.add(record)
     self.duration_sum += record['duration']
     self.test_count += 1
 
-  def _test_record(self, test, result, run):
-    record = util.base_test_record(test, result, run)
-    record.update(
-        framework_name=self.framework_name,
-        command=result.cmd.to_string(relative=True),
-    )
-    return record
+  def _test_record(self, test, result, output, run):
+    return {
+        "name": str(test),
+        "flags": result.cmd.args,
+        "command": result.cmd.to_string(relative=True),
+        "run": run + 1,
+        "exit_code": output.exit_code,
+        "expected": test.expected_outcomes,
+        "duration": output.duration,
+        "random_seed": test.random_seed,
+        "target_name": test.get_shell(),
+        "variant": test.variant,
+        "variant_flags": test.variant_flags,
+        "framework_name": self.framework_name,
+    }
 
   def finished(self):
     duration_mean = None
@@ -417,10 +423,10 @@ class JsonTestProgressIndicator(ProgressIndicator):
       duration_mean = self.duration_sum / self.test_count
 
     result = {
-        'results': self.results,
-        'slowest_tests': self.tests.as_list(),
-        'duration_mean': duration_mean,
-        'test_total': self.test_count,
+        "results": self.results,
+        "slowest_tests": self.tests.as_list(),
+        "duration_mean": duration_mean,
+        "test_total": self.test_count,
     }
 
     with open(self.options.json_test_results, "w") as f:

View File

@@ -6,7 +6,6 @@
 from . import base
 from testrunner.local import utils
 from testrunner.testproc.indicators import JsonTestProgressIndicator, PROGRESS_INDICATORS
-from testrunner.testproc.resultdb import ResultDBIndicator
 
 
 class ResultsTracker(base.TestProcObserver):
@@ -67,7 +66,7 @@ class ProgressProc(base.TestProcObserver):
         0,
         JsonTestProgressIndicator(context, options, test_count,
                                   framework_name))
-    self.procs.append(ResultDBIndicator(context, options, test_count))
+
     self._requirement = max(proc._requirement for proc in self.procs)
 
   def _on_result_for(self, test, result):

View File

@@ -16,10 +16,6 @@ class ResultBase(object):
   def is_rerun(self):
     return False
 
-  @property
-  def as_list(self):
-    return [self]
-
 
 class Result(ResultBase):
   """Result created by the output processor."""
@@ -116,9 +112,5 @@ class RerunResult(Result):
   def is_rerun(self):
     return True
 
-  @property
-  def as_list(self):
-    return self.results
-
   def status(self):
     return ' '.join(r.status() for r in self.results)
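
The two as_list properties removed here are what allowed on_test_result to treat single results and reruns uniformly; the revert trades that polymorphism for the explicit is_rerun branch restored in the indicators.py diff above. Below is a condensed standalone sketch of the removed pattern; class and attribute names follow the diff, the constructor is scaffolding added for the example.

    class Result:
      """Single test result (sketch)."""

      @property
      def as_list(self):
        return [self]  # a plain result iterates as just itself


    class RerunResult(Result):
      """Result of a test that was rerun, e.g. to detect flakes (sketch)."""

      def __init__(self, results):
        self.results = results

      @property
      def as_list(self):
        return self.results  # a rerun iterates over every attempt


    # With the property in place, consumers need no type check:
    for run, sub_result in enumerate(RerunResult([Result(), Result()]).as_list):
      print(run, sub_result)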

View File

@@ -1,95 +0,0 @@
-# Copyright 2022 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import json
-import logging
-import pprint
-import requests
-import os
-
-from . import base
-from .indicators import (
-    formatted_result_output,
-    ProgressIndicator,
-)
-from .util import (
-    base_test_record,
-    extract_tags,
-    strip_ascii_control_characters,
-)
-
-
-class ResultDBIndicator(ProgressIndicator):
-
-  def __init__(self, context, options, test_count):
-    super(ResultDBIndicator, self).__init__(context, options, test_count)
-    self._requirement = base.DROP_PASS_OUTPUT
-    self.rpc = ResultDB_RPC()
-
-  def on_test_result(self, test, result):
-    for run, sub_result in enumerate(result.as_list):
-      self.send_result(test, sub_result, run)
-
-  def send_result(self, test, result, run):
-    # We need to recalculate the observed (but lost) test behaviour.
-    # `result.has_unexpected_output` indicates that the run behaviour of
-    # the test does not match the expected behaviour, irrespective of
-    # passing or failing.
-    result_expected = not result.has_unexpected_output
-    test_should_pass = not test.is_fail
-    run_passed = (result_expected == test_should_pass)
-    rdb_result = {
-        'testId': strip_ascii_control_characters(test.full_name),
-        'status': 'PASS' if run_passed else 'FAIL',
-        'expected': result_expected,
-    }
-
-    if result.output and result.output.duration:
-      rdb_result.update(duration=f'{result.output.duration}ms')
-    if result.has_unexpected_output:
-      formatted_output = formatted_result_output(result)
-      sanitized = strip_ascii_control_characters(formatted_output)
-      # TODO(liviurau): do we have a better presentation data for this?
-      # Protobuf strings can have len == 2**32.
-      rdb_result.update(summaryHtml=f'<pre>{sanitized}</pre>')
-    record = base_test_record(test, result, run)
-    rdb_result.update(tags=extract_tags(record))
-    self.rpc.send(rdb_result)
-
-
-class ResultDB_RPC:
-
-  def __init__(self):
-    self.session = None
-    luci_context = os.environ.get('LUCI_CONTEXT')
-    # TODO(liviurau): use a factory method and return None in absence of
-    # necessary context.
-    if not luci_context:
-      logging.warning(
-          'No LUCI_CONTEXT found. No results will be sent to ResultDB.')
-      return
-    with open(luci_context, mode="r", encoding="utf-8") as f:
-      config = json.load(f)
-    sink = config.get('result_sink', None)
-    if not sink:
-      logging.warning(
-          'No ResultDB sink found. No results will be sent to ResultDB.')
-      return
-    self.session = requests.Session()
-    self.session.headers = {
-        'Authorization': f'ResultSink {sink.get("auth_token")}',
-    }
-    self.url = f'http://{sink.get("address")}/prpc/luci.resultsink.v1.Sink/ReportTestResults'
-
-  def send(self, result):
-    if self.session:
-      payload = dict(testResults=[result])
-      try:
-        self.session.post(self.url, json=payload).raise_for_status()
-      except Exception as e:
-        logging.error(f'Request failed: {payload}')
-        raise e
-
-  def __del__(self):
-    if self.session:
-      self.session.close()
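
To make the behaviour recalculation in send_result above concrete, here is a worked example of the two booleans for a test that is listed as failing and does fail at runtime; this is plain Python arithmetic, no test-runner code involved.

    # Worked example for the run_passed computation in send_result above.
    # Case: a test expected to FAIL (test.is_fail == True) that indeed fails.
    result_expected = True    # output matched expectations: it failed as listed
    test_should_pass = False  # the status files say this test should fail
    run_passed = (result_expected == test_should_pass)

    print(run_passed)  # False -> reported with status='FAIL', expected=True,
                       # i.e. an anticipated failure, not a regression.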

View File

@@ -7,7 +7,6 @@ import heapq
 import logging
 import os
 import platform
-import re
 import signal
 import subprocess
@@ -54,43 +53,6 @@ def kill_processes_linux():
       logging.exception('Failed to kill process')
 
 
-def strip_ascii_control_characters(unicode_string):
-  return re.sub(r'[^\x20-\x7E]', '?', str(unicode_string))
-
-
-def base_test_record(test, result, run):
-  record = {
-      'name': test.full_name,
-      'flags': result.cmd.args,
-      'run': run + 1,
-      'expected': test.expected_outcomes,
-      'random_seed': test.random_seed,
-      'target_name': test.get_shell(),
-      'variant': test.variant,
-      'variant_flags': test.variant_flags,
-  }
-  if result.output:
-    record.update(
-        exit_code=result.output.exit_code,
-        duration=result.output.duration,
-    )
-  return record
-
-
-def extract_tags(record):
-  tags = []
-  for k, v in record.items():
-    if type(v) == list:
-      tags += [sanitized_kv_dict(k, e) for e in v]
-    else:
-      tags.append(sanitized_kv_dict(k, v))
-  return tags
-
-
-def sanitized_kv_dict(k, v):
-  return dict(key=k, value=strip_ascii_control_characters(v))
-
-
 class FixedSizeTopList():
   """Utility collection for gathering a fixed number of elements with the
   biggest value for the given key. It employs a heap from which we pop the