Reland "[resultdb] Add ResultDB indicator"
This is a reland of commit 237de893e1
We now guard against the requests Python module not being available when running the testrunner. If the preconditions (module present and LUCI context configured) are not met, we no longer add the ResultDBIndicator to the chain.
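For illustration, the guard amounts to the following precondition check (a minimal sketch; resultdb_preconditions_met is a hypothetical name, the real logic lives in rdb_sink() in tools/testrunner/testproc/resultdb.py below):

import os

def resultdb_preconditions_met():
  # Hypothetical helper mirroring the checks in rdb_sink() below.
  try:
    import requests  # noqa: F401 -- only probing wheel availability
  except ImportError:
    return False  # requests wheel absent, e.g. on local runs
  return bool(os.environ.get('LUCI_CONTEXT'))  # also need a LUCI context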
Original change's description:
> [resultdb] Add ResultDB indicator
>
> Adds a new indicator that will send every result to ResultDB (and ultimately into a BigQuery table; to be configured later).
>
> If we are not running in a ResultDB context, we introduce only minimal overhead by exiting early from the indicator.
>
> To test these changes in a LUCI context with ResultDB, we activated the resultdb feature flag via V8-Recipe-Flags. This feature was implemented in https://crrev.com/c/3925576.
>
>
> V8-Recipe-Flags: resultdb
> Bug: v8:13316
> Change-Id: I5d98e8f27531b536686a8d63b993313b9d6f62c5
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3905385
> Commit-Queue: Liviu Rau <liviurau@google.com>
> Reviewed-by: Alexander Schulze <alexschulze@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#83672}
V8-Recipe-Flags: resultdb
Bug: v8:13316
Change-Id: I0bdfae13cc7f250c41a18f2d3a513a3bfc580f6d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3955263
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Commit-Queue: Liviu Rau <liviurau@google.com>
Cr-Commit-Position: refs/heads/main@{#83711}
parent b73215cd6c
commit b625371491
@@ -74,3 +74,8 @@ wheel: <
   name: "infra/python/wheels/protobuf-py3"
   version: "version:3.19.3"
 >
+
+wheel: <
+  name: "infra/python/wheels/requests-py2_py3"
+  version: "version:2.13.0"
+>
@@ -447,9 +447,13 @@ class TestCase(object):
         (other.suite.name, other.name, other.variant)
     )
 
-  def __str__(self):
+  @property
+  def full_name(self):
     return self.suite.name + '/' + self.name
 
+  def __str__(self):
+    return self.full_name
+
 
 class D8TestCase(TestCase):
   def get_shell(self):
@@ -14,7 +14,7 @@ from . import util
 
 
 def print_failure_header(test, is_flaky=False):
-  text = [str(test)]
+  text = [test.full_name]
   if test.output_proc.negative:
     text.append('[negative]')
   if is_flaky:
@@ -24,6 +24,23 @@ def print_failure_header(test, is_flaky=False):
   print(output.encode(encoding, errors='replace').decode(encoding))
 
 
+def formatted_result_output(result):
+  lines = []
+  if result.output.stderr:
+    lines.append("--- stderr ---")
+    lines.append(result.output.stderr.strip())
+  if result.output.stdout:
+    lines.append("--- stdout ---")
+    lines.append(result.output.stdout.strip())
+  lines.append("Command: %s" % result.cmd.to_string())
+  if result.output.HasCrashed():
+    lines.append("exit code: %s" % result.output.exit_code_string)
+    lines.append("--- CRASHED ---")
+  if result.output.HasTimedOut():
+    lines.append("--- TIMEOUT ---")
+  return '\n'.join(lines)
+
+
 class ProgressIndicator():
 
   def __init__(self, context, options, test_count):
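For a crashed run, formatted_result_output() yields a block shaped like this (all values hypothetical):

--- stderr ---
Fatal error in ../../src/foo.cc: oops
Command: out/x64.release/d8 --stress-opt test.js
exit code: -6
--- CRASHED ---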
@@ -68,19 +85,7 @@ class SimpleProgressIndicator(ProgressIndicator):
     for test, result, is_flaky in self._failed:
       flaky += int(is_flaky)
       print_failure_header(test, is_flaky=is_flaky)
-      if result.output.stderr:
-        print("--- stderr ---")
-        print(result.output.stderr.strip())
-      if result.output.stdout:
-        print("--- stdout ---")
-        print(result.output.stdout.strip())
-      print("Command: %s" % result.cmd.to_string())
-      if result.output.HasCrashed():
-        print("exit code: %s" % result.output.exit_code_string)
-        print("--- CRASHED ---")
-        crashed += 1
-      if result.output.HasTimedOut():
-        print("--- TIMEOUT ---")
+      print(formatted_result_output(result))
     if len(self._failed) == 0:
       print("===")
       print("=== All tests succeeded")
@@ -230,7 +235,7 @@ class CompactProgressIndicator(ProgressIndicator):
     else:
       self._passed += 1
 
-    self._print_progress(str(test))
+    self._print_progress(test.full_name)
     if result.has_unexpected_output:
       output = result.output
       stdout = output.stdout.strip()
@@ -358,10 +363,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
     self.test_count = 0
 
   def on_test_result(self, test, result):
-    if result.is_rerun:
-      self.process_results(test, result.results)
-    else:
-      self.process_results(test, [result])
+    self.process_results(test, result.as_list)
 
   def process_results(self, test, results):
     for run, result in enumerate(results):
@@ -376,7 +378,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
       if not result.has_unexpected_output and run == 0:
         continue
 
-      record = self._test_record(test, result, output, run)
+      record = self._test_record(test, result, run)
       record.update({
           "result": test.output_proc.get_outcome(output),
          "stdout": output.stdout,
@@ -392,30 +394,22 @@ class JsonTestProgressIndicator(ProgressIndicator):
         return ""
       return test.output_proc.get_outcome(output)
 
-    record = self._test_record(test, result, output, run)
-    record.update({
-        "result": result_value(test, result, output),
-        "marked_slow": test.is_slow,
-    })
+    record = self._test_record(test, result, run)
+    record.update(
+        result=result_value(test, result, output),
+        marked_slow=test.is_slow,
+    )
     self.tests.add(record)
     self.duration_sum += record['duration']
     self.test_count += 1
 
-  def _test_record(self, test, result, output, run):
-    return {
-        "name": str(test),
-        "flags": result.cmd.args,
-        "command": result.cmd.to_string(relative=True),
-        "run": run + 1,
-        "exit_code": output.exit_code,
-        "expected": test.expected_outcomes,
-        "duration": output.duration,
-        "random_seed": test.random_seed,
-        "target_name": test.get_shell(),
-        "variant": test.variant,
-        "variant_flags": test.variant_flags,
-        "framework_name": self.framework_name,
-    }
+  def _test_record(self, test, result, run):
+    record = util.base_test_record(test, result, run)
+    record.update(
+        framework_name=self.framework_name,
+        command=result.cmd.to_string(relative=True),
+    )
+    return record
 
   def finished(self):
     duration_mean = None
@@ -423,10 +417,10 @@ class JsonTestProgressIndicator(ProgressIndicator):
       duration_mean = self.duration_sum / self.test_count
 
     result = {
-        "results": self.results,
-        "slowest_tests": self.tests.as_list(),
-        "duration_mean": duration_mean,
-        "test_total": self.test_count,
+        'results': self.results,
+        'slowest_tests': self.tests.as_list(),
+        'duration_mean': duration_mean,
+        'test_total': self.test_count,
     }
 
     with open(self.options.json_test_results, "w") as f:
@@ -6,6 +6,7 @@
 from . import base
 from testrunner.local import utils
 from testrunner.testproc.indicators import JsonTestProgressIndicator, PROGRESS_INDICATORS
+from testrunner.testproc.resultdb import rdb_sink, ResultDBIndicator
 
 
 class ResultsTracker(base.TestProcObserver):
@@ -66,7 +67,9 @@ class ProgressProc(base.TestProcObserver):
             0,
             JsonTestProgressIndicator(context, options, test_count,
                                       framework_name))
+
+    sink = rdb_sink()
+    if sink:
+      self.procs.append(ResultDBIndicator(context, options, test_count, sink))
     self._requirement = max(proc._requirement for proc in self.procs)
 
   def _on_result_for(self, test, result):
@@ -16,6 +16,10 @@ class ResultBase(object):
   def is_rerun(self):
     return False
 
+  @property
+  def as_list(self):
+    return [self]
+
 
 class Result(ResultBase):
   """Result created by the output processor."""
@@ -112,5 +116,9 @@ class RerunResult(Result):
   def is_rerun(self):
     return True
 
+  @property
+  def as_list(self):
+    return self.results
+
   def status(self):
     return ' '.join(r.status() for r in self.results)
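The new as_list property gives single results and reruns the same shape, so consumers can iterate uniformly instead of branching on is_rerun. A sketch of the consuming pattern (handle is a hypothetical callback):

for run, sub_result in enumerate(result.as_list):
  handle(test, sub_result, run)  # run stays 0 for a plain Result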
tools/testrunner/testproc/resultdb.py (new file, 101 lines)
@@ -0,0 +1,101 @@
+# Copyright 2022 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import logging
+import pprint
+import os
+
+from . import base
+from .indicators import (
+    formatted_result_output,
+    ProgressIndicator,
+)
+from .util import (
+    base_test_record,
+    extract_tags,
+    strip_ascii_control_characters,
+)
+
+
+class ResultDBIndicator(ProgressIndicator):
+
+  def __init__(self, context, options, test_count, sink):
+    super(ResultDBIndicator, self).__init__(context, options, test_count)
+    self._requirement = base.DROP_PASS_OUTPUT
+    self.rpc = ResultDB_RPC(sink)
+
+  def on_test_result(self, test, result):
+    for run, sub_result in enumerate(result.as_list):
+      self.send_result(test, sub_result, run)
+
+  def send_result(self, test, result, run):
+    # We need to recalculate the observed (but lost) test behaviour.
+    # `result.has_unexpected_output` tells us whether the run behaviour of the
+    # test deviated from its expected behaviour, irrespective of passing or
+    # failing.
+    result_expected = not result.has_unexpected_output
+    test_should_pass = not test.is_fail
+    run_passed = (result_expected == test_should_pass)
+    rdb_result = {
+        'testId': strip_ascii_control_characters(test.full_name),
+        'status': 'PASS' if run_passed else 'FAIL',
+        'expected': result_expected,
+    }
+
+    if result.output and result.output.duration:
+      rdb_result.update(duration=f'{result.output.duration}ms')
+    if result.has_unexpected_output:
+      formatted_output = formatted_result_output(result)
+      sanitized = strip_ascii_control_characters(formatted_output)
+      # TODO(liviurau): do we have better presentation data for this?
+      # Protobuf strings can have len == 2**32.
+      rdb_result.update(summaryHtml=f'<pre>{sanitized}</pre>')
+    record = base_test_record(test, result, run)
+    rdb_result.update(tags=extract_tags(record))
+    self.rpc.send(rdb_result)
+
+
+def rdb_sink():
+  try:
+    import requests
+  except ImportError:
+    log_instantiation_failure('Failed to import requests module.')
+    return None
+  luci_context = os.environ.get('LUCI_CONTEXT')
+  if not luci_context:
+    log_instantiation_failure('No LUCI_CONTEXT found.')
+    return None
+  with open(luci_context, mode="r", encoding="utf-8") as f:
+    config = json.load(f)
+  sink = config.get('result_sink', None)
+  if not sink:
+    log_instantiation_failure('No ResultDB sink found.')
+    return None
+  return sink
+
+
+def log_instantiation_failure(error_message):
+  logging.info(f'{error_message} No results will be sent to ResultDB.')
+
+
+class ResultDB_RPC:
+
+  def __init__(self, sink):
+    import requests
+    self.session = requests.Session()
+    self.session.headers = {
+        'Authorization': f'ResultSink {sink.get("auth_token")}',
+    }
+    self.url = f'http://{sink.get("address")}/prpc/luci.resultsink.v1.Sink/ReportTestResults'
+
+  def send(self, result):
+    payload = dict(testResults=[result])
+    try:
+      self.session.post(self.url, json=payload).raise_for_status()
+    except Exception as e:
+      logging.error(f'Request failed: {payload}')
+      raise e
+
+  def __del__(self):
+    self.session.close()
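For reference, a single result as assembled by send_result() and posted by ResultDB_RPC.send() looks roughly like this (all values hypothetical):

payload = {
    'testResults': [{
        'testId': 'mjsunit/array-sort',
        'status': 'PASS',
        'expected': True,
        'duration': '1200ms',
        'tags': [{'key': 'run', 'value': '1'}],
    }]
}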
@@ -7,6 +7,7 @@ import heapq
 import logging
 import os
 import platform
+import re
 import signal
 import subprocess
 
@@ -53,6 +54,43 @@ def kill_processes_linux():
     logging.exception('Failed to kill process')
 
 
+def strip_ascii_control_characters(unicode_string):
+  return re.sub(r'[^\x20-\x7E]', '?', str(unicode_string))
+
+
+def base_test_record(test, result, run):
+  record = {
+      'name': test.full_name,
+      'flags': result.cmd.args,
+      'run': run + 1,
+      'expected': test.expected_outcomes,
+      'random_seed': test.random_seed,
+      'target_name': test.get_shell(),
+      'variant': test.variant,
+      'variant_flags': test.variant_flags,
+  }
+  if result.output:
+    record.update(
+        exit_code=result.output.exit_code,
+        duration=result.output.duration,
+    )
+  return record
+
+
+def extract_tags(record):
+  tags = []
+  for k, v in record.items():
+    if type(v) == list:
+      tags += [sanitized_kv_dict(k, e) for e in v]
+    else:
+      tags.append(sanitized_kv_dict(k, v))
+  return tags
+
+
+def sanitized_kv_dict(k, v):
+  return dict(key=k, value=strip_ascii_control_characters(v))
+
+
 class FixedSizeTopList():
   """Utility collection for gathering a fixed number of elements with the
   biggest value for the given key. It employs a heap from which we pop the
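Given extract_tags() and sanitized_kv_dict() above, list-valued record entries are flattened into one tag per element and every value is sanitized. For example (hypothetical record):

record = {'name': 'mjsunit/foo', 'flags': ['--stress-opt', '--gc-interval=500']}
extract_tags(record)
# => [{'key': 'name', 'value': 'mjsunit/foo'},
#     {'key': 'flags', 'value': '--stress-opt'},
#     {'key': 'flags', 'value': '--gc-interval=500'}]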