[resultdb] Add more data about tests

Data added:
 - subtest_id as it is generated by Processors
 - processor_name to trace back the processors that generate subtests
 - path of the test
 - test_id suffixes introduced by processors:
   - numfuzz will have
     - an `analysis` suffix for analysis phase
     - a number suffix for the variants generated after analysis
   - variant processor adds a suffix for every variant
   - subtests will inherit suffixes from the parent tests (origin)

V8-Recipe-Flags: resultdb
Cq-Include-Trybots: luci.v8.try:v8_numfuzz_dbg,v8_numfuzz_rel,v8_numfuzz_tsan_compile_rel,v8_numfuzz_tsan_rel
Bug: v8:13316
Change-Id: I67d8b92b575c31b201238cfbcfc76cd076a2f7af
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4081127
Commit-Queue: Liviu Rau <liviurau@google.com>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/main@{#84720}
This commit is contained in:
Liviu Rau 2022-12-07 10:26:21 +01:00 committed by V8 LUCI CQ
parent 011d5ea8ce
commit 576d8f9418
12 changed files with 184 additions and 19 deletions

View File

@ -267,6 +267,9 @@ class TestSuite(object):
def status_file(self):
  """Return the path of this suite's .status file (<root>/<name>.status)."""
  return f'{self.root}/{self.name}.status'
def statusfile_outcomes(self, test_name, variant):
  """Look up expected outcomes for a test/variant pair.

  Delegates to this suite's status file; added so collaborators (and test
  doubles, e.g. FakeSuite in the testcase unit test) can provide outcomes
  without exposing the statusfile object itself.

  Args:
    test_name: Name of the test as it appears in the status file.
    variant: Name of the testing variant.
  Returns:
    The outcomes recorded for this test/variant pair.
  """
  return self.statusfile.get_outcomes(test_name, variant)
@property
def _test_loader_class(self):
  """Test-loader class for this suite; subclasses must override."""
  raise NotImplementedError

View File

@ -84,13 +84,15 @@ class TestCase(object):
self.path = path # string, e.g. 'div-mod', 'test-api/foo'
self.name = name # string that identifies test in the status file
self.subtest_id = None # string that identifies subtests
self.variant = None # name of the used testing variant
self.variant_flags = [] # list of strings, flags specific to this test
# Fields used by the test processors.
self.origin = None # Test that this test is subtest of.
self.processor = None # Processor that created this subtest.
# Processor that created this subtest, initialised to a default value
self.processor = DuckProcessor()
self.procid = '%s/%s' % (self.suite.name, self.name) # unique id
self.keep_output = False # Can output of this test be dropped
@ -114,7 +116,8 @@ class TestCase(object):
subtest = copy.copy(self)
subtest.origin = self
subtest.processor = processor
subtest.procid += '.%s' % subtest_id
subtest.subtest_id = subtest_id
subtest.procid += f'.{subtest.processor_name}-{subtest_id}'
subtest.keep_output |= keep_output
if random_seed:
subtest._random_seed = random_seed
@ -133,7 +136,7 @@ class TestCase(object):
def not_flag(outcome):
return not is_flag(outcome)
outcomes = self.suite.statusfile.get_outcomes(self.name, self.variant)
outcomes = self.suite.statusfile_outcomes(self.name, self.variant)
self._statusfile_outcomes = list(filter(not_flag, outcomes))
self._statusfile_flags = list(filter(is_flag, outcomes))
self._expected_outcomes = (
@ -457,6 +460,27 @@ class TestCase(object):
def __str__(self):
return self.full_name
@property
def rdb_test_id(self):
  """ResultDB test id: ancestor ids plus this processor's suffix.

  Recurses through `origin` (the parent test when this is a subtest) and
  appends whatever suffix the creating processor contributes for this
  test, so nested subtests accumulate all ancestor suffixes.
  """
  rdb_id = self.origin.rdb_test_id if self.origin else self.full_name
  rdb_id += self.processor.test_suffix(self)
  return rdb_id
@property
def processor_name(self):
  """Name of the processor that created this subtest (None for original
  tests, which carry the default DuckProcessor)."""
  return self.processor.name
class DuckProcessor:
  """No-op stand-in processor for original (non-subtest) tests.

  Implements the processor interface by duck-typing: it has no name and
  contributes no rdb test-id suffix.
  """

  @property
  def name(self):
    # Original tests were not created by any real processor.
    return None

  def test_suffix(self, test):
    # Original tests get no extra suffix in their rdb test id.
    return ''
class D8TestCase(TestCase):
def get_shell(self):

View File

@ -0,0 +1,75 @@
#!/usr/bin/env python3
# Copyright 2022 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
# Needed because the test runner contains relative imports.
TOOLS_PATH = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(TOOLS_PATH)
from testrunner.objects.testcase import TestCase
class TestCaseTest(unittest.TestCase):
  """Unit tests for subtest creation and resultdb-related properties."""

  def testSubtestsProperties(self):
    """Check id, processor and procid propagation through nested subtests."""
    test = TestCase(
        suite=FakeSuite(),
        path='far/away',
        name='parent',
        test_config=None,
        framework_name='none')
    self.assertEqual(test.rdb_test_id, 'fakeSuite/parent')
    # provided by DuckProcessor
    self.assertEqual(test.processor.name, None)
    self.assertEqual(test.procid, 'fakeSuite/parent')
    self.assertEqual(test.keep_output, False)

    subtest = test.create_subtest(FakeProcessor(), 0, keep_output=True)
    self.assertEqual(subtest.rdb_test_id, 'fakeSuite/parent/fakep/0')
    # provided by FakeProcessor
    self.assertEqual(subtest.processor.name, 'fake_processor1')
    self.assertEqual(subtest.procid, 'fakeSuite/parent.fake_processor1-0')
    self.assertEqual(subtest.keep_output, True)

    subsubtest = subtest.create_subtest(FakeProcessor(), 1)
    self.assertEqual(subsubtest.rdb_test_id, 'fakeSuite/parent/fakep/0/fakep/1')
    # provided by FakeProcessor
    self.assertEqual(subsubtest.processor.name, 'fake_processor2')
    self.assertEqual(subsubtest.procid,
                     'fakeSuite/parent.fake_processor1-0.fake_processor2-1')
    self.assertEqual(subsubtest.keep_output, True)
class FakeSuite:
  """Minimal suite double exposing only what TestCase construction needs."""

  @property
  def name(self):
    """Fixed suite name that shows up in the expected test ids."""
    return 'fakeSuite'

  def statusfile_outcomes(self, name, variant):
    """No status-file rules apply in these tests."""
    outcomes = []
    return outcomes
class FakeProcessor:
  """Processor double that numbers its instances for unique names."""

  # Class-wide counter; every new instance claims the next number.
  instance_count = 0

  def __init__(self):
    cls = FakeProcessor
    cls.instance_count = cls.instance_count + 1
    self.idx = cls.instance_count

  @property
  def name(self):
    """Unique processor name, e.g. 'fake_processor1'."""
    return 'fake_processor%d' % self.idx

  def test_suffix(self, test):
    """Rdb test-id suffix contributed for the given subtest."""
    return '/fakep/%s' % test.subtest_id
if __name__ == '__main__':
unittest.main()

View File

@ -163,6 +163,34 @@ class StandardRunnerTest(TestRunnerTest):
# This is redundant to the command. Needs investigation.
result.json_content_equals('expected_test_results1.json')
def testRDB(self):
  """Verify the resultdb records emitted for a flaky test with reruns."""
  with self.with_fake_rdb() as records:
    # sweet/bananaflakes fails first time on stress but passes on default
    def tag_dict(tags):
      return {t['key']: t['value'] for t in tags}

    self.run_tests(
        '--variants=default,stress',
        '--rerun-failures-count=2',
        '--time',
        'sweet',
        baseroot='testroot2',
        infra_staging=False,
    )

    # NOTE: assertEquals is a deprecated alias removed in Python 3.12;
    # use assertEqual throughout.
    self.assertEqual(len(records), 3)

    # First stress run fails (unexpected), the rerun passes (expected).
    self.assertEqual(records[0]['testId'], 'sweet/bananaflakes/stress')
    self.assertEqual(tag_dict(records[0]['tags'])['run'], '1')
    self.assertFalse(records[0]['expected'])

    self.assertEqual(records[1]['testId'], 'sweet/bananaflakes/stress')
    self.assertEqual(tag_dict(records[1]['tags'])['run'], '2')
    self.assertTrue(records[1]['expected'])

    # Default variant passes on the first run.
    self.assertEqual(records[2]['testId'], 'sweet/bananaflakes/default')
    self.assertEqual(tag_dict(records[2]['tags'])['run'], '1')
    self.assertTrue(records[2]['expected'])
def testFlakeWithRerunAndJSON(self):
"""Test re-running a failing test and output to json."""
result = self.run_tests(

View File

@ -138,7 +138,7 @@ class TestProcProducer(TestProc):
def __init__(self, name):
super(TestProcProducer, self).__init__()
self._name = name
self.name = name
def next_test(self, test):
return self._next_test(test)
@ -161,11 +161,9 @@ class TestProcProducer(TestProc):
"""
raise NotImplementedError()
### Managing subtests
def _create_subtest(self, test, subtest_id, **kwargs):
"""Creates subtest with subtest id <processor name>-`subtest_id`."""
return test.create_subtest(self, '%s-%s' % (self._name, subtest_id),
**kwargs)
def test_suffix(self, test):
"""Default implementation of rdb test id suffix generated by a producer"""
return ''
class TestProcFilter(TestProc):

View File

@ -10,7 +10,7 @@ class ExpectationProc(base.TestProcProducer):
super(ExpectationProc, self).__init__('no-timeout')
def _next_test(self, test):
subtest = self._create_subtest(test, 'no_timeout')
subtest = test.create_subtest(self, 'no_timeout')
subtest.allow_timeouts()
subtest.allow_pass()
return self._send_test(subtest)

View File

@ -57,6 +57,7 @@ EXTRA_FLAGS = [
MIN_DEOPT = 1
MAX_DEOPT = 10**9
ANALYSIS_SUFFIX = 'analysis'
def random_extra_flags(rng):
@ -171,6 +172,9 @@ class FuzzerProc(base.TestProcProducer):
self._disable_analysis = disable_analysis
self._gens = {}
def test_suffix(self, test):
return '/' + test.subtest_id
def _next_test(self, test):
if self.is_stopped:
return False
@ -193,12 +197,13 @@ class FuzzerProc(base.TestProcProducer):
if analysis_flags:
analysis_flags = list(set(analysis_flags))
return self._create_subtest(test, 'analysis', flags=analysis_flags,
keep_output=True)
return test.create_subtest(
self, ANALYSIS_SUFFIX, flags=analysis_flags, keep_output=True)
def _result_for(self, test, subtest, result):
if not self._disable_analysis:
if result is not None and subtest.procid.endswith('Fuzzer-analysis'):
if result is not None and subtest.procid.endswith(
f'{self.name}-{ANALYSIS_SUFFIX}'):
# Analysis phase, for fuzzing we drop the result.
if result.has_unexpected_output:
self._send_result(test, None)
@ -245,7 +250,7 @@ class FuzzerProc(base.TestProcProducer):
flags.append('--fuzzer-random-seed=%s' % self._next_seed())
flags = _drop_contradictory_flags(flags, test.get_flags())
yield self._create_subtest(test, str(i), flags=flags)
yield test.create_subtest(self, str(i), flags=flags)
i += 1

View File

@ -58,7 +58,7 @@ class RerunProc(base.TestProcProducer):
result.has_unexpected_output)
def _send_next_subtest(self, test, run=0):
subtest = self._create_subtest(test, str(run + 1), keep_output=(run != 0))
subtest = test.create_subtest(self, str(run + 1), keep_output=(run != 0))
return self._send_test(subtest)
def _finalize_test(self, test):

View File

@ -34,7 +34,7 @@ class ResultDBIndicator(ProgressIndicator):
test_should_pass = not test.is_fail
run_passed = (result_expected == test_should_pass)
rdb_result = {
'testId': strip_ascii_control_characters(test.full_name),
'testId': strip_ascii_control_characters(test.rdb_test_id),
'status': 'PASS' if run_passed else 'FAIL',
'expected': result_expected,
}
@ -55,6 +55,11 @@ class ResultDBIndicator(ProgressIndicator):
rdb_result.update(summary_html=summary)
record = base_test_record(test, result, run)
record.update(
processor=test.processor_name,
subtest_id=test.subtest_id,
path=test.path)
rdb_result.update(tags=extract_tags(record))
self.rpc.send(rdb_result)
@ -69,6 +74,8 @@ def write_artifact(value):
def extract_tags(record):
tags = []
for k, v in record.items():
if not v:
continue
if type(v) == list:
tags += [sanitized_kv_dict(k, e) for e in v]
else:

View File

@ -45,7 +45,7 @@ class SeedProc(base.TestProcProducer):
def _try_send_next_test(self, test):
def create_subtest(idx):
seed = self._seed or random_utils.random_seed()
return self._create_subtest(test, idx, random_seed=seed)
return test.create_subtest(self, idx, random_seed=seed)
num = self._last_idx[test.procid]
if not self._count or num < self._count:

View File

@ -28,6 +28,9 @@ class VariantProc(base.TestProcProducer):
self._variant_gens = {}
self._variants = variants
def test_suffix(self, test):
return f'/{test.variant}'
def _next_test(self, test):
gen = self._variants_gen(test)
self._next_variant[test.procid] = gen
@ -43,8 +46,8 @@ class VariantProc(base.TestProcProducer):
def _try_send_new_subtest(self, test, variants_gen):
for variant, flags, suffix in variants_gen:
subtest = self._create_subtest(test, '%s-%s' % (variant, suffix),
variant=variant, flags=flags)
subtest = test.create_subtest(
self, '%s-%s' % (variant, suffix), variant=variant, flags=flags)
if self._send_test(subtest):
return True

View File

@ -14,6 +14,7 @@ import unittest
from contextlib import contextmanager
from dataclasses import dataclass
from io import StringIO
from mock import patch
from os.path import dirname as up
from testrunner.local.command import BaseCommand
@ -82,6 +83,8 @@ def clean_json_output(json_path, basedir):
# Extract relevant properties of the json output.
if not json_path:
return None
if not os.path.exists(json_path):
return None
with open(json_path) as f:
json_output = json.load(f)
@ -194,6 +197,25 @@ class TestRunnerTest(unittest.TestCase):
"""Implement to return the runner class"""
return None
@contextmanager
def with_fake_rdb(self):
  """Patch the resultdb RPC layer and collect the records it would send.

  Yields a list that accumulates every record passed to the faked
  ResultDB RPC `send`, so tests can assert on what would be uploaded.
  """
  records = []

  def fake_sink():
    # Stands in for rdb_sink; truthy so a result sink appears available.
    return True

  class Fake_RPC:

    def __init__(self, sink):
      pass

    def send(self, r):
      # Capture instead of uploading.
      records.append(r)

  with patch('testrunner.testproc.progress.rdb_sink', fake_sink), \
      patch('testrunner.testproc.resultdb.ResultDB_RPC', Fake_RPC):
    yield records
class FakeOSContext(DefaultOSContext):