Write XML test log files even for the flaky test re-runs

Previously the qt-testrunner script avoided writing XML log files for
the flaky test re-runs, since parsing them provides only limited
information about what went wrong (an unparseable file, for example),
may miss issues completely (a crashed test might not write a log file
at all), and may even mislead, if multiple different data points about
a single testcase end up in the database.

But currently Coin is the system that uploads test information to the
testresults database, and it expects to find such log files in order
to send as much information as possible regarding failures.

Re-enabling these log files will allow Coin to deduce as much
information as possible about the test runs.

This is a temporary step, at least until the test-uploading logic is
separated from Coin. Then it will be easier to get the full picture of
what happened with flaky or crashed tests from special logs written by
qt-testrunner.py, which orchestrates the test re-runs and therefore
has more knowledge about what went wrong.

Fixes: QTQAINFRA-4754
Change-Id: I69e4469cbe6c0a6ca5b2473eaf8d034632564342
Reviewed-by: Fabian Kosmale <fabian.kosmale@qt.io>
Reviewed-by: Toni Saario <toni.saario@qt.io>
Reviewed-by: Simo Fält <simo.falt@qt.io>
Author: Dimitrios Apostolou
Date:   2022-01-26 20:56:35 +01:00
Parent: b4d9d5c89c
Commit: b013f94086

@@ -79,6 +79,7 @@ import argparse
 import subprocess
 import os
 import traceback
+import time
 import timeit
 import xml.etree.ElementTree as ET
 import logging as L
@@ -241,6 +242,10 @@ def run_test(arg_list: List[str], **kwargs):
     return proc


+def unique_filename(test_basename: str) -> str:
+    timestamp = round(time.time() * 1000)
+    return f"{test_basename}-{timestamp}.xml"
+
 # Returns tuple: (exit_code, xml_logfile)
 def run_full_test(test_basename, testargs: List[str], output_dir: str,
                   no_extra_args=False, dryrun=False,
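
For illustration, the new helper stamps each log file name with a
millisecond timestamp, so every (re-)run of the same test writes to a
distinct file. A minimal standalone sketch (the directory and test name
below are placeholders, not part of the patch):

    import os
    import time

    def unique_filename(test_basename: str) -> str:
        # Millisecond resolution keeps successive re-run logs distinct.
        timestamp = round(time.time() * 1000)
        return f"{test_basename}-{timestamp}.xml"

    # Two runs a moment apart get different files, e.g.:
    #   /logs/tst_qtimer-1643227000123.xml
    #   /logs/tst_qtimer-1643227000481.xml
    print(os.path.join("/logs", unique_filename("tst_qtimer")))
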
@@ -253,7 +258,8 @@ def run_full_test(test_basename, testargs: List[str], output_dir: str,
     # Append arguments to write log to qtestlib XML file,
     # and text to stdout.
     if not no_extra_args:
-        results_files.append(os.path.join(output_dir, test_basename + ".xml"))
+        results_files.append(
+            os.path.join(output_dir, unique_filename(test_basename)))
         output_testargs.extend(["-o", results_files[0] + ",xml"])
         output_testargs.extend(["-o", "-,txt"])
@@ -263,7 +269,8 @@ def run_full_test(test_basename, testargs: List[str], output_dir: str,
     return (proc.returncode, results_files[0] if results_files else None)


-def rerun_failed_testcase(testargs: List[str], what_failed: WhatFailed,
+def rerun_failed_testcase(test_basename, testargs: List[str], output_dir: str,
+                          what_failed: WhatFailed,
                           max_repeats, passes_needed,
                           dryrun=False, timeout=None) -> bool:
     """Run a specific function:tag of a test, until it passes enough times, or
@@ -276,13 +283,20 @@ def rerun_failed_testcase(testargs: List[str], what_failed: WhatFailed,
     if what_failed.tag:
         failed_arg += ":" + what_failed.tag
     n_passes = 0
     for i in range(max_repeats):
+        # For the individual testcase re-runs, we log to file since Coin needs
+        # to parse it. That is the reason we use unique filename every time.
+        output_args = [
+            "-o", os.path.join(output_dir, unique_filename(test_basename)) + ",xml",
+            "-o", "-,txt"]
         L.info("Re-running testcase: %s", failed_arg)
         if i < max_repeats - 1:
-            proc = run_test(testargs + [failed_arg], timeout=timeout)
+            proc = run_test(testargs + output_args + [failed_arg],
+                            timeout=timeout)
         else:  # last re-run
-            proc = run_test(testargs + VERBOSE_ARGS + [failed_arg],
+            proc = run_test(testargs + output_args + VERBOSE_ARGS + [failed_arg],
                             timeout=timeout,
                             env={**os.environ, **VERBOSE_ENV})
         if proc.returncode == 0:
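
Note that output_args is rebuilt on every loop iteration, so each
re-run logs to a fresh timestamped file instead of overwriting the
previous one. A consumer such as Coin can then parse the per-run files
independently; a hypothetical sketch of such aggregation (this helper
is not part of the patch; the Incident elements and their "type"
attribute follow the standard qtestlib XML format):

    import glob
    import xml.etree.ElementTree as ET

    def collect_incidents(output_dir: str, test_basename: str):
        # One XML log per (re-)run, thanks to the timestamped filenames.
        for path in sorted(glob.glob(f"{output_dir}/{test_basename}-*.xml")):
            try:
                root = ET.parse(path).getroot()
            except ET.ParseError:
                continue  # a crashed run may leave a truncated file behind
            for incident in root.iter("Incident"):
                yield path, incident.get("type")  # e.g. "pass", "fail", "xfail"
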
@@ -356,8 +370,8 @@ def main():
         for what_failed in failed_functions:
             try:
-                ret = rerun_failed_testcase(args.testargs, what_failed,
-                                            args.max_repeats, args.passes_needed,
+                ret = rerun_failed_testcase(args.test_basename, args.testargs, args.log_dir,
+                                            what_failed, args.max_repeats, args.passes_needed,
                                             dryrun=args.dry_run, timeout=args.timeout)
             except Exception as e:
                 L.exception("The test executable CRASHed uncontrollably!"