2012-10-29 18:06:26 +00:00
|
|
|
#!/usr/bin/python
|
|
|
|
|
|
|
|
'''
|
|
|
|
Copyright 2012 Google Inc.
|
|
|
|
|
|
|
|
Use of this source code is governed by a BSD-style license that can be
|
|
|
|
found in the LICENSE file.
|
|
|
|
'''
|
|
|
|
|
|
|
|
'''
|
2012-11-29 21:50:34 +00:00
|
|
|
Rebaselines the given GM tests, on all bots and all configurations.
|
2012-10-29 18:06:26 +00:00
|
|
|
'''
|
|
|
|
|
2013-06-05 15:43:37 +00:00
|
|
|
# System-level imports
|
2013-05-30 15:46:19 +00:00
|
|
|
import argparse
|
2013-08-20 16:21:55 +00:00
|
|
|
import json
|
2013-05-29 17:09:43 +00:00
|
|
|
import os
|
2013-06-12 17:44:14 +00:00
|
|
|
import re
|
2013-07-24 15:38:39 +00:00
|
|
|
import subprocess
|
2013-05-29 17:09:43 +00:00
|
|
|
import sys
|
2013-06-05 15:43:37 +00:00
|
|
|
import urllib2
|
|
|
|
|
|
|
|
# Imports from within Skia
|
|
|
|
#
|
2013-06-12 14:25:30 +00:00
|
|
|
# We need to add the 'gm' directory, so that we can import gm_json.py within
|
|
|
|
# that directory. That script allows us to parse the actual-results.json file
|
|
|
|
# written out by the GM tool.
|
|
|
|
# Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end*
|
|
|
|
# so any dirs that are already in the PYTHONPATH will be preferred.
|
|
|
|
#
|
|
|
|
# This assumes that the 'gm' directory has been checked out as a sibling of
|
|
|
|
# the 'tools' directory containing this script, which will be the case if
|
|
|
|
# 'trunk' was checked out as a single unit.
|
2013-06-05 15:43:37 +00:00
|
|
|
# Locate the sibling 'gm' directory (../gm relative to this script's parent)
# and ensure it is on sys.path so the gm_json import below succeeds.
# Appending (rather than prepending) keeps any pre-existing PYTHONPATH
# entries preferred over ours.
GM_DIRECTORY = os.path.realpath(
    os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm'))
if GM_DIRECTORY not in sys.path:
    sys.path.append(GM_DIRECTORY)
import gm_json
|
|
|
|
|
2013-08-24 20:45:31 +00:00
|
|
|
# TODO(epoger): In the long run, we want to build this list automatically,
# but for now we hard-code it until we can properly address
# https://code.google.com/p/skia/issues/detail?id=1544
# ('live query of builder list makes rebaseline.py slow to start up')
#
# Complete set of builders this script knows about; any --builders argument
# is validated against this list, and when --builders is omitted, every
# builder listed here is rebaselined.
TEST_BUILDERS = [
    # Android devices
    'Test-Android-GalaxyNexus-SGX540-Arm7-Debug',
    'Test-Android-GalaxyNexus-SGX540-Arm7-Release',
    'Test-Android-IntelRhb-SGX544-x86-Debug',
    'Test-Android-IntelRhb-SGX544-x86-Release',
    'Test-Android-Nexus10-MaliT604-Arm7-Debug',
    'Test-Android-Nexus10-MaliT604-Arm7-Release',
    'Test-Android-Nexus4-Adreno320-Arm7-Debug',
    'Test-Android-Nexus4-Adreno320-Arm7-Release',
    'Test-Android-Nexus7-Tegra3-Arm7-Debug',
    'Test-Android-Nexus7-Tegra3-Arm7-Release',
    'Test-Android-NexusS-SGX540-Arm7-Debug',
    'Test-Android-NexusS-SGX540-Arm7-Release',
    'Test-Android-Xoom-Tegra2-Arm7-Debug',
    'Test-Android-Xoom-Tegra2-Arm7-Release',
    # ChromeOS devices
    'Test-ChromeOS-Alex-GMA3150-x86-Debug',
    'Test-ChromeOS-Alex-GMA3150-x86-Release',
    'Test-ChromeOS-Daisy-MaliT604-Arm7-Debug',
    'Test-ChromeOS-Daisy-MaliT604-Arm7-Release',
    'Test-ChromeOS-Link-HD4000-x86_64-Debug',
    'Test-ChromeOS-Link-HD4000-x86_64-Release',
    # Mac
    'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Debug',
    'Test-Mac10.6-MacMini4.1-GeForce320M-x86-Release',
    'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Debug',
    'Test-Mac10.6-MacMini4.1-GeForce320M-x86_64-Release',
    'Test-Mac10.7-MacMini4.1-GeForce320M-x86-Debug',
    'Test-Mac10.7-MacMini4.1-GeForce320M-x86-Release',
    'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Debug',
    'Test-Mac10.7-MacMini4.1-GeForce320M-x86_64-Release',
    'Test-Mac10.8-MacMini4.1-GeForce320M-x86-Debug',
    'Test-Mac10.8-MacMini4.1-GeForce320M-x86-Release',
    'Test-Mac10.8-MacMini4.1-GeForce320M-x86_64-Debug',
    'Test-Mac10.8-MacMini4.1-GeForce320M-x86_64-Release',
    # Linux
    'Test-Ubuntu12-ShuttleA-ATI5770-x86-Debug',
    'Test-Ubuntu12-ShuttleA-ATI5770-x86-Release',
    'Test-Ubuntu12-ShuttleA-ATI5770-x86_64-Debug',
    'Test-Ubuntu12-ShuttleA-ATI5770-x86_64-Release',
    'Test-Ubuntu12-ShuttleA-HD2000-x86_64-Release-Valgrind',
    'Test-Ubuntu12-ShuttleA-NoGPU-x86_64-Debug',
    'Test-Ubuntu13-ShuttleA-HD2000-x86_64-Debug-ASAN',
    # Windows
    'Test-Win7-ShuttleA-HD2000-x86-Debug',
    'Test-Win7-ShuttleA-HD2000-x86-Debug-ANGLE',
    'Test-Win7-ShuttleA-HD2000-x86-Debug-DirectWrite',
    'Test-Win7-ShuttleA-HD2000-x86-Release',
    'Test-Win7-ShuttleA-HD2000-x86-Release-ANGLE',
    'Test-Win7-ShuttleA-HD2000-x86-Release-DirectWrite',
    'Test-Win7-ShuttleA-HD2000-x86_64-Debug',
    'Test-Win7-ShuttleA-HD2000-x86_64-Release',
]
|
2013-05-29 17:09:43 +00:00
|
|
|
|
2013-07-11 19:20:30 +00:00
|
|
|
class _InternalException(Exception):
|
2013-08-02 20:54:46 +00:00
|
|
|
pass
|
2013-06-04 14:58:47 +00:00
|
|
|
|
2013-07-16 17:35:39 +00:00
|
|
|
class ExceptionHandler(object):
    """Handles exceptions, either raising them immediately or collecting
    them to display later on.

    params:
      keep_going_on_failure: if False, report failures and quit right away;
                             if True, collect failures until
                             ReportAllFailures() is called
    """

    def __init__(self, keep_going_on_failure=False):
        self._keep_going_on_failure = keep_going_on_failure
        self._failures_encountered = []
        self._exiting = False

    def _Exit(self, status=1):
        """Exit the program with the given status value."""
        self._exiting = True
        sys.exit(status)

    def RaiseExceptionOrContinue(self, e):
        """Record exception e and keep going, or exit the program right away."""
        # If we are already quitting the program, propagate any exceptions
        # so that the proper exit status will be communicated to the shell.
        if self._exiting:
            raise e

        if not self._keep_going_on_failure:
            sys.stderr.write('%s\n' % e)
            sys.stderr.write(
                'Halting at first exception; to keep going, re-run '
                'with the --keep-going-on-failure option set.\n')
            self._Exit()
        else:
            sys.stderr.write('WARNING: swallowing exception %s\n' % e)
            self._failures_encountered.append(e)

    def ReportAllFailures(self):
        """If any failures were collected, report their count and exit nonzero;
        otherwise do nothing."""
        if self._failures_encountered:
            sys.stderr.write('Encountered %d failures (see above).\n' %
                             len(self._failures_encountered))
            self._Exit()
|
2013-07-16 17:35:39 +00:00
|
|
|
|
|
|
|
|
2013-06-19 18:56:59 +00:00
|
|
|
class JsonRebaseliner(object):
    """Rebaselines a JSON expectations file (not individual image files).

    params:
      expectations_root: root directory of all expectations JSON files
      expectations_input_filename: filename (under expectations_root) of JSON
                                   expectations file to read; typically
                                   "expected-results.json"
      expectations_output_filename: filename (under expectations_root) to
                                    which updated expectations should be
                                    written; typically the same as
                                    expectations_input_filename, to overwrite
                                    the old content
      actuals_base_url: base URL from which to read actual-result JSON files
      actuals_filename: filename (under actuals_base_url) from which to read a
                        summary of results; typically "actual-results.json"
      exception_handler: reference to rebaseline.ExceptionHandler object
      tests: list of tests to rebaseline, or None if we should rebaseline
             whatever files the JSON results summary file tells us to
      configs: which configs to run for each test, or None if we should
               rebaseline whatever configs the JSON results summary file tells
               us to
      add_new: if True, add expectations for tests which don't have any yet
    """

    def __init__(self, expectations_root, expectations_input_filename,
                 expectations_output_filename, actuals_base_url,
                 actuals_filename, exception_handler,
                 tests=None, configs=None, add_new=False):
        self._expectations_root = expectations_root
        self._expectations_input_filename = expectations_input_filename
        self._expectations_output_filename = expectations_output_filename
        self._tests = tests
        self._configs = configs
        self._actuals_base_url = actuals_base_url
        self._actuals_filename = actuals_filename
        self._exception_handler = exception_handler
        self._add_new = add_new
        self._image_filename_re = re.compile(gm_json.IMAGE_FILENAME_PATTERN)
        # If the expectations dir is under SVN control, we must fix up file
        # properties after rewriting expectations (see RebaselineSubdir).
        self._using_svn = os.path.isdir(os.path.join(expectations_root, '.svn'))

    def _Call(self, cmd):
        """Executes subprocess.call(cmd).

        Raises _InternalException if the command returns a nonzero exit code.
        """
        if subprocess.call(cmd) != 0:
            raise _InternalException('error running command: ' + ' '.join(cmd))

    def _GetFileContents(self, filepath):
        """Returns the full contents of filepath, as a single string.

        If filepath looks like a URL, try to read it that way instead of as
        a path on local storage.

        Raises _InternalException if there is a problem.
        """
        if filepath.startswith('http:') or filepath.startswith('https:'):
            try:
                return urllib2.urlopen(filepath).read()
            except urllib2.HTTPError as e:
                raise _InternalException('unable to read URL %s: %s' % (
                    filepath, e))
        else:
            # Use a context manager so the file handle is closed promptly;
            # the previous open(...).read() leaked the handle until GC.
            with open(filepath, 'r') as f:
                return f.read()

    def _GetActualResults(self, json_url, sections=None):
        """Returns a dictionary of actual results from actual-results.json file.

        The dictionary returned has this format:
        {
          u'imageblur_565.png': [u'bitmap-64bitMD5', 3359963596899141322],
          u'imageblur_8888.png': [u'bitmap-64bitMD5', 4217923806027861152],
          u'shadertext3_8888.png': [u'bitmap-64bitMD5', 3713708307125704716]
        }

        If the JSON actual result summary file cannot be loaded, logs a warning
        message and returns None.
        If the JSON actual result summary file can be loaded, but we have
        trouble parsing it, raises an Exception.

        params:
          json_url: URL pointing to a JSON actual result summary file
          sections: a list of section names to include in the results, e.g.
                    [gm_json.JSONKEY_ACTUALRESULTS_FAILED,
                     gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON] ;
                    if None, then include ALL sections.
        """
        try:
            json_contents = self._GetFileContents(json_url)
        except _InternalException:
            sys.stderr.write(
                'could not read json_url %s ; skipping this platform.\n' %
                json_url)
            return None
        json_dict = gm_json.LoadFromString(json_contents)
        results_to_return = {}
        actual_results = json_dict[gm_json.JSONKEY_ACTUALRESULTS]
        if not sections:
            sections = actual_results.keys()
        for section in sections:
            section_results = actual_results[section]
            if section_results:
                results_to_return.update(section_results)
        return results_to_return

    def RebaselineSubdir(self, builder):
        """Rebaselines all tests/types specified in the constructor, within
        this builder's subdirectory in expectations/gm .

        params:
          builder : e.g. 'Test-Win7-ShuttleA-HD2000-x86-Release'
        """
        # Read in the actual result summary, and extract all the tests whose
        # results we need to update.
        actuals_url = '/'.join([self._actuals_base_url,
                                builder, self._actuals_filename])
        # In most cases, we won't need to re-record results that are already
        # succeeding, but including the SUCCEEDED results will allow us to
        # re-record expectations if they somehow get out of sync.
        sections = [gm_json.JSONKEY_ACTUALRESULTS_FAILED,
                    gm_json.JSONKEY_ACTUALRESULTS_SUCCEEDED]
        if self._add_new:
            sections.append(gm_json.JSONKEY_ACTUALRESULTS_NOCOMPARISON)
        results_to_update = self._GetActualResults(json_url=actuals_url,
                                                   sections=sections)

        # Read in current expectations.
        expectations_input_filepath = os.path.join(
            self._expectations_root, builder,
            self._expectations_input_filename)
        expectations_dict = gm_json.LoadFromFile(expectations_input_filepath)
        expected_results = expectations_dict[gm_json.JSONKEY_EXPECTEDRESULTS]

        # Update the expectations in memory, skipping any tests/configs that
        # the caller asked to exclude.  (The previous version also accumulated
        # the skipped image names in a list it never read; that dead code has
        # been removed.)
        if results_to_update:
            for (image_name, image_results) in results_to_update.items():
                (test, config) = self._image_filename_re.match(
                    image_name).groups()
                if self._tests and test not in self._tests:
                    continue
                if self._configs and config not in self._configs:
                    continue
                if not expected_results.get(image_name):
                    expected_results[image_name] = {}
                expected_results[image_name][
                    gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS] = \
                    [image_results]

        # Write out updated expectations.
        expectations_output_filepath = os.path.join(
            self._expectations_root, builder,
            self._expectations_output_filename)
        gm_json.WriteToFile(expectations_dict, expectations_output_filepath)

        # Mark the JSON file as plaintext, so text-style diffs can be applied.
        # Fixes https://code.google.com/p/skia/issues/detail?id=1442
        if self._using_svn:
            self._Call(['svn', 'propset', '--quiet', 'svn:mime-type',
                        'text/x-json', expectations_output_filepath])
|
2012-10-29 18:06:26 +00:00
|
|
|
|
2013-05-30 15:46:19 +00:00
|
|
|
# main...

# Build the command-line interface.  Help strings use implicit string-literal
# concatenation; the resulting text is identical to the original.
parser = argparse.ArgumentParser()
parser.add_argument('--actuals-base-url',
                    help='base URL from which to read files containing JSON '
                         'summaries of actual GM results; defaults to %(default)s',
                    default='http://skia-autogen.googlecode.com/svn/gm-actual')
parser.add_argument('--actuals-filename',
                    help='filename (within builder-specific subdirectories '
                         'of ACTUALS_BASE_URL) to read a summary of results from; '
                         'defaults to %(default)s',
                    default='actual-results.json')
# TODO(epoger): Add test that exercises --add-new argument.
parser.add_argument('--add-new', action='store_true',
                    help='in addition to the standard behavior of '
                         'updating expectations for failing tests, add '
                         "expectations for tests which don't have expectations "
                         'yet.')
parser.add_argument('--builders', metavar='BUILDER', nargs='+',
                    help='which platforms to rebaseline; '
                         'if unspecified, rebaseline all platforms, same as '
                         '"--builders %s"' % ' '.join(sorted(TEST_BUILDERS)))
# TODO(epoger): Add test that exercises --configs argument.
parser.add_argument('--configs', metavar='CONFIG', nargs='+',
                    help='which configurations to rebaseline, e.g. '
                         '"--configs 565 8888", as a filter over the full set of '
                         'results in ACTUALS_FILENAME; if unspecified, rebaseline '
                         '*all* configs that are available.')
parser.add_argument('--expectations-filename',
                    help='filename (under EXPECTATIONS_ROOT) to read '
                         'current expectations from, and to write new '
                         'expectations into (unless a separate '
                         'EXPECTATIONS_FILENAME_OUTPUT has been specified); '
                         'defaults to %(default)s',
                    default='expected-results.json')
parser.add_argument('--expectations-filename-output',
                    help='filename (under EXPECTATIONS_ROOT) to write '
                         'updated expectations into; by default, overwrites the '
                         'input file (EXPECTATIONS_FILENAME)',
                    default='')
parser.add_argument('--expectations-root',
                    help='root of expectations directory to update-- should '
                         'contain one or more builder subdirectories. Defaults to '
                         '%(default)s',
                    default=os.path.join('expectations', 'gm'))
parser.add_argument('--keep-going-on-failure', action='store_true',
                    help='instead of halting at the first error encountered, '
                         'keep going and rebaseline as many tests as possible, '
                         'and then report the full set of errors at the end')
# TODO(epoger): Add test that exercises --tests argument.
parser.add_argument('--tests', metavar='TEST', nargs='+',
                    help='which tests to rebaseline, e.g. '
                         '"--tests aaclip bigmatrix", as a filter over the full '
                         'set of results in ACTUALS_FILENAME; if unspecified, '
                         'rebaseline *all* tests that are available.')
args = parser.parse_args()
|
2013-07-16 17:35:39 +00:00
|
|
|
# Decide how to react to failures, based on the command-line flag.
exception_handler = ExceptionHandler(
    keep_going_on_failure=args.keep_going_on_failure)

if args.builders:
    builders = args.builders
    # NOTE(review): missing_json_is_fatal is set here but never read within
    # this file; it appears to be vestigial -- confirm before removing.
    missing_json_is_fatal = True
else:
    builders = sorted(TEST_BUILDERS)
    missing_json_is_fatal = False

# Rebaseline each requested builder in turn; failures are either fatal or
# collected by exception_handler, per --keep-going-on-failure.
for builder in builders:
    # Idiomatic membership test ('not in' rather than 'not x in').
    if builder not in TEST_BUILDERS:
        raise Exception(('unrecognized builder "%s"; ' +
                         'should be one of %s') % (
                             builder, TEST_BUILDERS))

    expectations_json_file = os.path.join(args.expectations_root, builder,
                                          args.expectations_filename)
    if os.path.isfile(expectations_json_file):
        rebaseliner = JsonRebaseliner(
            expectations_root=args.expectations_root,
            expectations_input_filename=args.expectations_filename,
            # Fall back to overwriting the input file when no separate
            # output filename was given.
            expectations_output_filename=(args.expectations_filename_output or
                                          args.expectations_filename),
            tests=args.tests, configs=args.configs,
            actuals_base_url=args.actuals_base_url,
            actuals_filename=args.actuals_filename,
            exception_handler=exception_handler,
            add_new=args.add_new)
        try:
            rebaseliner.RebaselineSubdir(builder=builder)
        except BaseException as e:
            exception_handler.RaiseExceptionOrContinue(e)
    else:
        exception_handler.RaiseExceptionOrContinue(_InternalException(
            'expectations_json_file %s not found' % expectations_json_file))

# If failures were collected (--keep-going-on-failure), report them now and
# exit nonzero.
exception_handler.ReportAllFailures()
|