Remove tools/tests.

BUG=skia:
R=bsalomon@google.com

Review URL: https://codereview.chromium.org/796813004
@@ -1,145 +0,0 @@
#!/usr/bin/python

"""
Copyright 2014 Google Inc.

Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.

A wrapper around the standard Python unittest library, adding features we need
for various unittests within this directory.

TODO(epoger): Move this into the common repo for broader use? Or at least to
a more common place within the Skia repo?
"""

import errno
import filecmp
import os
import shutil
import tempfile
import unittest

TRUNK_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.pardir, os.pardir))


class TestCase(unittest.TestCase):

  def __init__(self, *args, **kwargs):
    super(TestCase, self).__init__(*args, **kwargs)
    # Subclasses should override this default value if they want their output
    # to be automatically compared against expectations (see setUp and
    # tearDown).
    self._testdata_dir = None

  def setUp(self):
    """Called before each test."""
    # Get the name of this test, in such a way that it will be consistent
    # regardless of the directory it is run from (throw away package names,
    # if any).
    self._test_name = '.'.join(self.id().split('.')[-3:])

    self._temp_dir = tempfile.mkdtemp()
    if self._testdata_dir:
      self.create_empty_dir(self.output_dir_actual)

  def tearDown(self):
    """Called after each test."""
    shutil.rmtree(self._temp_dir)
    if self._testdata_dir and os.path.exists(self.output_dir_expected):
      different_files = _find_different_files(self.output_dir_actual,
                                              self.output_dir_expected)
      # Don't add any cleanup code below this assert!
      # Otherwise, if tests fail, the artifacts will not be cleaned up.
      assert (not different_files), \
          ('found differing files:\n' +
           '\n'.join(['tkdiff %s %s &' % (
               os.path.join(self.output_dir_actual, basename),
               os.path.join(self.output_dir_expected, basename))
                      for basename in different_files]))

  @property
  def temp_dir(self):
    return self._temp_dir

  @property
  def input_dir(self):
    assert self._testdata_dir, 'self._testdata_dir must be set'
    return os.path.join(self._testdata_dir, 'inputs')

  @property
  def output_dir_actual(self):
    assert self._testdata_dir, 'self._testdata_dir must be set'
    return os.path.join(
        self._testdata_dir, 'outputs', 'actual', self._test_name)

  @property
  def output_dir_expected(self):
    assert self._testdata_dir, 'self._testdata_dir must be set'
    return os.path.join(
        self._testdata_dir, 'outputs', 'expected', self._test_name)

  def shortDescription(self):
    """Tell unittest framework to not print docstrings for test cases."""
    return None

  def create_empty_dir(self, path):
    """Creates an empty directory at path and returns path.

    Args:
      path: path on local disk
    """
    # Delete the old one, if any.
    if os.path.isdir(path):
      shutil.rmtree(path=path, ignore_errors=True)
    elif os.path.lexists(path):
      os.remove(path)

    # Create the new one.
    try:
      os.makedirs(path)
    except OSError as exc:
      # Guard against race condition (somebody else is creating the same dir)
      if exc.errno != errno.EEXIST:
        raise
    return path


def _find_different_files(dir1, dir2, ignore_subtree_names=None):
  """Returns a list of any files that differ between the directory trees
  rooted at dir1 and dir2.

  Args:
    dir1: root of a directory tree; if nonexistent, will raise OSError
    dir2: root of another directory tree; if nonexistent, will raise OSError
    ignore_subtree_names: list of subtree directory names to ignore;
        defaults to ['.svn'], so all SVN files are ignored

  TODO(epoger): include the dirname within each filename (not just the
  basename), to make it easier to locate any differences
  """
  differing_files = []
  if ignore_subtree_names is None:
    ignore_subtree_names = ['.svn']
  dircmp = filecmp.dircmp(dir1, dir2, ignore=ignore_subtree_names)
  differing_files.extend(dircmp.left_only)
  differing_files.extend(dircmp.right_only)
  differing_files.extend(dircmp.common_funny)
  differing_files.extend(dircmp.diff_files)
  differing_files.extend(dircmp.funny_files)
  for common_dir in dircmp.common_dirs:
    differing_files.extend(_find_different_files(
        os.path.join(dir1, common_dir), os.path.join(dir2, common_dir)))
  return differing_files


def main(test_case_class):
  """Run the unit tests within the given class.

  Raises an Exception if any of those tests fail (in case we are running in
  the context of run_all.py, which depends on that Exception to signal
  failures).
  """
  suite = unittest.TestLoader().loadTestsFromTestCase(test_case_class)
  results = unittest.TextTestRunner(verbosity=2).run(suite)
  if not results.wasSuccessful():
    raise Exception('failed unittest %s' % test_case_class)
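For context on how this helper was consumed (the render_pictures test later in this CL uses the same pattern), here is a minimal sketch of a subclass. The module name, class name, and testdata path are hypothetical, not part of this CL:

    #!/usr/bin/python

    import os

    import base_unittest  # the helper module deleted above


    class MyToolTest(base_unittest.TestCase):  # hypothetical test class

      def setUp(self):
        # Setting _testdata_dir (before calling the base setUp) opts in to
        # the automatic comparison of outputs/actual/<test_name> against
        # outputs/expected/<test_name> in tearDown.
        self._testdata_dir = os.path.join('tools', 'tests', 'mytool')  # hypothetical
        super(MyToolTest, self).setUp()

      def test_something(self):
        # Anything written into self.output_dir_actual is diffed against
        # self.output_dir_expected when the test tears down.
        with open(os.path.join(self.output_dir_actual, 'out.txt'), 'w') as fh:
          fh.write('some output\n')


    def main():
      base_unittest.main(MyToolTest)


    if __name__ == '__main__':
      main()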
@@ -1,46 +0,0 @@
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.


"""
Verify that the bench_pictures.cfg file is sane.
"""


import os
import sys


def ThrowIfNotAString(obj):
  """ Raise a TypeError if obj is not a string. """
  if str(obj) != obj:
    raise TypeError('%s is not a string!' % str(obj))


def Main(argv):
  """ Verify that the bench_pictures.cfg file is sane.

  - Exec the file to ensure that it uses correct Python syntax.
  - Make sure that every element is a string, because the buildbot scripts
    will fail to execute if this is not the case.

  This test does not verify that the well-formed configs are actually valid.
  """
  vars = {'import_path': 'tools'}
  execfile(os.path.join('tools', 'bench_pictures.cfg'), vars)
  bench_pictures_cfg = vars['bench_pictures_cfg']

  for config_name, config_list in bench_pictures_cfg.iteritems():
    ThrowIfNotAString(config_name)
    for config in config_list:
      for key, value in config.iteritems():
        ThrowIfNotAString(key)
        if type(value).__name__ == 'list':
          for item in value:
            ThrowIfNotAString(item)
        elif value is not True:
          ThrowIfNotAString(value)

if __name__ == '__main__':
  sys.exit(Main(sys.argv))
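For reference, a bench_pictures_cfg that satisfies the checks above would be shaped roughly like this. The names and values here are illustrative guesses (the real bench_pictures.cfg is not part of this diff), but every key and value is a string, a list of strings, or the literal True, which is all the checker permits:

    # Hypothetical bench_pictures.cfg contents.
    bench_pictures_cfg = {
        'default': [
            {'device': 'bitmap', 'mode': ['tile', '256', '256']},
            {'device': 'bitmap', 'mode': 'record', 'timeIndividualTiles': True},
        ],
    }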
@@ -1,4 +0,0 @@
# Bench expectation entries for testing check_bench_regressions.py.
desk_amazon.skp_record_,Perf-Android-Nexus7-Tegra3-Arm7-Release-25th,1.1,-1,1.2
desk_baidu.skp_record_,Perf-Android-Nexus7-Tegra3-Arm7-Release-25th,0.939,0.9,1
desk_blogger.skp_record_,Perf-Android-Nexus7-Tegra3-Arm7-Release-25th,0.5,0.4,0.6
@@ -1 +0,0 @@
python bench/check_bench_regressions.py -a 25th -b Perf-Android-Nexus7-Tegra3-Arm7-Release -d tools/tests/benchalerts/Perf-Android-Nexus7-Tegra3-Arm7-Release/raw-bench-data -e tools/tests/benchalerts/Perf-Android-Nexus7-Tegra3-Arm7-Release/expectations.txt -r 69c9e1a7261a3c8361e2b2c109d6340862149e34
@@ -1 +0,0 @@
1
@@ -1,12 +0,0 @@
Exception:

2 benches got slower (sorted by % difference):
Bench desk_blogger.skp_record_,Perf-Android-Nexus7-Tegra3-Arm7-Release-25th out of range [0.4, 0.6] (1.794 vs 0.5, 258.8%).
http://go/skpdash/#15~desk_blogger~Perf-Android-Nexus7-Tegra3-Arm7-Release~record
Bench desk_amazon.skp_record_,Perf-Android-Nexus7-Tegra3-Arm7-Release-25th out of range [-1.0, 1.2] (1.213 vs 1.1, 10.2727272727%).
http://go/skpdash/#15~desk_amazon~Perf-Android-Nexus7-Tegra3-Arm7-Release~record

1 benches got faster (sorted by % difference):
Bench desk_baidu.skp_record_,Perf-Android-Nexus7-Tegra3-Arm7-Release-25th out of range [0.9, 1.0] (0.83 vs 0.939, -11.6080937167%).
http://go/skpdash/#15~desk_baidu~Perf-Android-Nexus7-Tegra3-Arm7-Release~record
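A quick sanity check on this fixture: each percentage in the stderr above is simply the change of the observed value relative to the expected (25th-percentile) value from expectations.txt:

    print((1.794 - 0.5) / 0.5 * 100)     # 258.8            (desk_blogger)
    print((1.213 - 1.1) / 1.1 * 100)     # ~10.2727272727   (desk_amazon)
    print((0.83 - 0.939) / 0.939 * 100)  # ~-11.6080937167  (desk_baidu)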
@@ -1,20 +0,0 @@
#!/usr/bin/python

"""
Copyright 2014 Google Inc.

Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.

Adds possibly-needed directories to PYTHONPATH, if they aren't already there.
"""

import os
import sys

TRUNK_DIRECTORY = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.pardir, os.pardir))
for subdir in ['tools']:
  fullpath = os.path.join(TRUNK_DIRECTORY, subdir)
  if fullpath not in sys.path:
    sys.path.append(fullpath)
@@ -1,22 +0,0 @@
{
  "expected-results" : {
    "identical.png" : {
      "allowed-digests" : [
        [ "bitmap-64bitMD5", 1111111 ]
      ],
      "ignore-failure" : false
    },
    "differing.png" : {
      "allowed-digests" : [
        [ "bitmap-64bitMD5", 777777777 ]
      ],
      "ignore-failure" : false
    },
    "missing-from-old.png" : {
      "allowed-digests" : [
        [ "bitmap-64bitMD5", 3333333 ]
      ],
      "ignore-failure" : false
    }
  }
}
@@ -1,22 +0,0 @@
{
  "expected-results" : {
    "identical.png" : {
      "allowed-digests" : [
        [ "bitmap-64bitMD5", 1111111 ]
      ],
      "ignore-failure" : false
    },
    "differing.png" : {
      "allowed-digests" : [
        [ "bitmap-64bitMD5", 888888888 ]
      ],
      "ignore-failure" : false
    },
    "missing-from-new.png" : {
      "allowed-digests" : [
        [ "bitmap-64bitMD5", 44444444 ]
      ],
      "ignore-failure" : false
    }
  }
}
tools/tests/jsondiff/output/.gitignore
@@ -1 +0,0 @@
*/output-actual/
@@ -1 +0,0 @@
python tools/jsondiff.py tools/tests/jsondiff/input/old.json tools/tests/jsondiff/input/new.json
@@ -1,14 +0,0 @@
{
  "differing.png": {
    "new": 777777777,
    "old": 888888888
  },
  "missing-from-new.png": {
    "new": null,
    "old": 44444444
  },
  "missing-from-old.png": {
    "new": 3333333,
    "old": null
  }
}
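The relationship between the two expectations files above and this expected output: for every image whose allowed digest differs between the two files (or is missing on one side), report the old and new digest values, with null for a missing side. A minimal sketch of that comparison, assuming the "expected-results"/"allowed-digests" layout shown above (this is not the actual tools/jsondiff.py implementation):

    import json

    def diff_expectations(old_path, new_path):
      """Map each differing image name to its old and new digest (or None)."""
      def digests(path):
        with open(path) as fh:
          results = json.load(fh)['expected-results']
        # Each "allowed-digests" entry looks like [["bitmap-64bitMD5", N]];
        # keep just the numeric digest of the first entry.
        return {name: entry['allowed-digests'][0][1]
                for name, entry in results.items()}
      old, new = digests(old_path), digests(new_path)
      return {name: {'old': old.get(name), 'new': new.get(name)}
              for name in set(old) | set(new)
              if old.get(name) != new.get(name)}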
@@ -1,98 +0,0 @@
#!/bin/bash

# Rebaseline the skdiff/*/output-expected/ subdirectories used by the skdiff
# self-tests, and similar for benchgraphs/*/output-expected.
#
# Use with caution: are you sure the new results are actually correct?
#
# YOU MUST RE-RUN THIS UNTIL THE SELF-TESTS SUCCEED!
#
# TODO: currently, this must be run on Linux to generate baselines that match
# the ones on the housekeeper bot (which runs on Linux... see
# http://70.32.156.51:10117/builders/Skia_PerCommit_House_Keeping/builds/1417/steps/RunGmSelfTests/logs/stdio )
# See https://code.google.com/p/skia/issues/detail?id=677
# ('make tools/tests/run.sh work cross-platform')

# Replace expected output with actual output, within subdir $1.
function replace_expected_with_actual {
  if [ $# != 1 ]; then
    echo "replace_expected_with_actual requires exactly 1 parameter, got $#"
    exit 1
  fi

  # Delete all the expected output files
  EXPECTED_FILES=$(find $1/*/output-expected -type f | grep -v /\.svn/)
  for EXPECTED_FILE in $EXPECTED_FILES; do
    rm $EXPECTED_FILE
  done

  # Copy all the actual output files into the "expected" directories,
  # creating new subdirs as we go.
  ACTUAL_FILES=$(find $1/*/output-actual -type f | grep -v /\.svn/)
  for ACTUAL_FILE in $ACTUAL_FILES; do
    EXPECTED_FILE=${ACTUAL_FILE//actual/expected}
    mkdir -p $(dirname $EXPECTED_FILE)
    cp $ACTUAL_FILE $EXPECTED_FILE
  done
}

# Add all new files to SVN control, within subdir $1.
function svn_add_new_files {
  if [ $# != 1 ]; then
    echo "svn_add_new_files requires exactly 1 parameter, got $#"
    exit 1
  fi

  # Delete all the "actual" directories, so we can svn-add any new "expected"
  # directories without adding the "actual" ones.
  rm -rf $1/*/output-actual $1/*/raw-bench-data
  FILES=$(svn stat $1/* | grep ^\? | awk '{print $2}')
  for FILE in $FILES; do
    svn add $FILE
  done
  FILES=$(svn stat $1/*/output-expected | grep ^\? | awk '{print $2}')
  for FILE in $FILES; do
    svn add $FILE
  done
}

# For any files that have been removed from subdir $1, remove them from
# SVN control.
function svn_delete_old_files {
  if [ $# != 1 ]; then
    echo "svn_delete_old_files requires exactly 1 parameter, got $#"
    exit 1
  fi

  FILES=$(svn stat $1/*/output-expected | grep ^\! | awk '{print $2}')
  for FILE in $FILES; do
    svn rm $FILE
  done
  FILES=$(svn stat $1/* | grep ^\! | awk '{print $2}')
  for FILE in $FILES; do
    svn rm $FILE
  done
}


# cd into the gm self-test dir
cd $(dirname $0)

./run.sh
SELFTEST_RESULT=$?
SUBDIRS="skdiff benchgraphs rebaseline/output jsondiff/output"
echo
if [ "$SELFTEST_RESULT" != "0" ]; then
  for SUBDIR in $SUBDIRS; do
    replace_expected_with_actual $SUBDIR
  done
  echo "Self-tests still failing, you should probably run this again..."
else
  for SUBDIR in $SUBDIRS; do
    svn_add_new_files $SUBDIR
    svn_delete_old_files $SUBDIR
  done
  echo "Self-tests succeeded this time, you should be done!"
fi
exit $SELFTEST_RESULT
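Usage was implied rather than documented: the script cd's into its own directory, so it can be run from anywhere in the checkout, and per the warning in its header it must simply be rerun until the success message appears. A sketch of that loop:

    ./tools/tests/rebaseline.sh   # repeat until it prints
                                  # "Self-tests succeeded this time, you should be done!"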
@@ -1,754 +0,0 @@
#!/usr/bin/python

"""
Copyright 2014 Google Inc.

Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.

Test the render_pictures binary.
"""

# System-level imports
import copy
import json
import os
import shutil
import tempfile

# Must fix up PYTHONPATH before importing from within Skia
import fix_pythonpath  # pylint: disable=W0611

# Imports from within Skia
import base_unittest
import find_run_binary

# Maximum length of text diffs to show when tests fail
MAX_DIFF_LENGTH = 30000

EXPECTED_HEADER_CONTENTS = {
    "type" : "ChecksummedImages",
    "revision" : 1,
}

# Manually verified: 640x400 red rectangle with black border.
# Standard expectations will be set up in such a way that this image fails
# the comparison.
RED_WHOLEIMAGE = {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 2853310525600416231,
    "comparisonResult" : "failed",
    "filepath" : "red_skp.png",
}

# Manually verified: 640x400 green rectangle with black border.
# Standard expectations will be set up in such a way that this image passes
# the comparison.
GREEN_WHOLEIMAGE = {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 11143979097452425335,
    "comparisonResult" : "succeeded",
    "filepath" : "green_skp.png",
}

# Manually verified these 6 images, all 256x256 tiles,
# consistent with a tiled version of the 640x400 red rect
# with black borders.
# Standard expectations will be set up in such a way that these images fail
# the comparison.
RED_TILES = [{
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 5815827069051002745,
    "comparisonResult" : "failed",
    "filepath" : "red_skp-tile0.png",
}, {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 9323613075234140270,
    "comparisonResult" : "failed",
    "filepath" : "red_skp-tile1.png",
}, {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 15939355025996362179,
    "comparisonResult" : "failed",
    "filepath" : "red_skp-tile2.png",
}, {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 649771916797529222,
    "comparisonResult" : "failed",
    "filepath" : "red_skp-tile3.png",
}, {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 8132820002266077288,
    "comparisonResult" : "failed",
    "filepath" : "red_skp-tile4.png",
}, {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 2406160701181324581,
    "comparisonResult" : "failed",
    "filepath" : "red_skp-tile5.png",
}]

# Manually verified these 6 images, all 256x256 tiles,
# consistent with a tiled version of the 640x400 green rect
# with black borders.
# Standard expectations will be set up in such a way that these images pass
# the comparison.
GREEN_TILES = [{
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 12587324416545178013,
    "comparisonResult" : "succeeded",
    "filepath" : "green_skp-tile0.png",
}, {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 7624374914829746293,
    "comparisonResult" : "succeeded",
    "filepath" : "green_skp-tile1.png",
}, {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 11866144860997809880,
    "comparisonResult" : "succeeded",
    "filepath" : "green_skp-tile2.png",
}, {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 3893392565127823822,
    "comparisonResult" : "succeeded",
    "filepath" : "green_skp-tile3.png",
}, {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 2083084978343901738,
    "comparisonResult" : "succeeded",
    "filepath" : "green_skp-tile4.png",
}, {
    "checksumAlgorithm" : "bitmap-64bitMD5",
    "checksumValue" : 89620927366502076,
    "comparisonResult" : "succeeded",
    "filepath" : "green_skp-tile5.png",
}]


def modified_dict(input_dict, modification_dict):
  """Returns a dict, with some modifications applied to it.

  Args:
    input_dict: a dictionary (which will be copied, not modified in place)
    modification_dict: a set of key/value pairs to overwrite in the dict
  """
  output_dict = input_dict.copy()
  output_dict.update(modification_dict)
  return output_dict


def modified_list_of_dicts(input_list, modification_dict):
  """Returns a list of dicts, with some modifications applied to each dict.

  Args:
    input_list: a list of dictionaries; these dicts will be copied, not
        modified in place
    modification_dict: a set of key/value pairs to overwrite in each dict
        within input_list
  """
  output_list = []
  for input_dict in input_list:
    output_dict = modified_dict(input_dict, modification_dict)
    output_list.append(output_dict)
  return output_list


class RenderPicturesTest(base_unittest.TestCase):

  def setUp(self):
    self.maxDiff = MAX_DIFF_LENGTH
    self._expectations_dir = tempfile.mkdtemp()
    self._input_skp_dir = tempfile.mkdtemp()
    # All output of render_pictures binary will go into this directory.
    self._output_dir = tempfile.mkdtemp()

  def tearDown(self):
    shutil.rmtree(self._expectations_dir)
    shutil.rmtree(self._input_skp_dir)
    shutil.rmtree(self._output_dir)

  def test_tiled_whole_image(self):
    """Run render_pictures with tiles and --writeWholeImage flag.

    TODO(epoger): This test generates undesired results! The JSON summary
    includes both whole-image and tiled-images (as it should), but only
    whole-images are written out to disk. See http://skbug.com/2463
    Once I fix that, I should add a similar test that exercises mismatchPath.

    TODO(epoger): I noticed that when this is run without --writePath being
    specified, this test writes red_skp.png and green_skp.png to the current
    directory. We should fix that... if --writePath is not specified, this
    probably shouldn't write out red_skp.png and green_skp.png at all!
    See http://skbug.com/2464
    """
    output_json_path = os.path.join(self._output_dir, 'actuals.json')
    write_path_dir = self.create_empty_dir(
        path=os.path.join(self._output_dir, 'writePath'))
    self._generate_skps()
    expectations_path = self._create_expectations()
    self._run_render_pictures([
        '-r', self._input_skp_dir,
        '--bbh', 'grid', '256', '256',
        '--mode', 'tile', '256', '256',
        '--readJsonSummaryPath', expectations_path,
        '--writeJsonSummaryPath', output_json_path,
        '--writePath', write_path_dir,
        '--writeWholeImage'])
    expected_summary_dict = {
        "header" : EXPECTED_HEADER_CONTENTS,
        "image-base-gs-url" : None,
        "descriptions" : None,
        "actual-results" : {
            "red.skp": {
                "tiled-images": RED_TILES,
                "whole-image": RED_WHOLEIMAGE,
            },
            "green.skp": {
                "tiled-images": GREEN_TILES,
                "whole-image": GREEN_WHOLEIMAGE,
            }
        }
    }
    self._assert_json_contents(output_json_path, expected_summary_dict)
    self._assert_directory_contents(
        write_path_dir, ['red_skp.png', 'green_skp.png'])

  def test_ignore_some_failures(self):
    """test_tiled_whole_image, but ignoring some failed tests."""
    output_json_path = os.path.join(self._output_dir, 'actuals.json')
    write_path_dir = self.create_empty_dir(
        path=os.path.join(self._output_dir, 'writePath'))
    self._generate_skps()
    expectations_path = self._create_expectations(ignore_some_failures=True)
    self._run_render_pictures([
        '-r', self._input_skp_dir,
        '--bbh', 'grid', '256', '256',
        '--mode', 'tile', '256', '256',
        '--readJsonSummaryPath', expectations_path,
        '--writeJsonSummaryPath', output_json_path,
        '--writePath', write_path_dir,
        '--writeWholeImage'])
    modified_red_tiles = copy.deepcopy(RED_TILES)
    modified_red_tiles[5]['comparisonResult'] = 'failure-ignored'
    expected_summary_dict = {
        "header" : EXPECTED_HEADER_CONTENTS,
        "image-base-gs-url" : None,
        "descriptions" : None,
        "actual-results" : {
            "red.skp": {
                "tiled-images": modified_red_tiles,
                "whole-image": modified_dict(
                    RED_WHOLEIMAGE, {"comparisonResult" : "failure-ignored"}),
            },
            "green.skp": {
                "tiled-images": GREEN_TILES,
                "whole-image": GREEN_WHOLEIMAGE,
            }
        }
    }
    self._assert_json_contents(output_json_path, expected_summary_dict)
    self._assert_directory_contents(
        write_path_dir, ['red_skp.png', 'green_skp.png'])

  def test_missing_tile_and_whole_image(self):
    """test_tiled_whole_image, but missing expectations for some images."""
    output_json_path = os.path.join(self._output_dir, 'actuals.json')
    write_path_dir = self.create_empty_dir(
        path=os.path.join(self._output_dir, 'writePath'))
    self._generate_skps()
    expectations_path = self._create_expectations(missing_some_images=True)
    self._run_render_pictures([
        '-r', self._input_skp_dir,
        '--bbh', 'grid', '256', '256',
        '--mode', 'tile', '256', '256',
        '--readJsonSummaryPath', expectations_path,
        '--writeJsonSummaryPath', output_json_path,
        '--writePath', write_path_dir,
        '--writeWholeImage'])
    modified_red_tiles = copy.deepcopy(RED_TILES)
    modified_red_tiles[5]['comparisonResult'] = 'no-comparison'
    expected_summary_dict = {
        "header" : EXPECTED_HEADER_CONTENTS,
        "image-base-gs-url" : None,
        "descriptions" : None,
        "actual-results" : {
            "red.skp": {
                "tiled-images": modified_red_tiles,
                "whole-image": modified_dict(
                    RED_WHOLEIMAGE, {"comparisonResult" : "no-comparison"}),
            },
            "green.skp": {
                "tiled-images": GREEN_TILES,
                "whole-image": GREEN_WHOLEIMAGE,
            }
        }
    }
    self._assert_json_contents(output_json_path, expected_summary_dict)

  def _test_untiled(self, expectations_path=None, expected_summary_dict=None,
                    additional_args=None):
    """Base for multiple tests without tiles.

    Args:
      expectations_path: path we should pass using --readJsonSummaryPath, or
          None if we should create the default expectations file
      expected_summary_dict: dict we should compare against the output actual
          results summary, or None if we should use a default comparison dict
      additional_args: array of command-line args to add when we run
          render_pictures
    """
    output_json_path = os.path.join(self._output_dir, 'actuals.json')
    write_path_dir = self.create_empty_dir(
        path=os.path.join(self._output_dir, 'writePath'))
    self._generate_skps()
    if expectations_path is None:
      expectations_path = self._create_expectations()
    args = [
        '-r', self._input_skp_dir,
        '--readJsonSummaryPath', expectations_path,
        '--writePath', write_path_dir,
        '--writeJsonSummaryPath', output_json_path,
    ]
    if additional_args:
      args.extend(additional_args)
    self._run_render_pictures(args)
    if expected_summary_dict is None:
      expected_summary_dict = {
          "header" : EXPECTED_HEADER_CONTENTS,
          "image-base-gs-url" : None,
          "descriptions" : None,
          "actual-results" : {
              "red.skp": {
                  "whole-image": RED_WHOLEIMAGE,
              },
              "green.skp": {
                  "whole-image": GREEN_WHOLEIMAGE,
              }
          }
      }
    self._assert_json_contents(output_json_path, expected_summary_dict)
    self._assert_directory_contents(
        write_path_dir, ['red_skp.png', 'green_skp.png'])

  def test_untiled(self):
    """Basic test without tiles."""
    self._test_untiled()

  def test_untiled_empty_expectations_file(self):
    """Same as test_untiled, but with an empty expectations file."""
    expectations_path = os.path.join(self._expectations_dir, 'empty')
    with open(expectations_path, 'w'):
      pass
    expected_summary_dict = {
        "header" : EXPECTED_HEADER_CONTENTS,
        "image-base-gs-url" : None,
        "descriptions" : None,
        "actual-results" : {
            "red.skp": {
                "whole-image": modified_dict(
                    RED_WHOLEIMAGE, {"comparisonResult" : "no-comparison"}),
            },
            "green.skp": {
                "whole-image": modified_dict(
                    GREEN_WHOLEIMAGE, {"comparisonResult" : "no-comparison"}),
            }
        }
    }
    self._test_untiled(expectations_path=expectations_path,
                       expected_summary_dict=expected_summary_dict)

  def test_untiled_writeChecksumBasedFilenames(self):
    """Same as test_untiled, but with --writeChecksumBasedFilenames."""
    output_json_path = os.path.join(self._output_dir, 'actuals.json')
    write_path_dir = self.create_empty_dir(
        path=os.path.join(self._output_dir, 'writePath'))
    self._generate_skps()
    self._run_render_pictures([
        '-r', self._input_skp_dir,
        '--descriptions', 'builder=builderName', 'renderMode=renderModeName',
        '--writeChecksumBasedFilenames',
        '--writePath', write_path_dir,
        '--writeJsonSummaryPath', output_json_path,
    ])
    expected_summary_dict = {
        "header" : EXPECTED_HEADER_CONTENTS,
        "image-base-gs-url" : None,
        "descriptions" : {
            "builder": "builderName",
            "renderMode": "renderModeName",
        },
        "actual-results" : {
            "red.skp": {
                # Manually verified: 640x400 red rectangle with black border
                "whole-image": {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 2853310525600416231,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "red_skp/bitmap-64bitMD5_2853310525600416231.png",
                },
            },
            "green.skp": {
                # Manually verified: 640x400 green rectangle with black border
                "whole-image": {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 11143979097452425335,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "green_skp/bitmap-64bitMD5_11143979097452425335.png",
                },
            }
        }
    }
    self._assert_json_contents(output_json_path, expected_summary_dict)
    self._assert_directory_contents(write_path_dir, ['red_skp', 'green_skp'])
    self._assert_directory_contents(
        os.path.join(write_path_dir, 'red_skp'),
        ['bitmap-64bitMD5_2853310525600416231.png'])
    self._assert_directory_contents(
        os.path.join(write_path_dir, 'green_skp'),
        ['bitmap-64bitMD5_11143979097452425335.png'])

  def test_untiled_validate(self):
    """Same as test_untiled, but with --validate."""
    self._test_untiled(additional_args=['--validate'])

  def test_untiled_without_writePath(self):
    """Same as test_untiled, but without --writePath."""
    output_json_path = os.path.join(self._output_dir, 'actuals.json')
    self._generate_skps()
    expectations_path = self._create_expectations()
    self._run_render_pictures([
        '-r', self._input_skp_dir,
        '--readJsonSummaryPath', expectations_path,
        '--writeJsonSummaryPath', output_json_path])
    expected_summary_dict = {
        "header" : EXPECTED_HEADER_CONTENTS,
        "image-base-gs-url" : None,
        "descriptions" : None,
        "actual-results" : {
            "red.skp": {
                "whole-image": RED_WHOLEIMAGE,
            },
            "green.skp": {
                "whole-image": GREEN_WHOLEIMAGE,
            }
        }
    }
    self._assert_json_contents(output_json_path, expected_summary_dict)

  def test_tiled(self):
    """Generate individual tiles."""
    output_json_path = os.path.join(self._output_dir, 'actuals.json')
    write_path_dir = self.create_empty_dir(
        path=os.path.join(self._output_dir, 'writePath'))
    self._generate_skps()
    expectations_path = self._create_expectations()
    self._run_render_pictures([
        '-r', self._input_skp_dir,
        '--bbh', 'grid', '256', '256',
        '--mode', 'tile', '256', '256',
        '--readJsonSummaryPath', expectations_path,
        '--writePath', write_path_dir,
        '--writeJsonSummaryPath', output_json_path])
    expected_summary_dict = {
        "header" : EXPECTED_HEADER_CONTENTS,
        "image-base-gs-url" : None,
        "descriptions" : None,
        "actual-results" : {
            "red.skp": {
                "tiled-images": RED_TILES,
            },
            "green.skp": {
                "tiled-images": GREEN_TILES,
            }
        }
    }
    self._assert_json_contents(output_json_path, expected_summary_dict)
    self._assert_directory_contents(
        write_path_dir,
        ['red_skp-tile0.png', 'red_skp-tile1.png', 'red_skp-tile2.png',
         'red_skp-tile3.png', 'red_skp-tile4.png', 'red_skp-tile5.png',
         'green_skp-tile0.png', 'green_skp-tile1.png', 'green_skp-tile2.png',
         'green_skp-tile3.png', 'green_skp-tile4.png', 'green_skp-tile5.png',
        ])

  def test_tiled_mismatches(self):
    """Same as test_tiled, but only write out mismatching images."""
    output_json_path = os.path.join(self._output_dir, 'actuals.json')
    mismatch_path_dir = self.create_empty_dir(
        path=os.path.join(self._output_dir, 'mismatchPath'))
    self._generate_skps()
    expectations_path = self._create_expectations()
    self._run_render_pictures([
        '-r', self._input_skp_dir,
        '--bbh', 'grid', '256', '256',
        '--mode', 'tile', '256', '256',
        '--readJsonSummaryPath', expectations_path,
        '--mismatchPath', mismatch_path_dir,
        '--writeJsonSummaryPath', output_json_path])
    expected_summary_dict = {
        "header" : EXPECTED_HEADER_CONTENTS,
        "image-base-gs-url" : None,
        "descriptions" : None,
        "actual-results" : {
            "red.skp": {
                "tiled-images": RED_TILES,
            },
            "green.skp": {
                "tiled-images": GREEN_TILES,
            }
        }
    }
    self._assert_json_contents(output_json_path, expected_summary_dict)
    self._assert_directory_contents(
        mismatch_path_dir,
        ['red_skp-tile0.png', 'red_skp-tile1.png', 'red_skp-tile2.png',
         'red_skp-tile3.png', 'red_skp-tile4.png', 'red_skp-tile5.png',
        ])

  def test_tiled_writeChecksumBasedFilenames(self):
    """Same as test_tiled, but with --writeChecksumBasedFilenames."""
    output_json_path = os.path.join(self._output_dir, 'actuals.json')
    write_path_dir = self.create_empty_dir(
        path=os.path.join(self._output_dir, 'writePath'))
    self._generate_skps()
    self._run_render_pictures(['-r', self._input_skp_dir,
                               '--bbh', 'grid', '256', '256',
                               '--mode', 'tile', '256', '256',
                               '--writeChecksumBasedFilenames',
                               '--writePath', write_path_dir,
                               '--writeJsonSummaryPath', output_json_path])
    expected_summary_dict = {
        "header" : EXPECTED_HEADER_CONTENTS,
        "image-base-gs-url" : None,
        "descriptions" : None,
        "actual-results" : {
            "red.skp": {
                # Manually verified these 6 images, all 256x256 tiles,
                # consistent with a tiled version of the 640x400 red rect
                # with black borders.
                "tiled-images": [{
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 5815827069051002745,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "red_skp/bitmap-64bitMD5_5815827069051002745.png",
                }, {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 9323613075234140270,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "red_skp/bitmap-64bitMD5_9323613075234140270.png",
                }, {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 15939355025996362179,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "red_skp/bitmap-64bitMD5_15939355025996362179.png",
                }, {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 649771916797529222,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "red_skp/bitmap-64bitMD5_649771916797529222.png",
                }, {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 8132820002266077288,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "red_skp/bitmap-64bitMD5_8132820002266077288.png",
                }, {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 2406160701181324581,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "red_skp/bitmap-64bitMD5_2406160701181324581.png",
                }],
            },
            "green.skp": {
                # Manually verified these 6 images, all 256x256 tiles,
                # consistent with a tiled version of the 640x400 green rect
                # with black borders.
                "tiled-images": [{
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 12587324416545178013,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "green_skp/bitmap-64bitMD5_12587324416545178013.png",
                }, {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 7624374914829746293,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "green_skp/bitmap-64bitMD5_7624374914829746293.png",
                }, {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 11866144860997809880,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "green_skp/bitmap-64bitMD5_11866144860997809880.png",
                }, {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 3893392565127823822,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "green_skp/bitmap-64bitMD5_3893392565127823822.png",
                }, {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 2083084978343901738,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "green_skp/bitmap-64bitMD5_2083084978343901738.png",
                }, {
                    "checksumAlgorithm" : "bitmap-64bitMD5",
                    "checksumValue" : 89620927366502076,
                    "comparisonResult" : "no-comparison",
                    "filepath" :
                        "green_skp/bitmap-64bitMD5_89620927366502076.png",
                }],
            }
        }
    }
    self._assert_json_contents(output_json_path, expected_summary_dict)
    self._assert_directory_contents(write_path_dir, ['red_skp', 'green_skp'])
    self._assert_directory_contents(
        os.path.join(write_path_dir, 'red_skp'),
        ['bitmap-64bitMD5_5815827069051002745.png',
         'bitmap-64bitMD5_9323613075234140270.png',
         'bitmap-64bitMD5_15939355025996362179.png',
         'bitmap-64bitMD5_649771916797529222.png',
         'bitmap-64bitMD5_8132820002266077288.png',
         'bitmap-64bitMD5_2406160701181324581.png'])
    self._assert_directory_contents(
        os.path.join(write_path_dir, 'green_skp'),
        ['bitmap-64bitMD5_12587324416545178013.png',
         'bitmap-64bitMD5_7624374914829746293.png',
         'bitmap-64bitMD5_11866144860997809880.png',
         'bitmap-64bitMD5_3893392565127823822.png',
         'bitmap-64bitMD5_2083084978343901738.png',
         'bitmap-64bitMD5_89620927366502076.png'])

  def _run_render_pictures(self, args):
    binary = find_run_binary.find_path_to_program('render_pictures')
    return find_run_binary.run_command(
        [binary, '--config', '8888'] + args)

  def _create_expectations(self, missing_some_images=False,
                           ignore_some_failures=False,
                           rel_path='expectations.json'):
    """Creates expectations JSON file within self._expectations_dir.

    Args:
      missing_some_images: (bool) whether to remove expectations for a subset
          of the images
      ignore_some_failures: (bool) whether to ignore some failing tests
      rel_path: (string) relative path within self._expectations_dir to write
          the expectations into

    Returns: full path to the expectations file created.
    """
    expectations_dict = {
        "header" : EXPECTED_HEADER_CONTENTS,
        "descriptions" : None,
        "expected-results" : {
            # red.skp: these should fail the comparison
            "red.skp": {
                "tiled-images": modified_list_of_dicts(
                    RED_TILES, {'checksumValue': 11111}),
                "whole-image": modified_dict(
                    RED_WHOLEIMAGE, {'checksumValue': 22222}),
            },
            # green.skp: these should pass the comparison
            "green.skp": {
                "tiled-images": GREEN_TILES,
                "whole-image": GREEN_WHOLEIMAGE,
            }
        }
    }
    if missing_some_images:
      red_subdict = expectations_dict['expected-results']['red.skp']
      del red_subdict['whole-image']
      del red_subdict['tiled-images'][-1]
    elif ignore_some_failures:
      red_subdict = expectations_dict['expected-results']['red.skp']
      red_subdict['whole-image']['ignoreFailure'] = True
      red_subdict['tiled-images'][-1]['ignoreFailure'] = True
    path = os.path.join(self._expectations_dir, rel_path)
    with open(path, 'w') as fh:
      json.dump(expectations_dict, fh)
    return path

  def _generate_skps(self):
    """Runs the skpmaker binary to generate files in self._input_skp_dir."""
    self._run_skpmaker(
        output_path=os.path.join(self._input_skp_dir, 'red.skp'), red=255)
    self._run_skpmaker(
        output_path=os.path.join(self._input_skp_dir, 'green.skp'), green=255)

  def _run_skpmaker(self, output_path, red=0, green=0, blue=0,
                    width=640, height=400):
    """Runs the skpmaker binary to generate SKP with known characteristics.

    Args:
      output_path: Filepath to write the SKP into.
      red: Value of red color channel in image, 0-255.
      green: Value of green color channel in image, 0-255.
      blue: Value of blue color channel in image, 0-255.
      width: Width of canvas to create.
      height: Height of canvas to create.
    """
    binary = find_run_binary.find_path_to_program('skpmaker')
    return find_run_binary.run_command([
        binary,
        '--red', str(red),
        '--green', str(green),
        '--blue', str(blue),
        '--width', str(width),
        '--height', str(height),
        '--writePath', str(output_path),
    ])

  def _assert_directory_contents(self, dir_path, expected_filenames):
    """Asserts that files found in a dir are identical to expected_filenames.

    Args:
      dir_path: Path to a directory on local disk.
      expected_filenames: Set containing the expected filenames within the dir.

    Raises:
      AssertionError: contents of the directory are not identical to
          expected_filenames.
    """
    self.assertEqual(set(os.listdir(dir_path)), set(expected_filenames))

  def _assert_json_contents(self, json_path, expected_dict):
    """Asserts that contents of a JSON file are identical to expected_dict.

    Args:
      json_path: Path to a JSON file.
      expected_dict: Dictionary indicating the expected contents of the JSON
          file.

    Raises:
      AssertionError: contents of the JSON file are not identical to
          expected_dict.
    """
    prettyprinted_expected_dict = json.dumps(expected_dict, sort_keys=True,
                                             indent=2)
    with open(json_path, 'r') as fh:
      prettyprinted_json_dict = json.dumps(json.load(fh), sort_keys=True,
                                           indent=2)
    self.assertMultiLineEqual(prettyprinted_expected_dict,
                              prettyprinted_json_dict)


def main():
  base_unittest.main(RenderPicturesTest)


if __name__ == '__main__':
  main()
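As a quick illustration of the two module-level helpers near the top of this file (their behavior is just dict.copy() plus update(), so the inputs are never mutated; key order in the printed repr may vary):

    base = {'comparisonResult': 'failed', 'filepath': 'red_skp.png'}
    print(modified_dict(base, {'comparisonResult': 'failure-ignored'}))
    # -> {'comparisonResult': 'failure-ignored', 'filepath': 'red_skp.png'}
    print(base)
    # -> unchanged: {'comparisonResult': 'failed', 'filepath': 'red_skp.png'}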
@@ -1,214 +0,0 @@
#!/bin/bash

# Tests for our tools.
#
# TODO: currently, this only passes on Linux (which is the platform that
# the housekeeper bot runs on, e.g.
# http://70.32.156.51:10117/builders/Skia_PerCommit_House_Keeping/builds/1415/steps/RunToolSelfTests/logs/stdio )
# See https://code.google.com/p/skia/issues/detail?id=677
# ('make tools/tests/run.sh work cross-platform')
# Ideally, these tests should pass on all development platforms...
# otherwise, how can developers be expected to test them before committing a
# change?

# cd into .../trunk so all the paths will work
cd $(dirname $0)/../..

# TODO: make it look in Release and/or Debug
SKDIFF_BINARY=out/Debug/skdiff

# Suffixes of the raw bench data files we want to process.
BENCHDATA_FILE_SUFFIXES_YES_INDIVIDUAL_TILES=\
"data_skp_scale_1.3061_config_8888_mode_tile_256_256_timeIndividualTiles_bbh_rtree "\
"data_skp_scale_1.3061_config_8888_mode_tile_256_256_timeIndividualTiles"
BENCHDATA_FILE_SUFFIXES_NO_INDIVIDUAL_TILES=\
"data_skp_multi_4_scale_1.3061_config_8888_mode_tile_256_256 "\
"data_skp_scale_1.3061_config_8888_mode_record"

# Compare contents of all files within directories $1 and $2,
# EXCEPT for any dotfiles.
# If there are any differences, a description is written to stdout and
# we exit with a nonzero return value.
# Otherwise, we write nothing to stdout and return.
function compare_directories {
  if [ $# != 2 ]; then
    echo "compare_directories requires exactly 2 parameters, got $#"
    exit 1
  fi
  diff --recursive --exclude=.* $1 $2
  if [ $? != 0 ]; then
    echo "failed in: compare_directories $1 $2"
    exit 1
  fi
}

# Run skdiff with arguments in $1 (plus implicit final argument causing skdiff
# to write its output, if any, to directory $2/output-actual).
# Then compare its results against those in $2/output-expected.
function skdiff_test {
  if [ $# != 2 ]; then
    echo "skdiff_test requires exactly 2 parameters, got $#"
    exit 1
  fi
  SKDIFF_ARGS="$1"
  ACTUAL_OUTPUT_DIR="$2/output-actual"
  EXPECTED_OUTPUT_DIR="$2/output-expected"

  rm -rf $ACTUAL_OUTPUT_DIR
  mkdir -p $ACTUAL_OUTPUT_DIR
  COMMAND="$SKDIFF_BINARY $SKDIFF_ARGS $ACTUAL_OUTPUT_DIR"
  echo "$COMMAND" >$ACTUAL_OUTPUT_DIR/command_line
  $COMMAND &>$ACTUAL_OUTPUT_DIR/stdout
  echo $? >$ACTUAL_OUTPUT_DIR/return_value

  compare_directories $EXPECTED_OUTPUT_DIR $ACTUAL_OUTPUT_DIR
}

# Download a subset of the raw bench data for platform $1 at revision $2.
# (For the subset, download all files matching any of the suffixes in
# whitespace-separated list $3.)
# If any of those files already exist locally, we assume that they are
# correct and up to date, and we don't download them again.
function download_bench_rawdata {
  if [ $# != 3 ]; then
    echo "download_bench_rawdata requires exactly 3 parameters, got $#"
    exit 1
  fi
  PLATFORM="$1"
  REV="$2"
  FILE_SUFFIXES="$3"

  PLATFORM_DIR="tools/tests/benchalerts/$PLATFORM"
  RAW_BENCH_DATA_DIR="$PLATFORM_DIR/raw-bench-data"
  mkdir -p $RAW_BENCH_DATA_DIR

  for FILE_SUFFIX in $FILE_SUFFIXES; do
    FILE=bench_${REV}_${FILE_SUFFIX}
    DESTFILE=$RAW_BENCH_DATA_DIR/$FILE
    if [ ! -f $DESTFILE ];
    then
      URL=http://chromium-skia-gm.commondatastorage.googleapis.com/perfdata/${PLATFORM}/${FILE}
      echo Downloading $URL ...
      curl $URL --output $DESTFILE
    fi
  done
}

# Run check_bench_regressions.py across the data from platform $1,
# writing its output to output-actual and comparing those results against
# output-expected.
function benchalert_test {
  if [ $# != 2 ]; then
    echo "benchalert_test requires exactly 2 parameters, got $#"
    exit 1
  fi
  PLATFORM="$1"
  REVISION="$2"

  PLATFORM_DIR="tools/tests/benchalerts/$PLATFORM"
  RAW_BENCH_DATA_DIR="$PLATFORM_DIR/raw-bench-data"
  ACTUAL_OUTPUT_DIR="$PLATFORM_DIR/output-actual"
  EXPECTED_OUTPUT_DIR="$PLATFORM_DIR/output-expected"

  # Run check_bench_regressions.py .
  rm -rf $ACTUAL_OUTPUT_DIR
  mkdir -p $ACTUAL_OUTPUT_DIR
  COMMAND="python bench/check_bench_regressions.py -a 25th -b $PLATFORM -d $RAW_BENCH_DATA_DIR -e $PLATFORM_DIR/expectations.txt -r $REVISION"
  echo "$COMMAND" >$ACTUAL_OUTPUT_DIR/command_line
  START_TIMESTAMP=$(date +%s)
  $COMMAND 2>$ACTUAL_OUTPUT_DIR/stderr
  echo $? >$ACTUAL_OUTPUT_DIR/return_value
  END_TIMESTAMP=$(date +%s)

  SECONDS_RUN=$(expr $END_TIMESTAMP - $START_TIMESTAMP)
  echo "check_bench_regressions.py took $SECONDS_RUN seconds to complete"

  compare_directories $EXPECTED_OUTPUT_DIR $ACTUAL_OUTPUT_DIR
}

# Run jsondiff.py with arguments in $1, recording its output.
# Then compare that output to the content of $2/output-expected.
function jsondiff_test {
  if [ $# != 2 ]; then
    echo "jsondiff_test requires exactly 2 parameters, got $#"
    exit 1
  fi
  ARGS="$1"
  ACTUAL_OUTPUT_DIR="$2/output-actual"
  EXPECTED_OUTPUT_DIR="$2/output-expected"

  rm -rf $ACTUAL_OUTPUT_DIR
  mkdir -p $ACTUAL_OUTPUT_DIR
  COMMAND="python tools/jsondiff.py $ARGS"
  echo "$COMMAND" >$ACTUAL_OUTPUT_DIR/command_line
  $COMMAND &>$ACTUAL_OUTPUT_DIR/stdout
  echo $? >$ACTUAL_OUTPUT_DIR/return_value

  compare_directories $EXPECTED_OUTPUT_DIR $ACTUAL_OUTPUT_DIR
}


#
# Run skdiff tests...
#

SKDIFF_TESTDIR=tools/tests/skdiff

# Run skdiff over a variety of file pair types: identical bits, identical
# pixels, missing from baseDir, etc.
skdiff_test "$SKDIFF_TESTDIR/baseDir $SKDIFF_TESTDIR/comparisonDir" "$SKDIFF_TESTDIR/test1"

# Run skdiff over the same set of files, but with arguments as used by our
# buildbots:
# - return the number of mismatching file pairs (but ignore any files missing
#   from either baseDir or comparisonDir)
# - list filenames with each result type to stdout
# - don't generate HTML output files
skdiff_test "--failonresult DifferentPixels --failonresult DifferentSizes --failonresult Unknown --failonstatus CouldNotDecode,CouldNotRead any --failonstatus any CouldNotDecode,CouldNotRead --listfilenames --nodiffs $SKDIFF_TESTDIR/baseDir $SKDIFF_TESTDIR/comparisonDir" "$SKDIFF_TESTDIR/test2"

# Run skdiff over just the files that have identical bits.
skdiff_test "--nodiffs --match identical-bits $SKDIFF_TESTDIR/baseDir $SKDIFF_TESTDIR/comparisonDir" "$SKDIFF_TESTDIR/identical-bits"

# Run skdiff over just the files that have identical bits or identical pixels.
skdiff_test "--nodiffs --match identical-bits --match identical-pixels $SKDIFF_TESTDIR/baseDir $SKDIFF_TESTDIR/comparisonDir" "$SKDIFF_TESTDIR/identical-bits-or-pixels"

#
# Run bench alerts tests...
#

# Parse a collection of bench data
PLATFORM=Perf-Android-Nexus7-Tegra3-Arm7-Release
REVISION=69c9e1a7261a3c8361e2b2c109d6340862149e34
download_bench_rawdata $PLATFORM $REVISION "$BENCHDATA_FILE_SUFFIXES_NO_INDIVIDUAL_TILES"
download_bench_rawdata $PLATFORM $REVISION "$BENCHDATA_FILE_SUFFIXES_YES_INDIVIDUAL_TILES"
benchalert_test $PLATFORM $REVISION

#
# Test jsondiff.py ...
#

JSONDIFF_INPUT=tools/tests/jsondiff/input
JSONDIFF_OUTPUT=tools/tests/jsondiff/output
jsondiff_test "$JSONDIFF_INPUT/old.json $JSONDIFF_INPUT/new.json" "$JSONDIFF_OUTPUT/old-vs-new"


#
# Launch all the self-tests which have been written in Python.
#
# TODO: Over time, we should move all of our tests into Python, and delete
# the bash tests above.
# See https://code.google.com/p/skia/issues/detail?id=677
# ('make tools/tests/run.sh work cross-platform')
#

COMMAND="python tools/test_all.py"
echo "$COMMAND"
$COMMAND
ret=$?
if [ $ret -ne 0 ]; then
  echo "failure in Python self-tests; see stack trace above"
  exit 1
fi


echo "All tests passed."
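For completeness, the expected invocation (the build step itself is outside this script, so it is only hedged here): build skdiff into out/Debug first, then run the harness from anywhere in the checkout, since it cd's to trunk on its own:

    bash tools/tests/run.sh   # requires out/Debug/skdiff; Linux only, per the TODO above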
tools/tests/skdiff/.gitignore
@@ -1 +0,0 @@
*/output-actual/
[deleted binary image: 63 KiB]
@@ -1 +0,0 @@
hhqwekjlkji
[deleted binary image: 63 KiB]
[deleted binary image: 63 KiB]
[deleted binary image: 63 KiB]
[deleted binary image: 63 KiB]
@@ -1 +0,0 @@
ioiojgwwerrgkjoiiuhunkbmujois
[deleted binary image: 63 KiB]
[deleted binary image: 63 KiB]
@@ -1 +0,0 @@
ppouow
[deleted binary image: 61 KiB]
@@ -1 +0,0 @@
awiowejroiwjeoijowimc
[deleted binary image: 67 KiB]
[deleted binary image: 46 KiB]
[deleted binary image: 101 KiB]
[deleted binary image: 803 B]
@@ -1 +0,0 @@
ioiojgwwerrgkjoiiuhunkbmujois
[deleted binary image: 63 KiB]
[deleted binary image: 63 KiB]
@@ -1 +0,0 @@
bblksdffff
@@ -1 +0,0 @@
out/Debug/skdiff --nodiffs --match identical-bits --match identical-pixels tools/tests/skdiff/baseDir tools/tests/skdiff/comparisonDir tools/tests/skdiff/identical-bits-or-pixels/output-actual
@@ -1,14 +0,0 @@
baseDir is [tools/tests/skdiff/baseDir/]
comparisonDir is [tools/tests/skdiff/comparisonDir/]
not writing any diffs to outputDir [tools/tests/skdiff/identical-bits-or-pixels/output-actual/]

compared 3 file pairs:
[_] 2 file pairs contain exactly the same bits
[_] 1 file pairs contain the same pixel values, but not the same bits
[_] 0 file pairs have identical dimensions but some differing pixels
[_] 0 file pairs have differing dimensions
[_] 0 file pairs could not be compared
[_] 0 file pairs not compared yet
(results marked with [*] will cause nonzero return value)

number of mismatching file pairs: 0
@@ -1 +0,0 @@
out/Debug/skdiff --nodiffs --match identical-bits tools/tests/skdiff/baseDir tools/tests/skdiff/comparisonDir tools/tests/skdiff/identical-bits/output-actual
@@ -1 +0,0 @@
0
@@ -1,14 +0,0 @@
baseDir is [tools/tests/skdiff/baseDir/]
comparisonDir is [tools/tests/skdiff/comparisonDir/]
not writing any diffs to outputDir [tools/tests/skdiff/identical-bits/output-actual/]

compared 2 file pairs:
[_] 2 file pairs contain exactly the same bits
[_] 0 file pairs contain the same pixel values, but not the same bits
[_] 0 file pairs have identical dimensions but some differing pixels
[_] 0 file pairs have differing dimensions
[_] 0 file pairs could not be compared
[_] 0 file pairs not compared yet
(results marked with [*] will cause nonzero return value)

number of mismatching file pairs: 0
@@ -1 +0,0 @@
out/Debug/skdiff tools/tests/skdiff/baseDir tools/tests/skdiff/comparisonDir tools/tests/skdiff/test1/output-actual
[deleted binary image: 8.7 KiB]
[deleted binary image: 2.6 KiB]
[deleted binary image: 116 KiB]
[deleted binary image: 4.3 KiB]
@ -1,50 +0,0 @@
<html>
<head>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js"></script>
<script type="text/javascript">
function generateCheckedList() {
var boxes = $(":checkbox:checked");
var fileCmdLineString = '';
var fileMultiLineString = '';
for (var i = 0; i < boxes.length; i++) {
fileMultiLineString += boxes[i].name + '<br>';
fileCmdLineString += boxes[i].name + ' ';
}
$("#checkedList").html(fileCmdLineString + '<br><br>' + fileMultiLineString);
}
</script>
</head>
<body>
<table>
<tr><th>select image</th>
<th>3 of 12 diffs matched exactly.<br></th>
<th>every different pixel shown in white</th>
<th>color difference at each pixel</th>
<th>baseDir: tools/tests/skdiff/baseDir/</th>
<th>comparisonDir: tools/tests/skdiff/comparisonDir/</th>
</tr>
<tr>
<td><input type="checkbox" name="different-bits/different-bits-unknown-format.xyz" checked="yes"></td><td><b>different-bits/different-bits-unknown-format.xyz</b><br>Could not compare.<br>base: could not be decoded<br>comparison: could not be decoded</td><td>N/A</td><td>N/A</td><td><a href="../../../../../tools/tests/skdiff/baseDir/different-bits/different-bits-unknown-format.xyz">N/A</a></td><td><a href="../../../../../tools/tests/skdiff/comparisonDir/different-bits/different-bits-unknown-format.xyz">N/A</a></td></tr>
<tr>
<td><input type="checkbox" name="missing-files/missing-from-baseDir.png" checked="yes"></td><td><b>missing-files/missing-from-baseDir.png</b><br>Could not compare.<br>base: not found<br>comparison: decoded</td><td>N/A</td><td>N/A</td><td>N/A</td><td><a href="../../../../../tools/tests/skdiff/comparisonDir/missing-files/missing-from-baseDir.png"><img src="../../../../../tools/tests/skdiff/comparisonDir/missing-files/missing-from-baseDir.png" height="240px"></a></td></tr>
<tr>
<td><input type="checkbox" name="missing-files/missing-from-baseDir.xyz" checked="yes"></td><td><b>missing-files/missing-from-baseDir.xyz</b><br>Could not compare.<br>base: not found<br>comparison: could not be decoded</td><td>N/A</td><td>N/A</td><td>N/A</td><td><a href="../../../../../tools/tests/skdiff/comparisonDir/missing-files/missing-from-baseDir.xyz">N/A</a></td></tr>
<tr>
<td><input type="checkbox" name="missing-files/missing-from-comparisonDir.png" checked="yes"></td><td><b>missing-files/missing-from-comparisonDir.png</b><br>Could not compare.<br>base: decoded<br>comparison: not found</td><td>N/A</td><td>N/A</td><td><a href="../../../../../tools/tests/skdiff/baseDir/missing-files/missing-from-comparisonDir.png"><img src="../../../../../tools/tests/skdiff/baseDir/missing-files/missing-from-comparisonDir.png" height="240px"></a></td><td>N/A</td></tr>
<tr>
<td><input type="checkbox" name="missing-files/missing-from-comparisonDir.xyz" checked="yes"></td><td><b>missing-files/missing-from-comparisonDir.xyz</b><br>Could not compare.<br>base: could not be decoded<br>comparison: not found</td><td>N/A</td><td>N/A</td><td><a href="../../../../../tools/tests/skdiff/baseDir/missing-files/missing-from-comparisonDir.xyz">N/A</a></td><td>N/A</td></tr>
<tr>
<td><input type="checkbox" name="different-bits/slightly-different-sizes.png" checked="yes"></td><td><b>different-bits/slightly-different-sizes.png</b><br>Image sizes differ</td><td>N/A</td><td>N/A</td><td><a href="../../../../../tools/tests/skdiff/baseDir/different-bits/slightly-different-sizes.png"><img src="../../../../../tools/tests/skdiff/baseDir/different-bits/slightly-different-sizes.png" height="240px"></a></td><td><a href="../../../../../tools/tests/skdiff/comparisonDir/different-bits/slightly-different-sizes.png"><img src="../../../../../tools/tests/skdiff/comparisonDir/different-bits/slightly-different-sizes.png" height="240px"></a></td></tr>
<tr>
<td><input type="checkbox" name="different-bits/very-different-sizes.png" checked="yes"></td><td><b>different-bits/very-different-sizes.png</b><br>Image sizes differ</td><td>N/A</td><td>N/A</td><td><a href="../../../../../tools/tests/skdiff/baseDir/different-bits/very-different-sizes.png"><img src="../../../../../tools/tests/skdiff/baseDir/different-bits/very-different-sizes.png" height="240px"></a></td><td><a href="../../../../../tools/tests/skdiff/comparisonDir/different-bits/very-different-sizes.png"><img src="../../../../../tools/tests/skdiff/comparisonDir/different-bits/very-different-sizes.png" height="128px"></a></td></tr>
<tr>
<td><input type="checkbox" name="different-bits/very-different-pixels-same-size.png" checked="yes"></td><td><b>different-bits/very-different-pixels-same-size.png</b><br>97.9926% of pixels differ
(42.8911% weighted)<br><br>Max alpha channel mismatch 0<br>Total alpha channel mismatch 0<br><br>Average color mismatch 89<br>Max color mismatch 239</td><td><a href="different-bits_very-different-pixels-same-size-white.png"><img src="different-bits_very-different-pixels-same-size-white.png" height="240px"></a></td><td><a href="different-bits_very-different-pixels-same-size-diff.png"><img src="different-bits_very-different-pixels-same-size-diff.png" height="240px"></a></td><td><a href="../../../../../tools/tests/skdiff/baseDir/different-bits/very-different-pixels-same-size.png"><img src="../../../../../tools/tests/skdiff/baseDir/different-bits/very-different-pixels-same-size.png" height="240px"></a></td><td><a href="../../../../../tools/tests/skdiff/comparisonDir/different-bits/very-different-pixels-same-size.png"><img src="../../../../../tools/tests/skdiff/comparisonDir/different-bits/very-different-pixels-same-size.png" height="240px"></a></td></tr>
<tr>
<td><input type="checkbox" name="different-bits/slightly-different-pixels-same-size.png" checked="yes"></td><td><b>different-bits/slightly-different-pixels-same-size.png</b><br>0.6630% of pixels differ
(0.1904% weighted)<br>(2164 pixels)<br><br>Max alpha channel mismatch 0<br>Total alpha channel mismatch 0<br><br>Average color mismatch 0<br>Max color mismatch 213</td><td><a href="different-bits_slightly-different-pixels-same-size-white.png"><img src="different-bits_slightly-different-pixels-same-size-white.png" height="240px"></a></td><td><a href="different-bits_slightly-different-pixels-same-size-diff.png"><img src="different-bits_slightly-different-pixels-same-size-diff.png" height="240px"></a></td><td><a href="../../../../../tools/tests/skdiff/baseDir/different-bits/slightly-different-pixels-same-size.png"><img src="../../../../../tools/tests/skdiff/baseDir/different-bits/slightly-different-pixels-same-size.png" height="240px"></a></td><td><a href="../../../../../tools/tests/skdiff/comparisonDir/different-bits/slightly-different-pixels-same-size.png"><img src="../../../../../tools/tests/skdiff/comparisonDir/different-bits/slightly-different-pixels-same-size.png" height="240px"></a></td></tr>
</table>
<input type="button" onclick="generateCheckedList()" value="Create Rebaseline List">
<div id="checkedList"></div>
</body>
</html>
@ -1 +0,0 @@
0
@ -1,27 +0,0 @@
ERROR: no codec found for <tools/tests/skdiff/baseDir/different-bits/different-bits-unknown-format.xyz>
ERROR: no codec found for <tools/tests/skdiff/comparisonDir/different-bits/different-bits-unknown-format.xyz>
ERROR: no codec found for <tools/tests/skdiff/baseDir/identical-bits/identical-bits-unknown-format.xyz>
ERROR: no codec found for <tools/tests/skdiff/comparisonDir/identical-bits/identical-bits-unknown-format.xyz>
ERROR: no codec found for <tools/tests/skdiff/comparisonDir/missing-files/missing-from-baseDir.xyz>
ERROR: no codec found for <tools/tests/skdiff/baseDir/missing-files/missing-from-comparisonDir.xyz>
baseDir is [tools/tests/skdiff/baseDir/]
comparisonDir is [tools/tests/skdiff/comparisonDir/]
writing diffs to outputDir is [tools/tests/skdiff/test1/output-actual/]

compared 12 file pairs:
[_] 2 file pairs contain exactly the same bits
[_] 1 file pairs contain the same pixel values, but not the same bits
[_] 2 file pairs have identical dimensions but some differing pixels
[_] 2 file pairs have differing dimensions
[_] 5 file pairs could not be compared
[_] 1 file pairs decoded in baseDir and not found in comparisonDir
[_] 1 file pairs could not be decoded in baseDir and could not be decoded in comparisonDir
[_] 1 file pairs could not be decoded in baseDir and not found in comparisonDir
[_] 1 file pairs not found in baseDir and decoded in comparisonDir
[_] 1 file pairs not found in baseDir and could not be decoded in comparisonDir
[_] 0 file pairs not compared yet
(results marked with [*] will cause nonzero return value)

number of mismatching file pairs: 9
Maximum pixel intensity mismatch 239
Largest area mismatch was 97.99% of pixels
@ -1 +0,0 @@
out/Debug/skdiff --failonresult DifferentPixels --failonresult DifferentSizes --failonresult Unknown --failonstatus CouldNotDecode,CouldNotRead any --failonstatus any CouldNotDecode,CouldNotRead --listfilenames --nodiffs tools/tests/skdiff/baseDir tools/tests/skdiff/comparisonDir tools/tests/skdiff/test2/output-actual
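A note on the invocation above, inferred from the expected outputs recorded below rather than from any skdiff documentation: --nodiffs suppresses writing diff images ("not writing any diffs to outputDir"), --listfilenames makes each result category list its matching filenames, and the --failonresult/--failonstatus flags choose which categories are flagged with [*] and therefore contribute to the nonzero return value (5 for these inputs, per the next file). A minimal Python sketch of replaying the recorded command, assuming a Debug build of skdiff and the now-deleted test inputs are present at these paths:

    import subprocess

    # The exact command recorded in the expectation file above, in argv
    # form. Each --failonresult takes one result name; each --failonstatus
    # apparently takes one status filter each for baseDir and comparisonDir.
    cmd = ['out/Debug/skdiff',
           '--failonresult', 'DifferentPixels',
           '--failonresult', 'DifferentSizes',
           '--failonresult', 'Unknown',
           '--failonstatus', 'CouldNotDecode,CouldNotRead', 'any',
           '--failonstatus', 'any', 'CouldNotDecode,CouldNotRead',
           '--listfilenames', '--nodiffs',
           'tools/tests/skdiff/baseDir',
           'tools/tests/skdiff/comparisonDir',
           'tools/tests/skdiff/test2/output-actual']
    returncode = subprocess.call(cmd)  # expected to be 5 for these inputs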
@ -1 +0,0 @@
5
@ -1,21 +0,0 @@
ERROR: no codec found for <tools/tests/skdiff/baseDir/different-bits/different-bits-unknown-format.xyz>
ERROR: no codec found for <tools/tests/skdiff/comparisonDir/different-bits/different-bits-unknown-format.xyz>
baseDir is [tools/tests/skdiff/baseDir/]
comparisonDir is [tools/tests/skdiff/comparisonDir/]
not writing any diffs to outputDir [tools/tests/skdiff/test2/output-actual/]

compared 12 file pairs:
[_] 2 file pairs contain exactly the same bits: identical-bits/identical-bits-unknown-format.xyz identical-bits/identical-bits.png
[_] 1 file pairs contain the same pixel values, but not the same bits: different-bits/different-bits-identical-pixels.png
[*] 2 file pairs have identical dimensions but some differing pixels: different-bits/slightly-different-pixels-same-size.png different-bits/very-different-pixels-same-size.png
[*] 2 file pairs have differing dimensions: different-bits/slightly-different-sizes.png different-bits/very-different-sizes.png
[_] 5 file pairs could not be compared: different-bits/different-bits-unknown-format.xyz missing-files/missing-from-baseDir.png missing-files/missing-from-baseDir.xyz missing-files/missing-from-comparisonDir.png missing-files/missing-from-comparisonDir.xyz
[*] 1 file pairs could not be decoded in baseDir and could not be decoded in comparisonDir: different-bits/different-bits-unknown-format.xyz
[_] 2 file pairs found in baseDir and not found in comparisonDir: missing-files/missing-from-comparisonDir.png missing-files/missing-from-comparisonDir.xyz
[_] 2 file pairs not found in baseDir and found in comparisonDir: missing-files/missing-from-baseDir.png missing-files/missing-from-baseDir.xyz
[*] 0 file pairs not compared yet:
(results marked with [*] will cause nonzero return value)

number of mismatching file pairs: 9
Maximum pixel intensity mismatch 239
Largest area mismatch was 97.99% of pixels
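Each deleted test directory above pairs three expectation files: the command line that was run, its return value, and its stdout. A hypothetical sketch of how such expectations could be replayed; the file-path parameters and the merging of stderr into stdout are assumptions made for illustration, not the repo's actual harness:

    import subprocess

    def check_expectations(command_line_path, return_value_path, stdout_path):
        """Re-run a recorded skdiff command and compare against expectations.

        Hypothetical helper: the three paths point at files shaped like the
        deleted output-expected files above (a one-line command, a one-line
        return value, and the full expected stdout).
        """
        with open(command_line_path) as f:
            command = f.read().split()
        with open(return_value_path) as f:
            expected_returncode = int(f.read())
        # Read the expected stdout as raw bytes so the comparison below works
        # the same whether this runs under Python 2 or 3.
        with open(stdout_path, 'rb') as f:
            expected_stdout = f.read()

        # The recorded stdout includes the ERROR lines, so stderr is merged
        # into stdout here; an assumption about how the output was captured.
        proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        actual_stdout = proc.communicate()[0]

        assert proc.returncode == expected_returncode, (
            'expected return code %d, got %d' % (expected_returncode,
                                                 proc.returncode))
        assert actual_stdout == expected_stdout, 'stdout does not match'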