Reland "[infra] Remove old python scripts and urllib2 references"

This is a reland of commit 167e608bb3

Original change's description:
> [infra] Remove old python scripts and urllib2 references
>
> I was searching for urllib2 while resolving issues with
> https://skia-review.googlesource.com/c/skia/+/538636
> when I found several old, apparently unused scripts.
>
> Rather than fix them, let's get rid of them. If they
> are still in use, the conversion to urllib.request is
> pretty easy.
>
> Change-Id: I27d419601e81c93a3d53e280188a379dfab927c4
> Reviewed-on: https://skia-review.googlesource.com/c/skia/+/538936
> Auto-Submit: Kevin Lubick <kjlubick@google.com>
> Commit-Queue: Kevin Lubick <kjlubick@google.com>
> Commit-Queue: Ravi Mistry <rmistry@google.com>
> Reviewed-by: Ravi Mistry <rmistry@google.com>
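
For reference, the urllib2-to-urllib.request conversion mentioned above is
small. A minimal sketch of the version-guarded import that the surviving
fetch scripts in this change use (assuming the caller only needs urlopen):

  import sys

  if sys.version_info[0] < 3:
    from urllib2 import urlopen          # Python 2
  else:
    from urllib.request import urlopen   # Python 3

  # After this, urlopen(url).read() works identically on either version.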

Change-Id: I656b45cbcbde61c45d6d9daa0d6b97324d738631
Reviewed-on: https://skia-review.googlesource.com/c/skia/+/541077
Commit-Queue: Kevin Lubick <kjlubick@google.com>
Commit-Queue: Ravi Mistry <rmistry@google.com>
Reviewed-by: Ravi Mistry <rmistry@google.com>
Auto-Submit: Kevin Lubick <kjlubick@google.com>
Kevin Lubick 2022-05-16 15:28:41 -04:00 committed by SkCQ
parent 40bd208dcc
commit add9e135dd
9 changed files with 9 additions and 1425 deletions

View File

@@ -1,248 +0,0 @@
#!/usr/bin/env python
#
# Copyright 2011 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import bench_util
import getopt
import httplib
import itertools
import json
import os
import re
import sys
import urllib
import urllib2
import xml.sax.saxutils
# Maximum expected number of characters we expect in an svn revision.
MAX_SVN_REV_LENGTH = 5
# Indices for getting elements from bench expectation files.
# See bench_expectations_<builder>.txt for details.
EXPECTED_IDX = -3
LB_IDX = -2
UB_IDX = -1
# Indices of the tuple of dictionaries containing slower and faster alerts.
SLOWER = 0
FASTER = 1
# URL prefix for the bench dashboard page. Showing recent 15 days of data.
DASHBOARD_URL_PREFIX = 'http://go/skpdash/#15'
def usage():
"""Prints simple usage information."""
print('-a <representation_alg> bench representation algorithm to use. ')
print(' Defaults to "25th". See bench_util.py for details.')
print('-b <builder> name of the builder whose bench data we are checking.')
print('-d <dir> a directory containing bench_<revision>_<scalar> files.')
print('-e <file> file containing expected bench builder values/ranges.')
print(' Will raise exception if actual bench values are out of range.')
print(' See bench_expectations_<builder>.txt for data format / examples.')
print('-r <revision> the git commit hash or svn revision for checking ')
print(' bench values.')
class Label:
"""The information in a label.
(str, str, str, str, {str:str})"""
def __init__(self, bench, config, time_type, settings):
self.bench = bench
self.config = config
self.time_type = time_type
self.settings = settings
def __repr__(self):
return "Label(%s, %s, %s, %s)" % (
str(self.bench),
str(self.config),
str(self.time_type),
str(self.settings),
)
def __str__(self):
return "%s_%s_%s_%s" % (
str(self.bench),
str(self.config),
str(self.time_type),
str(self.settings),
)
def __eq__(self, other):
return (self.bench == other.bench and
self.config == other.config and
self.time_type == other.time_type and
self.settings == other.settings)
def __hash__(self):
return (hash(self.bench) ^
hash(self.config) ^
hash(self.time_type) ^
hash(frozenset(self.settings.iteritems())))
def create_bench_dict(revision_data_points):
"""Convert current revision data into a dictionary of line data.
Args:
revision_data_points: a list of bench data points
Returns:
a dictionary of this form:
keys = Label objects
values = the corresponding bench value
"""
bench_dict = {}
for point in revision_data_points:
point_name = Label(point.bench,point.config,point.time_type,
point.settings)
if point_name not in bench_dict:
bench_dict[point_name] = point.time
else:
raise Exception('Duplicate expectation entry: ' + str(point_name))
return bench_dict
def read_expectations(expectations, filename):
"""Reads expectations data from file and put in expectations dict."""
for expectation in open(filename).readlines():
elements = expectation.strip().split(',')
if not elements[0] or elements[0].startswith('#'):
continue
if len(elements) != 5:
raise Exception("Invalid expectation line format: %s" %
expectation)
bench_entry = elements[0] + ',' + elements[1]
if bench_entry in expectations:
raise Exception("Dup entries for bench expectation %s" %
bench_entry)
# [<Bench_BmpConfig_TimeType>,<Platform-Alg>] -> (LB, UB, EXPECTED)
expectations[bench_entry] = (float(elements[LB_IDX]),
float(elements[UB_IDX]),
float(elements[EXPECTED_IDX]))
def check_expectations(lines, expectations, key_suffix):
"""Check if any bench results are outside of expected range.
For each input line in lines, checks the expectations dictionary to see if
the bench is out of the given range.
Args:
lines: dictionary mapping Label objects to the bench values.
expectations: dictionary returned by read_expectations().
key_suffix: string of <Platform>-<Alg> containing the bot platform and the
bench representation algorithm.
Returns:
No return value.
Raises:
Exception containing bench data that are out of range, if any.
"""
# The platform for this bot, to pass to the dashboard plot.
platform = key_suffix[ : key_suffix.rfind('-')]
# Tuple of dictionaries recording exceptions that are slower and faster,
# respectively. Each dictionary maps off_ratio (ratio of actual to expected)
# to a list of corresponding exception messages.
exceptions = ({}, {})
for line in lines:
line_str = str(line)
line_str = line_str[ : line_str.find('_{')]
# Extracts bench and config from line_str, which is in the format
# <bench-picture-name>.skp_<config>_
bench, config = line_str.strip('_').split('.skp_')
bench_platform_key = line_str + ',' + key_suffix
if bench_platform_key not in expectations:
continue
this_bench_value = lines[line]
this_min, this_max, this_expected = expectations[bench_platform_key]
if this_bench_value < this_min or this_bench_value > this_max:
off_ratio = this_bench_value / this_expected
exception = 'Bench %s out of range [%s, %s] (%s vs %s, %s%%).' % (
bench_platform_key, this_min, this_max, this_bench_value,
this_expected, (off_ratio - 1) * 100)
exception += '\n' + '~'.join([
DASHBOARD_URL_PREFIX, bench, platform, config])
if off_ratio > 1: # Bench is slower.
exceptions[SLOWER].setdefault(off_ratio, []).append(exception)
else:
exceptions[FASTER].setdefault(off_ratio, []).append(exception)
outputs = []
for i in [SLOWER, FASTER]:
if exceptions[i]:
ratios = exceptions[i].keys()
ratios.sort(reverse=True)
li = []
for ratio in ratios:
li.extend(exceptions[i][ratio])
header = '%s benches got slower (sorted by %% difference):' % len(li)
if i == FASTER:
header = header.replace('slower', 'faster')
outputs.extend(['', header] + li)
if outputs:
# Directly raising Exception will have stderr outputs tied to the line
# number of the script, so use sys.stderr.write() instead.
# Add a trailing newline to suppress new line checking errors.
sys.stderr.write('\n'.join(['Exception:'] + outputs + ['\n']))
exit(1)
def main():
"""Parses command line and checks bench expectations."""
try:
opts, _ = getopt.getopt(sys.argv[1:],
"a:b:d:e:r:",
"default-setting=")
except getopt.GetoptError as err:
print(str(err))
usage()
sys.exit(2)
directory = None
bench_expectations = {}
rep = '25th' # bench representation algorithm, default to 25th
rev = None # git commit hash or svn revision number
bot = None
try:
for option, value in opts:
if option == "-a":
rep = value
elif option == "-b":
bot = value
elif option == "-d":
directory = value
elif option == "-e":
read_expectations(bench_expectations, value)
elif option == "-r":
rev = value
else:
usage()
assert False, "unhandled option"
except ValueError:
usage()
sys.exit(2)
if directory is None or bot is None or rev is None:
usage()
sys.exit(2)
platform_and_alg = bot + '-' + rep
data_points = bench_util.parse_skp_bench_data(directory, rev, rep)
bench_dict = create_bench_dict(data_points)
if bench_expectations:
check_expectations(bench_dict, bench_expectations, platform_and_alg)
if __name__ == "__main__":
main()

View File

@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
 # Copyright 2017 Google Inc.
 #
@@ -10,7 +10,11 @@ import os
 import shutil
 import stat
 import sys
-import urllib2
+if sys.version_info[0] < 3:
+  from urllib2 import urlopen
+else:
+  from urllib.request import urlopen
 
 os.chdir(os.path.join(os.path.dirname(__file__), os.pardir))
@@ -21,7 +25,7 @@ def fetch(target):
   sha1_path = target_path + '.sha1'
   if not os.path.exists(sha1_path):
-    print sha1_path, 'is missing. Did you run `tools/git-sync-deps`?'
+    print(sha1_path, 'is missing. Did you run `tools/git-sync-deps`?')
     exit(1)
   sha1 = open(sha1_path).read().strip()
@@ -35,7 +39,7 @@ def fetch(target):
   if sha1_of_file(target_path) != sha1:
     with open(target_path, 'wb') as f:
       url = 'https://chromium-%s.storage-download.googleapis.com/%s' % (target, sha1)
-      f.write(urllib2.urlopen(url).read())
+      f.write(urlopen(url).read())
   os.chmod(target_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR |
                         stat.S_IRGRP |                stat.S_IXGRP |

View File

@@ -10,7 +10,6 @@ import json
 import os
 import platform
 import re
-import shutil
 import stat
 import sys
 import tempfile
@@ -21,10 +20,9 @@ if sys.version_info[0] < 3:
 else:
   from urllib.request import urlopen
 
 def sha256sum(path):
   try:
-    with open(sk_path, 'rb') as f:
+    with open(path, 'rb') as f:
       return hashlib.sha256(f.read()).hexdigest()
   except OSError:
     return ''

View File

@@ -1,113 +0,0 @@
#! /usr/bin/env python
# Copyright 2020 Google LLC.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''
get_examples.py: Populate docs/examples/ from the list of named fiddles.
'''
import os
import re
import sys
if sys.version_info[0] < 3:
from urllib2 import urlopen
from HTMLParser import HTMLParser
def unescape(v): return HTMLParser().unescape(v)
else:
from urllib.request import urlopen
from html.parser import HTMLParser
from html import unescape
def cxx_bool(v): return 'true' if v else 'false'
assert os.pardir == '..' and '/' in [os.sep, os.altsep]
def parse_fiddle_sk(x):
class FiddleSk(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.attrs = {}
def handle_starttag(self, tag, attrs):
if tag == 'fiddle-sk':
self.attrs = dict(attrs)
fiddle = FiddleSk()
fiddle.feed(x)
return fiddle.attrs
def process_fiddle(name):
if name == 'MAD_Magazine_Oct_1985':
return
filename = 'docs/examples/%s.cpp' % name
if os.path.exists(filename):
return
url = 'https://fiddle.skia.org/c/@' + name
content = urlopen(url).read()
regex = (r'(<fiddle-sk\s[^>]*>)\s*<textarea-numbers-sk>\s*'
r'<textarea [^>]*>(.*)</textarea>')
match = re.search(regex, content.decode('utf-8'), flags=re.S)
if not match:
sys.stderr.write('error: %s\n' % url)
return
keys = parse_fiddle_sk(match.group(1))
code = unescape(match.group(2))
width = keys.get('width', '256')
height = keys.get('height', '256')
source_image = keys.get('source', 256)
duration = keys.get('duration', '0')
textonly = 'textonly' in keys
srgb = not textonly and 'srgb' in keys
f16 = srgb and 'f16' in keys
offscreen = 'offscreen' in keys
sys.stdout.write('Writing to: %s\n' % filename)
sys.stdout.flush()
with open(filename, 'w') as o:
o.write('// Copyright 2020 Google LLC.\n'
'// Use of this source code is governed by a BSD-style'
' license that can be found in the LICENSE file.\n'
'#include "tools/fiddle/examples.h"\n')
if offscreen:
o.write('REGISTER_FIDDLE(')
o.write(', '.join([name,
width,
height,
cxx_bool(textonly),
source_image,
duration,
cxx_bool(srgb),
cxx_bool(f16),
cxx_bool(offscreen),
keys.get('offscreen_width', '64'),
keys.get('offscreen_height', '64'),
keys.get('offscreen_sample_count', '0'),
keys.get('offscreen_texturable', 'false'),
keys.get('offscreen_mipmap', 'false')]))
elif srgb:
o.write('REG_FIDDLE_SRGB(')
o.write(', '.join([name, width, height, cxx_bool(textonly),
source_image, duration, cxx_bool(f16)]))
elif duration:
o.write('REG_FIDDLE_ANIMATED(')
o.write(', '.join([name, width, height, cxx_bool(textonly),
source_image, duration]))
else:
o.write('REG_FIDDLE(')
o.write(', '.join([name, width, height, cxx_bool(textonly),
source_image]))
o.write(') {\n')
o.write(code)
o.write('\n} // END FIDDLE\n')
def main():
os.chdir(os.path.dirname(__file__) + '/../..')
for line in urlopen('https://fiddle.skia.org/named/'):
line_match = re.search(r'/c/@([A-Za-z0-9_-]*)', line.decode('utf-8'))
if not line_match:
continue
name = line_match.group(1)
process_fiddle(name)
if __name__ == '__main__':
main()

View File

@@ -1,244 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script that uploads the specified Skia Gerrit change to Android.
This script does the following:
* Downloads the repo tool.
* Inits and checks out the bare-minimum required Android checkout.
* Sets the required git config options in external/skia.
* Cherry-picks the specified Skia patch.
* Modifies the change subject to append a "Test:" line required for presubmits.
* Uploads the Skia change to Android's Gerrit instance.
After the change is uploaded to Android, developers can trigger TH and download
binaries (if required) after runs complete.
The script re-uses the workdir when it is run again. To start from a clean slate
delete the workdir.
Timings:
* ~1m15s when using an empty/non-existent workdir for the first time.
* ~15s when using a workdir previously populated by the script.
Example usage:
$ python upload_to_android.py -w /repos/testing -c 44200
"""
from __future__ import print_function
import argparse
import getpass
import json
import os
import subprocess
import stat
import urllib2
REPO_TOOL_URL = 'https://storage.googleapis.com/git-repo-downloads/repo'
SKIA_PATH_IN_ANDROID = os.path.join('external', 'skia')
ANDROID_REPO_URL = 'https://googleplex-android.googlesource.com'
REPO_BRANCH_NAME = 'experiment'
SKIA_GERRIT_INSTANCE = 'https://skia-review.googlesource.com'
SK_USER_CONFIG_PATH = os.path.join('include', 'config', 'SkUserConfig.h')
def get_change_details(change_num):
response = urllib2.urlopen('%s/changes/%s/detail?o=ALL_REVISIONS' % (
SKIA_GERRIT_INSTANCE, change_num), timeout=5)
content = response.read()
# Remove the first line which contains ")]}'\n".
return json.loads(content[5:])
def init_work_dir(work_dir):
if not os.path.isdir(work_dir):
print('Creating %s' % work_dir)
os.makedirs(work_dir)
# Ensure the repo tool exists in the work_dir.
repo_dir = os.path.join(work_dir, 'bin')
repo_binary = os.path.join(repo_dir, 'repo')
if not os.path.isdir(repo_dir):
print('Creating %s' % repo_dir)
os.makedirs(repo_dir)
if not os.path.exists(repo_binary):
print('Downloading %s from %s' % (repo_binary, REPO_TOOL_URL))
response = urllib2.urlopen(REPO_TOOL_URL, timeout=5)
content = response.read()
with open(repo_binary, 'w') as f:
f.write(content)
# Set executable bit.
st = os.stat(repo_binary)
os.chmod(repo_binary, st.st_mode | stat.S_IEXEC)
# Create android-repo directory in the work_dir.
android_dir = os.path.join(work_dir, 'android-repo')
if not os.path.isdir(android_dir):
print('Creating %s' % android_dir)
os.makedirs(android_dir)
print("""
About to run repo init. If it hangs asking you to run glogin then please:
* Exit the script (ctrl-c).
* Run 'glogin'.
* Re-run the script.
""")
os.chdir(android_dir)
subprocess.check_call(
'%s init -u %s/a/platform/manifest -g "all,-notdefault,-darwin" '
'-b master --depth=1'
% (repo_binary, ANDROID_REPO_URL), shell=True)
print('Syncing the Android checkout at %s' % android_dir)
subprocess.check_call('%s sync %s tools/repohooks -j 32 -c' % (
repo_binary, SKIA_PATH_IN_ANDROID), shell=True)
# Set the necessary git config options.
os.chdir(SKIA_PATH_IN_ANDROID)
subprocess.check_call(
'git config remote.goog.review %s/' % ANDROID_REPO_URL, shell=True)
subprocess.check_call(
'git config review.%s/.autoupload true' % ANDROID_REPO_URL, shell=True)
subprocess.check_call(
'git config user.email %s@google.com' % getpass.getuser(), shell=True)
return repo_binary
class Modifier:
def modify(self):
raise NotImplementedError
def get_user_msg(self):
raise NotImplementedError
class FetchModifier(Modifier):
def __init__(self, change_num, debug):
self.change_num = change_num
self.debug = debug
def modify(self):
# Download and cherry-pick the patch.
change_details = get_change_details(self.change_num)
latest_patchset = len(change_details['revisions'])
mod = int(self.change_num) % 100
download_ref = 'refs/changes/%s/%s/%s' % (
str(mod).zfill(2), self.change_num, latest_patchset)
subprocess.check_call(
'git fetch https://skia.googlesource.com/skia %s' % download_ref,
shell=True)
subprocess.check_call('git cherry-pick FETCH_HEAD', shell=True)
if self.debug:
# Add SK_DEBUG to SkUserConfig.h.
with open(SK_USER_CONFIG_PATH, 'a') as f:
f.write('#ifndef SK_DEBUG\n')
f.write('#define SK_DEBUG\n')
f.write('#endif//SK_DEBUG\n')
subprocess.check_call('git add %s' % SK_USER_CONFIG_PATH, shell=True)
# Amend the commit message to add a prefix that makes it clear that the
# change should not be submitted and a "Test:" line which is required by
# Android presubmit checks.
original_commit_message = change_details['subject']
new_commit_message = (
# Intentionally breaking up the below string because some presubmits
# complain about it.
'[DO ' + 'NOT ' + 'SUBMIT] %s\n\n'
'Test: Presubmit checks will test this change.' % (
original_commit_message))
subprocess.check_call('git commit --amend -m "%s"' % new_commit_message,
shell=True)
def get_user_msg(self):
return """
Open the above URL and trigger TH by checking 'Presubmit-Ready'.
You can download binaries (if required) from the TH link after it completes.
"""
# Add a legacy flag if it doesn't exist, or remove it if it exists.
class AndroidLegacyFlagModifier(Modifier):
def __init__(self, flag):
self.flag = flag
self.verb = "Unknown"
def modify(self):
flag_line = " #define %s\n" % self.flag
config_file = os.path.join('include', 'config', 'SkUserConfigManual.h')
with open(config_file) as f:
lines = f.readlines()
if flag_line not in lines:
lines.insert(
lines.index("#endif // SkUserConfigManual_DEFINED\n"), flag_line)
verb = "Add"
else:
lines.remove(flag_line)
verb = "Remove"
with open(config_file, 'w') as f:
for line in lines:
f.write(line)
subprocess.check_call('git add %s' % config_file, shell=True)
message = '%s %s\n\nTest: Presubmit checks will test this change.' % (
verb, self.flag)
subprocess.check_call('git commit -m "%s"' % message, shell=True)
def get_user_msg(self):
return """
Please open the above URL to review and land the change.
"""
def upload_to_android(work_dir, modifier):
repo_binary = init_work_dir(work_dir)
# Create repo branch.
subprocess.check_call('%s start %s .' % (repo_binary, REPO_BRANCH_NAME),
shell=True)
try:
modifier.modify()
# Upload to Android Gerrit.
subprocess.check_call('%s upload --verify' % repo_binary, shell=True)
print(modifier.get_user_msg())
finally:
# Abandon repo branch.
subprocess.call('%s abandon %s' % (repo_binary, REPO_BRANCH_NAME),
shell=True)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--work-dir', '-w', required=True,
help='Directory where an Android checkout will be created (if it does '
'not already exist). Note: ~1GB space will be used.')
parser.add_argument(
'--change-num', '-c', required=True,
help='The skia-rev Gerrit change number that should be patched into '
'Android.')
parser.add_argument(
'--debug', '-d', action='store_true', default=False,
help='Adds SK_DEBUG to SkUserConfig.h.')
args = parser.parse_args()
upload_to_android(args.work_dir, FetchModifier(args.change_num, args.debug))
if __name__ == '__main__':
main()

View File

@@ -1,414 +0,0 @@
#!/usr/bin/python2
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Skia's Chromium Codereview Comparison Script.
This script takes two Codereview URLs, looks at the trybot results for
the two codereviews and compares the results.
Usage:
compare_codereview.py CONTROL_URL ROLL_URL
"""
import collections
import os
import re
import sys
import urllib2
import HTMLParser
class CodeReviewHTMLParser(HTMLParser.HTMLParser):
"""Parses CodeReview web page.
Use the CodeReviewHTMLParser.parse static function to make use of
this class.
This uses the HTMLParser class because it's the best thing in
Python's standard library. We need a little more power than a
regex. [Search for "You can't parse [X]HTML with regex." for more
information.
"""
# pylint: disable=I0011,R0904
@staticmethod
def parse(url):
"""Parses a CodeReview web pages.
Args:
url (string), a codereview URL like this:
'https://codereview.chromium.org/?????????'.
Returns:
A dictionary; the keys are bot_name strings, the values
are CodeReviewHTMLParser.Status objects
"""
parser = CodeReviewHTMLParser()
try:
parser.feed(urllib2.urlopen(url).read())
except (urllib2.URLError,):
print >> sys.stderr, 'Error getting', url
return None
parser.close()
return parser.statuses
# namedtuples are like lightweight structs in Python. The low
# overhead of a tuple, but the ease of use of an object.
Status = collections.namedtuple('Status', ['status', 'url'])
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self._id = None
self._status = None
self._href = None
self._anchor_data = ''
self._currently_parsing_trybotdiv = False
# statuses is a dictionary of CodeReviewHTMLParser.Status
self.statuses = {}
def handle_starttag(self, tag, attrs):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to handle the start of a tag
(e.g. <div id="main">).
The tag argument is the name of the tag converted to lower
case. The attrs argument is a list of (name, value) pairs
containing the attributes found inside the tag's <>
brackets. The name will be translated to lower case, and
quotes in the value have been removed, and character and
entity references have been replaced.
For instance, for the tag <A HREF="http://www.cwi.nl/">, this
method would be called as handle_starttag('a', [('href',
'http://www.cwi.nl/')]).
[[end standard library documentation]]
"""
attrs = dict(attrs)
if tag == 'div':
# We are looking for <div id="tryjobdiv*">.
id_attr = attrs.get('id','')
if id_attr.startswith('tryjobdiv'):
self._id = id_attr
if (self._id and tag == 'a'
and 'build-result' in attrs.get('class', '').split()):
# If we are already inside a <div id="tryjobdiv*">, we
# look for a link of the form
# <a class="build-result" href="*">. Then we save the
# (non-standard) status attribute and the URL.
self._status = attrs.get('status')
self._href = attrs.get('href')
self._currently_parsing_trybotdiv = True
# Start saving anchor data.
def handle_data(self, data):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to process arbitrary data (e.g. text
nodes and the content of <script>...</script> and
<style>...</style>).
[[end standard library documentation]]
"""
# Save the text inside the <a></a> tags. Assume <a> tags
# aren't nested.
if self._currently_parsing_trybotdiv:
self._anchor_data += data
def handle_endtag(self, tag):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to handle the end tag of an element
(e.g. </div>). The tag argument is the name of the tag
converted to lower case.
[[end standard library documentation]]
"""
if tag == 'a' and self._status:
# We take the accumulated self._anchor_data and save it as
# the bot name.
bot = self._anchor_data.strip()
stat = CodeReviewHTMLParser.Status(status=self._status,
url=self._href)
if bot:
# Add to accumulating dictionary.
self.statuses[bot] = stat
# Reset state to search for the next bot.
self._currently_parsing_trybotdiv = False
self._anchor_data = ''
self._status = None
self._href = None
class BuilderHTMLParser(HTMLParser.HTMLParser):
"""parses Trybot web pages.
Use the BuilderHTMLParser.parse static function to make use of
this class.
This uses the HTMLParser class because it's the best thing in
Python's standard library. We need a little more power than a
regex. [Search for "You can't parse [X]HTML with regex." for more
information.]
"""
# pylint: disable=I0011,R0904
@staticmethod
def parse(url):
"""Parses a Trybot web page.
Args:
url (string), a trybot result URL.
Returns:
An array of BuilderHTMLParser.Results, each a description
of failure results, along with an optional url
"""
parser = BuilderHTMLParser()
try:
parser.feed(urllib2.urlopen(url).read())
except (urllib2.URLError,):
print >> sys.stderr, 'Error getting', url
return []
parser.close()
return parser.failure_results
Result = collections.namedtuple('Result', ['text', 'url'])
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.failure_results = []
self._current_failure_result = None
self._divlevel = None
self._li_level = 0
self._li_data = ''
self._current_failure = False
self._failure_results_url = ''
def handle_starttag(self, tag, attrs):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to handle the start of a tag
(e.g. <div id="main">).
The tag argument is the name of the tag converted to lower
case. The attrs argument is a list of (name, value) pairs
containing the attributes found inside the tag's <>
brackets. The name will be translated to lower case, and
quotes in the value have been removed, and character and
entity references have been replaced.
For instance, for the tag <A HREF="http://www.cwi.nl/">, this
method would be called as handle_starttag('a', [('href',
'http://www.cwi.nl/')]).
[[end standard library documentation]]
"""
attrs = dict(attrs)
if tag == 'li':
# <li> tags can be nested. So we have to count the
# nest-level for backing out.
self._li_level += 1
return
if tag == 'div' and attrs.get('class') == 'failure result':
# We care about this sort of thing:
# <li>
# <li>
# <li>
# <div class="failure result">...</div>
# </li>
# </li>
# We want this text here.
# </li>
if self._li_level > 0:
self._current_failure = True # Tells us to keep text.
return
if tag == 'a' and self._current_failure:
href = attrs.get('href')
# Sometimes we want to keep the stdio url. We always
# return it, just in case.
if href.endswith('/logs/stdio'):
self._failure_results_url = href
def handle_data(self, data):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to process arbitrary data (e.g. text
nodes and the content of <script>...</script> and
<style>...</style>).
[[end standard library documentation]]
"""
if self._current_failure:
self._li_data += data
def handle_endtag(self, tag):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to handle the end tag of an element
(e.g. </div>). The tag argument is the name of the tag
converted to lower case.
[[end standard library documentation]]
"""
if tag == 'li':
self._li_level -= 1
if 0 == self._li_level:
if self._current_failure:
result = self._li_data.strip()
first = result.split()[0]
if first:
result = re.sub(
r'^%s(\s+%s)+' % (first, first), first, result)
# Sometimes, it repeats the same thing
# multiple times.
result = re.sub(r'unexpected flaky.*', '', result)
# Remove some extra unnecessary text.
result = re.sub(r'\bpreamble\b', '', result)
result = re.sub(r'\bstdio\b', '', result)
url = self._failure_results_url
self.failure_results.append(
BuilderHTMLParser.Result(result, url))
self._current_failure_result = None
# Reset the state.
self._current_failure = False
self._li_data = ''
self._failure_results_url = ''
def printer(indent, string):
"""Print indented, wrapped text.
"""
def wrap_to(line, columns):
"""Wrap a line to the given number of columns, return a list
of strings.
"""
ret = []
nextline = ''
for word in line.split():
if nextline:
if len(nextline) + 1 + len(word) > columns:
ret.append(nextline)
nextline = word
else:
nextline += (' ' + word)
else:
nextline = word
if nextline:
ret.append(nextline)
return ret
out = sys.stdout
spacer = ' '
for line in string.split('\n'):
for i, wrapped_line in enumerate(wrap_to(line, 68 - (2 * indent))):
out.write(spacer * indent)
if i > 0:
out.write(spacer)
out.write(wrapped_line)
out.write('\n')
out.flush()
def main(control_url, roll_url, verbosity=1):
"""Compare two Codereview URLs
Args:
control_url, roll_url: (strings) URL of the format
https://codereview.chromium.org/?????????
verbosity: (int) verbose level. 0, 1, or 2.
"""
# pylint: disable=I0011,R0914,R0912
control = CodeReviewHTMLParser.parse(control_url)
roll = CodeReviewHTMLParser.parse(roll_url)
all_bots = set(control) & set(roll) # Set intersection.
if not all_bots:
print >> sys.stderr, (
'Error: control %s and roll %s have no common trybots.'
% (list(control), list(roll)))
return
control_name = '[control %s]' % control_url.split('/')[-1]
roll_name = '[roll %s]' % roll_url.split('/')[-1]
out = sys.stdout
for bot in sorted(all_bots):
if (roll[bot].status == 'success'):
if verbosity > 1:
printer(0, '==%s==' % bot)
printer(1, 'OK')
continue
if control[bot].status != 'failure' and roll[bot].status != 'failure':
continue
printer(0, '==%s==' % bot)
formatted_results = []
for (status, name, url) in [
(control[bot].status, control_name, control[bot].url),
( roll[bot].status, roll_name, roll[bot].url)]:
lines = []
if status == 'failure':
results = BuilderHTMLParser.parse(url)
for result in results:
formatted_result = re.sub(r'(\S*\.html) ', '\n__\g<1>\n', result.text)
# Strip runtimes.
formatted_result = re.sub(r'\(.*\)', '', formatted_result)
lines.append((2, formatted_result))
if ('compile' in result.text or '...and more' in result.text):
lines.append((3, re.sub('/[^/]*$', '/', url) + result.url))
formatted_results.append(lines)
identical = formatted_results[0] == formatted_results[1]
for (formatted_result, (status, name, url)) in zip(
formatted_results,
[(control[bot].status, control_name, control[bot].url),
(roll[bot].status, roll_name, roll[bot].url)]):
if status != 'failure' and not identical:
printer(1, name)
printer(2, status)
elif status == 'failure':
if identical:
printer(1, control_name + ' and ' + roll_name + ' failed identically')
else:
printer(1, name)
for (indent, line) in formatted_result:
printer(indent, line)
if identical:
break
out.write('\n')
if verbosity > 0:
# Print out summary of all of the bots.
out.write('%11s %11s %4s %s\n\n' %
('CONTROL', 'ROLL', 'DIFF', 'BOT'))
for bot in sorted(all_bots):
if roll[bot].status == 'success':
diff = ''
elif (control[bot].status == 'success' and
roll[bot].status == 'failure'):
diff = '!!!!'
elif ('pending' in control[bot].status or
'pending' in roll[bot].status):
diff = '....'
else:
diff = '****'
out.write('%11s %11s %4s %s\n' % (
control[bot].status, roll[bot].status, diff, bot))
out.write('\n')
out.flush()
if __name__ == '__main__':
if len(sys.argv) < 3:
print >> sys.stderr, __doc__
exit(1)
main(sys.argv[1], sys.argv[2],
int(os.environ.get('COMPARE_CODEREVIEW_VERBOSITY', 1)))

View File

@@ -1,202 +0,0 @@
#!/usr/bin/python
'''
Copyright 2013 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
'''
'''
Gathers diffs between 2 JSON expectations files, or between actual and
expected results within a single JSON actual-results file,
and generates an old-vs-new diff dictionary.
TODO(epoger): Fix indentation in this file (2-space indents, not 4-space).
'''
# System-level imports
import argparse
import json
import os
import sys
import urllib2
# Imports from within Skia
#
# We need to add the 'gm' directory, so that we can import gm_json.py within
# that directory. That script allows us to parse the actual-results.json file
# written out by the GM tool.
# Make sure that the 'gm' dir is in the PYTHONPATH, but add it at the *end*
# so any dirs that are already in the PYTHONPATH will be preferred.
#
# This assumes that the 'gm' directory has been checked out as a sibling of
# the 'tools' directory containing this script, which will be the case if
# 'trunk' was checked out as a single unit.
GM_DIRECTORY = os.path.realpath(
os.path.join(os.path.dirname(os.path.dirname(__file__)), 'gm'))
if GM_DIRECTORY not in sys.path:
sys.path.append(GM_DIRECTORY)
import gm_json
# Object that generates diffs between two JSON gm result files.
class GMDiffer(object):
def __init__(self):
pass
def _GetFileContentsAsString(self, filepath):
"""Returns the full contents of a file, as a single string.
If the filename looks like a URL, download its contents.
If the filename is None, return None."""
if filepath is None:
return None
elif filepath.startswith('http:') or filepath.startswith('https:'):
return urllib2.urlopen(filepath).read()
else:
return open(filepath, 'r').read()
def _GetExpectedResults(self, contents):
"""Returns the dictionary of expected results from a JSON string,
in this form:
{
'test1' : 14760033689012826769,
'test2' : 9151974350149210736,
...
}
We make these simplifying assumptions:
1. Each test has either 0 or 1 allowed results.
2. All expectations are of type JSONKEY_HASHTYPE_BITMAP_64BITMD5.
Any tests which violate those assumptions will cause an exception to
be raised.
Any tests for which we have no expectations will be left out of the
returned dictionary.
"""
result_dict = {}
json_dict = gm_json.LoadFromString(contents)
all_expectations = json_dict[gm_json.JSONKEY_EXPECTEDRESULTS]
# Prevent https://code.google.com/p/skia/issues/detail?id=1588
if not all_expectations:
return result_dict
for test_name in all_expectations.keys():
test_expectations = all_expectations[test_name]
allowed_digests = test_expectations[
gm_json.JSONKEY_EXPECTEDRESULTS_ALLOWEDDIGESTS]
if allowed_digests:
num_allowed_digests = len(allowed_digests)
if num_allowed_digests > 1:
raise ValueError(
'test %s has %d allowed digests' % (
test_name, num_allowed_digests))
digest_pair = allowed_digests[0]
if digest_pair[0] != gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5:
raise ValueError(
'test %s has unsupported hashtype %s' % (
test_name, digest_pair[0]))
result_dict[test_name] = digest_pair[1]
return result_dict
def _GetActualResults(self, contents):
"""Returns the dictionary of actual results from a JSON string,
in this form:
{
'test1' : 14760033689012826769,
'test2' : 9151974350149210736,
...
}
We make these simplifying assumptions:
1. All results are of type JSONKEY_HASHTYPE_BITMAP_64BITMD5.
Any tests which violate those assumptions will cause an exception to
be raised.
Any tests for which we have no actual results will be left out of the
returned dictionary.
"""
result_dict = {}
json_dict = gm_json.LoadFromString(contents)
all_result_types = json_dict[gm_json.JSONKEY_ACTUALRESULTS]
for result_type in all_result_types.keys():
results_of_this_type = all_result_types[result_type]
if results_of_this_type:
for test_name in results_of_this_type.keys():
digest_pair = results_of_this_type[test_name]
if (digest_pair[0] !=
gm_json.JSONKEY_HASHTYPE_BITMAP_64BITMD5):
raise ValueError(
'test %s has unsupported hashtype %s' % (
test_name, digest_pair[0]))
result_dict[test_name] = digest_pair[1]
return result_dict
def _DictionaryDiff(self, old_dict, new_dict):
"""Generate a dictionary showing diffs between old_dict and new_dict.
Any entries which are identical across them will be left out."""
diff_dict = {}
all_keys = set(old_dict.keys() + new_dict.keys())
for key in all_keys:
if old_dict.get(key) != new_dict.get(key):
new_entry = {}
new_entry['old'] = old_dict.get(key)
new_entry['new'] = new_dict.get(key)
diff_dict[key] = new_entry
return diff_dict
def GenerateDiffDict(self, oldfile, newfile=None):
"""Generate a dictionary showing the diffs:
old = expectations within oldfile
new = expectations within newfile
If newfile is not specified, then 'new' is the actual results within
oldfile.
"""
return self.GenerateDiffDictFromStrings(
self._GetFileContentsAsString(oldfile),
self._GetFileContentsAsString(newfile))
def GenerateDiffDictFromStrings(self, oldjson, newjson=None):
"""Generate a dictionary showing the diffs:
old = expectations within oldjson
new = expectations within newjson
If newfile is not specified, then 'new' is the actual results within
oldfile.
"""
old_results = self._GetExpectedResults(oldjson)
if newjson:
new_results = self._GetExpectedResults(newjson)
else:
new_results = self._GetActualResults(oldjson)
return self._DictionaryDiff(old_results, new_results)
def _Main():
parser = argparse.ArgumentParser()
parser.add_argument(
'old',
help='Path to JSON file whose expectations to display on ' +
'the "old" side of the diff. This can be a filepath on ' +
'local storage, or a URL.')
parser.add_argument(
'new', nargs='?',
help='Path to JSON file whose expectations to display on ' +
'the "new" side of the diff; if not specified, uses the ' +
'ACTUAL results from the "old" JSON file. This can be a ' +
'filepath on local storage, or a URL.')
args = parser.parse_args()
differ = GMDiffer()
diffs = differ.GenerateDiffDict(oldfile=args.old, newfile=args.new)
json.dump(diffs, sys.stdout, sort_keys=True, indent=2)
if __name__ == '__main__':
_Main()

View File

@@ -1,160 +0,0 @@
#!/usr/bin/env python
# Copyright (c) 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
README = """
Automatically add or remove a specific legacy flag to multiple Skia client repos.
This would only work on Google desktop.
Example usage:
$ python toggle_legacy_flag.py SK_SUPPORT_LEGACY_SOMETHING \\
-a /data/android -c ~/chromium/src -g legacyflag
If you only need to add the flag to one repo, for example, Android, please give
only -a (--android-dir) argument:
$ python toggle_legacy_flag.py SK_SUPPORT_LEGACY_SOMETHING -a /data/android
"""
from __future__ import print_function
import os, sys
import argparse
import subprocess
import getpass
from random import randint
ANDROID_TOOLS_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'android')
def toggle_android(args):
sys.path.append(ANDROID_TOOLS_DIR)
import upload_to_android
modifier = upload_to_android.AndroidLegacyFlagModifier(args.flag)
upload_to_android.upload_to_android(args.android_dir, modifier)
def toggle_chromium(args):
os.chdir(args.chromium_dir)
branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
branch = branch.strip()
EXPECTED_STASH_OUT = "No local changes to save"
stash_output = subprocess.check_output(['git', 'stash']).strip()
if branch != "master" or stash_output != EXPECTED_STASH_OUT:
print ("Please checkout a clean master branch at your chromium repo (%s) "
"before running this script") % args.chromium_dir
if stash_output != EXPECTED_STASH_OUT:
subprocess.check_call(['git', 'stash', 'pop'])
exit(1)
# Update the repository to avoid conflicts
subprocess.check_call(['git', 'pull'])
subprocess.check_call(['gclient', 'sync'])
# Use random number to avoid branch name collision.
# We'll delete the branch in the end.
random = randint(1, 10000)
subprocess.check_call(['git', 'checkout', '-b', 'legacyflag_%d' % random])
try:
config_file = os.path.join('skia', 'config', 'SkUserConfig.h')
with open(config_file) as f:
lines = f.readlines()
flag_line = "#define %s\n" % args.flag
if flag_line in lines:
index = lines.index(flag_line)
del lines[index-1 : index +2]
verb = "Remove"
else:
separator = (
"/////////////////////////"
" Imported from BUILD.gn and skia_common.gypi\n")
content = ("#ifndef {0}\n"
"#define {0}\n"
"#endif\n\n").format(args.flag)
lines.insert(lines.index(separator), content)
verb = "Add"
with open(config_file, 'w') as f:
for line in lines:
f.write(line)
message = "%s %s" % (verb, args.flag)
subprocess.check_call('git commit -a -m "%s"' % message, shell=True)
subprocess.check_call('git cl upload -m "%s" -f' % message,
shell=True)
finally:
subprocess.check_call(['git', 'checkout', 'master'])
subprocess.check_call(['git', 'branch', '-D', 'legacyflag_%d' % random])
def toggle_google3(args):
G3_SCRIPT_DIR = os.path.expanduser("~/skia-g3/scripts")
if not os.path.isdir(G3_SCRIPT_DIR):
print ("Google3 directory unavailable.\n"
"Please see "
"https://sites.google.com/a/google.com/skia/rebaseline#g3_flag "
"for Google3 setup.")
exit(1)
sys.path.append(G3_SCRIPT_DIR)
import citc_flag
citc_flag.toggle_google3(args.google3, args.flag)
def main():
if len(sys.argv) <= 1 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
print(README)
parser = argparse.ArgumentParser()
parser.add_argument(
'--android-dir', '-a', required=False,
help='Directory where an Android checkout will be created (if it does '
'not already exist). Note: ~1GB space will be used.')
parser.add_argument(
'--chromium-dir', '-c', required=False,
help='Directory of an EXISTING Chromium checkout (e.g., ~/chromium/src)')
parser.add_argument(
'--google3', '-g', required=False,
help='Google3 workspace to be created (if it does not already exist).')
parser.add_argument('flag', type=str, help='legacy flag name')
args = parser.parse_args()
if not args.android_dir and not args.chromium_dir and not args.google3:
print("""
Nothing to do. Please give me at least one of these three arguments:
-a (--android-dir)
-c (--chromium-dir)
-g (--google3)
""")
exit(1)
end_message = "CLs generated. Now go review and land them:\n"
if args.chromium_dir:
args.chromium_dir = os.path.expanduser(args.chromium_dir)
toggle_chromium(args)
end_message += " * https://chromium-review.googlesource.com\n"
if args.google3:
toggle_google3(args)
end_message += " * http://goto.google.com/cl\n"
if args.android_dir:
args.android_dir = os.path.expanduser(args.android_dir)
toggle_android(args)
end_message += " * http://goto.google.com/androidcl\n"
print(end_message)
if __name__ == '__main__':
main()

View File

@@ -1,37 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Retrieve the given file from googlesource.com."""
from contextlib import closing
import base64
import sys
import urllib2
def get(repo_url, filepath):
"""Retrieve the contents of the given file from the given googlesource repo.
Args:
repo_url: string; URL of the repository from which to retrieve the file.
filepath: string; path of the file within the repository.
Return:
string; the contents of the given file.
"""
base64_url = '/'.join((repo_url, '+', 'main', filepath)) + '?format=TEXT'
with closing(urllib2.urlopen(base64_url)) as f:
return base64.b64decode(f.read())
if __name__ == '__main__':
if len(sys.argv) != 3:
print >> sys.stderr, 'Usage: %s <repo_url> <filepath>' % sys.argv[0]
sys.exit(1)
sys.stdout.write(get(sys.argv[1], sys.argv[2]))