8875baf99d
The bloaty here was compiled with this patch: https://github.com/google/bloaty/pull/149 Hopefully that lands upstream and we can track master again. This adds BuildStats.+Debug because we need symbols to get sensical data. Bloaty's WASM support is experimental and currently doesn't support having a stripped (Release) version be profiled using the symbols of a Debug version. This means that the buildStats for debug will be higher than actual, but hopefully the absolute positioning will be the same and thus the outputs useful. Bug: skia: Change-Id: Id7bf721843e8c52a0aae2b7e57ff95397693b3dd Reviewed-on: https://skia-review.googlesource.com/c/163256 Reviewed-by: Mike Klein <mtklein@google.com> Commit-Queue: Kevin Lubick <kjlubick@google.com>
80 lines
2.2 KiB
Python
80 lines
2.2 KiB
Python
# Copyright 2018 The Chromium Authors. All rights reserved.
|
|
# Use of this source code is governed by a BSD-style license that can be
|
|
# found in the LICENSE file.
|
|
|
|
|
|
"""Writes a Perf-formated json file with stats about the given web file."""
|
|
|
|
|
|
import gzip
import json
import os
import shutil
import subprocess
import sys
|
|
|
|
|
|
def main():
  """Run bloaty on a web (WASM) file and write Perf-formatted JSON stats.

  Command line arguments:
    1: path to the input (WASM) file to analyze.
    2: output directory; the stats are written to <basename>.json there.
    3: space-separated "key value" pairs stored under results['key'].
    4: space-separated "key value" pairs stored at the top level of results.
    5: path to the bloaty executable.
  """
  input_file = sys.argv[1]
  out_dir = sys.argv[2]
  keystr = sys.argv[3]
  propstr = sys.argv[4]
  bloaty_path = sys.argv[5]

  results = {
    'key': { },
    'results': { }
  }

  # Unlikely-to-occur sentinel so downstream log parsers can find sections.
  magic_separator = '#$%^&*'
  print(magic_separator)
  print('If you see lots of func[19] and such, go check out the debug build')
  print('Note that template instantiations are grouped together, '
        'thus the elided types.')
  print('If you notice an unsymbolized "duplicate" entry, it is simply how '
        'many bytes the function name itself takes up')
  # .decode keeps the printed output identical on Python 2 while remaining
  # correct on Python 3, where check_output returns bytes.
  print(subprocess.check_output(
      [bloaty_path, input_file,
       '-d', 'shortsymbols', '-n', '0']).decode('utf-8'))

  print(magic_separator)
  print('If you see lots of func[19] and such, go check out the debug build')
  print(subprocess.check_output(
      [bloaty_path, input_file,
       '-d', 'fullsymbols', '-n', '0']).decode('utf-8'))

  # Top-level properties (e.g. gitHash) live beside 'key' and 'results'.
  props = propstr.split(' ')
  for i in range(0, len(props), 2):
    results[props[i]] = props[i+1]

  # Trace keys (e.g. arch, compiler) are nested under 'key'.
  keys = keystr.split(' ')
  for i in range(0, len(keys), 2):
    results['key'][keys[i]] = keys[i+1]

  r = {
    'total_size_bytes': os.path.getsize(input_file)
  }

  # Compress with the gzip module instead of shelling out to `cp` + `gzip`:
  # it is portable, it cannot destroy the hardlinked input file that
  # Swarming links in from the isolated cache, and it needs no copy step.
  gzip_file = input_file + '_tmp.gz'
  with open(input_file, 'rb') as src, gzip.open(gzip_file, 'wb') as dst:
    shutil.copyfileobj(src, dst)
  try:
    r['gzip_size_bytes'] = os.path.getsize(gzip_file)
  finally:
    os.remove(gzip_file)  # Don't leave the temporary archive behind.

  name = os.path.basename(input_file)

  results['results'][name] = {
    # We need this top level layer 'config'/slice
    # Other analysis methods (e.g. libskia) might have
    # slices for data on the 'code' section, etc.
    'default' : r,
  }

  print(magic_separator)
  print(json.dumps(results, indent=2))

  with open(os.path.join(out_dir, name+'.json'), 'w') as output:
    output.write(json.dumps(results, indent=2))
|
|
|
|
|
|
# Only run when executed as a script, not when imported.
if __name__ == '__main__':
  main()
|