benchtests: Use argparse to parse arguments
Make the script more usable by adding proper command line options along with
a way to query the options.  The script is capable of doing a bunch of things
right now, like choosing a base for comparison, choosing to generate graphs,
etc., and they should be accessible via command line switches.

	* benchtests/scripts/compare_strings.py: Use argparse.
	* benchtests/README: Document existence of compare_strings.py.
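After this change, a comparison run looks something like the following; the
result file, schema path, attribute names, and IFUNC variant here are
illustrative stand-ins, not taken from this commit (only the -i, -s, -a and
-b switches themselves come from the diff below):

  benchtests/scripts/compare_strings.py \
      -i bench-memcpy.out \
      -s benchtests/scripts/benchout_strings.schema.json \
      -a length,align1,align2 \
      -b __memcpy_avx_unaligned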
commit 06b1de2378
parent 9ac4470888
@@ -1,3 +1,8 @@
+2017-09-16  Siddhesh Poyarekar  <siddhesh@sourceware.org>
+
+	* benchtests/scripts/compare_strings.py: Use argparse.
+	* benchtests/README: Document existence of compare_strings.py.
+
 2017-09-15  Joseph Myers  <joseph@codesourcery.com>
 
 	* math/s_fma.c: Include <libm-alias-double.h>.
@@ -122,3 +122,14 @@ To add a benchset for `foo':
 - Write your bench-foo.c that prints out the measurements to stdout.
 - On execution, a bench-foo.out is created in $(objpfx) with the contents of
   stdout.
+
+Reading String Benchmark Results:
+================================
+
+Some of the string benchmark results are now in JSON to make it easier to read
+in scripts.  Use the benchtests/scripts/compare_strings.py script to show the
+results in a tabular format, generate graphs and more. Run
+
+  benchtests/scripts/compare_strings.py -h
+
+for usage information.
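Since argparse generates the help text from the option definitions added in
the script hunk below, the -h output should look roughly like this; exact
wording and wrapping vary with the Python version:

  usage: compare_strings.py [-h] -a ATTRIBUTES -i INPUT -s SCHEMA [-b BASE]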
@@ -28,6 +28,7 @@ import sys
 import os
 import json
 import pylab
+import argparse
 
 try:
     import jsonschema as validator
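The context lines above end inside a guarded import: jsonschema is brought in
under a try so the script can react if the module is absent. The except side
falls outside this hunk, so the fallback below is an assumption for
illustration only, not the script's actual handler:

  import sys

  try:
      import jsonschema as validator
  except ImportError:
      # Assumed fallback for illustration; the script's real except
      # clause is not shown in this hunk.
      sys.stderr.write('jsonschema not found; skipping validation.\n')
      validator = None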
@@ -118,22 +119,32 @@ def main(args):
 
     Take a string benchmark output file and compare timings.
     """
-    if len(args) < 3:
-        print('Usage: %s <input file> <schema file> [-base=ifunc_name] attr1 [attr2 ...]' % sys.argv[0])
-        sys.exit(os.EX_USAGE)
-
-    base_func = None
-    filename = args[0]
-    schema_filename = args[1]
-    if args[2].find('-base=') == 0:
-        base_func = args[2][6:]
-        attrs = args[3:]
-    else:
-        attrs = args[2:]
+    filename = args.input
+    schema_filename = args.schema
+    base_func = args.base
+    attrs = args.attributes.split(',')
 
-    results = parse_file(filename, schema_filename)
+    results = parse_file(args.input, args.schema)
     process_results(results, attrs, base_func)
 
 
 if __name__ == '__main__':
-    main(sys.argv[1:])
+    parser = argparse.ArgumentParser()
+
+    # The required arguments.
+    req = parser.add_argument_group(title='required arguments')
+    req.add_argument('-a', '--attributes', required=True,
+                     help='Comma separated list of benchmark attributes.')
+    req.add_argument('-i', '--input', required=True,
+                     help='Input JSON benchmark result file.')
+    req.add_argument('-s', '--schema', required=True,
+                     help='Schema file to validate the result file.')
+
+    # Optional arguments.
+    parser.add_argument('-b', '--base',
+                        help='IFUNC variant to set as baseline.')
+
+    args = parser.parse_args()
+    main(args)
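Two details of the new code are worth noting. First, the hand-rolled usage
message and os.EX_USAGE exit removed above become unnecessary: argparse
rejects bad invocations on its own, printing a usage line plus an error and
exiting with status 2. Second, options starting with a dash are listed under
"optional arguments" in argparse's help even when required=True is set, which
is why the script files -a, -i and -s under a dedicated 'required arguments'
group. A minimal runnable sketch of both points, condensed from the diff
above (the hard-coded argument list is illustrative):

  import argparse

  parser = argparse.ArgumentParser(prog='compare_strings.py')

  # Group required options so --help does not present them as optional.
  req = parser.add_argument_group(title='required arguments')
  req.add_argument('-a', '--attributes', required=True,
                   help='Comma separated list of benchmark attributes.')
  parser.add_argument('-b', '--base',
                      help='IFUNC variant to set as baseline.')

  # Missing -a: argparse prints the usage line and an error by itself,
  # then exits with status 2, with no manual length check needed.
  args = parser.parse_args(['-b', 'some_ifunc'])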