From 45844b22682aca7a32f35bd357935780df48660d Mon Sep 17 00:00:00 2001
From: Pedro Larroy
Date: Fri, 14 Jun 2019 21:10:01 -0700
Subject: [PATCH] Sort benchmark output per op name, add some validation to
 CLI (#15249)

---
 benchmark/opperf/opperf.py             | 36 +++++++++++++++++---------
 benchmark/opperf/utils/common_utils.py |  5 ++--
 2 files changed, 27 insertions(+), 14 deletions(-)
 mode change 100644 => 100755 benchmark/opperf/opperf.py

diff --git a/benchmark/opperf/opperf.py b/benchmark/opperf/opperf.py
old mode 100644
new mode 100755
index b7f8c667d951..34c6cf96b723
--- a/benchmark/opperf/opperf.py
+++ b/benchmark/opperf/opperf.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+#
 # Licensed to the Apache Software Foundation (ASF) under one
 # or more contributor license agreements.  See the NOTICE file
 # distributed with this work for additional information
@@ -14,9 +16,15 @@
 # KIND, either express or implied.  See the License for the
 # specific language governing permissions and limitations
 # under the License.
+#
+# -*- coding: utf-8 -*-
+
+"""Commandline utility to run operator benchmarks"""
 
 import argparse
 import logging
+import os
+import sys
 
 import mxnet as mx
 
@@ -94,10 +102,7 @@ def _parse_mxnet_context(ctx):
         device_id = int(ctx[4:-1])
         return mx.gpu(device_id)
 
-
-if __name__ == '__main__':
-    # CLI Parser
-
+def main():
     # 1. GET USER INPUTS
     parser = argparse.ArgumentParser(
         description='Run all the MXNet operators (NDArray) benchmarks')
@@ -108,26 +113,33 @@ def _parse_mxnet_context(ctx):
                         'Valid Inputs - cpu, gpu, gpu(0), gpu(1)...')
     parser.add_argument('--dtype', type=str, default='float32',
                         help='DType (Precision) to run benchmarks. By default, '
                              'float32. Valid Inputs - float32, float64.')
-    parser.add_argument('--output-format', type=str, default='json',
+    parser.add_argument('-f', '--output-format', type=str, default='json',
+                        choices=['json', 'md'],
                         help='Benchmark result output format. By default, json. '
                              'Valid Inputs - json, md')
-    parser.add_argument('--output-file', type=str, default='./mxnet_operator_benchmarks.json',
+    parser.add_argument('-o', '--output-file', type=str, default='./mxnet_operator_benchmarks.json',
                         help='Name and path for the '
                              'output file.')
 
-    user_options = parser.parse_args()
-    logging.info(f"Running MXNet operator benchmarks with the following options: {user_options}")
+    args = parser.parse_args()
+    logging.info(f"Running MXNet operator benchmarks with the following options: {args}")
+    assert not os.path.isfile(args.output_file), f"Output file {args.output_file} already exists."
 
     # 2. RUN BENCHMARKS
-    ctx = _parse_mxnet_context(user_options.ctx)
-    dtype = user_options.dtype
-    final_benchmark_results = run_all_mxnet_operator_benchmarks(ctx=ctx, dtype=user_options.dtype)
+    ctx = _parse_mxnet_context(args.ctx)
+    dtype = args.dtype
+    final_benchmark_results = run_all_mxnet_operator_benchmarks(ctx=ctx, dtype=args.dtype)
 
     # 3. PREPARE OUTPUTS
-    save_to_file(final_benchmark_results, user_options.output_file, user_options.output_format)
+    save_to_file(final_benchmark_results, args.output_file, args.output_format)
 
     # 4. Generate list of MXNet operators not covered in benchmarks
     ops_not_covered = get_operators_with_no_benchmark(final_benchmark_results.keys())
     for idx, op in enumerate(ops_not_covered):
         print(f"{idx}. {op}")
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
+
diff --git a/benchmark/opperf/utils/common_utils.py b/benchmark/opperf/utils/common_utils.py
index ffa40da49566..9fe2e19b13b3 100644
--- a/benchmark/opperf/utils/common_utils.py
+++ b/benchmark/opperf/utils/common_utils.py
@@ -17,6 +17,7 @@
 
 import os
 import json
+from operator import itemgetter
 
 from collections import ChainMap
 
@@ -62,7 +63,7 @@ def save_to_file(inp_dict, out_filepath, out_format='json'):
     if out_format == 'json':
         # Save as JSON
         with open(out_filepath, "w") as result_file:
-            json.dump(inp_dict, result_file, indent=4)
+            json.dump(inp_dict, result_file, indent=4, sort_keys=True)
    elif out_format == 'md':
         # Save as md
         with open(out_filepath, "w") as result_file:
@@ -112,7 +113,7 @@ def _prepare_markdown(results):
                         " | Inputs |",
                         "| :---: | :---: | :---: | :---:| :--- |"]
 
-    for op, op_bench_results in results.items():
+    for op, op_bench_results in sorted(results.items(), key=itemgetter(0)):
         for op_bench_result in op_bench_results:
             results_markdown.append(_prepare_op_benchmark_result(op, op_bench_result))