
Sort benchmark output per op name, add some validation to CLI (#15249)
larroy authored and sandeep-krishnamurthy committed Jun 15, 2019
1 parent 13cf5db commit 45844b2
Showing 2 changed files with 27 additions and 14 deletions.
36 changes: 24 additions & 12 deletions benchmark/opperf/opperf.py
100644 → 100755
@@ -1,3 +1,5 @@
#!/usr/bin/env python3
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
@@ -14,9 +16,15 @@
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -*- coding: utf-8 -*-

"""Commandline utility to run operator benchmarks"""

import argparse
import logging
import os
import sys

import mxnet as mx

@@ -94,10 +102,7 @@ def _parse_mxnet_context(ctx):
device_id = int(ctx[4:-1])
return mx.gpu(device_id)


if __name__ == '__main__':
# CLI Parser

def main():
# 1. GET USER INPUTS
parser = argparse.ArgumentParser(
description='Run all the MXNet operators (NDArray) benchmarks')
@@ -108,26 +113,33 @@ def _parse_mxnet_context(ctx):
'Valid Inputs - cpu, gpu, gpu(0), gpu(1)...')
parser.add_argument('--dtype', type=str, default='float32', help='DType (Precision) to run benchmarks. By default, '
'float32. Valid Inputs - float32, float64.')
parser.add_argument('--output-format', type=str, default='json',
parser.add_argument('-f', '--output-format', type=str, default='json',
choices=['json', 'md'],
help='Benchmark result output format. By default, json. '
'Valid Inputs - json, md')

parser.add_argument('--output-file', type=str, default='./mxnet_operator_benchmarks.json',
parser.add_argument('-o', '--output-file', type=str, default='./mxnet_operator_benchmarks.json',
help='Name and path for the '
'output file.')

user_options = parser.parse_args()
logging.info(f"Running MXNet operator benchmarks with the following options: {user_options}")
args = parser.parse_args()
logging.info(f"Running MXNet operator benchmarks with the following options: {args}")
assert not os.path.isfile(args.output_file), f"Output file {args.output_file} already exists."

# 2. RUN BENCHMARKS
ctx = _parse_mxnet_context(user_options.ctx)
dtype = user_options.dtype
final_benchmark_results = run_all_mxnet_operator_benchmarks(ctx=ctx, dtype=user_options.dtype)
ctx = _parse_mxnet_context(args.ctx)
dtype = args.dtype
final_benchmark_results = run_all_mxnet_operator_benchmarks(ctx=ctx, dtype=args.dtype)

# 3. PREPARE OUTPUTS
save_to_file(final_benchmark_results, user_options.output_file, user_options.output_format)
save_to_file(final_benchmark_results, args.output_file, args.output_format)

# 4. Generate list of MXNet operators not covered in benchmarks
ops_not_covered = get_operators_with_no_benchmark(final_benchmark_results.keys())
for idx, op in enumerate(ops_not_covered):
print(f"{idx}. {op}")
return 0

if __name__ == '__main__':
sys.exit(main())
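
For context, the pattern this file moves to — a `main()` that returns an exit status, short argparse flags (`-f`, `-o`), and an up-front check that the output file does not already exist — can be sketched in isolation. The snippet below is illustrative only and not part of the commit; the parser options mirror the ones above, and the example invocation in the comment uses hypothetical paths.

```python
#!/usr/bin/env python3
"""Minimal sketch of the CLI pattern introduced above (illustrative, not part of the commit)."""
import argparse
import os
import sys


def main():
    parser = argparse.ArgumentParser(
        description='Run all the MXNet operators (NDArray) benchmarks')
    # Short flags (-f, -o) mirror the ones added in the diff above.
    parser.add_argument('-f', '--output-format', type=str, default='json',
                        choices=['json', 'md'])
    parser.add_argument('-o', '--output-file', type=str,
                        default='./mxnet_operator_benchmarks.json')
    args = parser.parse_args()

    # Fail fast instead of silently overwriting previous benchmark results.
    assert not os.path.isfile(args.output_file), f"Output file {args.output_file} already exists."

    # ... run benchmarks and save results here ...
    return 0


if __name__ == '__main__':
    # Returning the status from main() gives the shell a proper exit code,
    # e.g. for a hypothetical run: ./opperf.py -f md -o ./op_results.md
    sys.exit(main())
```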

5 changes: 3 additions & 2 deletions benchmark/opperf/utils/common_utils.py
@@ -17,6 +17,7 @@

import os
import json
from operator import itemgetter

from collections import ChainMap

@@ -62,7 +63,7 @@ def save_to_file(inp_dict, out_filepath, out_format='json'):
if out_format == 'json':
# Save as JSON
with open(out_filepath, "w") as result_file:
json.dump(inp_dict, result_file, indent=4)
json.dump(inp_dict, result_file, indent=4, sort_keys=True)
elif out_format == 'md':
# Save as md
with open(out_filepath, "w") as result_file:
@@ -112,7 +113,7 @@ def _prepare_markdown(results):
" | Inputs |",
"| :---: | :---: | :---: | :---:| :--- |"]

for op, op_bench_results in results.items():
for op, op_bench_results in sorted(results.items(), key=itemgetter(0)):
for op_bench_result in op_bench_results:
results_markdown.append(_prepare_op_benchmark_result(op, op_bench_result))
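
The effect of the two sorting changes — `sort_keys=True` for the JSON output and `sorted(results.items(), key=itemgetter(0))` for the markdown table — is deterministic, name-ordered output regardless of the order in which the benchmarks ran. A small illustration follows; the operator names and timings are made-up placeholders, not real benchmark results.

```python
import json
from operator import itemgetter

# Placeholder benchmark results keyed by operator name (insertion order is unsorted).
results = {
    "zeros_like": [{"avg_time_ms": 0.12}],
    "add": [{"avg_time_ms": 0.05}],
    "dot": [{"avg_time_ms": 1.30}],
}

# JSON output: sort_keys=True orders the top-level operator names alphabetically.
print(json.dumps(results, indent=4, sort_keys=True))

# Markdown rows: iterating over sorted(..., key=itemgetter(0)) yields the same order.
for op, op_bench_results in sorted(results.items(), key=itemgetter(0)):
    for op_bench_result in op_bench_results:
        print(f"| {op} | {op_bench_result['avg_time_ms']} |")
# Rows come out as add, dot, zeros_like -- independent of how the dict was built.
```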
