From 425c8d033957b30405c8ddd1676afa834de03084 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Thu, 30 Jan 2020 01:24:10 -0800 Subject: [PATCH 1/4] add loss ops and fix minor typo in array_rearrange --- .../opperf/nd_operations/array_rearrange.py | 4 +- .../opperf/nd_operations/nn_loss_operators.py | 56 +++++++++++++++++++ benchmark/opperf/opperf.py | 5 ++ benchmark/opperf/utils/op_registry_utils.py | 21 ++++++- benchmark/opperf/utils/profiler_utils.py | 4 +- 5 files changed, 85 insertions(+), 5 deletions(-) create mode 100644 benchmark/opperf/nd_operations/nn_loss_operators.py diff --git a/benchmark/opperf/nd_operations/array_rearrange.py b/benchmark/opperf/nd_operations/array_rearrange.py index 9b1cbfe3632d..e7eb5d68748e 100644 --- a/benchmark/opperf/nd_operations/array_rearrange.py +++ b/benchmark/opperf/nd_operations/array_rearrange.py @@ -31,7 +31,7 @@ def run_rearrange_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', warmup=25, runs=100): """Runs benchmarks with the given context and precision (dtype) for all the - rearrange operators in MXNet. + rearrange operators in MXNet. Parameters ---------- @@ -49,7 +49,7 @@ def run_rearrange_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler=' Dictionary of results. Key -> Name of the operator, Value -> Benchmark results. """ - # Fetch all optimizer operators + # Fetch all array rearrange operators mx_rearrange_ops = get_all_rearrange_operators() # Run benchmarks diff --git a/benchmark/opperf/nd_operations/nn_loss_operators.py b/benchmark/opperf/nd_operations/nn_loss_operators.py new file mode 100644 index 000000000000..8c61afe58742 --- /dev/null +++ b/benchmark/opperf/nd_operations/nn_loss_operators.py @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import mxnet as mx +from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks +from benchmark.opperf.utils.op_registry_utils import get_all_loss_operators + +"""Performance benchmark tests for MXNet Neural Network Loss Operators + +1. smooth_l1 +2. CTCLoss +3. MakeLoss +4. softmax_cross_entropy +""" + + +def run_loss_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', warmup=25, runs=100): + """Runs benchmarks with the given context and precision (dtype) for all the + Neural Network loss operators in MXNet. + + Parameters + ---------- + ctx: mx.ctx + Context to run benchmarks + dtype: str, default 'float32' + Precision to use for benchmarks + warmup: int, default 25 + Number of times to run for warmup + runs: int, default 100 + Number of runs to capture benchmark results + + Returns + ------- + Dictionary of results. Key -> Name of the operator, Value -> Benchmark results. 
+ + """ + # Fetch all loss operators + mx_rearrange_ops = get_all_loss_operators() + + # Run benchmarks + mx_rearrange_op_results = run_op_benchmarks(mx_rearrange_ops, dtype, ctx, profiler, warmup, runs) + return mx_rearrange_op_results diff --git a/benchmark/opperf/opperf.py b/benchmark/opperf/opperf.py index fd8f6672f3a7..b1cfb7ca0d25 100755 --- a/benchmark/opperf/opperf.py +++ b/benchmark/opperf/opperf.py @@ -41,6 +41,7 @@ from benchmark.opperf.nd_operations.nn_basic_operators import run_nn_basic_operators_benchmarks from benchmark.opperf.nd_operations.nn_optimizer_operators import run_optimizer_operators_benchmarks from benchmark.opperf.nd_operations.array_rearrange import run_rearrange_operators_benchmarks +from benchmark.opperf.nd_operations.nn_loss_operators import run_loss_operators_benchmarks from benchmark.opperf.utils.common_utils import merge_map_list, save_to_file from benchmark.opperf.utils.op_registry_utils import get_operators_with_no_benchmark, \ @@ -99,9 +100,13 @@ def run_all_mxnet_operator_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='n # Run all Optimizer operations benchmarks with default input values mxnet_operator_benchmark_results.append(run_optimizer_operators_benchmarks(ctx=ctx, dtype=dtype, profiler=profiler)) + # Run all Transpose Convolution operations benchmarks with default input values mxnet_operator_benchmark_results.append(run_transpose_convolution_operators_benchmarks(ctx=ctx, dtype=dtype, profiler=profiler)) + # Run all NN loss operations benchmarks with default input values + mxnet_operator_benchmark_results.append(run_loss_operators_benchmarks(ctx=ctx, dtype=dtype, profiler=profiler)) + # ****************************** PREPARE FINAL RESULTS ******************************** final_benchmark_result_map = merge_map_list(mxnet_operator_benchmark_results) return final_benchmark_result_map diff --git a/benchmark/opperf/utils/op_registry_utils.py b/benchmark/opperf/utils/op_registry_utils.py index 2c0940e3520b..330b9c222094 
100644 --- a/benchmark/opperf/utils/op_registry_utils.py +++ b/benchmark/opperf/utils/op_registry_utils.py @@ -313,7 +313,26 @@ def get_all_rearrange_operators(): if op_name in rearrange_ops and op_name not in unique_ops: rearrange_mx_operators[op_name] = mx_operators[op_name] return rearrange_mx_operators - + +def get_all_loss_operators(): + """Gets all Neural Network loss operators registered with MXNet. + + Returns + ------- + {"operator_name": {"has_backward", "nd_op_handle", "params"}} + """ + loss_ops = ['smooth_l1', 'CTCLoss', 'ctc_loss', 'MakeLoss', 'softmax_cross_entropy'] + + # Get all mxnet operators + mx_operators = _get_all_mxnet_operators() + + # Filter for NN Loss operators + loss_mx_operators = {} + for op_name, op_params in mx_operators.items(): + if op_name in loss_ops and op_name not in unique_ops: + loss_mx_operators[op_name] = mx_operators[op_name] + return loss_mx_operators + def get_operators_with_no_benchmark(operators_with_benchmark): """Gets all MXNet operators with not benchmark. 
diff --git a/benchmark/opperf/utils/profiler_utils.py b/benchmark/opperf/utils/profiler_utils.py index 21e2606ab94e..45322c1066cf 100644 --- a/benchmark/opperf/utils/profiler_utils.py +++ b/benchmark/opperf/utils/profiler_utils.py @@ -48,8 +48,8 @@ def _get_operator_profile(operator_name, operator_profile_results): # alias map : dictionary of the form {"alias" : "registered_name"} # allows to retrieve alias operator profile from the profiler results # TODO handling - "identity" : "_copy" - alias_map = {"broadcast_plus" : "broadcast_add", "broadcast_minus" : "broadcast_sub", "flatten" : "Flatten", "max_axis" : "max", - "swapaxes" : "SwapAxis", "flip" : "reverse", "reshape" : "Reshape", "crop" : "slice", "sum_axis" : "sum", "min_axis" : "min"} + alias_map = {"broadcast_plus": "broadcast_add", "broadcast_minus": "broadcast_sub", "flatten": "Flatten", "max_axis": "max", + "swapaxes": "SwapAxis", "flip": "reverse", "reshape": "Reshape", "crop": "slice", "sum_axis": "sum", "min_axis": "min", "CTCLoss": "ctc_loss"} op_name = None From 357425f7cf66bbde0bea434a4be531704893cd87 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Thu, 30 Jan 2020 10:19:52 +0000 Subject: [PATCH 2/4] fix issues with ops, still no backward for softmax_cross_entropy --- benchmark/opperf/rules/default_params.py | 14 ++++++++++++-- benchmark/opperf/utils/benchmark_utils.py | 7 +++++++ benchmark/opperf/utils/op_registry_utils.py | 7 +++++++ 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/benchmark/opperf/rules/default_params.py b/benchmark/opperf/rules/default_params.py index 69b28c6535cc..615e2e31b63d 100644 --- a/benchmark/opperf/rules/default_params.py +++ b/benchmark/opperf/rules/default_params.py @@ -103,6 +103,12 @@ DEFAULT_DIM_1 = [0] DEFAULT_DIM_2 = [1] +# For loss operators +DEFAULT_DATA_3d = [(1024, 100, 100)] +DEFAULT_LABEL = [(100,100)] +DEFAULT_DATA_SMCE = [(1024, 1024)] +DEFAULT_LABEL_SMCE = [(1024,)] + # Default Inputs. 
MXNet Op Param Name to Default Input mapping DEFAULTS_INPUTS = {"data": DEFAULT_DATA, "sample": DEFAULT_SAMPLE, @@ -152,7 +158,11 @@ "dim1": DEFAULT_DIM_1, "dim2": DEFAULT_DIM_2, "block_size": DEFAULT_BLOCK_SIZE, - "args": DEFAULT_ARGS} + "args": DEFAULT_ARGS, + "data_smce": DEFAULT_DATA_SMCE, + "data_3d": DEFAULT_DATA_3d, + "label_smce": DEFAULT_LABEL_SMCE, + "label": DEFAULT_LABEL} # These are names of MXNet operator parameters that is of type NDArray. @@ -164,4 +174,4 @@ "mu", "sigma", "lam", "alpha", "beta", "gamma", "k", "p", "low", "high", "weight", "bias", "moving_mean", "moving_var", "weight", "weight32", "grad", "mean", "var", "mom", "n", "d", - "v", "z", "g", "delta", "args"] + "v", "z", "g", "delta", "args", "label"] diff --git a/benchmark/opperf/utils/benchmark_utils.py b/benchmark/opperf/utils/benchmark_utils.py index 5db6b0b5ea83..48e8e06b033d 100644 --- a/benchmark/opperf/utils/benchmark_utils.py +++ b/benchmark/opperf/utils/benchmark_utils.py @@ -27,6 +27,8 @@ from .profiler_utils import cpp_profile,python_profile +no_backward = ['softmax_cross_entropy'] + def _prepare_op_inputs(inputs, run_backward, dtype, ctx): mx.random.seed(41) kwargs_list = [] @@ -148,6 +150,11 @@ def run_op_benchmarks(ops, dtype, ctx, profiler, warmup, runs): for op, op_params in ops.items(): # Prepare inputs for the operator inputs = prepare_op_inputs(op, op_params) + + # setting backward false for ops with known issue + if op in no_backward: + op_params["has_backward"] = False + # Run benchmarks cur_op_res = run_performance_test(op_params["nd_op_handle"], run_backward=op_params["has_backward"], diff --git a/benchmark/opperf/utils/op_registry_utils.py b/benchmark/opperf/utils/op_registry_utils.py index 330b9c222094..d46cac07d398 100644 --- a/benchmark/opperf/utils/op_registry_utils.py +++ b/benchmark/opperf/utils/op_registry_utils.py @@ -119,6 +119,9 @@ def prepare_op_inputs(op, arg_params): # 4d tensor is needed only by following two ops ops_4d = 
['depth_to_space','space_to_depth'] + # 3d tensor is needed by following ops + ops_3d = ['CTCLoss', 'ctc_loss'] + # Prepare op to default input mapping arg_values = {} for arg_name, arg_type in zip(arg_params["params"]["arg_names"], @@ -127,6 +130,10 @@ def prepare_op_inputs(op, arg_params): arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_nd"] elif "NDArray" in arg_type and op in ops_4d and arg_name + "_4d" in DEFAULTS_INPUTS: arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_4d"] + elif "NDArray" in arg_type and op in ops_3d and arg_name + "_3d" in DEFAULTS_INPUTS: + arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_3d"] + elif "NDArray" in arg_type and op == 'softmax_cross_entropy': + arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_smce"] elif arg_name in DEFAULTS_INPUTS: arg_values[arg_name] = DEFAULTS_INPUTS[arg_name] elif "float" in arg_type and arg_name + "_float" in DEFAULTS_INPUTS: From 546efa7674f9e89ff34a1c6e0698c29338b63c90 Mon Sep 17 00:00:00 2001 From: ChaiBapchya Date: Fri, 31 Jan 2020 00:06:21 +0000 Subject: [PATCH 3/4] fix names --- benchmark/opperf/nd_operations/nn_loss_operators.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmark/opperf/nd_operations/nn_loss_operators.py b/benchmark/opperf/nd_operations/nn_loss_operators.py index 8c61afe58742..dae15bf962e5 100644 --- a/benchmark/opperf/nd_operations/nn_loss_operators.py +++ b/benchmark/opperf/nd_operations/nn_loss_operators.py @@ -49,8 +49,8 @@ def run_loss_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='nativ """ # Fetch all loss operators - mx_rearrange_ops = get_all_loss_operators() + mx_loss_ops = get_all_loss_operators() # Run benchmarks - mx_rearrange_op_results = run_op_benchmarks(mx_rearrange_ops, dtype, ctx, profiler, warmup, runs) - return mx_rearrange_op_results + mx_loss_op_results = run_op_benchmarks(mx_loss_ops, dtype, ctx, profiler, warmup, runs) + return mx_loss_op_results From 2ecf151d7fa039eabe98d4cb811736f3bce5a4e2 Mon 
Sep 17 00:00:00 2001 From: Chaitanya Prakash Bapat Date: Thu, 30 Jan 2020 17:03:41 -0800 Subject: [PATCH 4/4] add profiler param to function description --- benchmark/opperf/nd_operations/nn_loss_operators.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/benchmark/opperf/nd_operations/nn_loss_operators.py b/benchmark/opperf/nd_operations/nn_loss_operators.py index dae15bf962e5..9d894087343b 100644 --- a/benchmark/opperf/nd_operations/nn_loss_operators.py +++ b/benchmark/opperf/nd_operations/nn_loss_operators.py @@ -38,6 +38,8 @@ def run_loss_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='nativ Context to run benchmarks dtype: str, default 'float32' Precision to use for benchmarks + profiler: str, default 'native' + Type of Profiler to use (native/python) warmup: int, default 25 Number of times to run for warmup runs: int, default 100