Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
[OpPerf] Add Neural network loss ops (#17482)
Browse files Browse the repository at this point in the history
* add loss ops and fix minor typo in array_rearrange

* fix issues with ops, still no backward for softmax_cross_entropy

* fix names

* add profiler param to function description
  • Loading branch information
ChaiBapchya committed Feb 3, 2020
1 parent 7caffa6 commit 88cc54d
Show file tree
Hide file tree
Showing 7 changed files with 113 additions and 7 deletions.
4 changes: 2 additions & 2 deletions benchmark/opperf/nd_operations/array_rearrange.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@

def run_rearrange_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype) for all the
rearrange operators in MXNet.
rearrange operators in MXNet.
Parameters
----------
Expand All @@ -49,7 +49,7 @@ def run_rearrange_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
"""
# Fetch all optimizer operators
# Fetch all array rearrange operators
mx_rearrange_ops = get_all_rearrange_operators()

# Run benchmarks
Expand Down
58 changes: 58 additions & 0 deletions benchmark/opperf/nd_operations/nn_loss_operators.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import mxnet as mx
from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks
from benchmark.opperf.utils.op_registry_utils import get_all_loss_operators

"""Performance benchmark tests for MXNet Neural Network Loss Operators
1. smooth_l1
2. CTCLoss
3. MakeLoss
4. softmax_cross_entropy
"""


def run_loss_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', warmup=25, runs=100):
    """Run benchmarks for every Neural Network loss operator registered
    with MXNet, using the given context and precision (dtype).

    Parameters
    ----------
    ctx: mx.ctx
        Context to run benchmarks
    dtype: str, default 'float32'
        Precision to use for benchmarks
    profiler: str, default 'native'
        Type of Profiler to use (native/python)
    warmup: int, default 25
        Number of times to run for warmup
    runs: int, default 100
        Number of runs to capture benchmark results

    Returns
    -------
    Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
    """
    # Collect the registered loss operators, then benchmark each one.
    loss_operators = get_all_loss_operators()
    return run_op_benchmarks(loss_operators, dtype, ctx, profiler, warmup, runs)
5 changes: 5 additions & 0 deletions benchmark/opperf/opperf.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,7 @@
from benchmark.opperf.nd_operations.nn_basic_operators import run_nn_basic_operators_benchmarks
from benchmark.opperf.nd_operations.nn_optimizer_operators import run_optimizer_operators_benchmarks
from benchmark.opperf.nd_operations.array_rearrange import run_rearrange_operators_benchmarks
from benchmark.opperf.nd_operations.nn_loss_operators import run_loss_operators_benchmarks

from benchmark.opperf.utils.common_utils import merge_map_list, save_to_file
from benchmark.opperf.utils.op_registry_utils import get_operators_with_no_benchmark, \
Expand Down Expand Up @@ -99,9 +100,13 @@ def run_all_mxnet_operator_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='n

# Run all Optimizer operations benchmarks with default input values
mxnet_operator_benchmark_results.append(run_optimizer_operators_benchmarks(ctx=ctx, dtype=dtype, profiler=profiler))

# Run all Transpose Convolution operations benchmarks with default input values
mxnet_operator_benchmark_results.append(run_transpose_convolution_operators_benchmarks(ctx=ctx, dtype=dtype, profiler=profiler))

# Run all NN loss operations benchmarks with default input values
mxnet_operator_benchmark_results.append(run_loss_operators_benchmarks(ctx=ctx, dtype=dtype, profiler=profiler))

# ****************************** PREPARE FINAL RESULTS ********************************
final_benchmark_result_map = merge_map_list(mxnet_operator_benchmark_results)
return final_benchmark_result_map
Expand Down
14 changes: 12 additions & 2 deletions benchmark/opperf/rules/default_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,6 +103,12 @@
DEFAULT_DIM_1 = [0]
DEFAULT_DIM_2 = [1]

# For loss operators
DEFAULT_DATA_3d = [(1024, 100, 100)]
DEFAULT_LABEL = [(100,100)]
DEFAULT_DATA_SMCE = [(1024, 1024)]
DEFAULT_LABEL_SMCE = [(1024,)]

# Default Inputs. MXNet Op Param Name to Default Input mapping
DEFAULTS_INPUTS = {"data": DEFAULT_DATA,
"sample": DEFAULT_SAMPLE,
Expand Down Expand Up @@ -152,7 +158,11 @@
"dim1": DEFAULT_DIM_1,
"dim2": DEFAULT_DIM_2,
"block_size": DEFAULT_BLOCK_SIZE,
"args": DEFAULT_ARGS}
"args": DEFAULT_ARGS,
"data_smce": DEFAULT_DATA_SMCE,
"data_3d": DEFAULT_DATA_3d,
"label_smce": DEFAULT_LABEL_SMCE,
"label": DEFAULT_LABEL}


# These are names of MXNet operator parameters that is of type NDArray.
Expand All @@ -164,4 +174,4 @@
"mu", "sigma", "lam", "alpha", "beta", "gamma", "k", "p",
"low", "high", "weight", "bias", "moving_mean", "moving_var",
"weight", "weight32", "grad", "mean", "var", "mom", "n", "d",
"v", "z", "g", "delta", "args"]
"v", "z", "g", "delta", "args", "label"]
7 changes: 7 additions & 0 deletions benchmark/opperf/utils/benchmark_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,8 @@
from .profiler_utils import cpp_profile,python_profile


no_backward = ['softmax_cross_entropy']

def _prepare_op_inputs(inputs, run_backward, dtype, ctx):
mx.random.seed(41)
kwargs_list = []
Expand Down Expand Up @@ -148,6 +150,11 @@ def run_op_benchmarks(ops, dtype, ctx, profiler, warmup, runs):
for op, op_params in ops.items():
# Prepare inputs for the operator
inputs = prepare_op_inputs(op, op_params)

# setting backward false for ops with known issue
if op in no_backward:
op_params["has_backward"] = False

# Run benchmarks
cur_op_res = run_performance_test(op_params["nd_op_handle"],
run_backward=op_params["has_backward"],
Expand Down
28 changes: 27 additions & 1 deletion benchmark/opperf/utils/op_registry_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,9 @@ def prepare_op_inputs(op, arg_params):
# 4d tensor is needed only by following two ops
ops_4d = ['depth_to_space','space_to_depth']

# 3d tensor is needed by following ops
ops_3d = ['CTCLoss', 'ctc_loss']

# Prepare op to default input mapping
arg_values = {}
for arg_name, arg_type in zip(arg_params["params"]["arg_names"],
Expand All @@ -127,6 +130,10 @@ def prepare_op_inputs(op, arg_params):
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_nd"]
elif "NDArray" in arg_type and op in ops_4d and arg_name + "_4d" in DEFAULTS_INPUTS:
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_4d"]
elif "NDArray" in arg_type and op in ops_3d and arg_name + "_3d" in DEFAULTS_INPUTS:
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_3d"]
elif "NDArray" in arg_type and op == 'softmax_cross_entropy':
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name + "_smce"]
elif arg_name in DEFAULTS_INPUTS:
arg_values[arg_name] = DEFAULTS_INPUTS[arg_name]
elif "float" in arg_type and arg_name + "_float" in DEFAULTS_INPUTS:
Expand Down Expand Up @@ -313,7 +320,26 @@ def get_all_rearrange_operators():
if op_name in rearrange_ops and op_name not in unique_ops:
rearrange_mx_operators[op_name] = mx_operators[op_name]
return rearrange_mx_operators


def get_all_loss_operators():
    """Gets all Neural Network loss operators registered with MXNet.

    Returns
    -------
    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
    """
    # Names of the operators treated as NN loss ops for benchmarking.
    loss_ops = {'smooth_l1', 'CTCLoss', 'ctc_loss', 'MakeLoss', 'softmax_cross_entropy'}

    # Get all mxnet operators
    mx_operators = _get_all_mxnet_operators()

    # Filter down to the loss operators, excluding ops handled separately
    # via the module-level unique_ops collection.
    return {op_name: op_params
            for op_name, op_params in mx_operators.items()
            if op_name in loss_ops and op_name not in unique_ops}


def get_operators_with_no_benchmark(operators_with_benchmark):
"""Gets all MXNet operators with no benchmark.
Expand Down
4 changes: 2 additions & 2 deletions benchmark/opperf/utils/profiler_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,8 +48,8 @@ def _get_operator_profile(operator_name, operator_profile_results):
# alias map : dictionary of the form {"alias" : "registered_name"}
# allows to retrieve alias operator profile from the profiler results
# TODO handling - "identity" : "_copy"
alias_map = {"broadcast_plus" : "broadcast_add", "broadcast_minus" : "broadcast_sub", "flatten" : "Flatten", "max_axis" : "max",
"swapaxes" : "SwapAxis", "flip" : "reverse", "reshape" : "Reshape", "crop" : "slice", "sum_axis" : "sum", "min_axis" : "min"}
alias_map = {"broadcast_plus": "broadcast_add", "broadcast_minus": "broadcast_sub", "flatten": "Flatten", "max_axis": "max",
"swapaxes": "SwapAxis", "flip": "reverse", "reshape": "Reshape", "crop": "slice", "sum_axis": "sum", "min_axis": "min", "CTCLoss": "ctc_loss"}

op_name = None

Expand Down

0 comments on commit 88cc54d

Please sign in to comment.