
Implement remaining nn_activation ops in opperf #17475

Merged: 7 commits, Feb 12, 2020
91 changes: 28 additions & 63 deletions benchmark/opperf/nd_operations/nn_activation_operators.py
@@ -16,35 +16,45 @@
# under the License.

import mxnet as mx
-from benchmark.opperf.utils.benchmark_utils import run_performance_test
-from benchmark.opperf.utils.common_utils import merge_map_list
-from benchmark.opperf.rules.default_params import MX_OP_MODULE

+from benchmark.opperf.utils.op_registry_utils import get_all_nn_activation_operators
+from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks

"""Performance benchmark tests for MXNet NDArray Activation Operators.

-1. LeakyRelu
-    1.1 Elu
-    1.2 Selu
-    1.3 Leaky
-    1.4 PRelu
-    1.5 RRelu
-3. Hard_Sigmoid
-4. Softmax
-5. Log_Softmax
+1. LeakyReLU
+    1.1 elu
+    1.2 selu
+    1.3 leaky
+    1.4 gelu
+2. hard_sigmoid
+3. Softmax
+4. SoftmaxActivation
+5. softmax
+6. log_softmax
+7. softmin
+8. Activation
+    8.1 relu
+    8.2 sigmoid
+    8.3 softrelu
+    8.4 softsign
+    8.5 tanh

"""


def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype)for all the activation
operators (relu, sigmoid, softmax) in MXNet.
operators in MXNet.

Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
profiler: str, default 'native'
Module to use for tracking benchmark execution time
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
@@ -55,56 +65,11 @@ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler=
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.

"""
-    # Relu and its variation
-    relu_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "LeakyReLU")],
-                                              run_backward=True,
-                                              dtype=dtype,
-                                              ctx=ctx,
-                                              profiler=profiler,
-                                              inputs=[{"data": (1024, 1024), "act_type": "leaky", "slope": 0.1},
-                                                      {"data": (10000, 1), "act_type": "leaky", "slope": 0.1},
-                                                      {"data": (10000, 100), "act_type": "leaky", "slope": 0.1},
-                                                      {"data": (1024, 1024), "act_type": "elu", "slope": 0.1},
-                                                      {"data": (10000, 1), "act_type": "elu", "slope": 0.1},
-                                                      {"data": (10000, 100), "act_type": "elu", "slope": 0.1},
-                                                      {"data": (1024, 1024), "act_type": "selu"},
-                                                      {"data": (10000, 1), "act_type": "selu"},
-                                                      {"data": (10000, 100), "act_type": "selu"},
-                                                      {"data": (1024, 1024), "act_type": "prelu", "gamma": (1, 1024)},
-                                                      {"data": (10000, 1), "act_type": "prelu", "gamma": (1, 1)},
-                                                      {"data": (10000, 100), "act_type": "prelu", "gamma": (1, 100)}
-                                                      ],
-                                              warmup=warmup,
-                                              runs=runs)
-
-    # Sigmoid => Covered as part of Unary ops
-    # Hard_Sigmoid
-    hard_sigmoid_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "hard_sigmoid")],
-                                                      run_backward=True,
-                                                      dtype=dtype,
-                                                      ctx=ctx,
-                                                      profiler=profiler,
-                                                      inputs=[{"data": (1024, 1024), "alpha": 0.25, "beta": 0.5},
-                                                              {"data": (10000, 1), "alpha": 0.25, "beta": 0.5},
-                                                              {"data": (10000, 100), "alpha": 0.25, "beta": 0.5}
-                                                              ],
-                                                      warmup=warmup,
-                                                      runs=runs)
-
-    # Softmax, LogSoftmax
-    softmax_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "softmax"),
-                                                  getattr(MX_OP_MODULE, "log_softmax")],
-                                                 run_backward=True,
-                                                 dtype=dtype,
-                                                 ctx=ctx,
-                                                 profiler=profiler,
-                                                 inputs=[{"data": (1024, 1024), "axis": -1, "temperature": 0.5},
-                                                         {"data": (10000, 1), "axis": -1, "temperature": 0.5},
-                                                         {"data": (10000, 100), "axis": -1, "temperature": 0.5}
-                                                         ],
-                                                 warmup=warmup,
-                                                 runs=runs)
+    # Fetch all NN Activation Operators
+    mx_activation_ops = get_all_nn_activation_operators()

-    # Prepare combined results
-    mx_activation_op_results = merge_map_list(relu_benchmark_res + hard_sigmoid_benchmark_res + softmax_benchmark_res)
+    # Run benchmarks
+    mx_activation_op_results = run_op_benchmarks(mx_activation_ops, dtype, ctx, profiler, warmup, runs)
    return mx_activation_op_results

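For orientation, here is a minimal usage sketch of the refactored runner, assuming the repository root is on PYTHONPATH so the benchmark.opperf package is importable (the printing loop at the end is illustrative only):

    import mxnet as mx

    from benchmark.opperf.nd_operations.nn_activation_operators import \
        run_activation_operators_benchmarks

    # Benchmark every registered NN activation op on CPU with float32,
    # keeping the defaults of 25 warmup iterations and 100 measured runs.
    results = run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32')

    # Per the docstring: key -> operator name, value -> benchmark results.
    for op_name, res in results.items():
        print(op_name, res)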
9 changes: 8 additions & 1 deletion benchmark/opperf/rules/default_params.py
@@ -134,6 +134,10 @@
DEFAULT_LABEL = [(100,100)]
DEFAULT_DATA_SMCE = [(1024, 1024)]
DEFAULT_LABEL_SMCE = [(1024,)]
+# For NN operators
+DEFAULT_ACT_TYPE_LR = ['leaky', 'elu', 'selu', 'gelu']
+DEFAULT_ACT_TYPE_ACTIVATION = ['relu', 'sigmoid', 'softrelu', 'softsign', 'tanh']
+DEFAULT_LABEL_SOFTMAX = [(1024, 1024), (10000, 1), (10000, 100)]

# For linalg operators
DEFAULT_A = [(1024, 1024)]
@@ -218,7 +222,10 @@
"B": DEFAULT_B,
"C": DEFAULT_C,
"A_linalg_maketrian": DEFAULT_A_MT,
"axes": DEFAULT_AXES}
"axes": DEFAULT_AXES,
"act_type_leakyrelu": DEFAULT_ACT_TYPE_LR,
"label_softmax": DEFAULT_LABEL_SOFTMAX,
"act_type_activation": DEFAULT_ACT_TYPE_ACTIVATION}


# These are names of MXNet operator parameters that are of type NDArray.
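To make the role of these new defaults concrete, the sketch below shows the kind of name-based lookup opperf performs when preparing inputs: a parameter such as act_type is resolved against DEFAULTS_INPUTS, using a per-operator key for the "custom data" ops. The lookup_default helper is an assumed simplification for illustration, not the actual prepare_op_inputs logic:

    # Assumed simplification of the default-resolution step; the real logic
    # lives in prepare_op_inputs() in benchmark/opperf/utils/op_registry_utils.py.
    DEFAULTS_INPUTS = {
        "act_type_leakyrelu": ['leaky', 'elu', 'selu', 'gelu'],
        "act_type_activation": ['relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'],
    }

    def lookup_default(op_name, arg_name):
        # Custom-data ops such as LeakyReLU and Activation get op-specific
        # defaults via an "<arg>_<op>" key; otherwise fall back to the arg name.
        qualified = "%s_%s" % (arg_name, op_name.lower())
        return DEFAULTS_INPUTS.get(qualified, DEFAULTS_INPUTS.get(arg_name))

    print(lookup_default("LeakyReLU", "act_type"))   # ['leaky', 'elu', 'selu', 'gelu']
    print(lookup_default("Activation", "act_type"))  # ['relu', 'sigmoid', 'softrelu', 'softsign', 'tanh']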
25 changes: 23 additions & 2 deletions benchmark/opperf/utils/op_registry_utils.py
@@ -117,8 +117,9 @@ def prepare_op_inputs(op, arg_params):

# 3d tensor is needed by following ops
ops_3d = ['CTCLoss', 'ctc_loss']

-    custom_data = ['BilinearSampler', 'GridGenerator', 'sample_multinomial', 'linalg_maketrian']

+    # For ops with args that need to change shape/value for different ops
+    custom_data = ['Activation', 'LeakyReLU', 'Softmax', 'BilinearSampler', 'GridGenerator', 'sample_multinomial', 'linalg_maketrian']

# Prepare op to default input mapping
arg_values = {}
@@ -310,6 +311,26 @@ def get_all_reduction_operators():
return reduction_mx_operators


+def get_all_nn_activation_operators():
+    """Gets all NN Activation operators registered with MXNet.
+
+    Returns
+    -------
+    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
+    """
+    nn_activation_ops = ['Softmax', 'SoftmaxActivation', 'softmin', 'Activation', 'LeakyReLU', 'hard_sigmoid', 'softmax', 'log_softmax']
+
+    # Get all mxnet operators
+    mx_operators = _get_all_mxnet_operators()
+
+    # Filter for NN Activation operators
+    nn_activation_mx_operators = {}
+    for op_name, _ in mx_operators.items():
+        if op_name in nn_activation_ops:
+            nn_activation_mx_operators[op_name] = mx_operators[op_name]
+    return nn_activation_mx_operators


def get_all_optimizer_operators():
"""Gets all Optimizer operators registered with MXNet.

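Finally, a rough sketch of driving the new registry helper directly, mirroring what run_activation_operators_benchmarks() now does internally; the positional arguments to run_op_benchmarks follow the call shown in the first file of this diff:

    import mxnet as mx

    from benchmark.opperf.utils.op_registry_utils import get_all_nn_activation_operators
    from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks

    # Collect the eight NN activation ops named in the helper's filter list.
    ops = get_all_nn_activation_operators()
    print(sorted(ops.keys()))
    # Expected: Activation, LeakyReLU, Softmax, SoftmaxActivation,
    # hard_sigmoid, log_softmax, softmax, softmin

    # Run the benchmarks exactly as the refactored runner does.
    results = run_op_benchmarks(ops, 'float32', mx.cpu(), 'native', 25, 100)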