From af0bb73e04ba23f9466f6955026744ca9c9e47cb Mon Sep 17 00:00:00 2001 From: Connor Goggins Date: Wed, 29 Jan 2020 13:52:55 -0800 Subject: [PATCH 1/7] Refactored individual run_performance_test calls into single generalized function, added Softmax, SoftmaxActivation, softmin, and Activation ops --- .../nd_operations/nn_activation_operators.py | 91 ++++++------------- benchmark/opperf/rules/default_params.py | 9 +- benchmark/opperf/utils/op_registry_utils.py | 20 ++++ 3 files changed, 55 insertions(+), 65 deletions(-) diff --git a/benchmark/opperf/nd_operations/nn_activation_operators.py b/benchmark/opperf/nd_operations/nn_activation_operators.py index 83813fe13c2e..0a5e2df8c439 100644 --- a/benchmark/opperf/nd_operations/nn_activation_operators.py +++ b/benchmark/opperf/nd_operations/nn_activation_operators.py @@ -16,28 +16,36 @@ # under the License. import mxnet as mx -from benchmark.opperf.utils.benchmark_utils import run_performance_test -from benchmark.opperf.utils.common_utils import merge_map_list -from benchmark.opperf.rules.default_params import MX_OP_MODULE + +from benchmark.opperf.utils.op_registry_utils import get_all_nn_activation_operators +from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks """Performance benchmark tests for MXNet NDArray Activation Operators. -1. LeakyRelu - 1.1 Elu - 1.2 Selu - 1.3 Leaky - 1.4 PRelu - 1.5 RRelu -3. Hard_Sigmoid -4. Softmax -5. Log_Softmax +1. LeakyReLU + 1.1 elu + 1.2 selu + 1.3 leaky + 1.4 gelu +2. hard_sigmoid +3. Softmax +4. SoftmaxActivation +5. softmax +6. log_softmax +7. softmin +8. Activation + 8.1 relu + 8.2 sigmoid + 8.3 softrelu + 8.4 softsign + 8.5 tanh """ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', warmup=25, runs=100): """Runs benchmarks with the given context and precision (dtype)for all the activation - operators (relu, sigmoid, softmax) in MXNet. + operators in MXNet. Parameters ---------- @@ -55,56 +63,11 @@ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler= Dictionary of results. Key -> Name of the operator, Value -> Benchmark results. 
""" - # Relu and its variation - relu_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "LeakyReLU")], - run_backward=True, - dtype=dtype, - ctx=ctx, - profiler=profiler, - inputs=[{"data": (1024, 1024), "act_type": "leaky", "slope": 0.1}, - {"data": (10000, 1), "act_type": "leaky", "slope": 0.1}, - {"data": (10000, 100), "act_type": "leaky", "slope": 0.1}, - {"data": (1024, 1024), "act_type": "elu", "slope": 0.1}, - {"data": (10000, 1), "act_type": "elu", "slope": 0.1}, - {"data": (10000, 100), "act_type": "elu", "slope": 0.1}, - {"data": (1024, 1024), "act_type": "selu"}, - {"data": (10000, 1), "act_type": "selu"}, - {"data": (10000, 100), "act_type": "selu"}, - {"data": (1024, 1024), "act_type": "prelu", "gamma": (1, 1024)}, - {"data": (10000, 1), "act_type": "prelu", "gamma": (1, 1)}, - {"data": (10000, 100), "act_type": "prelu", "gamma": (1, 100)} - ], - warmup=warmup, - runs=runs) - - # Sigmoid => Covered as part of Unary ops - # Hard_Sigmoid - hard_sigmoid_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "hard_sigmoid")], - run_backward=True, - dtype=dtype, - ctx=ctx, - profiler=profiler, - inputs=[{"data": (1024, 1024), "alpha": 0.25, "beta": 0.5}, - {"data": (10000, 1), "alpha": 0.25, "beta": 0.5}, - {"data": (10000, 100), "alpha": 0.25, "beta": 0.5} - ], - warmup=warmup, - runs=runs) - # Softmax, LogSoftmax - softmax_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "softmax"), - getattr(MX_OP_MODULE, "log_softmax")], - run_backward=True, - dtype=dtype, - ctx=ctx, - profiler=profiler, - inputs=[{"data": (1024, 1024), "axis": -1, "temperature": 0.5}, - {"data": (10000, 1), "axis": -1, "temperature": 0.5}, - {"data": (10000, 100), "axis": -1, "temperature": 0.5} - ], - warmup=warmup, - runs=runs) + # Fetch all NN Activation Operators + mx_nn_activation_broadcast_ops = get_all_nn_activation_operators() - # Prepare combined results - mx_activation_op_results = merge_map_list(relu_benchmark_res + hard_sigmoid_benchmark_res + softmax_benchmark_res) - return mx_activation_op_results + # Run benchmarks + mx_nn_activation_op_results = run_op_benchmarks(mx_nn_activation_broadcast_ops, dtype, ctx, profiler, warmup, runs) + return mx_nn_activation_op_results + \ No newline at end of file diff --git a/benchmark/opperf/rules/default_params.py b/benchmark/opperf/rules/default_params.py index 596dceb16480..022a1baf3cec 100644 --- a/benchmark/opperf/rules/default_params.py +++ b/benchmark/opperf/rules/default_params.py @@ -134,6 +134,10 @@ DEFAULT_LABEL = [(100,100)] DEFAULT_DATA_SMCE = [(1024, 1024)] DEFAULT_LABEL_SMCE = [(1024,)] +# For NN operators +DEFAULT_ACT_TYPE_LR = ['leaky', 'elu', 'selu', 'gelu'] +DEFAULT_ACT_TYPE_ACTIVATION = ['relu', 'sigmoid', 'softrelu', 'softsign', 'tanh'] +DEFAULT_LABEL_SOFTMAX = [(1024, 1024), (10000, 1), (10000, 100)] # For linalg operators DEFAULT_A = [(1024, 1024)] @@ -218,7 +222,10 @@ "B": DEFAULT_B, "C": DEFAULT_C, "A_linalg_maketrian": DEFAULT_A_MT, - "axes": DEFAULT_AXES} + "axes": DEFAULT_AXES, + "act_type_leakyrelu": DEFAULT_ACT_TYPE_LR, + "label_softmax": DEFAULT_LABEL_SOFTMAX, + "act_type_activation": DEFAULT_ACT_TYPE_ACTIVATION} # These are names of MXNet operator parameters that is of type NDArray. 
diff --git a/benchmark/opperf/utils/op_registry_utils.py b/benchmark/opperf/utils/op_registry_utils.py index eb2adba05d18..fce152315039 100644 --- a/benchmark/opperf/utils/op_registry_utils.py +++ b/benchmark/opperf/utils/op_registry_utils.py @@ -310,6 +310,26 @@ def get_all_reduction_operators(): return reduction_mx_operators +def get_all_nn_activation_operators(): + """Gets all NN Activation operators registered with MXNet. + + Returns + ------- + {"operator_name": {"has_backward", "nd_op_handle", "params"}} + """ + nn_activation_ops = ['Softmax', 'SoftmaxActivation', 'softmin', 'Activation', 'LeakyReLU', 'hard_sigmoid', 'softmax', 'log_softmax'] + + # Get all mxnet operators + mx_operators = _get_all_mxnet_operators() + + # Filter for NN Activation operators + nn_activation_mx_operators = {} + for op_name, _ in mx_operators.items(): + if op_name in nn_activation_ops and op_name not in unique_ops: + nn_activation_mx_operators[op_name] = mx_operators[op_name] + return nn_activation_mx_operators + + def get_all_optimizer_operators(): """Gets all Optimizer operators registered with MXNet. From 789fe91a2bfb2208832c2fb32c412ec1148a2fbf Mon Sep 17 00:00:00 2001 From: Connor Goggins Date: Wed, 29 Jan 2020 13:52:55 -0800 Subject: [PATCH 2/7] Refactored individual run_performance_test calls into single generalized function, added Softmax, SoftmaxActivation, softmin, and Activation ops --- benchmark/opperf/utils/op_registry_utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/benchmark/opperf/utils/op_registry_utils.py b/benchmark/opperf/utils/op_registry_utils.py index fce152315039..fab8fcd53eb0 100644 --- a/benchmark/opperf/utils/op_registry_utils.py +++ b/benchmark/opperf/utils/op_registry_utils.py @@ -120,6 +120,9 @@ def prepare_op_inputs(op, arg_params): custom_data = ['BilinearSampler', 'GridGenerator', 'sample_multinomial', 'linalg_maketrian'] + # For ops with args that need to change shape/value for different ops + custom_data = ['Activation', 'LeakyReLU', 'Softmax'] + # Prepare op to default input mapping arg_values = {} for arg_name, arg_type in zip(arg_params["params"]["arg_names"], From a49a688f2d68eebc02c295a6267d02ab66dec8a7 Mon Sep 17 00:00:00 2001 From: Connor Goggins Date: Wed, 29 Jan 2020 14:49:02 -0800 Subject: [PATCH 3/7] Fixed variable names --- benchmark/opperf/nd_operations/nn_activation_operators.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/benchmark/opperf/nd_operations/nn_activation_operators.py b/benchmark/opperf/nd_operations/nn_activation_operators.py index 0a5e2df8c439..dfcfd807545f 100644 --- a/benchmark/opperf/nd_operations/nn_activation_operators.py +++ b/benchmark/opperf/nd_operations/nn_activation_operators.py @@ -65,9 +65,8 @@ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler= """ # Fetch all NN Activation Operators - mx_nn_activation_broadcast_ops = get_all_nn_activation_operators() + mx_activation_ops = get_all_nn_activation_operators() # Run benchmarks - mx_nn_activation_op_results = run_op_benchmarks(mx_nn_activation_broadcast_ops, dtype, ctx, profiler, warmup, runs) - return mx_nn_activation_op_results - \ No newline at end of file + mx_activation_op_results = run_op_benchmarks(mx_activation_ops, dtype, ctx, profiler, warmup, runs) + return mx_activation_op_results \ No newline at end of file From 662d2be38f731c10427fa9a37b9bb6509676ea82 Mon Sep 17 00:00:00 2001 From: Connor Goggins Date: Wed, 29 Jan 2020 14:50:18 -0800 Subject: [PATCH 4/7] Added newline at end of file for consistency --- 
 benchmark/opperf/nd_operations/nn_activation_operators.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/benchmark/opperf/nd_operations/nn_activation_operators.py b/benchmark/opperf/nd_operations/nn_activation_operators.py
index dfcfd807545f..6ffa066d89ec 100644
--- a/benchmark/opperf/nd_operations/nn_activation_operators.py
+++ b/benchmark/opperf/nd_operations/nn_activation_operators.py
@@ -69,4 +69,5 @@ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler=
 
     # Run benchmarks
     mx_activation_op_results = run_op_benchmarks(mx_activation_ops, dtype, ctx, profiler, warmup, runs)
-    return mx_activation_op_results
\ No newline at end of file
+    return mx_activation_op_results
+    
\ No newline at end of file

From 45146a12e2bda58587cf168a58fc47926d721b7c Mon Sep 17 00:00:00 2001
From: Connor Goggins
Date: Thu, 30 Jan 2020 17:38:31 -0800
Subject: [PATCH 5/7] Addressed NN Basic PR comment

---
 benchmark/opperf/nd_operations/nn_activation_operators.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/benchmark/opperf/nd_operations/nn_activation_operators.py b/benchmark/opperf/nd_operations/nn_activation_operators.py
index 6ffa066d89ec..b77777cc04dd 100644
--- a/benchmark/opperf/nd_operations/nn_activation_operators.py
+++ b/benchmark/opperf/nd_operations/nn_activation_operators.py
@@ -53,6 +53,8 @@ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler=
         Context to run benchmarks
     dtype: str, default 'float32'
         Precision to use for benchmarks
+    profiler: str, default 'native'
+        Module to use for tracking benchmark execution time
     warmup: int, default 25
         Number of times to run for warmup
     runs: int, default 100

From c90afe0129df0bf92e16bb60391a03f0e484b9f9 Mon Sep 17 00:00:00 2001
From: Connor Goggins
Date: Tue, 4 Feb 2020 15:06:50 -0800
Subject: [PATCH 6/7] Dropped unnecessary custom_args

---
 benchmark/opperf/utils/op_registry_utils.py | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/benchmark/opperf/utils/op_registry_utils.py b/benchmark/opperf/utils/op_registry_utils.py
index fab8fcd53eb0..d5630afb0375 100644
--- a/benchmark/opperf/utils/op_registry_utils.py
+++ b/benchmark/opperf/utils/op_registry_utils.py
@@ -117,11 +117,9 @@ def prepare_op_inputs(op, arg_params):
 
     # 3d tensor is needed by following ops
     ops_3d = ['CTCLoss', 'ctc_loss']
-
-    custom_data = ['BilinearSampler', 'GridGenerator', 'sample_multinomial', 'linalg_maketrian']
 
     # For ops with args that need to change shape/value for different ops
-    custom_data = ['Activation', 'LeakyReLU', 'Softmax']
+    custom_data = ['Activation', 'LeakyReLU', 'Softmax', 'BilinearSampler', 'GridGenerator', 'sample_multinomial', 'linalg_maketrian']
 
     # Prepare op to default input mapping
     arg_values = {}

From c6fc7e3e457ba0f3049ee872d110ddedd0783d7e Mon Sep 17 00:00:00 2001
From: Connor Goggins
Date: Thu, 6 Feb 2020 17:01:05 -0800
Subject: [PATCH 7/7] Removed unique_ops

---
 benchmark/opperf/utils/op_registry_utils.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/benchmark/opperf/utils/op_registry_utils.py b/benchmark/opperf/utils/op_registry_utils.py
index d5630afb0375..b9f1e45bbd37 100644
--- a/benchmark/opperf/utils/op_registry_utils.py
+++ b/benchmark/opperf/utils/op_registry_utils.py
@@ -326,7 +326,7 @@ def get_all_nn_activation_operators():
     # Filter for NN Activation operators
     nn_activation_mx_operators = {}
     for op_name, _ in mx_operators.items():
-        if op_name in nn_activation_ops and op_name not in unique_ops:
+        if op_name in nn_activation_ops:
nn_activation_mx_operators[op_name] = mx_operators[op_name] return nn_activation_mx_operators
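
Usage sketch (not part of the patches above): once this series is applied, the refactored entry point can be driven roughly as follows. This is a minimal sketch that assumes it is run from the MXNet repository root so the benchmark.opperf package is importable, and that each per-operator result value is whatever run_op_benchmarks emits; only the function signature, module path, and operator names are taken from the patches themselves.

import mxnet as mx

# Assumes execution from the MXNet repository root so that the
# benchmark.opperf package modified by this series is on the path.
from benchmark.opperf.nd_operations.nn_activation_operators import run_activation_operators_benchmarks

# Benchmark all registered NN activation operators (LeakyReLU, hard_sigmoid,
# Softmax, SoftmaxActivation, softmax, softmin, log_softmax, Activation)
# on CPU with float32 inputs and the native profiler.
results = run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32',
                                              profiler='native', warmup=25, runs=100)

# Keys are operator names; each value holds that operator's benchmark results.
for op_name, op_results in results.items():
    print(op_name, op_results)

The practical effect of the refactor is that adding another activation operator no longer requires a hand-written run_performance_test call: it only needs to be listed in nn_activation_ops (and, where its arguments differ, given defaults such as act_type_leakyrelu, act_type_activation, or label_softmax in default_params.py), after which the generic opperf machinery (get_all_nn_activation_operators, prepare_op_inputs, run_op_benchmarks) should pick it up.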