Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
Make module/namespace parameterized to choose between mx.nd or mx.np
Browse files Browse the repository at this point in the history
  • Loading branch information
sandeep-krishnamurthy committed Jun 12, 2019
1 parent 0be6d7e commit 5691b78
Show file tree
Hide file tree
Showing 9 changed files with 355 additions and 26 deletions.
5 changes: 3 additions & 2 deletions benchmark/opperf/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,11 @@ With this utility, for each MXNet operator you can get the following details:
**Timing**
1. Forward execution time
2. Backward execution time
3. Time spent for memory management

**Memory**
1. Total memory allocated
1. Average and Max memory allocated

NOTE: This is the `pool memory`. It does not reflect the exact memory requested by the operator.

# Motivation

Expand Down
6 changes: 3 additions & 3 deletions benchmark/opperf/nd_operations/gemm_operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
from mxnet import nd
from benchmark.opperf.utils.benchmark_utils import run_performance_test
from benchmark.opperf.utils.common_utils import merge_map_list

from benchmark.opperf.rules.default_params import MX_OP_MODULE
"""Performance benchmark tests for MXNet NDArray GEMM Operators.
1. dot
Expand Down Expand Up @@ -57,7 +57,7 @@ def run_gemm_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10, runs
"""
# Benchmark tests for dot and batch_dot operators
dot_benchmark_res = run_performance_test(
[nd.dot], run_backward=True,
[getattr(MX_OP_MODULE, "dot")], run_backward=True,
dtype=dtype, ctx=ctx,
inputs=[{"lhs": (1024, 1024),
"rhs": (1024, 1024)},
Expand All @@ -71,7 +71,7 @@ def run_gemm_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10, runs
warmup=warmup, runs=runs)

batch_dot_benchmark_res = run_performance_test(
[nd.batch_dot], run_backward=True,
[getattr(MX_OP_MODULE, "batch_dot")], run_backward=True,
dtype=dtype, ctx=ctx,
inputs=[{"lhs": (32, 1024, 1024),
"rhs": (32, 1024, 1024)},
Expand Down
8 changes: 5 additions & 3 deletions benchmark/opperf/nd_operations/nn_activation_operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
from mxnet import nd
from benchmark.opperf.utils.benchmark_utils import run_performance_test
from benchmark.opperf.utils.common_utils import merge_map_list
from benchmark.opperf.rules.default_params import MX_OP_MODULE

"""Performance benchmark tests for MXNet NDArray Activation Operators.
Expand Down Expand Up @@ -56,7 +57,7 @@ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10
"""
# Relu and its variation
relu_benchmark_res = run_performance_test([nd.LeakyReLU],
relu_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "LeakyReLU")],
run_backward=True,
dtype=dtype,
ctx=ctx,
Expand All @@ -78,7 +79,7 @@ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10

# Sigmoid => Covered as part of Unary ops
# Hard_Sigmoid
hard_sigmoid_benchmark_res = run_performance_test([nd.hard_sigmoid],
hard_sigmoid_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "hard_sigmoid")],
run_backward=True,
dtype=dtype,
ctx=ctx,
Expand All @@ -90,7 +91,8 @@ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10
runs=runs)

# Softmax, LogSoftmax
softmax_benchmark_res = run_performance_test([nd.softmax, nd.log_softmax],
softmax_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "softmax"),
getattr(MX_OP_MODULE, "log_softmax")],
run_backward=True,
dtype=dtype,
ctx=ctx,
Expand Down
7 changes: 4 additions & 3 deletions benchmark/opperf/nd_operations/nn_basic_operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
from mxnet import nd
from benchmark.opperf.utils.benchmark_utils import run_performance_test
from benchmark.opperf.utils.common_utils import merge_map_list
from benchmark.opperf.rules.default_params import MX_OP_MODULE

"""Performance benchmark tests for MXNet NDArray basic NN Operators.
Expand All @@ -31,7 +32,7 @@

def run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10, runs=50):
    # FullyConnected operator benchmarks
fc_benchmark_res = run_performance_test([nd.FullyConnected],
fc_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "FullyConnected")],
run_backward=True,
dtype=dtype,
ctx=ctx,
Expand All @@ -49,7 +50,7 @@ def run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10,
runs=runs)

# Dropout benchmarks
dropout_benchmark_res = run_performance_test([nd.Dropout],
dropout_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "Dropout")],
run_backward=True,
dtype=dtype,
ctx=ctx,
Expand All @@ -62,7 +63,7 @@ def run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10,
warmup=warmup,
runs=runs)
# BatchNorm benchmarks
batchnorm_benchmark_res = run_performance_test([nd.BatchNorm],
batchnorm_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "BatchNorm")],
run_backward=True,
dtype=dtype,
ctx=ctx,
Expand Down
9 changes: 5 additions & 4 deletions benchmark/opperf/nd_operations/nn_conv_operators.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
from mxnet import nd
from benchmark.opperf.utils.benchmark_utils import run_performance_test
from benchmark.opperf.utils.common_utils import merge_map_list
from benchmark.opperf.rules.default_params import MX_OP_MODULE

"""Performance benchmark tests for MXNet NDArray Convolution and Pooling Operators.
Expand Down Expand Up @@ -61,7 +62,7 @@ def run_pooling_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10, r
for pool_type in pool_types:
for global_pool in global_pool_types:
for pool1d_data in [(32, 3, 256), (32, 3, 64)]:
pool1d_benchmark_res += run_performance_test([nd.Pooling],
pool1d_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, "Pooling")],
run_backward=True,
dtype=dtype,
ctx=ctx,
Expand All @@ -76,7 +77,7 @@ def run_pooling_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10, r
warmup=warmup,
runs=runs)
for pool2d_data in [(32, 3, 256, 256), (32, 3, 64, 64)]:
pool2d_benchmark_res += run_performance_test([nd.Pooling],
pool2d_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, "Pooling")],
run_backward=True,
dtype=dtype,
ctx=ctx,
Expand All @@ -99,7 +100,7 @@ def run_convolution_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=1
# Conv1D Benchmarks
conv1d_benchmark_res = []
for conv_data in [(32, 3, 256), (32, 3, 64)]:
conv1d_benchmark_res += run_performance_test([nd.Convolution],
conv1d_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, "Convolution")],
run_backward=True,
dtype=dtype,
ctx=ctx,
Expand All @@ -118,7 +119,7 @@ def run_convolution_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=1
# Conv2D Benchmarks
conv2d_benchmark_res = []
for conv_data in [(32, 3, 256, 256), (32, 3, 64, 64)]:
conv2d_benchmark_res += run_performance_test([nd.Convolution],
conv2d_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, "Convolution")],
run_backward=True,
dtype=dtype,
ctx=ctx,
Expand Down
6 changes: 3 additions & 3 deletions benchmark/opperf/opperf.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,15 +99,15 @@ def _parse_mxnet_context(ctx):
# CLI Parser

# 1. GET USER INPUTS
parser = argparse.ArgumentParser(
description='Run all the MXNet operators (NDArray) benchmarks')
parser = argparse.ArgumentParser(description='Run all the MXNet operators (NDArray) benchmarks')

parser.add_argument('--ctx', type=str, default='cpu',
help='Global context to run all benchmarks. By default, cpu on a '
'CPU machine, gpu(0) on a GPU machine. '
'Valid Inputs - cpu, gpu, gpu(0), gpu(1)...')
parser.add_argument('--dtype', type=str, default='float32', help='DType (Precision) to run benchmarks. By default, '
'float32. Valid Inputs - float32, float64.')
'float32. Valid Inputs - float32, float64, int32, '
'int64')
parser.add_argument('--output-format', type=str, default='json',
help='Benchmark result output format. By default, json. '
'Valid Inputs - json, md')
Expand Down
Loading

0 comments on commit 5691b78

Please sign in to comment.