This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Adding docs, changed large_tensor to int64_tensor for clarity
connorgoggins committed Feb 12, 2020
1 parent 345d8c6 commit c064f47
Showing 17 changed files with 120 additions and 102 deletions.
8 changes: 4 additions & 4 deletions benchmark/opperf/nd_operations/array_rearrange.py
@@ -29,8 +29,8 @@
"""


def run_rearrange_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', large_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (large_tensor) for all the
def run_rearrange_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the
rearrange operators in MXNet.
Parameters
@@ -41,7 +41,7 @@ def run_rearrange_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
large_tensor: str, default 'off'
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
@@ -57,5 +57,5 @@ def run_rearrange_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='
mx_rearrange_ops = get_all_rearrange_operators()

# Run benchmarks
mx_rearrange_op_results = run_op_benchmarks(mx_rearrange_ops, dtype, ctx, profiler, large_tensor, warmup, runs)
mx_rearrange_op_results = run_op_benchmarks(mx_rearrange_ops, dtype, ctx, profiler, int64_tensor, warmup, runs)
return mx_rearrange_op_results
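
For orientation, a minimal usage sketch (not part of this diff) of the renamed flag; the import path follows the file above, and the reduced warmup/runs counts are illustrative only, since int64-sized inputs take a long time to benchmark.

import mxnet as mx
from benchmark.opperf.nd_operations.array_rearrange import run_rearrange_operators_benchmarks

# Default-sized inputs on CPU with the native profiler.
std_results = run_rearrange_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native')

# Same suite with int64 tensor inputs (dimensions >= 2**32); warmup/runs lowered
# here purely for illustration.
int64_results = run_rearrange_operators_benchmarks(int64_tensor='on', warmup=1, runs=5)

# The docstrings in this commit describe the result as a dict keyed by operator name.
for op_name, res in int64_results.items():
    print(op_name, res)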
26 changes: 15 additions & 11 deletions benchmark/opperf/nd_operations/binary_operators.py
@@ -38,8 +38,8 @@
get_all_elemen_wise_binary_operators, get_all_misc_binary_operators


def run_mx_binary_misc_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', large_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype) for all the miscellaneous
def run_mx_binary_misc_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the miscellaneous
binary operators in MXNet.
Parameters
@@ -48,6 +48,10 @@ def run_mx_binary_misc_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profi
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
@@ -61,12 +65,12 @@ def run_mx_binary_misc_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profi
# Fetch all Miscellaneous Binary Operators
mx_binary_misc_ops = get_all_misc_binary_operators()
# Run benchmarks
mx_binary_op_results = run_op_benchmarks(mx_binary_misc_ops, dtype, ctx, profiler, large_tensor, warmup, runs)
mx_binary_op_results = run_op_benchmarks(mx_binary_misc_ops, dtype, ctx, profiler, int64_tensor, warmup, runs)
return mx_binary_op_results


def run_mx_binary_broadcast_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', large_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (large_tensor) for all the binary
def run_mx_binary_broadcast_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the binary
broadcast operators in MXNet.
Parameters
@@ -77,7 +81,7 @@ def run_mx_binary_broadcast_operators_benchmarks(ctx=mx.cpu(), dtype='float32',
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
large_tensor: str, default 'off'
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
@@ -92,12 +96,12 @@ def run_mx_binary_broadcast_operators_benchmarks(ctx=mx.cpu(), dtype='float32',
# Fetch all Binary Broadcast Operators
mx_binary_broadcast_ops = get_all_broadcast_binary_operators()
# Run benchmarks
mx_binary_op_results = run_op_benchmarks(mx_binary_broadcast_ops, dtype, ctx, profiler, large_tensor, warmup, runs)
mx_binary_op_results = run_op_benchmarks(mx_binary_broadcast_ops, dtype, ctx, profiler, int64_tensor, warmup, runs)
return mx_binary_op_results


def run_mx_binary_element_wise_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', large_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (large_tensor) for all the binary
def run_mx_binary_element_wise_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the binary
element_wise operators in MXNet.
Parameters
@@ -108,7 +112,7 @@ def run_mx_binary_element_wise_operators_benchmarks(ctx=mx.cpu(), dtype='float32
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
large_tensor: str, default 'off'
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
@@ -123,5 +127,5 @@ def run_mx_binary_element_wise_operators_benchmarks(ctx=mx.cpu(), dtype='float32
# Fetch all Binary Element_wise Operators
mx_binary_element_wise_ops = get_all_elemen_wise_binary_operators()
# Run benchmarks
mx_binary_op_results = run_op_benchmarks(mx_binary_element_wise_ops, dtype, ctx, profiler, large_tensor, warmup, runs)
mx_binary_op_results = run_op_benchmarks(mx_binary_element_wise_ops, dtype, ctx, profiler, int64_tensor, warmup, runs)
return mx_binary_op_results
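
Every suite above forwards the new flag positionally through the shared run_op_benchmarks helper. The sketch below shows the signature those call sites imply; the actual helper lives in the opperf utils package and may differ in detail.

# Signature implied by the updated call sites (a sketch, not the real helper):
# int64_tensor sits between profiler and warmup, so callers must pass it in
# exactly that position.
def run_op_benchmarks(ops, dtype, ctx, profiler, int64_tensor, warmup, runs):
    """Run every operator in `ops` and return {operator_name: benchmark_results}."""
    ...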
8 changes: 4 additions & 4 deletions benchmark/opperf/nd_operations/gemm_operators.py
@@ -35,8 +35,8 @@
"""


def run_gemm_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', large_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (large_tensor) for all the GEMM
def run_gemm_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the GEMM
operators (dot, batch_dot, khatri_rao) in MXNet.
Parameters
@@ -47,7 +47,7 @@ def run_gemm_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='nativ
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
large_tensor: str, default 'off'
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
@@ -60,7 +60,7 @@ def run_gemm_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='nativ
"""
# Benchmark tests for dot and batch_dot operators
if large_tensor == "on":
if int64_tensor == "on":
dot_benchmark_res = run_performance_test(
[getattr(MX_OP_MODULE, "dot")], run_backward=True,
dtype=dtype, ctx=ctx,
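
The GEMM suite builds its dot and batch_dot cases through run_performance_test rather than run_op_benchmarks, and the int64 input shapes are truncated in the hunk above. The call below is therefore an illustrative sketch with ordinary shapes; the helper's import path and the "lhs"/"rhs" input names are assumptions, not taken from the diff.

import mxnet as mx
from benchmark.opperf.utils.benchmark_utils import run_performance_test  # assumed import path
from benchmark.opperf.rules.default_params import MX_OP_MODULE

# Benchmark dot on modest matrices; the int64_tensor branch in the real file
# substitutes inputs whose dimensions reach 2**32.
dot_res = run_performance_test(
    [getattr(MX_OP_MODULE, "dot")], run_backward=True,
    dtype='float32', ctx=mx.cpu(),
    inputs=[{"lhs": (1024, 1024), "rhs": (1024, 1024)}],
    warmup=10, runs=25)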
6 changes: 4 additions & 2 deletions benchmark/opperf/nd_operations/indexing_routines.py
@@ -35,8 +35,8 @@
"""


def run_indexing_routines_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype) for all the indexing routines
def run_indexing_routines_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and data size (int64_tensor) for all the indexing routines
in MXNet.
Parameters
@@ -47,6 +47,8 @@ def run_indexing_routines_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='na
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
8 changes: 5 additions & 3 deletions benchmark/opperf/nd_operations/linalg_operators.py
@@ -34,8 +34,8 @@
from benchmark.opperf.utils.common_utils import merge_map_list
from benchmark.opperf.rules.default_params import MX_OP_MODULE

def run_linalg_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype) for all the linear algebra
def run_linalg_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and data size (int64_tensor) for all the linear algebra
operators in MXNet.
Parameters
@@ -46,6 +46,8 @@ def run_linalg_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='nat
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
@@ -74,5 +76,5 @@ def run_linalg_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='nat
# Fetch all Linear Algebra Operators
mx_linalg_ops = get_all_linalg_operators()
# Run benchmarks
mx_linalg_op_results = run_op_benchmarks(mx_linalg_ops, dtype, ctx, profiler, warmup, runs)
mx_linalg_op_results = run_op_benchmarks(mx_linalg_ops, dtype, ctx, profiler, int64_tensor, warmup, runs)
return merge_map_list(linalg_potrf_benchmark + [mx_linalg_op_results])
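
The final hunk also threads int64_tensor through the helper call, matching the other suites, before folding the result into the potrf benchmarks with merge_map_list (imported above from benchmark.opperf.utils.common_utils). Below is a minimal sketch of what that merge amounts to, assuming it simply combines a list of per-operator result dictionaries; the real implementation may differ.

def merge_map_list(map_list):
    # Fold a list of {operator_name: results} dicts into one dictionary.
    merged = {}
    for result_map in map_list:
        merged.update(result_map)
    return merged

# Mirrors the return statement above:
# merge_map_list(linalg_potrf_benchmark + [mx_linalg_op_results])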
8 changes: 4 additions & 4 deletions benchmark/opperf/nd_operations/nn_activation_operators.py
@@ -43,8 +43,8 @@
"""


def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', large_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (large_tensor) for all the activation
def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the activation
operators (relu, sigmoid, softmax) in MXNet.
Parameters
@@ -55,7 +55,7 @@ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler=
Precision to use for benchmarks
profiler: str, default 'native'
Module to use for tracking benchmark execution time
large_tensor: str, default 'off'
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
@@ -72,6 +72,6 @@ def run_activation_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler=
mx_activation_ops = get_all_nn_activation_operators()

# Run benchmarks
mx_activation_op_results = run_op_benchmarks(mx_activation_ops, dtype, ctx, profiler, large_tensor, warmup, runs)
mx_activation_op_results = run_op_benchmarks(mx_activation_ops, dtype, ctx, profiler, int64_tensor, warmup, runs)
return mx_activation_op_results

12 changes: 7 additions & 5 deletions benchmark/opperf/nd_operations/nn_basic_operators.py
@@ -29,8 +29,8 @@
"""


def run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', large_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and data size (large_tensor) for all the basic neural network
def run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and data size (int64_tensor) for all the basic neural network
operators in MXNet.
Parameters
@@ -39,8 +39,10 @@ def run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='n
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
large_tensor: str, default 'off'
Tensor size to use for tests
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
@@ -51,7 +53,7 @@ def run_nn_basic_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='n
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
"""
if large_tensor == 'on':
if int64_tensor == 'on':
# FullyConnnected operator benchmarks
fc_benchmark_res = run_performance_test([getattr(MX_OP_MODULE, "FullyConnected")],
run_backward=True,
36 changes: 21 additions & 15 deletions benchmark/opperf/nd_operations/nn_conv_operators.py
@@ -52,8 +52,8 @@
"""


def run_pooling_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', large_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (large_tensor) for all the pooling
def run_pooling_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the pooling
operators in MXNet.
Parameters
@@ -62,8 +62,10 @@ def run_pooling_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='na
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
large_tensor: str, default 'off'
Tensor size to use for tests
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
@@ -82,7 +84,7 @@ def run_pooling_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='na
pool2d_benchmark_res = []
for pool_type in pool_types:
for global_pool in global_pool_types:
if large_tensor == 'on':
if int64_tensor == 'on':
for pool1d_data in [(1, 1, 2**32), (2**31, 1, 3)]:
pool1d_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, "Pooling")],
run_backward=True,
@@ -179,8 +181,8 @@ def run_pooling_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='na
return mx_pooling_op_results


def run_convolution_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', large_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (large_tensor) for all the convolution
def run_convolution_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the convolution
operators in MXNet.
Parameters
@@ -189,8 +191,10 @@ def run_convolution_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
large_tensor: str, default 'off'
Tensor size to use for tests
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
@@ -203,7 +207,7 @@ def run_convolution_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler
"""
conv1d_benchmark_res = []
conv2d_benchmark_res = []
if large_tensor == 'on':
if int64_tensor == 'on':
# Conv1D Benchmarks
for conv_data in [(2**30, 1, 4), (2**31, 1, 3)]:
conv1d_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, "Convolution")],
@@ -286,8 +290,8 @@ def run_convolution_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler
return mx_conv_op_results


def run_transpose_convolution_operators_benchmarks(ctx=mx.cpu(), profiler='native', large_tensor='off', dtype='float32', warmup=10, runs=50):
"""Runs benchmarks with the given context, precision (dtype), and input data size (large_tensor) for all the transpose convolution
def run_transpose_convolution_operators_benchmarks(ctx=mx.cpu(), profiler='native', int64_tensor='off', dtype='float32', warmup=10, runs=50):
"""Runs benchmarks with the given context, precision (dtype), and input data size (int64_tensor) for all the transpose convolution
operators in MXNet.
Parameters
@@ -296,8 +300,10 @@ def run_transpose_convolution_operators_benchmarks(ctx=mx.cpu(), profiler='nativ
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
large_tensor: str, default 'off'
Tensor size to use for tests
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 10
Number of times to run for warmup
runs: int, default 50
@@ -310,7 +316,7 @@ def run_transpose_convolution_operators_benchmarks(ctx=mx.cpu(), profiler='nativ
"""
# Conv1DTranspose Benchmarks
conv1d_transpose_benchmark_res = []
if large_tensor == 'on':
if int64_tensor == 'on':
for conv_data in [(2**30, 1, 4), (2**31, 1, 3)]:
conv1d_transpose_benchmark_res += run_performance_test([getattr(MX_OP_MODULE, "Deconvolution")],
run_backward=True,
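
When the flag is on, the pooling and convolution suites switch to explicit int64-sized inputs, e.g. the 1-D pooling shapes (1, 1, 2**32) and (2**31, 1, 3) shown above. A quick check that both shapes cross the 2**32-element threshold referenced in the docstrings:

# Both int64-mode pooling inputs contain at least 2**32 elements.
for shape in [(1, 1, 2**32), (2**31, 1, 3)]:
    n = 1
    for dim in shape:
        n *= dim
    print(shape, n, n >= 2**32)   # prints True for both shapes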
8 changes: 5 additions & 3 deletions benchmark/opperf/nd_operations/nn_loss_operators.py
@@ -28,8 +28,8 @@
"""


def run_loss_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype) for all the
def run_loss_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='native', int64_tensor='off', warmup=25, runs=100):
"""Runs benchmarks with the given context, precision (dtype), and data size (int64_tensor) for all the
Neural Network loss operators in MXNet.
Parameters
@@ -40,6 +40,8 @@ def run_loss_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='nativ
Precision to use for benchmarks
profiler: str, default 'native'
Type of Profiler to use (native/python)
int64_tensor: str, default 'off'
Input tensor size to use for tests (if on, dimensions >= 2**32)
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
@@ -54,5 +54,5 @@ def run_loss_operators_benchmarks(ctx=mx.cpu(), dtype='float32', profiler='nativ
mx_loss_ops = get_all_loss_operators()

# Run benchmarks
mx_loss_op_results = run_op_benchmarks(mx_loss_ops, dtype, ctx, profiler, warmup, runs)
mx_loss_op_results = run_op_benchmarks(mx_loss_ops, dtype, ctx, profiler, int64_tensor, warmup, runs)
return mx_loss_op_results
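
Taken together, the updated suites can be driven uniformly. Below is a hypothetical end-to-end sketch (not part of the commit) that runs two of them with int64 inputs and merges their result dictionaries; the lowered warmup/runs values are illustrative only.

import mxnet as mx
from benchmark.opperf.utils.common_utils import merge_map_list
from benchmark.opperf.nd_operations.nn_loss_operators import run_loss_operators_benchmarks
from benchmark.opperf.nd_operations.nn_activation_operators import run_activation_operators_benchmarks

loss_res = run_loss_operators_benchmarks(ctx=mx.cpu(), int64_tensor='on', warmup=1, runs=5)
act_res = run_activation_operators_benchmarks(ctx=mx.cpu(), int64_tensor='on', warmup=1, runs=5)

# Combine the per-suite dictionaries into a single report.
all_res = merge_map_list([loss_res, act_res])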