
Commit cfc6910: Trigger CI

zixuanweeei committed Jul 29, 2019
2 parents 49ebe01 + a26af2b
Showing 44 changed files with 1,248 additions and 422 deletions.
5 changes: 2 additions & 3 deletions CODEOWNERS
@@ -42,8 +42,8 @@
/plugin/ @pllarroy

# CMake
CMakeLists.txt @szha @rahul003 @pllarroy
/cmake/ @szha @rahul003 @pllarroy
CMakeLists.txt @szha @pllarroy
/cmake/ @szha @pllarroy

# MXNet CI
dev_menu.py @pllarroy
@@ -71,4 +71,3 @@ prepare_mkl.sh @szha

# Github templates
/.github/ @szha

2 changes: 1 addition & 1 deletion benchmark/opperf/README.md
@@ -75,7 +75,7 @@ For example, if you want to run benchmarks for all NDArray Broadcast Binary Operators

```
#!/usr/bin/python
from benchmark.opperf.tensor_operations.binary_broadcast_operators import run_mx_binary_broadcast_operators_benchmarks
from benchmark.opperf.nd_operations.binary_broadcast_operators import run_mx_binary_broadcast_operators_benchmarks
# Run all Binary Broadcast operations benchmarks with default input values
print(run_mx_binary_broadcast_operators_benchmarks())
```
18 changes: 1 addition & 17 deletions benchmark/opperf/nd_operations/README.md
@@ -28,17 +28,14 @@
6. reshape
7. one_hot
8. linalg_potri
9. mp_sgd_update
10. multi_sgd_update
11. signum_update
12. Convolution_v1
13. repeat
14. Custom
15. softmax_cross_entropy
16. SwapAxis
17. norm
18. Softmax
19. rmspropalex_update
20. fill_element_0index
21. cast
22. UpSampling
@@ -52,37 +49,29 @@
30. Activation
31. LinearRegressionOutput
32. Pooling_v1
33. ftml_update
34. Crop
35. ElementWiseSum
36. diag
37. Reshape
38. Pad
39. linalg_gemm2
40. crop
41. rmsprop_update
43. RNN
45. SoftmaxOutput
46. linalg_extractdiag
47. sgd_mom_update
48. SequenceLast
50. flip
51. SequenceReverse
52. swapaxes
53. SVMOutput
54. linalg_trsm
55. where
56. SoftmaxActivation
57. signsgd_update
58. slice
59. linalg_gelqf
60. softmin
61. linalg_gemm
62. BilinearSampler
63. mp_sgd_mom_update
64. choose_element_0index
65. tile
66. space_to_depth
67. gather_nd
69. SequenceMask
70. reshape_like
@@ -110,14 +99,11 @@
94. broadcast_like
95. Embedding
96. linalg_makediag
97. transpose
98. linalg_syrk
99. squeeze
101. ROIPooling
102. ftrl_update
103. SliceChannel
104. slice_like
105. depth_to_space
106. linalg_maketrian
108. pad
109. LayerNorm
@@ -132,6 +118,4 @@
119. normal
120. take
121. MakeLoss
122. sgd_update
123. adam_update
124. concat
57 changes: 57 additions & 0 deletions benchmark/opperf/nd_operations/array_rearrange.py
@@ -0,0 +1,57 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import mxnet as mx
from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks
from benchmark.opperf.utils.op_registry_utils import get_all_rearrange_operators

"""Performance benchmark tests for MXNet NDArray Rearrange Operators.
1. transpose
2. swapaxes
3. flip
4. depth_to_space
5. space_to_depth
"""


def run_rearrange_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype) for all the
rearrange operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
"""
# Fetch all array rearrange operators
mx_rearrange_ops = get_all_rearrange_operators()

# Run benchmarks
mx_rearrange_op_results = run_op_benchmarks(mx_rearrange_ops, dtype, ctx, warmup, runs)
return mx_rearrange_op_results
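As a quick reference, here is a minimal sketch of invoking this new entry point on its own (assuming the `benchmark` package is importable, e.g. when running from the repository root):

```
import mxnet as mx

from benchmark.opperf.nd_operations.array_rearrange import run_rearrange_operators_benchmarks

# Benchmark the five rearrange operators on CPU with float32 inputs,
# using shorter warmup/run counts than the defaults for a quick check.
results = run_rearrange_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=5, runs=10)
print(results)
```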
4 changes: 2 additions & 2 deletions benchmark/opperf/nd_operations/binary_operators.py
@@ -39,7 +39,7 @@


def run_mx_binary_broadcast_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype)for all the binary
"""Runs benchmarks with the given context and precision (dtype) for all the binary
broadcast operators in MXNet.
Parameters
@@ -66,7 +66,7 @@ def run_mx_binary_broadcast_operators_benchmarks(ctx=mx.cpu(), dtype='float32',


def run_mx_binary_element_wise_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype)for all the binary
"""Runs benchmarks with the given context and precision (dtype) for all the binary
element_wise operators in MXNet.
Parameters
64 changes: 64 additions & 0 deletions benchmark/opperf/nd_operations/nn_optimizer_operators.py
@@ -0,0 +1,64 @@
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

import mxnet as mx
from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks
from benchmark.opperf.utils.op_registry_utils import get_all_optimizer_operators

"""Performance benchmark tests for MXNet Neural Network Optimizer Update Operators.
1. Stochastic Gradient Descent (SGD)
1.1 mp_sgd_update
1.2 sgd_mom_update
1.3 signsgd_update
1.4 mp_sgd_mom_update
1.5 sgd_update
2. signum_update
3. rmspropalex_update
4. ftml_update
5. rmsprop_update
6. ftrl_update
7. adam_update
"""


def run_optimizer_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
"""Runs benchmarks with the given context and precision (dtype) for all the neural network
optimizer update operators in MXNet.
Parameters
----------
ctx: mx.ctx
Context to run benchmarks
dtype: str, default 'float32'
Precision to use for benchmarks
warmup: int, default 25
Number of times to run for warmup
runs: int, default 100
Number of runs to capture benchmark results
Returns
-------
Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
"""
# Fetch all optimizer operators
mx_optimizer_ops = get_all_optimizer_operators()

# Run benchmarks
mx_optimizer_op_results = run_op_benchmarks(mx_optimizer_ops, dtype, ctx, warmup, runs)
return mx_optimizer_op_results
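The optimizer runner follows the same pattern; a minimal sketch of calling it directly (same import assumptions as above):

```
import mxnet as mx

from benchmark.opperf.nd_operations.nn_optimizer_operators import run_optimizer_operators_benchmarks

# Time every registered optimizer update operator; per the docstring, the
# result is a dictionary keyed by operator name.
optimizer_results = run_optimizer_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=10, runs=50)
for op_name, result in optimizer_results.items():
    print(op_name, result)
```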
7 changes: 7 additions & 0 deletions benchmark/opperf/opperf.py
@@ -39,6 +39,8 @@
from benchmark.opperf.nd_operations.nn_conv_operators import run_pooling_operators_benchmarks, \
run_convolution_operators_benchmarks, run_transpose_convolution_operators_benchmarks
from benchmark.opperf.nd_operations.nn_basic_operators import run_nn_basic_operators_benchmarks
from benchmark.opperf.nd_operations.nn_optimizer_operators import run_optimizer_operators_benchmarks
from benchmark.opperf.nd_operations.array_rearrange import run_rearrange_operators_benchmarks

from benchmark.opperf.utils.common_utils import merge_map_list, save_to_file
from benchmark.opperf.utils.op_registry_utils import get_operators_with_no_benchmark, \
@@ -78,6 +80,9 @@ def run_all_mxnet_operator_benchmarks(ctx=mx.cpu(), dtype='float32'):
# Run all Sorting and Searching operations benchmarks with default input values
mxnet_operator_benchmark_results.append(run_sorting_searching_operators_benchmarks(ctx=ctx, dtype=dtype))

# Run all Array Rearrange operations benchmarks with default input values
mxnet_operator_benchmark_results.append(run_rearrange_operators_benchmarks(ctx=ctx, dtype=dtype))

# ************************ MXNET NN OPERATOR BENCHMARKS ****************************

# Run all basic NN operations benchmarks with default input values
@@ -92,6 +97,8 @@ def run_all_mxnet_operator_benchmarks(ctx=mx.cpu(), dtype='float32'):
# Run all Convolution operations benchmarks with default input values
mxnet_operator_benchmark_results.append(run_convolution_operators_benchmarks(ctx=ctx, dtype=dtype))

# Run all Optimizer operations benchmarks with default input values
mxnet_operator_benchmark_results.append(run_optimizer_operators_benchmarks(ctx=ctx, dtype=dtype))

# Run all Transpose Convolution operations benchmarks with default input values
mxnet_operator_benchmark_results.append(run_transpose_convolution_operators_benchmarks(ctx=ctx, dtype=dtype))

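With both hooks in place, the master runner picks up the new suites automatically. A minimal sketch of driving it programmatically (only the Python entry point shown in this hunk is used; the CLI surface of opperf.py is outside this diff):

```
import mxnet as mx

from benchmark.opperf.opperf import run_all_mxnet_operator_benchmarks

# Runs every benchmark category, now including the Array Rearrange and
# Optimizer suites added in this commit.
run_all_mxnet_operator_benchmarks(ctx=mx.cpu(), dtype='float32')
```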
73 changes: 70 additions & 3 deletions benchmark/opperf/rules/default_params.py
@@ -34,6 +34,7 @@

# For operators like - random_uniform, random_normal etc..
DEFAULT_SHAPE = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_SAMPLE = [(2,)]
DEFAULT_LOW = [0]
DEFAULT_HIGH = [5]
DEFAULT_K = [1]
@@ -62,8 +63,44 @@
# NOTE: Data used is DEFAULT_DATA
DEFAULT_AXIS = [0]

# For optimizer operators
DEFAULT_WEIGHT = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_GRAD = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_MOM = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_MEAN = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_VAR = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_N = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_D = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_V = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_Z = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_G = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_DELTA = [(1024, 1024), (10000, 1), (10000, 100)]
DEFAULT_LRS = [(0.1, 0.1)]
DEFAULT_LR = [0.1, 0.5, 0.9]
DEFAULT_GAMMA_1 = [0.1, 0.5, 0.9]
DEFAULT_GAMMA_2 = [0.1, 0.5, 0.9]
DEFAULT_EPSILON = [1e-08]
DEFAULT_BETA_1 = [0.1, 0.5, 0.9]
DEFAULT_BETA_2 = [0.1, 0.5, 0.9]
DEFAULT_T = [1, 5]
DEFAULT_RESCALE_GRAD = [0.4, 0.77]
DEFAULT_CLIP_GRADIENT = [-1.0, 0.8]
DEFAULT_CLIP_WEIGHTS = [-1.0, 0.8]
DEFAULT_LAZY_UPDATE = [0, 1]

# For rearrange operators
# NOTE: Data needs to be a 4D tensor for operators like space_to_depth and depth_to_space.
# Hence, we append "_4d" below to mark the difference.
# For space_to_depth, the height and width (dimensions 2 and 3) need to be multiples of 'block';
# for depth_to_space, the channel dimension (1) needs to be a multiple of 'block^2'.
DEFAULT_DATA_4d = [(1, 4, 2, 4), (10, 25, 10, 100)]
DEFAULT_DIM_1 = [0, 1, 2, 3]
DEFAULT_DIM_2 = [1, 2, 3, 0]
DEFAULT_BLOCK_SIZE = [2, 5]
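# Example: with block_size=2, the first default input (1, 4, 2, 4) satisfies both:
#   depth_to_space: channel dim 4 is a multiple of 2^2 = 4
#   space_to_depth: height 2 and width 4 are multiples of 2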


# Default Inputs. MXNet Op Param Name to Default Input mapping
DEFAULTS_INPUTS = {"data": DEFAULT_DATA,
"sample": DEFAULT_SAMPLE,
"lhs": DEFAULT_LHS,
"rhs": DEFAULT_RHS,
"shape": DEFAULT_SHAPE,
@@ -81,13 +118,43 @@
"k_nd": DEFAULT_K_ND,
"p_nd": DEFAULT_P_ND,
"axis_shape": DEFAULT_AXIS_SHAPE,
"axis": DEFAULT_AXIS}
"axis": DEFAULT_AXIS,
"weight" : DEFAULT_WEIGHT,
"weight32" : DEFAULT_WEIGHT,
"grad" : DEFAULT_GRAD,
"mean" : DEFAULT_MEAN,
"var" : DEFAULT_VAR,
"mom" : DEFAULT_MOM,
"n" : DEFAULT_N,
"d" : DEFAULT_D,
"v" : DEFAULT_V,
"z" : DEFAULT_Z,
"g" : DEFAULT_G,
"delta" : DEFAULT_DELTA,
"lr" : DEFAULT_LR,
"lrs" : DEFAULT_LRS,
"wds" : DEFAULT_LRS,
"gamma1" : DEFAULT_GAMMA_1,
"gamma2" : DEFAULT_GAMMA_2,
"epsilon" : DEFAULT_EPSILON,
"beta1" : DEFAULT_BETA_1,
"beta2" : DEFAULT_BETA_2,
"t" : DEFAULT_T,
"rescale_grad" : DEFAULT_RESCALE_GRAD,
"clip_grad" : DEFAULT_CLIP_GRADIENT,
"lazy_update" : DEFAULT_LAZY_UPDATE,
"data_4d": DEFAULT_DATA_4d,
"dim1": DEFAULT_DIM_1,
"dim2": DEFAULT_DIM_2,
"block_size": DEFAULT_BLOCK_SIZE}


# These are names of MXNet operator parameters that are of type NDArray.
# We maintain this list to automatically recognize that these parameters are to be
# given as NDArray, and translate users' inputs such as a shape tuple, Numpy Array or
# a list to MXNet NDArray. This is just a convenience added so benchmark utility users
# can just give the shape of the tensor, and we automatically create Tensors.
PARAMS_OF_TYPE_NDARRAY = ["lhs", "rhs", "data", "base", "exp",
PARAMS_OF_TYPE_NDARRAY = ["lhs", "rhs", "data", "base", "exp", "sample",
"mu", "sigma", "lam", "alpha", "beta", "gamma", "k", "p",
"low", "high", "weight", "bias", "moving_mean", "moving_var"]
"low", "high", "weight", "bias", "moving_mean", "moving_var",
"weight", "weight32", "grad", "mean", "var", "mom", "n", "d", "v", "z", "g", "delta"]
