From a2773410bc4cf87651540541a9923babf1c93570 Mon Sep 17 00:00:00 2001
From: ChaiBapchya
Date: Fri, 12 Jul 2019 10:50:40 -0700
Subject: [PATCH 01/10] optimizer for opperf benchmark

---
 benchmark/opperf/nd_operations/README.md      | 17 +----
 .../nd_operations/nn_optimizer_operators.py   | 68 +++++++++++++++++++
 benchmark/opperf/utils/op_registry_utils.py   | 23 +++++++
 3 files changed, 92 insertions(+), 16 deletions(-)
 create mode 100644 benchmark/opperf/nd_operations/nn_optimizer_operators.py

diff --git a/benchmark/opperf/nd_operations/README.md b/benchmark/opperf/nd_operations/README.md
index 7aa220c4368a..63fcbff3fe56 100644
--- a/benchmark/opperf/nd_operations/README.md
+++ b/benchmark/opperf/nd_operations/README.md
@@ -22,15 +22,11 @@
 0. LogisticRegressionOutput
 1. broadcast_axes
 2. ravel_multi_index
-3. multi_sgd_mom_update
 4. smooth_l1
 5. scatter_nd
 6. reshape
 7. one_hot
 8. linalg_potri
-9. mp_sgd_update
-10. multi_sgd_update
-11. signum_update
 12. Convolution_v1
 13. repeat
 14. Custom
@@ -38,7 +34,6 @@
 16. SwapAxis
 17. norm
 18. Softmax
-19. rmspropalex_update
 20. fill_element_0index
 21. cast
 22. UpSampling
@@ -52,7 +47,6 @@
 30. Activation
 31. LinearRegressionOutput
 32. Pooling_v1
-33. ftml_update
 34. Crop
 35. ElementWiseSum
 36. diag
@@ -60,12 +54,10 @@
 38. Pad
 39. linalg_gemm2
 40. crop
-41. rmsprop_update
 43. RNN
 44. argmin
 45. SoftmaxOutput
 46. linalg_extractdiag
-47. sgd_mom_update
 48. SequenceLast
 49. Deconvolution
 50. flip
@@ -75,13 +67,11 @@
 54. linalg_trsm
 55. where
 56. SoftmaxActivation
-57. signsgd_update
 58. slice
 59. linalg_gelqf
 60. softmin
 61. linalg_gemm
 62. BilinearSampler
-63. mp_sgd_mom_update
 64. choose_element_0index
 65. tile
 66. space_to_depth
@@ -93,7 +83,6 @@
 72. stack
 73. topk
 74. khatri_rao
-75. multi_mp_sgd_update
 76. linalg_sumlogdiag
 77. broadcast_to
 78. IdentityAttachKLSparseReg
@@ -103,7 +92,6 @@
 82. uniform
 83. InstanceNorm
 84. expand_dims
-85. multi_mp_sgd_mom_update
 86. reverse
 87. add_n
 88. clip
@@ -119,7 +107,6 @@
 98. linalg_syrk
 99. squeeze
 101. ROIPooling
-102. ftrl_update
 103. SliceChannel
 104. slice_like
 105. depth_to_space
@@ -138,6 +125,4 @@
 119. normal
 120. take
 121. MakeLoss
-122. sgd_update
-123. adam_update
-124. concat
\ No newline at end of file
+124. concat
diff --git a/benchmark/opperf/nd_operations/nn_optimizer_operators.py b/benchmark/opperf/nd_operations/nn_optimizer_operators.py
new file mode 100644
index 000000000000..cad300a2642b
--- /dev/null
+++ b/benchmark/opperf/nd_operations/nn_optimizer_operators.py
@@ -0,0 +1,68 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+import mxnet as mx
+from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks
+from benchmark.opperf.utils.op_registry_utils import get_all_optimizer_operators
+
+"""Performance benchmark tests for MXNet NDArray Optimizer Operators.
+
+1. Stochastic Gradient Descent (SGD)
+    1.1 multi_sgd_mom_update
+    1.2 mp_sgd_update
+    1.3 multi_sgd_update
+    1.4 sgd_mom_update
+    1.5 signsgd_update
+    1.6 mp_sgd_mom_update
+    1.7 multi_mp_sgd_update
+    1.8 multi_mp_sgd_mom_update
+    1.9 sgd_update
+3. signum_update
+4. rmspropalex_update
+5. ftml_update
+6. rmsprop_update
+7. ftrl_update
+8. adam_update
+"""
+
+
+def run_optimizer_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
+    """Runs benchmarks with the given context and precision (dtype) for all the neural network
+    optimizer operators in MXNet.
+
+    Parameters
+    ----------
+    ctx: mx.ctx
+        Context to run benchmarks
+    dtype: str, default 'float32'
+        Precision to use for benchmarks
+    warmup: int, default 25
+        Number of times to run for warmup
+    runs: int, default 100
+        Number of runs to capture benchmark results
+
+    Returns
+    -------
+    Dictionary of results. Key -> Name of the operator, Value -> Benchmark results.
+
+    """
+    # Fetch all optimizer operators
+    mx_optimizer_ops = get_all_optimizer_operators()
+
+    # Run benchmarks
+    mx_optimizer_op_results = run_op_benchmarks(mx_optimizer_ops, dtype, ctx, warmup, runs)
+    return mx_optimizer_op_results
diff --git a/benchmark/opperf/utils/op_registry_utils.py b/benchmark/opperf/utils/op_registry_utils.py
index 6509be37f39d..367633aca663 100644
--- a/benchmark/opperf/utils/op_registry_utils.py
+++ b/benchmark/opperf/utils/op_registry_utils.py
@@ -240,6 +240,29 @@ def get_all_reduction_operators():
     return reduction_mx_operators
 
 
+def get_all_optimizer_operators():
+    """Gets all Optimizer operators registered with MXNet.
+
+    Returns
+    -------
+    {"operator_name": {"has_backward", "nd_op_handle", "params"}}
+    """
+    optimizer_ops = ['multi_sgd_mom_update', 'mp_sgd_update', 'multi_sgd_update', 'signum_update',
+                     'rmspropalex_update', 'ftml_update', 'rmsprop_update', 'sgd_mom_update', 'signsgd_update',
+                     'mp_sgd_mom_update', 'multi_mp_sgd_update', 'multi_mp_sgd_mom_update', 'ftrl_update', 'sgd_update',
+                     'adam_update']
+
+    # Get all mxnet operators
+    mx_operators = _get_all_mxnet_operators()
+
+    # Filter for Optimizer operators
+    optimizer_mx_operators = {}
+    for op_name, op_params in mx_operators.items():
+        if op_name in optimizer_ops and op_name not in unique_ops:
+            optimizer_mx_operators[op_name] = mx_operators[op_name]
+    return optimizer_mx_operators
+
+
 def get_operators_with_no_benchmark(operators_with_benchmark):
     """Gets all MXNet operators with no benchmark.
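Review note: a quick way to smoke-test this series locally. This is a minimal sketch, assuming it runs from the MXNet source root (so that `benchmark.opperf` is importable as a package) and that default inputs for the optimizer parameters are registered; those land in PATCH 04/10 below. The small warmup/run counts are arbitrary choices for a fast check, not the patch defaults:

```python
import mxnet as mx

# Assumes: executed from the MXNet source root after the whole series is
# applied, so that the optimizer defaults in rules/default_params.py exist.
from benchmark.opperf.nd_operations.nn_optimizer_operators import run_optimizer_operators_benchmarks
from benchmark.opperf.utils.op_registry_utils import get_all_optimizer_operators

# The registry filter should return exactly the fifteen *_update operators
# hard-coded in get_all_optimizer_operators() above.
print(sorted(get_all_optimizer_operators().keys()))

# Time the whole optimizer suite on CPU with short warmup/run counts.
results = run_optimizer_operators_benchmarks(ctx=mx.cpu(), dtype='float32',
                                             warmup=5, runs=10)
print(results)
```

The same path is exercised end-to-end by the opperf driver once the import and call are wired in (see PATCH 03/10 below).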
From fcec71d89a9c22b5f64a62bfc1d49dccf8051217 Mon Sep 17 00:00:00 2001
From: ChaiBapchya
Date: Sat, 13 Jul 2019 19:50:57 -0700
Subject: [PATCH 02/10] Trigger notification

From 0beea92e8319023f6e15a280b209befa2d919471 Mon Sep 17 00:00:00 2001
From: ChaiBapchya
Date: Mon, 15 Jul 2019 18:19:44 -0700
Subject: [PATCH 03/10] missed function call

---
 benchmark/opperf/opperf.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/benchmark/opperf/opperf.py b/benchmark/opperf/opperf.py
index a73db4fdae89..5b1665e721aa 100755
--- a/benchmark/opperf/opperf.py
+++ b/benchmark/opperf/opperf.py
@@ -38,6 +38,7 @@
 from benchmark.opperf.nd_operations.nn_conv_operators import run_pooling_operators_benchmarks, \
     run_convolution_operators_benchmarks
 from benchmark.opperf.nd_operations.nn_basic_operators import run_nn_basic_operators_benchmarks
+from benchmark.opperf.nd_operations.nn_optimizer_operators import run_optimizer_operators_benchmarks
 
 from benchmark.opperf.utils.common_utils import merge_map_list, save_to_file
 from benchmark.opperf.utils.op_registry_utils import get_operators_with_no_benchmark,\
@@ -88,6 +89,9 @@ def run_all_mxnet_operator_benchmarks(ctx=mx.cpu(), dtype='float32'):
     # Run all Convolution operations benchmarks with default input values
     mxnet_operator_benchmark_results.append(run_convolution_operators_benchmarks(ctx=ctx, dtype=dtype))
 
+    # Run all Optimizer operations benchmarks with default input values
+    mxnet_operator_benchmark_results.append(run_optimizer_operators_benchmarks(ctx=ctx, dtype=dtype))
+
     # ****************************** PREPARE FINAL RESULTS ********************************
     final_benchmark_result_map = merge_map_list(mxnet_operator_benchmark_results)
     return final_benchmark_result_map

From b4ab5716a42b731838146437c10e0fe1e8a5d885 Mon Sep 17 00:00:00 2001
From: ChaiBapchya
Date: Wed, 17 Jul 2019 17:44:55 -0700
Subject: [PATCH 04/10] added params

---
 benchmark/opperf/rules/default_params.py | 40 ++++++++++++++++++++++--
 1 file changed, 38 insertions(+), 2 deletions(-)

diff --git a/benchmark/opperf/rules/default_params.py b/benchmark/opperf/rules/default_params.py
index 2c8f3d436e0d..a344db1d6122 100644
--- a/benchmark/opperf/rules/default_params.py
+++ b/benchmark/opperf/rules/default_params.py
@@ -62,6 +62,25 @@
 # NOTE: Data used is DEFAULT_DATA
 DEFAULT_AXIS = [0]
 
+# For optimizer operators
+DEFAULT_WEIGHT = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_GRAD = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_MOM = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_MEAN = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_VAR = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_N = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_LR = [[0.1,0.5,0.9]]
+DEFAULT_GAMMA_1 = [[0.1,0.5,0.9]]
+DEFAULT_GAMMA_2 = [[0.1,0.5,0.9]]
+DEFAULT_EPSILON = [[1e-08]]
+DEFAULT_BETA_1 = [[0.1,0.5,0.9]]
+DEFAULT_BETA_2 = [[0.1,0.5,0.9]]
+DEFAULT_T = [[1,5]]
+DEFAULT_RESCALE_GRAD = [[0.4, 0.77]]
+DEFAULT_CLIP_GRADIENT = [[-1.0,0.8]]
+DEFAULT_CLIP_WEIGHTS = [[-1.0,0.8]]
+DEFAULT_LAZY_UPDATE = [[0,1]]
+
 # Default Inputs. MXNet Op Param Name to Default Input mapping
 DEFAULTS_INPUTS = {"data": DEFAULT_DATA,
                    "lhs": DEFAULT_LHS,
@@ -81,7 +100,23 @@
                    "k_nd": DEFAULT_K_ND,
                    "p_nd": DEFAULT_P_ND,
                    "axis_shape": DEFAULT_AXIS_SHAPE,
-                   "axis": DEFAULT_AXIS}
+                   "axis": DEFAULT_AXIS,
+                   "weight" : DEFAULT_WEIGHT,
+                   "grad" : DEFAULT_GRAD,
+                   "mean" : DEFAULT_MEAN,
+                   "var" : DEFAULT_VAR,
+                   "mom" : DEFAULT_MOM,
+                   "n" : DEFAULT_N,
+                   "lr" : DEFAULT_LR,
+                   "gamma1" : DEFAULT_GAMMA_1,
+                   "gamma2" : DEFAULT_GAMMA_2,
+                   "epsilon" : DEFAULT_EPSILON,
+                   "beta1" : DEFAULT_BETA_1,
+                   "beta2" : DEFAULT_BETA_2,
+                   "t" : DEFAULT_T,
+                   "rescale_grad" : DEFAULT_RESCALE_GRAD,
+                   "clip_grad" : DEFAULT_CLIP_GRADIENT,
+                   "lazy_update" : DEFAULT_LAZY_UPDATE}
 
 # These are names of MXNet operator parameters that are of type NDArray.
 # We maintain this list to automatically recognize these parameters are to be
@@ -90,4 +125,5 @@
 # can just say shape of the tensor, and we automatically create Tensors.
 PARAMS_OF_TYPE_NDARRAY = ["lhs", "rhs", "data", "base", "exp", "mu", "sigma", "lam",
                           "alpha", "beta", "gamma", "k", "p",
-                          "low", "high", "weight", "bias", "moving_mean", "moving_var"]
+                          "low", "high", "weight", "bias", "moving_mean", "moving_var",
+                          "weight", "grad", "mean", "var", "mom", "n"]

From 84fcf3c7bb988fc7e8b2d3c58d6921d286583323 Mon Sep 17 00:00:00 2001
From: ChaiBapchya
Date: Thu, 18 Jul 2019 18:32:26 -0700
Subject: [PATCH 05/10] minor typos

---
 benchmark/opperf/nd_operations/nn_optimizer_operators.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/benchmark/opperf/nd_operations/nn_optimizer_operators.py b/benchmark/opperf/nd_operations/nn_optimizer_operators.py
index cad300a2642b..ef0b59fcf84b 100644
--- a/benchmark/opperf/nd_operations/nn_optimizer_operators.py
+++ b/benchmark/opperf/nd_operations/nn_optimizer_operators.py
@@ -19,7 +19,7 @@
 from benchmark.opperf.utils.benchmark_utils import run_op_benchmarks
 from benchmark.opperf.utils.op_registry_utils import get_all_optimizer_operators
 
-"""Performance benchmark tests for MXNet NDArray Optimizer Operators.
+"""Performance benchmark tests for MXNet Neural Network Optimizer Update Operators.
 
 1. Stochastic Gradient Descent (SGD)
     1.1 multi_sgd_mom_update
@@ -42,7 +42,7 @@
 
 def run_optimizer_operators_benchmarks(ctx=mx.cpu(), dtype='float32', warmup=25, runs=100):
     """Runs benchmarks with the given context and precision (dtype) for all the neural network
-    optimizer operators in MXNet.
+    optimizer update operators in MXNet.
 
     Parameters
     ----------

From e3a321d02838b4873e7f7692b0f70c7bc9324f2e Mon Sep 17 00:00:00 2001
From: ChaiBapchya
Date: Thu, 18 Jul 2019 21:54:12 -0700
Subject: [PATCH 06/10] Trigger notification

From 7bba150a20500efd224b319ec79aa7915203bc24 Mon Sep 17 00:00:00 2001
From: ChaiBapchya
Date: Mon, 22 Jul 2019 14:39:32 -0700
Subject: [PATCH 07/10] resolve default params

---
 benchmark/opperf/rules/default_params.py | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/benchmark/opperf/rules/default_params.py b/benchmark/opperf/rules/default_params.py
index a344db1d6122..58f7402546fe 100644
--- a/benchmark/opperf/rules/default_params.py
+++ b/benchmark/opperf/rules/default_params.py
@@ -69,17 +69,17 @@
 DEFAULT_MEAN = [(1024, 1024), (10000, 1), (10000, 100)]
 DEFAULT_VAR = [(1024, 1024), (10000, 1), (10000, 100)]
 DEFAULT_N = [(1024, 1024), (10000, 1), (10000, 100)]
-DEFAULT_LR = [[0.1,0.5,0.9]]
-DEFAULT_GAMMA_1 = [[0.1,0.5,0.9]]
-DEFAULT_GAMMA_2 = [[0.1,0.5,0.9]]
-DEFAULT_EPSILON = [[1e-08]]
-DEFAULT_BETA_1 = [[0.1,0.5,0.9]]
-DEFAULT_BETA_2 = [[0.1,0.5,0.9]]
-DEFAULT_T = [[1,5]]
-DEFAULT_RESCALE_GRAD = [[0.4, 0.77]]
-DEFAULT_CLIP_GRADIENT = [[-1.0,0.8]]
-DEFAULT_CLIP_WEIGHTS = [[-1.0,0.8]]
-DEFAULT_LAZY_UPDATE = [[0,1]]
+DEFAULT_LR = [0.1,0.5,0.9]
+DEFAULT_GAMMA_1 = [0.1,0.5,0.9]
+DEFAULT_GAMMA_2 = [0.1,0.5,0.9]
+DEFAULT_EPSILON = [1e-08]
+DEFAULT_BETA_1 = [0.1,0.5,0.9]
+DEFAULT_BETA_2 = [0.1,0.5,0.9]
+DEFAULT_T = [1,5]
+DEFAULT_RESCALE_GRAD = [0.4, 0.77]
+DEFAULT_CLIP_GRADIENT = [-1.0,0.8]
+DEFAULT_CLIP_WEIGHTS = [-1.0,0.8]
+DEFAULT_LAZY_UPDATE = [0,1]
 
 # Default Inputs. MXNet Op Param Name to Default Input mapping
 DEFAULTS_INPUTS = {"data": DEFAULT_DATA,

From 82f2d61b138ae1751e016250548dd0ede07562d7 Mon Sep 17 00:00:00 2001
From: ChaiBapchya
Date: Mon, 22 Jul 2019 17:30:08 -0700
Subject: [PATCH 08/10] temp remove multi op

---
 benchmark/opperf/rules/default_params.py    | 16 +++++++++++++++-
 benchmark/opperf/utils/op_registry_utils.py |  4 ++--
 2 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/benchmark/opperf/rules/default_params.py b/benchmark/opperf/rules/default_params.py
index 58f7402546fe..69f61c32c993 100644
--- a/benchmark/opperf/rules/default_params.py
+++ b/benchmark/opperf/rules/default_params.py
@@ -69,6 +69,12 @@
 DEFAULT_MEAN = [(1024, 1024), (10000, 1), (10000, 100)]
 DEFAULT_VAR = [(1024, 1024), (10000, 1), (10000, 100)]
 DEFAULT_N = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_D = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_V = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_Z = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_G = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_DELTA = [(1024, 1024), (10000, 1), (10000, 100)]
+DEFAULT_LRS = [(0.1,0.1)]
 DEFAULT_LR = [0.1,0.5,0.9]
 DEFAULT_GAMMA_1 = [0.1,0.5,0.9]
 DEFAULT_GAMMA_2 = [0.1,0.5,0.9]
@@ -102,12 +108,20 @@
                    "axis_shape": DEFAULT_AXIS_SHAPE,
                    "axis": DEFAULT_AXIS,
                    "weight" : DEFAULT_WEIGHT,
+                   "weight32" : DEFAULT_WEIGHT,
                    "grad" : DEFAULT_GRAD,
                    "mean" : DEFAULT_MEAN,
                    "var" : DEFAULT_VAR,
                    "mom" : DEFAULT_MOM,
                    "n" : DEFAULT_N,
+                   "d" : DEFAULT_D,
+                   "v" : DEFAULT_V,
+                   "z" : DEFAULT_Z,
+                   "g" : DEFAULT_G,
+                   "delta" : DEFAULT_DELTA,
                    "lr" : DEFAULT_LR,
+                   "lrs" : DEFAULT_LRS,
+                   "wds" : DEFAULT_LRS,
                    "gamma1" : DEFAULT_GAMMA_1,
                    "gamma2" : DEFAULT_GAMMA_2,
                    "epsilon" : DEFAULT_EPSILON,
                    "beta1" : DEFAULT_BETA_1,
                    "beta2" : DEFAULT_BETA_2,
                    "t" : DEFAULT_T,
                    "rescale_grad" : DEFAULT_RESCALE_GRAD,
                    "clip_grad" : DEFAULT_CLIP_GRADIENT,
                    "lazy_update" : DEFAULT_LAZY_UPDATE}
@@ -126,4 +140,4 @@
 PARAMS_OF_TYPE_NDARRAY = ["lhs", "rhs", "data", "base", "exp", "mu", "sigma", "lam",
                           "alpha", "beta", "gamma", "k", "p",
                           "low", "high", "weight", "bias", "moving_mean", "moving_var",
-                          "weight", "grad", "mean", "var", "mom", "n"]
+                          "weight", "weight32", "grad", "mean", "var", "mom", "n", "d", "v", "z", "g", "delta"]
diff --git a/benchmark/opperf/utils/op_registry_utils.py b/benchmark/opperf/utils/op_registry_utils.py
index 44018d4f96cd..4c09114694b7 100644
--- a/benchmark/opperf/utils/op_registry_utils.py
+++ b/benchmark/opperf/utils/op_registry_utils.py
@@ -249,9 +249,9 @@ def get_all_optimizer_operators():
     -------
     {"operator_name": {"has_backward", "nd_op_handle", "params"}}
     """
-    optimizer_ops = ['multi_sgd_mom_update', 'mp_sgd_update', 'multi_sgd_update', 'signum_update',
+    optimizer_ops = ['mp_sgd_update', 'signum_update',
                      'rmspropalex_update', 'ftml_update', 'rmsprop_update', 'sgd_mom_update', 'signsgd_update',
-                     'mp_sgd_mom_update', 'multi_mp_sgd_update', 'multi_mp_sgd_mom_update', 'ftrl_update', 'sgd_update',
+                     'mp_sgd_mom_update', 'ftrl_update', 'sgd_update',
                      'adam_update']

From 421dd24cec89753a2058677e67106a41b57189e6 Mon Sep 17 00:00:00 2001
From: ChaiBapchya
Date: Fri, 26 Jul 2019 23:36:56 -0700
Subject: [PATCH 09/10] take care of #15643

---
 benchmark/opperf/nd_operations/README.md           |  4 ++++
 .../opperf/nd_operations/nn_optimizer_operators.py | 14 +++++---------
 benchmark/opperf/rules/default_params.py           |  2 +-
 benchmark/opperf/utils/op_registry_utils.py        |  7 +++----
 4 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/benchmark/opperf/nd_operations/README.md b/benchmark/opperf/nd_operations/README.md
index 8a55407ec14b..95958662ae8c 100644
--- a/benchmark/opperf/nd_operations/README.md
+++ b/benchmark/opperf/nd_operations/README.md
@@ -22,11 +22,13 @@
 0. LogisticRegressionOutput
 1. broadcast_axes
 2. ravel_multi_index
+3. multi_sgd_mom_update
 4. smooth_l1
 5. scatter_nd
 6. reshape
 7. one_hot
 8. linalg_potri
+10. multi_sgd_update
 12. Convolution_v1
 13. repeat
 14. Custom
@@ -76,6 +78,7 @@
 71. slice_axis
 72. stack
 74. khatri_rao
+75. multi_mp_sgd_update
 76. linalg_sumlogdiag
 77. broadcast_to
 78. IdentityAttachKLSparseReg
@@ -84,6 +87,7 @@
 82. uniform
 83. InstanceNorm
 84. expand_dims
+85. multi_mp_sgd_mom_update
 86. reverse
 87. add_n
 88. clip
diff --git a/benchmark/opperf/nd_operations/nn_optimizer_operators.py b/benchmark/opperf/nd_operations/nn_optimizer_operators.py
index ef0b59fcf84b..681996eecef0 100644
--- a/benchmark/opperf/nd_operations/nn_optimizer_operators.py
+++ b/benchmark/opperf/nd_operations/nn_optimizer_operators.py
@@ -22,15 +22,11 @@
 """Performance benchmark tests for MXNet Neural Network Optimizer Update Operators.
 
 1. Stochastic Gradient Descent (SGD)
-    1.1 multi_sgd_mom_update
-    1.2 mp_sgd_update
-    1.3 multi_sgd_update
-    1.4 sgd_mom_update
-    1.5 signsgd_update
-    1.6 mp_sgd_mom_update
-    1.7 multi_mp_sgd_update
-    1.8 multi_mp_sgd_mom_update
-    1.9 sgd_update
+    1.1 mp_sgd_update
+    1.2 sgd_mom_update
+    1.3 signsgd_update
+    1.4 mp_sgd_mom_update
+    1.5 sgd_update
 3. signum_update
 4. rmspropalex_update
 5. ftml_update
diff --git a/benchmark/opperf/rules/default_params.py b/benchmark/opperf/rules/default_params.py
index b67ff76893ef..00bd76632142 100644
--- a/benchmark/opperf/rules/default_params.py
+++ b/benchmark/opperf/rules/default_params.py
@@ -140,7 +140,7 @@
                    "t" : DEFAULT_T,
                    "rescale_grad" : DEFAULT_RESCALE_GRAD,
                    "clip_grad" : DEFAULT_CLIP_GRADIENT,
-                   "lazy_update" : DEFAULT_LAZY_UPDATE
+                   "lazy_update" : DEFAULT_LAZY_UPDATE,
                    "data_4d": DEFAULT_DATA_4d,
                    "dim1": DEFAULT_DIM_1,
                    "dim2": DEFAULT_DIM_2,
diff --git a/benchmark/opperf/utils/op_registry_utils.py b/benchmark/opperf/utils/op_registry_utils.py
index a69ee83bde4f..860b83a4dace 100644
--- a/benchmark/opperf/utils/op_registry_utils.py
+++ b/benchmark/opperf/utils/op_registry_utils.py
@@ -251,10 +251,9 @@ def get_all_optimizer_operators():
     -------
     {"operator_name": {"has_backward", "nd_op_handle", "params"}}
     """
-    optimizer_ops = ['mp_sgd_update', 'signum_update',
-                     'rmspropalex_update', 'ftml_update', 'rmsprop_update', 'sgd_mom_update', 'signsgd_update',
-                     'mp_sgd_mom_update', 'ftrl_update', 'sgd_update',
-                     'adam_update']
+    optimizer_ops = ['mp_sgd_update', 'signum_update', 'rmspropalex_update', 'ftml_update', 'rmsprop_update',
+                     'sgd_mom_update', 'signsgd_update', 'mp_sgd_mom_update', 'ftrl_update', 'sgd_update',
+                     'adam_update']

From 215256851df7f87e4729488a007c696e92aed83d Mon Sep 17 00:00:00 2001
From: ChaiBapchya
Date: Fri, 26 Jul 2019 23:41:20 -0700
Subject: [PATCH 10/10] numbering typo

---
 .../opperf/nd_operations/nn_optimizer_operators.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/benchmark/opperf/nd_operations/nn_optimizer_operators.py b/benchmark/opperf/nd_operations/nn_optimizer_operators.py
index 681996eecef0..130ab85300dc 100644
--- a/benchmark/opperf/nd_operations/nn_optimizer_operators.py
+++ b/benchmark/opperf/nd_operations/nn_optimizer_operators.py
@@ -27,12 +27,12 @@
     1.3 signsgd_update
     1.4 mp_sgd_mom_update
     1.5 sgd_update
-3. signum_update
-4. rmspropalex_update
-5. ftml_update
-6. rmsprop_update
-7. ftrl_update
-8. adam_update
+2. signum_update
+3. rmspropalex_update
+4. ftml_update
+5. rmsprop_update
+6. ftrl_update
+7. adam_update
 """
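Review note: for context on what these benchmarks time, each `*_update` operator performs one optimizer step in place on NDArray state. Below is a minimal sketch of calling one of the benchmarked ops, `sgd_mom_update`, directly through the public `mx.nd` API. The shapes mirror the `(1024, 1024)` entry of DEFAULT_WEIGHT/DEFAULT_GRAD/DEFAULT_MOM; the lr and momentum values are illustrative, not the benchmarked grid:

```python
import mxnet as mx

# Weight, gradient, and momentum state shaped like the (1024, 1024)
# default input used by opperf.
weight = mx.nd.random.uniform(shape=(1024, 1024))
grad = mx.nd.random.uniform(shape=(1024, 1024))
mom = mx.nd.zeros((1024, 1024))

# One momentum-SGD step, applied in place via out=weight:
#   mom    <- momentum * mom - lr * (rescale_grad * grad)
#   weight <- weight + mom
# (weight decay and gradient clipping omitted here for brevity).
mx.nd.sgd_mom_update(weight, grad, mom, lr=0.1, momentum=0.9,
                     rescale_grad=1.0, out=weight)

# MXNet dispatches operators asynchronously; block before reading or timing.
mx.nd.waitall()
print(weight.mean().asscalar())
```

The asynchronous dispatch is also why the harness runs `warmup` untimed iterations before the timed `runs`: the first dispatches pay one-time setup costs that should not be measured.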