From a470710aff10bea30ff5c9d1968770286ea2f55e Mon Sep 17 00:00:00 2001 From: mozga-intel Date: Wed, 15 Sep 2021 14:54:49 +0200 Subject: [PATCH 1/3] Zero initialization to avoid error message on CentOS --- src/api/operator/numpy/linalg/np_eig.cc | 2 +- src/api/operator/numpy/linalg/np_eigvals.cc | 2 +- src/api/operator/numpy/linalg/np_lstsq.cc | 2 +- .../operator/numpy/linalg/np_matrix_rank.cc | 4 +- src/api/operator/numpy/linalg/np_norm.cc | 2 +- src/api/operator/numpy/linalg/np_pinv.cc | 4 +- src/api/operator/numpy/linalg/np_potrf.cc | 2 +- src/api/operator/numpy/linalg/np_tensorinv.cc | 2 +- .../operator/numpy/linalg/np_tensorsolve.cc | 2 +- src/api/operator/numpy/np_bincount_op.cc | 2 +- .../numpy/np_broadcast_reduce_op_boolean.cc | 2 +- .../numpy/np_broadcast_reduce_op_index.cc | 2 +- .../numpy/np_broadcast_reduce_op_value.cc | 2 +- src/api/operator/numpy/np_cross.cc | 2 +- src/api/operator/numpy/np_cumsum.cc | 2 +- src/api/operator/numpy/np_delete_op.cc | 2 +- src/api/operator/numpy/np_diff_op.cc | 2 +- src/api/operator/numpy/np_ediff1d_op.cc | 2 +- src/api/operator/numpy/np_einsum_op.cc | 2 +- .../numpy/np_elemwise_unary_op_basic.cc | 2 +- src/api/operator/numpy/np_fill_diagonal_op.cc | 2 +- src/api/operator/numpy/np_histogram_op.cc | 2 +- src/api/operator/numpy/np_init_op.cc | 26 +++++----- src/api/operator/numpy/np_insert_op.cc | 6 +-- src/api/operator/numpy/np_interp_op.cc | 2 +- src/api/operator/numpy/np_matrix_op.cc | 48 +++++++++---------- src/api/operator/numpy/np_moments_op.cc | 6 +-- src/api/operator/numpy/np_nan_to_num_op.cc | 2 +- src/api/operator/numpy/np_ordering_op.cc | 4 +- src/api/operator/numpy/np_pad_op.cc | 2 +- src/api/operator/numpy/np_percentile_op.cc | 2 +- src/api/operator/numpy/np_repeat_op.cc | 2 +- src/api/operator/numpy/np_tensordot_op.cc | 4 +- src/api/operator/numpy/np_trace_op.cc | 2 +- src/api/operator/numpy/np_tri_op.cc | 2 +- src/api/operator/numpy/np_tril_op.cc | 2 +- src/api/operator/numpy/np_triu_op.cc | 2 +- 
src/api/operator/numpy/np_unique_op.cc | 2 +- src/api/operator/numpy/np_where_op.cc | 4 +- src/api/operator/numpy/np_window_op.cc | 2 +- src/api/operator/numpy/random/np_choice_op.cc | 2 +- .../numpy/random/np_exponential_op.cc | 2 +- .../operator/numpy/random/np_laplace_op.cc | 2 +- .../numpy/random/np_location_scale_op.cc | 4 +- .../numpy/random/np_multinomial_op.cc | 2 +- src/api/operator/numpy/random/np_pareto_op.cc | 2 +- src/api/operator/numpy/random/np_power_op.cc | 2 +- .../operator/numpy/random/np_rayleigh_op.cc | 2 +- .../operator/numpy/random/np_weibull_op.cc | 2 +- .../numpy_extension/npx_activation_op.cc | 2 +- .../numpy_extension/npx_arange_like_op.cc | 2 +- .../numpy_extension/npx_batch_dot_op.cc | 2 +- .../numpy_extension/npx_batch_norm_op.cc | 2 +- .../numpy_extension/npx_broadcast_like_op.cc | 2 +- .../numpy_extension/npx_control_flow_op.cc | 6 +-- .../numpy_extension/npx_convolution_op.cc | 2 +- .../numpy_extension/npx_deconvolution_op.cc | 2 +- .../numpy_extension/npx_dropout_op.cc | 2 +- .../numpy_extension/npx_embedding_op.cc | 2 +- .../numpy_extension/npx_fully_connected_op.cc | 2 +- .../numpy_extension/npx_group_norm_op.cc | 2 +- .../numpy_extension/npx_layer_norm_op.cc | 2 +- .../numpy_extension/npx_leaky_relu_op.cc | 2 +- .../numpy_extension/npx_one_hot_op.cc | 2 +- .../operator/numpy_extension/npx_pick_op.cc | 2 +- .../numpy_extension/npx_pooling_op.cc | 2 +- .../operator/numpy_extension/npx_rnn_op.cc | 2 +- .../numpy_extension/npx_softmax_op.cc | 8 ++-- .../operator/numpy_extension/npx_topk_op.cc | 2 +- src/api/operator/random/np_gamma_op.cc | 2 +- src/api/operator/random/np_normal_op.cc | 2 +- src/api/operator/random/np_randint_op.cc | 2 +- src/api/operator/random/np_uniform_op.cc | 2 +- src/api/operator/tensor/indexing_op.cc | 2 +- src/api/operator/tensor/matrix_op.cc | 2 +- src/api/operator/ufunc_helper.cc | 8 ++-- src/c_api/c_api_profile.cc | 2 +- 77 files changed, 132 insertions(+), 128 deletions(-) diff --git 
a/src/api/operator/numpy/linalg/np_eig.cc b/src/api/operator/numpy/linalg/np_eig.cc index 05cfa6c71a9d..2bb1c3f6b1e8 100644 --- a/src/api/operator/numpy/linalg/np_eig.cc +++ b/src/api/operator/numpy/linalg/np_eig.cc @@ -45,7 +45,7 @@ MXNET_REGISTER_API("_npi.eigh").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; const nnvm::Op* op = Op::Get("_npi_eigh"); nnvm::NodeAttrs attrs; - op::EighParam param; + op::EighParam param = {}; param.UPLO = *((args[1].operator std::string()).c_str()); attrs.parsed = param; attrs.op = op; diff --git a/src/api/operator/numpy/linalg/np_eigvals.cc b/src/api/operator/numpy/linalg/np_eigvals.cc index 04982ded7d06..5227715cd82f 100644 --- a/src/api/operator/numpy/linalg/np_eigvals.cc +++ b/src/api/operator/numpy/linalg/np_eigvals.cc @@ -46,7 +46,7 @@ MXNET_REGISTER_API("_npi.eigvalsh") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_eigvalsh"); nnvm::NodeAttrs attrs; - op::EigvalshParam param; + op::EigvalshParam param = {}; param.UPLO = *((args[1].operator std::string()).c_str()); attrs.parsed = param; attrs.op = op; diff --git a/src/api/operator/numpy/linalg/np_lstsq.cc b/src/api/operator/numpy/linalg/np_lstsq.cc index e2ac7673c38b..559361a29ef1 100644 --- a/src/api/operator/numpy/linalg/np_lstsq.cc +++ b/src/api/operator/numpy/linalg/np_lstsq.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.lstsq").set_body([](runtime::MXNetArgs args, runtime::M using namespace runtime; const nnvm::Op* op = Op::Get("_npi_lstsq"); nnvm::NodeAttrs attrs; - op::LstsqParam param; + op::LstsqParam param = {}; if (args[2].type_code() == kNull) { param.rcond = static_cast(1); } else if (args[2].type_code() == kStr) { diff --git a/src/api/operator/numpy/linalg/np_matrix_rank.cc b/src/api/operator/numpy/linalg/np_matrix_rank.cc index 5849973c5333..6cb4373fdbe1 100644 --- a/src/api/operator/numpy/linalg/np_matrix_rank.cc +++ b/src/api/operator/numpy/linalg/np_matrix_rank.cc @@ -31,7 +31,7 @@ namespace mxnet { inline static void 
_npi_matrix_rank_none_tol(runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_matrix_rank_none_tol"); - op::MatrixRankNoneTolParam param; + op::MatrixRankNoneTolParam param = {}; nnvm::NodeAttrs attrs; param.hermitian = args[2].operator bool(); param.finfoEps32 = args[3].operator double(); @@ -49,7 +49,7 @@ inline static void _npi_matrix_rank_none_tol(runtime::MXNetArgs args, runtime::M inline static void _npi_matrix_rank(runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_matrix_rank"); - op::MatrixRankParam param; + op::MatrixRankParam param = {}; nnvm::NodeAttrs attrs; param.hermitian = args[2].operator bool(); attrs.parsed = param; diff --git a/src/api/operator/numpy/linalg/np_norm.cc b/src/api/operator/numpy/linalg/np_norm.cc index b3a45701fd68..d0e8940fef59 100644 --- a/src/api/operator/numpy/linalg/np_norm.cc +++ b/src/api/operator/numpy/linalg/np_norm.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.norm").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npi_norm"); - op::NumpyNormParam param; + op::NumpyNormParam param = {}; param.ord = args[1].operator double(); if (args[2].type_code() == kNull) { param.axis = dmlc::optional(); diff --git a/src/api/operator/numpy/linalg/np_pinv.cc b/src/api/operator/numpy/linalg/np_pinv.cc index 531d7c0f8d44..5b19faa80fd6 100644 --- a/src/api/operator/numpy/linalg/np_pinv.cc +++ b/src/api/operator/numpy/linalg/np_pinv.cc @@ -31,7 +31,7 @@ namespace mxnet { inline static void _npi_pinv(runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_pinv"); - op::PinvParam param; + op::PinvParam param = {}; nnvm::NodeAttrs attrs; param.hermitian = args[2].operator bool(); attrs.parsed = param; @@ -47,7 +47,7 @@ inline static void _npi_pinv(runtime::MXNetArgs args, 
runtime::MXNetRetValue* re inline static void _npi_pinv_scalar_rcond(runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_pinv_scalar_rcond"); - op::PinvScalarRcondParam param; + op::PinvScalarRcondParam param = {}; nnvm::NodeAttrs attrs; param.rcond = args[1].operator double(); param.hermitian = args[2].operator bool(); diff --git a/src/api/operator/numpy/linalg/np_potrf.cc b/src/api/operator/numpy/linalg/np_potrf.cc index bd11a56d4796..40e3cf99fdcd 100644 --- a/src/api/operator/numpy/linalg/np_potrf.cc +++ b/src/api/operator/numpy/linalg/np_potrf.cc @@ -33,7 +33,7 @@ MXNET_REGISTER_API("_npi.cholesky") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_cholesky"); nnvm::NodeAttrs attrs; - op::LaCholeskyParam param; + op::LaCholeskyParam param = {}; param.lower = args[1].operator bool(); attrs.parsed = param; attrs.op = op; diff --git a/src/api/operator/numpy/linalg/np_tensorinv.cc b/src/api/operator/numpy/linalg/np_tensorinv.cc index 9392f2e8c9bc..b67634681b18 100644 --- a/src/api/operator/numpy/linalg/np_tensorinv.cc +++ b/src/api/operator/numpy/linalg/np_tensorinv.cc @@ -33,7 +33,7 @@ MXNET_REGISTER_API("_npi.tensorinv") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_tensorinv"); nnvm::NodeAttrs attrs; - op::TensorinvParam param; + op::TensorinvParam param = {}; param.ind = args[1].operator int(); attrs.parsed = param; attrs.op = op; diff --git a/src/api/operator/numpy/linalg/np_tensorsolve.cc b/src/api/operator/numpy/linalg/np_tensorsolve.cc index 9d1224063ee4..6ac45a02151d 100644 --- a/src/api/operator/numpy/linalg/np_tensorsolve.cc +++ b/src/api/operator/numpy/linalg/np_tensorsolve.cc @@ -33,7 +33,7 @@ MXNET_REGISTER_API("_npi.tensorsolve") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_tensorsolve"); nnvm::NodeAttrs attrs; - op::TensorsolveParam param; + op::TensorsolveParam param = {}; if (args[2].type_code() == kNull) { param.a_axes = Tuple(); } else { diff 
--git a/src/api/operator/numpy/np_bincount_op.cc b/src/api/operator/numpy/np_bincount_op.cc index 27495e98182d..9303b933e73d 100644 --- a/src/api/operator/numpy/np_bincount_op.cc +++ b/src/api/operator/numpy/np_bincount_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.bincount") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_bincount"); nnvm::NodeAttrs attrs; - op::NumpyBincountParam param; + op::NumpyBincountParam param = {}; int num_outputs = 0; if (args[1].type_code() == kNull) { diff --git a/src/api/operator/numpy/np_broadcast_reduce_op_boolean.cc b/src/api/operator/numpy/np_broadcast_reduce_op_boolean.cc index f2494f0d5672..5d542dd29afc 100644 --- a/src/api/operator/numpy/np_broadcast_reduce_op_boolean.cc +++ b/src/api/operator/numpy/np_broadcast_reduce_op_boolean.cc @@ -33,7 +33,7 @@ MXNET_REGISTER_API("_npi.all").set_body([](runtime::MXNetArgs args, runtime::MXN using namespace runtime; const nnvm::Op* op = Op::Get("_npi_all"); nnvm::NodeAttrs attrs; - op::NumpyReduceAxesBoolParam param; + op::NumpyReduceAxesBoolParam param = {}; NDArray* out = args[3].operator mxnet::NDArray*(); NDArray** outputs = out == nullptr ? 
nullptr : &out; diff --git a/src/api/operator/numpy/np_broadcast_reduce_op_index.cc b/src/api/operator/numpy/np_broadcast_reduce_op_index.cc index 1d46ec037aef..292e4207fa3d 100644 --- a/src/api/operator/numpy/np_broadcast_reduce_op_index.cc +++ b/src/api/operator/numpy/np_broadcast_reduce_op_index.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npi.argmax") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_argmax"); nnvm::NodeAttrs attrs; - op::ReduceAxisParam param; + op::ReduceAxisParam param = {}; // param.axis if (args[1].type_code() == kNull) { param.axis = dmlc::nullopt; diff --git a/src/api/operator/numpy/np_broadcast_reduce_op_value.cc b/src/api/operator/numpy/np_broadcast_reduce_op_value.cc index f7238e8b24d2..869a802e904c 100644 --- a/src/api/operator/numpy/np_broadcast_reduce_op_value.cc +++ b/src/api/operator/numpy/np_broadcast_reduce_op_value.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npi.broadcast_to") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_broadcast_to"); nnvm::NodeAttrs attrs; - op::BroadcastToParam param; + op::BroadcastToParam param = {}; if (args[1].type_code() == kDLInt) { param.shape = TShape(1, args[1].operator int64_t()); } else { diff --git a/src/api/operator/numpy/np_cross.cc b/src/api/operator/numpy/np_cross.cc index 2bf9675148ca..8b9b002fbf0c 100644 --- a/src/api/operator/numpy/np_cross.cc +++ b/src/api/operator/numpy/np_cross.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.cross").set_body([](runtime::MXNetArgs args, runtime::M using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npi_cross"); - op::NumpyCrossParam param; + op::NumpyCrossParam param = {}; param.axisa = args[2].operator int(); param.axisb = args[3].operator int(); param.axisc = args[4].operator int(); diff --git a/src/api/operator/numpy/np_cumsum.cc b/src/api/operator/numpy/np_cumsum.cc index 227ac0531e0d..56ba5fb7d95f 100644 --- a/src/api/operator/numpy/np_cumsum.cc +++ b/src/api/operator/numpy/np_cumsum.cc @@ -33,7 +33,7 
@@ MXNET_REGISTER_API("_npi.cumsum") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npi_cumsum"); - op::CumsumParam param; + op::CumsumParam param = {}; // axis if (args[1].type_code() == kNull) { param.axis = dmlc::nullopt; diff --git a/src/api/operator/numpy/np_delete_op.cc b/src/api/operator/numpy/np_delete_op.cc index dd5746994a29..1a102081ebf6 100644 --- a/src/api/operator/numpy/np_delete_op.cc +++ b/src/api/operator/numpy/np_delete_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npi.delete") using namespace runtime; static const nnvm::Op* op = Op::Get("_npi_delete"); nnvm::NodeAttrs attrs; - op::NumpyDeleteParam param; + op::NumpyDeleteParam param = {}; int num_inputs = 0; param.start = dmlc::nullopt; param.step = dmlc::nullopt; diff --git a/src/api/operator/numpy/np_diff_op.cc b/src/api/operator/numpy/np_diff_op.cc index a89063b93eb2..9b478cdc5d4f 100644 --- a/src/api/operator/numpy/np_diff_op.cc +++ b/src/api/operator/numpy/np_diff_op.cc @@ -31,7 +31,7 @@ MXNET_REGISTER_API("_npi.diff").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; const nnvm::Op* op = Op::Get("_npi_diff"); nnvm::NodeAttrs attrs; - op::DiffParam param; + op::DiffParam param = {}; param.n = args[1].operator int(); param.axis = args[2].operator int(); diff --git a/src/api/operator/numpy/np_ediff1d_op.cc b/src/api/operator/numpy/np_ediff1d_op.cc index ee88eac54908..9c10a6b24ebd 100644 --- a/src/api/operator/numpy/np_ediff1d_op.cc +++ b/src/api/operator/numpy/np_ediff1d_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.ediff1d") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_ediff1d"); nnvm::NodeAttrs attrs; - op::EDiff1DParam param; + op::EDiff1DParam param = {}; int num_inputs = 1; NDArray* inputs[3]; inputs[0] = args[0].operator mxnet::NDArray*(); diff --git a/src/api/operator/numpy/np_einsum_op.cc b/src/api/operator/numpy/np_einsum_op.cc index 8c96297a4433..fad06d15212c 100644 --- a/src/api/operator/numpy/np_einsum_op.cc 
+++ b/src/api/operator/numpy/np_einsum_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npi.einsum") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_einsum"); nnvm::NodeAttrs attrs; - op::NumpyEinsumParam param; + op::NumpyEinsumParam param = {}; int args_size = args.size(); // param.num_args param.num_args = args_size - 3; diff --git a/src/api/operator/numpy/np_elemwise_unary_op_basic.cc b/src/api/operator/numpy/np_elemwise_unary_op_basic.cc index be5afcfed2c0..d87c73845835 100644 --- a/src/api/operator/numpy/np_elemwise_unary_op_basic.cc +++ b/src/api/operator/numpy/np_elemwise_unary_op_basic.cc @@ -96,7 +96,7 @@ MXNET_REGISTER_API("_npi.around") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_around"); nnvm::NodeAttrs attrs; - op::AroundParam param; + op::AroundParam param = {}; param.decimals = args[1].operator int64_t(); attrs.parsed = param; attrs.op = op; diff --git a/src/api/operator/numpy/np_fill_diagonal_op.cc b/src/api/operator/numpy/np_fill_diagonal_op.cc index 089d7cd95903..3ac4ef83f063 100644 --- a/src/api/operator/numpy/np_fill_diagonal_op.cc +++ b/src/api/operator/numpy/np_fill_diagonal_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.fill_diagonal") const nnvm::Op* op = Op::Get("_npi_fill_diagonal"); nnvm::NodeAttrs attrs; - op::NumpyFillDiagonalParam param; + op::NumpyFillDiagonalParam param = {}; int num_inputs = 1; NDArray* inputs[] = {args[0].operator mxnet::NDArray*()}; diff --git a/src/api/operator/numpy/np_histogram_op.cc b/src/api/operator/numpy/np_histogram_op.cc index daeb3c730ca6..c38e8a1915c8 100644 --- a/src/api/operator/numpy/np_histogram_op.cc +++ b/src/api/operator/numpy/np_histogram_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npi.histogram") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npi_histogram"); - op::HistogramParam param; + op::HistogramParam param = {}; // parse bin_cnt if (args[2].type_code() == kNull) { param.bin_cnt = dmlc::nullopt; diff --git 
a/src/api/operator/numpy/np_init_op.cc b/src/api/operator/numpy/np_init_op.cc index 1e7caa396447..3b203c6cfe44 100644 --- a/src/api/operator/numpy/np_init_op.cc +++ b/src/api/operator/numpy/np_init_op.cc @@ -35,7 +35,7 @@ MXNET_REGISTER_API("_npi.zeros").set_body([](runtime::MXNetArgs args, runtime::M using namespace runtime; const nnvm::Op* op = Op::Get("_npi_zeros"); nnvm::NodeAttrs attrs; - op::InitOpParam param; + op::InitOpParam param = {}; if (args[0].type_code() == kDLInt) { param.shape = TShape(1, args[0].operator int64_t()); } else { @@ -62,7 +62,7 @@ MXNET_REGISTER_API("_npi.full_like") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_full_like"); nnvm::NodeAttrs attrs; - op::FullLikeOpParam param; + op::FullLikeOpParam param = {}; param.fill_value = args[1].operator double(); if (args[2].type_code() == kNull) { param.dtype = dmlc::nullopt; @@ -93,7 +93,7 @@ MXNET_REGISTER_API("_npi.indices") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_indices"); nnvm::NodeAttrs attrs; - op::IndicesOpParam param; + op::IndicesOpParam param = {}; // param.dimensions if (args[0].type_code() == kDLInt) { param.dimensions = TShape(1, args[0].operator int64_t()); @@ -124,7 +124,7 @@ MXNET_REGISTER_API("_npi.atleast_1d") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_atleast_1d"); nnvm::NodeAttrs attrs; - op::AtleastNDParam param; + op::AtleastNDParam param = {}; int args_size = args.size(); param.num_args = args_size; attrs.parsed = param; @@ -151,7 +151,7 @@ MXNET_REGISTER_API("_npi.atleast_2d") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_atleast_2d"); nnvm::NodeAttrs attrs; - op::AtleastNDParam param; + op::AtleastNDParam param = {}; int args_size = args.size(); param.num_args = args_size; attrs.parsed = param; @@ -178,7 +178,7 @@ MXNET_REGISTER_API("_npi.atleast_3d") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_atleast_3d"); nnvm::NodeAttrs attrs; - op::AtleastNDParam param; + op::AtleastNDParam param = 
{}; int args_size = args.size(); param.num_args = args_size; attrs.parsed = param; @@ -205,7 +205,7 @@ MXNET_REGISTER_API("_npi.arange") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_arange"); nnvm::NodeAttrs attrs; - op::RangeParam param; + op::RangeParam param = {}; param.start = args[0].operator double(); if (args[1].type_code() == kNull) { param.stop = dmlc::nullopt; @@ -236,7 +236,7 @@ MXNET_REGISTER_API("_npi.eye").set_body([](runtime::MXNetArgs args, runtime::MXN using namespace runtime; const nnvm::Op* op = Op::Get("_npi_eye"); nnvm::NodeAttrs attrs; - op::NumpyEyeParam param; + op::NumpyEyeParam param = {}; param.N = args[0].operator nnvm::dim_t(); if (args[1].type_code() == kNull) { param.M = dmlc::nullopt; @@ -317,7 +317,7 @@ MXNET_REGISTER_API("_npi.logspace") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_logspace"); nnvm::NodeAttrs attrs; - op::LogspaceParam param; + op::LogspaceParam param = {}; param.start = args[0].operator double(); param.stop = args[1].operator double(); if (features::is_enabled(features::INT64_TENSOR_SIZE)) @@ -354,7 +354,7 @@ MXNET_REGISTER_API("_npi.ones").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; const nnvm::Op* op = Op::Get("_npi_ones"); nnvm::NodeAttrs attrs; - op::InitOpParam param; + op::InitOpParam param = {}; if (args[0].type_code() == kDLInt) { param.shape = TShape(1, args[0].operator int64_t()); } else { @@ -380,7 +380,7 @@ MXNET_REGISTER_API("_npi.full").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; const nnvm::Op* op = Op::Get("_npi_full"); nnvm::NodeAttrs attrs; - op::NumpyInitOpWithScalarParam param; + op::NumpyInitOpWithScalarParam param = {}; if (args[0].type_code() == kDLInt) { param.shape = TShape(1, args[0].operator int64_t()); } else { @@ -423,7 +423,7 @@ MXNET_REGISTER_API("_npi.identity") using namespace runtime; const nnvm::Op*
op = Op::Get("_npi_identity"); nnvm::NodeAttrs attrs; - op::InitOpParam param; + op::InitOpParam param = {}; param.shape = TShape(args[0].operator ObjectRef()); if (args[1].type_code() == kNull) { param.dtype = mxnet::common::GetDefaultDtype(); diff --git a/src/api/operator/numpy/np_insert_op.cc b/src/api/operator/numpy/np_insert_op.cc index 2d6b7574ecb9..ef3cfc50491b 100644 --- a/src/api/operator/numpy/np_insert_op.cc +++ b/src/api/operator/numpy/np_insert_op.cc @@ -37,7 +37,7 @@ MXNET_REGISTER_API("_npi.insert_scalar") using namespace runtime; static const nnvm::Op* op = Op::Get("_npi_insert_scalar"); nnvm::NodeAttrs attrs; - op::NumpyInsertParam param; + op::NumpyInsertParam param = {}; int num_inputs = 0; param.start = dmlc::nullopt; param.step = dmlc::nullopt; @@ -78,7 +78,7 @@ MXNET_REGISTER_API("_npi.insert_slice") using namespace runtime; static const nnvm::Op* op = Op::Get("_npi_insert_slice"); nnvm::NodeAttrs attrs; - op::NumpyInsertParam param; + op::NumpyInsertParam param = {}; int num_inputs = 0; if (args[1].type_code() == kDLInt || args[1].type_code() == kDLUInt || args[1].type_code() == kDLFloat) { @@ -126,7 +126,7 @@ MXNET_REGISTER_API("_npi.insert_tensor") using namespace runtime; static const nnvm::Op* op = Op::Get("_npi_insert_tensor"); nnvm::NodeAttrs attrs; - op::NumpyInsertParam param; + op::NumpyInsertParam param = {}; param.start = dmlc::nullopt; param.step = dmlc::nullopt; param.stop = dmlc::nullopt; diff --git a/src/api/operator/numpy/np_interp_op.cc b/src/api/operator/numpy/np_interp_op.cc index c3682ded7314..baf0e5d995a3 100644 --- a/src/api/operator/numpy/np_interp_op.cc +++ b/src/api/operator/numpy/np_interp_op.cc @@ -33,7 +33,7 @@ MXNET_REGISTER_API("_npi.interp") using namespace runtime; static const nnvm::Op* op = Op::Get("_npi_interp"); nnvm::NodeAttrs attrs; - op::NumpyInterpParam param; + op::NumpyInterpParam param = {}; if (args[3].type_code() == kNull) { param.left = dmlc::nullopt; } else { diff --git 
a/src/api/operator/numpy/np_matrix_op.cc b/src/api/operator/numpy/np_matrix_op.cc index 498e11bea66e..7b7b3646ff94 100644 --- a/src/api/operator/numpy/np_matrix_op.cc +++ b/src/api/operator/numpy/np_matrix_op.cc @@ -36,7 +36,7 @@ MXNET_REGISTER_API("_npi.transpose") using namespace runtime; static const nnvm::Op* op = Op::Get("_npi_transpose"); nnvm::NodeAttrs attrs; - op::NumpyTransposeParam param; + op::NumpyTransposeParam param = {}; if (args[1].type_code() == kNull) { param.axes = TShape(-1, 0); } else if (args[1].type_code() == kDLInt) { @@ -59,7 +59,7 @@ MXNET_REGISTER_API("_npi.expand_dims") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_expand_dims"); nnvm::NodeAttrs attrs; - op::ExpandDimParam param; + op::ExpandDimParam param = {}; param.axis = args[1].operator int(); // we directly copy ExpandDimParam, which is trivially-copyable @@ -78,7 +78,7 @@ MXNET_REGISTER_API("_npi.stack").set_body([](runtime::MXNetArgs args, runtime::M using namespace runtime; const nnvm::Op* op = Op::Get("_npi_stack"); nnvm::NodeAttrs attrs; - op::StackParam param; + op::StackParam param = {}; int i = 0; int num_inputs = 0; @@ -109,7 +109,7 @@ MXNET_REGISTER_API("_npi.flip").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; const nnvm::Op* op = Op::Get("_npi_flip"); nnvm::NodeAttrs attrs; - op::FlipParam param; + op::FlipParam param = {}; NDArray* out = args[2].operator mxnet::NDArray*(); NDArray** outputs = out == nullptr ? 
nullptr : &out; @@ -139,7 +139,7 @@ MXNET_REGISTER_API("_npi.concatenate") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_concatenate"); nnvm::NodeAttrs attrs; - op::ConcatParam param; + op::ConcatParam param = {}; int arg_size = args.num_args; param.num_args = arg_size - 2; if (args[arg_size - 2].type_code() == kNull) { @@ -172,7 +172,7 @@ MXNET_REGISTER_API("_npi.dstack") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_dstack"); nnvm::NodeAttrs attrs; - op::ConcatParam param; + op::ConcatParam param = {}; int args_size = args.size(); // param.num_args param.num_args = args_size; @@ -198,7 +198,7 @@ MXNET_REGISTER_API("_npi.split").set_body([](runtime::MXNetArgs args, runtime::M int num_inputs = 1; NDArray* inputs[] = {args[0].operator mxnet::NDArray*()}; nnvm::NodeAttrs attrs; - op::SplitParam param; + op::SplitParam param = {}; param.axis = args[2].operator int(); param.squeeze_axis = false; if (args[1].type_code() == kDLInt) { @@ -235,7 +235,7 @@ MXNET_REGISTER_API("_npi.roll").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; static const nnvm::Op* op = Op::Get("_npi_roll"); nnvm::NodeAttrs attrs; - op::NumpyRollParam param; + op::NumpyRollParam param = {}; if (args[1].type_code() == kNull) { param.shift = dmlc::nullopt; } else if (args[1].type_code() == kDLInt) { @@ -264,7 +264,7 @@ MXNET_REGISTER_API("_npi.rot90").set_body([](runtime::MXNetArgs args, runtime::M using namespace runtime; static const nnvm::Op* op = Op::Get("_npi_rot90"); nnvm::NodeAttrs attrs; - op::NumpyRot90Param param; + op::NumpyRot90Param param = {}; param.k = args[1].operator int(); if (args[2].type_code() == kNull) { param.axes = dmlc::nullopt; @@ -288,7 +288,7 @@ MXNET_REGISTER_API("_npi.column_stack") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_column_stack"); nnvm::NodeAttrs attrs; - op::NumpyColumnStackParam param; + op::NumpyColumnStackParam param = {}; param.num_args = args.size(); attrs.parsed = param; @@ -309,7 
+309,7 @@ MXNET_REGISTER_API("_npi.hstack") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_hstack"); nnvm::NodeAttrs attrs; - op::ConcatParam param; + op::ConcatParam param = {}; param.num_args = args.size(); attrs.parsed = param; @@ -330,7 +330,7 @@ MXNET_REGISTER_API("_npi.array_split") using namespace runtime; static const nnvm::Op* op = Op::Get("_npi_array_split"); nnvm::NodeAttrs attrs; - op::SplitParam param; + op::SplitParam param = {}; param.axis = args[2].operator int(); param.squeeze_axis = false; if (args[1].type_code() == kDLInt) { @@ -369,7 +369,7 @@ MXNET_REGISTER_API("_npi.dsplit") CHECK_GE(inputs[0]->shape().ndim(), 3) << "ValueError: dsplit only works on arrays of 3 or more dimensions"; nnvm::NodeAttrs attrs; - op::SplitParam param; + op::SplitParam param = {}; param.axis = 2; param.squeeze_axis = false; if (args[1].type_code() == kDLInt) { @@ -408,7 +408,7 @@ MXNET_REGISTER_API("_npi.hsplit") CHECK_GE(inputs[0]->shape().ndim(), 1) << "ValueError: hsplit only works on arrays of 1 or more dimensions"; nnvm::NodeAttrs attrs; - op::SplitParam param; + op::SplitParam param = {}; param.axis = 0; param.squeeze_axis = false; if (args[1].type_code() == kDLInt) { @@ -445,7 +445,7 @@ MXNET_REGISTER_API("_npi.vsplit") CHECK_GE(inputs[0]->shape().ndim(), 2) << "ValueError: vsplit only works on arrays of 2 or more dimensions"; nnvm::NodeAttrs attrs; - op::SplitParam param; + op::SplitParam param = {}; param.axis = 0; param.squeeze_axis = false; if (args[1].type_code() == kDLInt) { @@ -479,7 +479,7 @@ MXNET_REGISTER_API("_npi.diag").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; const nnvm::Op* op = Op::Get("_npi_diag"); nnvm::NodeAttrs attrs; - op::NumpyDiagParam param; + op::NumpyDiagParam param = {}; if (features::is_enabled(features::INT64_TENSOR_SIZE)) param.k = args[1].operator int64_t(); else @@ -499,7 +499,7 @@ MXNET_REGISTER_API("_npi.rollaxis") using namespace runtime; const nnvm::Op* op = 
Op::Get("_npi_rollaxis"); nnvm::NodeAttrs attrs; - op::NumpyRollaxisParam param; + op::NumpyRollaxisParam param = {}; param.axis = args[1].operator int(); param.start = args[2].operator int(); attrs.parsed = param; @@ -517,7 +517,7 @@ MXNET_REGISTER_API("_npi.reshape") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_reshape"); nnvm::NodeAttrs attrs; - op::NumpyXReshapeParam param; + op::NumpyXReshapeParam param = {}; if (args[1].type_code() == kNull) { param.newshape = TShape(-1, 0); } else if (args[1].type_code() == kDLInt) { @@ -542,7 +542,7 @@ MXNET_REGISTER_API("_npi.moveaxis") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_moveaxis"); nnvm::NodeAttrs attrs; - op::NumpyMoveaxisParam param; + op::NumpyMoveaxisParam param = {}; if (args[1].type_code() == kNull) { param.source = TShape(-1, 0); } else if (args[1].type_code() == kDLInt) { @@ -572,7 +572,7 @@ MXNET_REGISTER_API("_npi.diagonal") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_diagonal"); nnvm::NodeAttrs attrs; - op::NumpyDiagonalParam param; + op::NumpyDiagonalParam param = {}; if (features::is_enabled(features::INT64_TENSOR_SIZE)) param.offset = args[1].operator int64_t(); else @@ -607,7 +607,7 @@ MXNET_REGISTER_API("_npi.diagflat") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_diagflat"); nnvm::NodeAttrs attrs; - op::NumpyDiagflatParam param; + op::NumpyDiagflatParam param = {}; param.k = args[1].operator int(); int num_inputs = 1; int num_outputs = 0; @@ -624,7 +624,7 @@ MXNET_REGISTER_API("_npi.squeeze") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_squeeze"); nnvm::NodeAttrs attrs; - op::SqueezeParam param; + op::SqueezeParam param = {}; if (args[1].type_code() == kNull) { param.axis = dmlc::optional>(); } else if (args[1].type_code() == kDLInt) { @@ -647,7 +647,7 @@ MXNET_REGISTER_API("_npi.tril_indices") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_tril_indices"); nnvm::NodeAttrs attrs; - op::NumpyTrilindicesParam 
param; + op::NumpyTrilindicesParam param = {}; if (features::is_enabled(features::INT64_TENSOR_SIZE)) { param.n = args[0].operator int64_t(); param.k = args[1].operator int64_t(); @@ -677,7 +677,7 @@ MXNET_REGISTER_API("_npi.vstack") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_vstack"); nnvm::NodeAttrs attrs; - op::NumpyVstackParam param; + op::NumpyVstackParam param = {}; param.num_args = args.size(); attrs.parsed = param; diff --git a/src/api/operator/numpy/np_moments_op.cc b/src/api/operator/numpy/np_moments_op.cc index 5cb0cfaf6531..723c63f6da69 100644 --- a/src/api/operator/numpy/np_moments_op.cc +++ b/src/api/operator/numpy/np_moments_op.cc @@ -32,7 +32,7 @@ namespace mxnet { MXNET_REGISTER_API("_npi.std").set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_std"); - op::NumpyMomentsParam param; + op::NumpyMomentsParam param = {}; nnvm::NodeAttrs attrs; attrs.op = op; @@ -86,7 +86,7 @@ MXNET_REGISTER_API("_npi.std").set_body([](runtime::MXNetArgs args, runtime::MXN MXNET_REGISTER_API("_npi.var").set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_var"); - op::NumpyMomentsParam param; + op::NumpyMomentsParam param = {}; nnvm::NodeAttrs attrs; attrs.op = op; @@ -141,7 +141,7 @@ MXNET_REGISTER_API("_npi.average") .set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_average"); - op::NumpyWeightedAverageParam param; + op::NumpyWeightedAverageParam param = {}; nnvm::NodeAttrs attrs; attrs.op = op; diff --git a/src/api/operator/numpy/np_nan_to_num_op.cc b/src/api/operator/numpy/np_nan_to_num_op.cc index 804d757a035b..e9121447cb73 100644 --- a/src/api/operator/numpy/np_nan_to_num_op.cc +++ b/src/api/operator/numpy/np_nan_to_num_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npi.nan_to_num") const nnvm::Op* op = 
Op::Get("_npi_nan_to_num"); nnvm::NodeAttrs attrs; - op::NumpyNanToNumParam param; + op::NumpyNanToNumParam param = {}; int num_inputs = 1; NDArray* inputs[] = {args[0].operator mxnet::NDArray*()}; diff --git a/src/api/operator/numpy/np_ordering_op.cc b/src/api/operator/numpy/np_ordering_op.cc index 627e450892af..6f84720b981e 100644 --- a/src/api/operator/numpy/np_ordering_op.cc +++ b/src/api/operator/numpy/np_ordering_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.sort").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; const nnvm::Op* op = Op::Get("_npi_sort"); nnvm::NodeAttrs attrs; - op::SortParam param; + op::SortParam param = {}; if (args[1].type_code() == kNull) { param.axis = dmlc::nullopt; @@ -58,7 +58,7 @@ MXNET_REGISTER_API("_npi.argsort") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_argsort"); nnvm::NodeAttrs attrs; - op::ArgSortParam param; + op::ArgSortParam param = {}; if (args[1].type_code() == kNull) { param.axis = dmlc::nullopt; diff --git a/src/api/operator/numpy/np_pad_op.cc b/src/api/operator/numpy/np_pad_op.cc index 4f3b46cf0a28..67171ed32abd 100644 --- a/src/api/operator/numpy/np_pad_op.cc +++ b/src/api/operator/numpy/np_pad_op.cc @@ -105,7 +105,7 @@ MXNET_REGISTER_API("_npi.pad").set_body([](runtime::MXNetArgs args, runtime::MXN using namespace runtime; const nnvm::Op* op = Op::Get("_npi_pad"); nnvm::NodeAttrs attrs; - op::NumpyPadParam param; + op::NumpyPadParam param = {}; NDArray* inputs[] = {args[0].operator mxnet::NDArray*()}; mxnet::TShape ashape = inputs[0]->shape(); int ndim = ashape.ndim(); diff --git a/src/api/operator/numpy/np_percentile_op.cc b/src/api/operator/numpy/np_percentile_op.cc index fd311c73aeb3..fe11a6b102e8 100644 --- a/src/api/operator/numpy/np_percentile_op.cc +++ b/src/api/operator/numpy/np_percentile_op.cc @@ -52,7 +52,7 @@ MXNET_REGISTER_API("_npi.percentile") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_percentile"); nnvm::NodeAttrs attrs; - 
op::NumpyPercentileParam param; + op::NumpyPercentileParam param = {}; NDArray* out = args[5].operator mxnet::NDArray*(); NDArray** outputs = out == nullptr ? nullptr : &out; diff --git a/src/api/operator/numpy/np_repeat_op.cc b/src/api/operator/numpy/np_repeat_op.cc index c7bed2b3ec69..95ec44cdaf83 100644 --- a/src/api/operator/numpy/np_repeat_op.cc +++ b/src/api/operator/numpy/np_repeat_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.repeats") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_repeats"); nnvm::NodeAttrs attrs; - op::RepeatsParam param; + op::RepeatsParam param = {}; param.repeats = Tuple(args[1].operator ObjectRef()); if (args[2].type_code() == kNull) { param.axis = dmlc::optional(); diff --git a/src/api/operator/numpy/np_tensordot_op.cc b/src/api/operator/numpy/np_tensordot_op.cc index cf1c0fc0fefb..fb10264a6bd5 100644 --- a/src/api/operator/numpy/np_tensordot_op.cc +++ b/src/api/operator/numpy/np_tensordot_op.cc @@ -30,7 +30,7 @@ namespace mxnet { inline static void _npi_tensordot_int_axes(runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_tensordot_int_axes"); - op::TensordotIntAxesParam param; + op::TensordotIntAxesParam param = {}; nnvm::NodeAttrs attrs; param.axes = args[2].operator int(); attrs.op = op; @@ -47,7 +47,7 @@ inline static void _npi_tensordot_int_axes(runtime::MXNetArgs args, runtime::MXN inline static void _npi_tensordot(runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_tensordot"); - op::TensordotParam param; + op::TensordotParam param = {}; nnvm::NodeAttrs attrs; ADT adt = Downcast(args[2].operator ObjectRef()); if (const IntegerObj* lop = adt[0].as()) { diff --git a/src/api/operator/numpy/np_trace_op.cc b/src/api/operator/numpy/np_trace_op.cc index 125f96d2d01e..32d4bf51896f 100644 --- a/src/api/operator/numpy/np_trace_op.cc +++ b/src/api/operator/numpy/np_trace_op.cc @@ -32,7 +32,7 @@ 
MXNET_REGISTER_API("_npi.trace").set_body([](runtime::MXNetArgs args, runtime::M using namespace runtime; const nnvm::Op* op = Op::Get("_npi_trace"); nnvm::NodeAttrs attrs; - op::NumpyTraceParam param; + op::NumpyTraceParam param = {}; param.offset = args[1].operator int64_t(); param.axis1 = args[2].operator int64_t(); param.axis2 = args[3].operator int64_t(); diff --git a/src/api/operator/numpy/np_tri_op.cc b/src/api/operator/numpy/np_tri_op.cc index 915c68ca4eb0..759d2c66273a 100644 --- a/src/api/operator/numpy/np_tri_op.cc +++ b/src/api/operator/numpy/np_tri_op.cc @@ -31,7 +31,7 @@ MXNET_REGISTER_API("_npi.tri").set_body([](runtime::MXNetArgs args, runtime::MXN using namespace runtime; const nnvm::Op* op = Op::Get("_npi_tri"); nnvm::NodeAttrs attrs; - op::TriParam param; + op::TriParam param = {}; param.N = args[0].operator nnvm::dim_t(); if (args[1].type_code() == kNull) { param.M = dmlc::nullopt; diff --git a/src/api/operator/numpy/np_tril_op.cc b/src/api/operator/numpy/np_tril_op.cc index 8388797ad24a..02dc245acb8f 100644 --- a/src/api/operator/numpy/np_tril_op.cc +++ b/src/api/operator/numpy/np_tril_op.cc @@ -31,7 +31,7 @@ MXNET_REGISTER_API("_npi.tril").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; const nnvm::Op* op = Op::Get("_npi_tril"); nnvm::NodeAttrs attrs; - op::TrilParam param; + op::TrilParam param = {}; param.k = args[1].operator int(); // we directly copy TrilParam, which is trivially-copyable diff --git a/src/api/operator/numpy/np_triu_op.cc b/src/api/operator/numpy/np_triu_op.cc index 8bad12e018a9..f52bba24d134 100644 --- a/src/api/operator/numpy/np_triu_op.cc +++ b/src/api/operator/numpy/np_triu_op.cc @@ -30,7 +30,7 @@ namespace mxnet { MXNET_REGISTER_API("_npi.triu").set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; - op::TriuParam param; + op::TriuParam param = {}; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npi_triu"); // inputs diff --git 
a/src/api/operator/numpy/np_unique_op.cc b/src/api/operator/numpy/np_unique_op.cc index 19f64d714b97..94c9abf309cd 100644 --- a/src/api/operator/numpy/np_unique_op.cc +++ b/src/api/operator/numpy/np_unique_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npi.unique") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_unique"); nnvm::NodeAttrs attrs; - op::NumpyUniqueParam param; + op::NumpyUniqueParam param = {}; // param param.return_index = args[1].operator bool(); param.return_inverse = args[2].operator bool(); diff --git a/src/api/operator/numpy/np_where_op.cc b/src/api/operator/numpy/np_where_op.cc index 8b458a274f6d..df46ee517bac 100644 --- a/src/api/operator/numpy/np_where_op.cc +++ b/src/api/operator/numpy/np_where_op.cc @@ -52,7 +52,7 @@ inline static void _npi_where_scalar1(runtime::MXNetArgs args, using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = isl ? Op::Get("_npi_where_lscalar") : Op::Get("_npi_where_rscalar"); - op::NumpyWhereScalarParam param; + op::NumpyWhereScalarParam param = {}; param.scalar = isl ? 
args[1].operator double() : args[2].operator double(); attrs.op = op; attrs.parsed = param; @@ -69,7 +69,7 @@ inline static void _npi_where_scalar1(runtime::MXNetArgs args, inline static void _npi_where_scalar2(runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_where_scalar2"); - op::NumpyWhereScalar2Param param; + op::NumpyWhereScalar2Param param = {}; nnvm::NodeAttrs attrs; param.x = args[1].operator double(); param.y = args[2].operator double(); diff --git a/src/api/operator/numpy/np_window_op.cc b/src/api/operator/numpy/np_window_op.cc index 848f5c64cbe5..e882b05b73d4 100644 --- a/src/api/operator/numpy/np_window_op.cc +++ b/src/api/operator/numpy/np_window_op.cc @@ -34,7 +34,7 @@ inline static void SetNumpyWindowsParam(runtime::MXNetArgs args, const nnvm::Op* op) { using namespace runtime; nnvm::NodeAttrs attrs; - op::NumpyWindowsParam param; + op::NumpyWindowsParam param = {}; if (args[0].type_code() == kNull) { param.M = dmlc::nullopt; } else { diff --git a/src/api/operator/numpy/random/np_choice_op.cc b/src/api/operator/numpy/random/np_choice_op.cc index 7f64a697ecaf..2f8f7054cfb9 100644 --- a/src/api/operator/numpy/random/np_choice_op.cc +++ b/src/api/operator/numpy/random/np_choice_op.cc @@ -33,7 +33,7 @@ MXNET_REGISTER_API("_npi.choice") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_choice"); nnvm::NodeAttrs attrs; - op::NumpyChoiceParam param; + op::NumpyChoiceParam param = {}; NDArray* inputs[2]; int num_inputs = 0; diff --git a/src/api/operator/numpy/random/np_exponential_op.cc b/src/api/operator/numpy/random/np_exponential_op.cc index 15347a0893d2..eee811dd508d 100644 --- a/src/api/operator/numpy/random/np_exponential_op.cc +++ b/src/api/operator/numpy/random/np_exponential_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.exponential") .set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = 
Op::Get("_npi_exponential"); - op::NumpyExponentialParam param; + op::NumpyExponentialParam param = {}; nnvm::NodeAttrs attrs; attrs.op = op; if (args[1].type_code() == kDLInt) { diff --git a/src/api/operator/numpy/random/np_laplace_op.cc b/src/api/operator/numpy/random/np_laplace_op.cc index 594b4b79413b..8d3f96d6a080 100644 --- a/src/api/operator/numpy/random/np_laplace_op.cc +++ b/src/api/operator/numpy/random/np_laplace_op.cc @@ -33,7 +33,7 @@ MXNET_REGISTER_API("_npi.laplace") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_laplace"); nnvm::NodeAttrs attrs; - op::NumpyLaplaceParam param; + op::NumpyLaplaceParam param = {}; NDArray** inputs = new NDArray*[2](); int num_inputs = 0; diff --git a/src/api/operator/numpy/random/np_location_scale_op.cc b/src/api/operator/numpy/random/np_location_scale_op.cc index 30785352369c..37c0e1876081 100644 --- a/src/api/operator/numpy/random/np_location_scale_op.cc +++ b/src/api/operator/numpy/random/np_location_scale_op.cc @@ -42,7 +42,7 @@ MXNET_REGISTER_API("_npi.gumbel") .set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_gumbel"); - op::NumpyLocationScaleParam param; + op::NumpyLocationScaleParam param = {}; nnvm::NodeAttrs attrs; attrs.op = op; if (args[2].type_code() == kDLInt) { @@ -96,7 +96,7 @@ MXNET_REGISTER_API("_npi.logistic") .set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_logistic"); - op::NumpyLocationScaleParam param; + op::NumpyLocationScaleParam param = {}; nnvm::NodeAttrs attrs; attrs.op = op; if (args[2].type_code() == kDLInt) { diff --git a/src/api/operator/numpy/random/np_multinomial_op.cc b/src/api/operator/numpy/random/np_multinomial_op.cc index ad4d80838b45..1ddc5953d6ab 100644 --- a/src/api/operator/numpy/random/np_multinomial_op.cc +++ b/src/api/operator/numpy/random/np_multinomial_op.cc @@ -34,7 +34,7 @@ 
MXNET_REGISTER_API("_npi.multinomial") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_multinomial"); nnvm::NodeAttrs attrs; - op::NumpyMultinomialParam param; + op::NumpyMultinomialParam param = {}; NDArray** inputs = new NDArray*[1](); int num_inputs = 0; diff --git a/src/api/operator/numpy/random/np_pareto_op.cc b/src/api/operator/numpy/random/np_pareto_op.cc index 079b4810adbf..941360d20131 100644 --- a/src/api/operator/numpy/random/np_pareto_op.cc +++ b/src/api/operator/numpy/random/np_pareto_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.pareto") .set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_pareto"); - op::NumpyParetoParam param; + op::NumpyParetoParam param = {}; nnvm::NodeAttrs attrs; attrs.op = op; if (args[1].type_code() == kDLInt) { diff --git a/src/api/operator/numpy/random/np_power_op.cc b/src/api/operator/numpy/random/np_power_op.cc index 8543c613e46d..1bb5df6c8c5d 100644 --- a/src/api/operator/numpy/random/np_power_op.cc +++ b/src/api/operator/numpy/random/np_power_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.powerd") .set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_powerd"); - op::NumpyPowerParam param; + op::NumpyPowerParam param = {}; nnvm::NodeAttrs attrs; attrs.op = op; if (args[1].type_code() == kDLInt) { diff --git a/src/api/operator/numpy/random/np_rayleigh_op.cc b/src/api/operator/numpy/random/np_rayleigh_op.cc index df1d61c40dba..2f353ad2ec4c 100644 --- a/src/api/operator/numpy/random/np_rayleigh_op.cc +++ b/src/api/operator/numpy/random/np_rayleigh_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.rayleigh") .set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_rayleigh"); - op::NumpyRayleighParam param; + op::NumpyRayleighParam param = {}; nnvm::NodeAttrs attrs; attrs.op = op; if 
(args[1].type_code() == kDLInt) { diff --git a/src/api/operator/numpy/random/np_weibull_op.cc b/src/api/operator/numpy/random/np_weibull_op.cc index 3504f569f92f..d5941e550f6a 100644 --- a/src/api/operator/numpy/random/np_weibull_op.cc +++ b/src/api/operator/numpy/random/np_weibull_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.weibull") .set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; const nnvm::Op* op = Op::Get("_npi_weibull"); - op::NumpyWeibullParam param; + op::NumpyWeibullParam param = {}; nnvm::NodeAttrs attrs; attrs.op = op; if (args[1].type_code() == kDLInt) { diff --git a/src/api/operator/numpy_extension/npx_activation_op.cc b/src/api/operator/numpy_extension/npx_activation_op.cc index 32a0d6661d28..c7771d7e308d 100644 --- a/src/api/operator/numpy_extension/npx_activation_op.cc +++ b/src/api/operator/numpy_extension/npx_activation_op.cc @@ -57,7 +57,7 @@ MXNET_REGISTER_API("_npx.activation") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_activation"); - op::ActivationParam param; + op::ActivationParam param = {}; // act_type param.act_type = String2MXNetActType(args[1].operator std::string()); attrs.parsed = param; diff --git a/src/api/operator/numpy_extension/npx_arange_like_op.cc b/src/api/operator/numpy_extension/npx_arange_like_op.cc index 07e37efe8145..77cbb3181211 100644 --- a/src/api/operator/numpy_extension/npx_arange_like_op.cc +++ b/src/api/operator/numpy_extension/npx_arange_like_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npx.arange_like") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_arange_like"); - op::RangeLikeParam param; + op::RangeLikeParam param = {}; // inputs int num_inputs = 1; NDArray* inputs[] = {args[0].operator mxnet::NDArray*()}; diff --git a/src/api/operator/numpy_extension/npx_batch_dot_op.cc b/src/api/operator/numpy_extension/npx_batch_dot_op.cc index d764801859c5..cdae12ecf8d0 100644 --- 
a/src/api/operator/numpy_extension/npx_batch_dot_op.cc +++ b/src/api/operator/numpy_extension/npx_batch_dot_op.cc @@ -48,7 +48,7 @@ MXNET_REGISTER_API("_npx.batch_dot") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_batch_dot"); - op::DotParam param; + op::DotParam param = {}; // inputs int num_inputs = 2; std::vector inputs; diff --git a/src/api/operator/numpy_extension/npx_batch_norm_op.cc b/src/api/operator/numpy_extension/npx_batch_norm_op.cc index a82703d9212e..5bdc5c272004 100644 --- a/src/api/operator/numpy_extension/npx_batch_norm_op.cc +++ b/src/api/operator/numpy_extension/npx_batch_norm_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npx.batch_norm") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_batch_norm"); - op::BatchNormParam param; + op::BatchNormParam param = {}; // eps param.eps = args[5].operator double(); // momentum diff --git a/src/api/operator/numpy_extension/npx_broadcast_like_op.cc b/src/api/operator/numpy_extension/npx_broadcast_like_op.cc index 3929a516f116..5f6c3a1ff74b 100644 --- a/src/api/operator/numpy_extension/npx_broadcast_like_op.cc +++ b/src/api/operator/numpy_extension/npx_broadcast_like_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npx.broadcast_like") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_broadcast_like"); - op::BroadcastLikeParam param; + op::BroadcastLikeParam param = {}; // inputs int num_inputs = 2; std::vector inputs; diff --git a/src/api/operator/numpy_extension/npx_control_flow_op.cc b/src/api/operator/numpy_extension/npx_control_flow_op.cc index 5e422381e1e1..9e3ccc7ebf9d 100644 --- a/src/api/operator/numpy_extension/npx_control_flow_op.cc +++ b/src/api/operator/numpy_extension/npx_control_flow_op.cc @@ -35,7 +35,7 @@ MXNET_REGISTER_API("_npx.foreach") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_foreach"); - op::NPXForeachParam param; + op::NPXForeachParam param = {}; 
int args_size = args.size(); int num_inputs = args_size - 7; // inputs @@ -94,7 +94,7 @@ MXNET_REGISTER_API("_npx.while_loop") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_while_loop"); - op::NPXWhileLoopParam param; + op::NPXWhileLoopParam param = {}; int args_size = args.size(); int num_inputs = args_size - 8; // inputs @@ -151,7 +151,7 @@ MXNET_REGISTER_API("_npx.cond").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_cond"); - op::NPXCondParam param; + op::NPXCondParam param = {}; int args_size = args.size(); int num_inputs = args_size - 7; // inputs diff --git a/src/api/operator/numpy_extension/npx_convolution_op.cc b/src/api/operator/numpy_extension/npx_convolution_op.cc index 4e543b5eeee9..9df9c7311484 100644 --- a/src/api/operator/numpy_extension/npx_convolution_op.cc +++ b/src/api/operator/numpy_extension/npx_convolution_op.cc @@ -68,7 +68,7 @@ MXNET_REGISTER_API("_npx.convolution") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_convolution"); - op::ConvolutionParam param; + op::ConvolutionParam param = {}; int args_size = args.size(); // no_bias if (args[args_size - 4].type_code() == kNull) { diff --git a/src/api/operator/numpy_extension/npx_deconvolution_op.cc b/src/api/operator/numpy_extension/npx_deconvolution_op.cc index 8d35da394c3c..e751056b3050 100644 --- a/src/api/operator/numpy_extension/npx_deconvolution_op.cc +++ b/src/api/operator/numpy_extension/npx_deconvolution_op.cc @@ -68,7 +68,7 @@ MXNET_REGISTER_API("_npx.deconvolution") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_deconvolution"); - op::DeconvolutionParam param; + op::DeconvolutionParam param = {}; int args_size = args.size(); // no_bias if (args[args_size - 4].type_code() == kNull) { diff --git a/src/api/operator/numpy_extension/npx_dropout_op.cc 
b/src/api/operator/numpy_extension/npx_dropout_op.cc index 3ccc7f62fe9b..27f95b93087b 100644 --- a/src/api/operator/numpy_extension/npx_dropout_op.cc +++ b/src/api/operator/numpy_extension/npx_dropout_op.cc @@ -46,7 +46,7 @@ MXNET_REGISTER_API("_npx.dropout") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_dropout"); - op::DropoutParam param; + op::DropoutParam param = {}; // inputs int num_inputs = 1; NDArray* inputs[] = {args[0].operator mxnet::NDArray*()}; diff --git a/src/api/operator/numpy_extension/npx_embedding_op.cc b/src/api/operator/numpy_extension/npx_embedding_op.cc index 73d47c83c441..3aa523908910 100644 --- a/src/api/operator/numpy_extension/npx_embedding_op.cc +++ b/src/api/operator/numpy_extension/npx_embedding_op.cc @@ -33,7 +33,7 @@ MXNET_REGISTER_API("_npx.embedding") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_embedding"); - op::EmbeddingParam param; + op::EmbeddingParam param = {}; // inputs int num_inputs = 2; std::vector inputs; diff --git a/src/api/operator/numpy_extension/npx_fully_connected_op.cc b/src/api/operator/numpy_extension/npx_fully_connected_op.cc index 892c3e0037c9..11c36a90c526 100644 --- a/src/api/operator/numpy_extension/npx_fully_connected_op.cc +++ b/src/api/operator/numpy_extension/npx_fully_connected_op.cc @@ -35,7 +35,7 @@ MXNET_REGISTER_API("_npx.fully_connected") int args_size = args.size(); nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_fully_connected"); - op::FullyConnectedParam param; + op::FullyConnectedParam param = {}; // no_bias param.no_bias = args[args_size - 2].operator bool(); // inputs diff --git a/src/api/operator/numpy_extension/npx_group_norm_op.cc b/src/api/operator/numpy_extension/npx_group_norm_op.cc index 473e43e20616..8776e297d40d 100644 --- a/src/api/operator/numpy_extension/npx_group_norm_op.cc +++ b/src/api/operator/numpy_extension/npx_group_norm_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npx.group_norm") using 
namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_group_norm"); - op::GroupNormParam param; + op::GroupNormParam param = {}; // num_groups param.num_groups = args[3]; // eps diff --git a/src/api/operator/numpy_extension/npx_layer_norm_op.cc b/src/api/operator/numpy_extension/npx_layer_norm_op.cc index 6b79a95f7237..90cc4287fee8 100644 --- a/src/api/operator/numpy_extension/npx_layer_norm_op.cc +++ b/src/api/operator/numpy_extension/npx_layer_norm_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npx.layer_norm") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_layer_norm"); - op::LayerNormParam param; + op::LayerNormParam param = {}; // inputs int num_inputs = 3; std::vector inputs; diff --git a/src/api/operator/numpy_extension/npx_leaky_relu_op.cc b/src/api/operator/numpy_extension/npx_leaky_relu_op.cc index d4723bf46852..67631be9be65 100644 --- a/src/api/operator/numpy_extension/npx_leaky_relu_op.cc +++ b/src/api/operator/numpy_extension/npx_leaky_relu_op.cc @@ -55,7 +55,7 @@ MXNET_REGISTER_API("_npx.leaky_relu") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_leaky_relu"); - op::LeakyReLUParam param; + op::LeakyReLUParam param = {}; int args_size = args.size(); // act_type param.act_type = String2ActType(args[args_size - 4].operator std::string()); diff --git a/src/api/operator/numpy_extension/npx_one_hot_op.cc b/src/api/operator/numpy_extension/npx_one_hot_op.cc index e8d66af0d4de..05336b47e27c 100644 --- a/src/api/operator/numpy_extension/npx_one_hot_op.cc +++ b/src/api/operator/numpy_extension/npx_one_hot_op.cc @@ -33,7 +33,7 @@ MXNET_REGISTER_API("_npx.one_hot") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_one_hot"); - op::OneHotParam param; + op::OneHotParam param = {}; // inputs int num_inputs = 1; NDArray* inputs[] = {args[0].operator mxnet::NDArray*()}; diff --git a/src/api/operator/numpy_extension/npx_pick_op.cc 
b/src/api/operator/numpy_extension/npx_pick_op.cc index 22cbc84ec44a..5c70f6a73a68 100644 --- a/src/api/operator/numpy_extension/npx_pick_op.cc +++ b/src/api/operator/numpy_extension/npx_pick_op.cc @@ -45,7 +45,7 @@ MXNET_REGISTER_API("_npx.pick").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_pick"); - op::PickParam param; + op::PickParam param = {}; // axis if (args[2].type_code() == kNull) { param.axis = dmlc::nullopt; diff --git a/src/api/operator/numpy_extension/npx_pooling_op.cc b/src/api/operator/numpy_extension/npx_pooling_op.cc index 0b743bda9909..ec5934d26332 100644 --- a/src/api/operator/numpy_extension/npx_pooling_op.cc +++ b/src/api/operator/numpy_extension/npx_pooling_op.cc @@ -86,7 +86,7 @@ MXNET_REGISTER_API("_npx.pooling") using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_pooling"); - op::PoolingParam param; + op::PoolingParam param = {}; // inputs int num_inputs = 1; NDArray* inputs[] = {args[0].operator mxnet::NDArray*()}; diff --git a/src/api/operator/numpy_extension/npx_rnn_op.cc b/src/api/operator/numpy_extension/npx_rnn_op.cc index 7d75e13dfb5e..bda44f0d1ea1 100644 --- a/src/api/operator/numpy_extension/npx_rnn_op.cc +++ b/src/api/operator/numpy_extension/npx_rnn_op.cc @@ -49,7 +49,7 @@ MXNET_REGISTER_API("_npx.rnn").set_body([](runtime::MXNetArgs args, runtime::MXN using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_rnn"); - op::RNNParam param; + op::RNNParam param = {}; int args_size = args.size(); int num_inputs = 0; diff --git a/src/api/operator/numpy_extension/npx_softmax_op.cc b/src/api/operator/numpy_extension/npx_softmax_op.cc index 6c8f9f438499..03a167f101e9 100644 --- a/src/api/operator/numpy_extension/npx_softmax_op.cc +++ b/src/api/operator/numpy_extension/npx_softmax_op.cc @@ -33,7 +33,7 @@ MXNET_REGISTER_API("_npx.softmax") using namespace runtime; nnvm::NodeAttrs attrs; static 
const nnvm::Op* op = Op::Get("_npx_softmax"); - op::SoftmaxParam param; + op::SoftmaxParam param = {}; int args_size = args.size(); // inputs int num_inputs = args_size - 4; @@ -87,7 +87,7 @@ MXNET_REGISTER_API("_npx.log_softmax") using namespace runtime; nnvm::NodeAttrs attrs; static const nnvm::Op* op = Op::Get("_npx_log_softmax"); - op::SoftmaxParam param; + op::SoftmaxParam param = {}; int args_size = args.size(); // inputs @@ -142,7 +142,7 @@ MXNET_REGISTER_API("_npx.masked_softmax") using namespace runtime; nnvm::NodeAttrs attrs; static const nnvm::Op* op = Op::Get("_npx_masked_softmax"); - op::MaskedSoftmaxParam param; + op::MaskedSoftmaxParam param = {}; // inputs int num_inputs = 2; @@ -186,7 +186,7 @@ MXNET_REGISTER_API("_npx.masked_log_softmax") using namespace runtime; nnvm::NodeAttrs attrs; static const nnvm::Op* op = Op::Get("_npx_masked_log_softmax"); - op::MaskedSoftmaxParam param; + op::MaskedSoftmaxParam param = {}; // inputs int num_inputs = 2; diff --git a/src/api/operator/numpy_extension/npx_topk_op.cc b/src/api/operator/numpy_extension/npx_topk_op.cc index af200f59e5f8..6729ec0e0e08 100644 --- a/src/api/operator/numpy_extension/npx_topk_op.cc +++ b/src/api/operator/numpy_extension/npx_topk_op.cc @@ -49,7 +49,7 @@ MXNET_REGISTER_API("_npx.topk").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; nnvm::NodeAttrs attrs; const nnvm::Op* op = Op::Get("_npx_topk"); - op::TopKParam param; + op::TopKParam param = {}; // inputs int num_inputs = 1; NDArray* inputs[] = {args[0].operator mxnet::NDArray*()}; diff --git a/src/api/operator/random/np_gamma_op.cc b/src/api/operator/random/np_gamma_op.cc index a543e2b6c4d3..48028c07afb5 100644 --- a/src/api/operator/random/np_gamma_op.cc +++ b/src/api/operator/random/np_gamma_op.cc @@ -33,7 +33,7 @@ MXNET_REGISTER_API("_npi.gamma").set_body([](runtime::MXNetArgs args, runtime::M using namespace runtime; const nnvm::Op* op = Op::Get("_npi_gamma"); nnvm::NodeAttrs attrs; - 
op::NumpyGammaParam param; + op::NumpyGammaParam param = {}; int num_inputs = 0; std::vector inputs; if (args[0].type_code() == kDLFloat || args[0].type_code() == kDLInt) { diff --git a/src/api/operator/random/np_normal_op.cc b/src/api/operator/random/np_normal_op.cc index 5fd22eed8048..60e89e15ceb3 100644 --- a/src/api/operator/random/np_normal_op.cc +++ b/src/api/operator/random/np_normal_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npi.normal") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_normal"); nnvm::NodeAttrs attrs; - op::NumpyNormalParam param; + op::NumpyNormalParam param = {}; int num_inputs = 0; std::vector inputs; if (args[0].type_code() == kDLFloat || args[0].type_code() == kDLInt) { diff --git a/src/api/operator/random/np_randint_op.cc b/src/api/operator/random/np_randint_op.cc index 4f6128cde038..820e78487510 100644 --- a/src/api/operator/random/np_randint_op.cc +++ b/src/api/operator/random/np_randint_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npi.randint") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_random_randint"); nnvm::NodeAttrs attrs; - op::SampleRandIntParam param; + op::SampleRandIntParam param = {}; int num_inputs = 0; param.low = args[0].operator int(); param.high = args[1].operator int(); diff --git a/src/api/operator/random/np_uniform_op.cc b/src/api/operator/random/np_uniform_op.cc index 3cb2daa720ea..ef5d957b52e0 100644 --- a/src/api/operator/random/np_uniform_op.cc +++ b/src/api/operator/random/np_uniform_op.cc @@ -34,7 +34,7 @@ MXNET_REGISTER_API("_npi.uniform") using namespace runtime; const nnvm::Op* op = Op::Get("_npi_uniform"); nnvm::NodeAttrs attrs; - op::NumpyUniformParam param; + op::NumpyUniformParam param = {}; int num_inputs = 0; std::vector inputs; if (args[0].type_code() == kDLFloat || args[0].type_code() == kDLInt) { diff --git a/src/api/operator/tensor/indexing_op.cc b/src/api/operator/tensor/indexing_op.cc index bfd39aadfc34..208dc8d26e8f 100644 --- 
a/src/api/operator/tensor/indexing_op.cc +++ b/src/api/operator/tensor/indexing_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.take").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; const nnvm::Op* op = Op::Get("_npi_take"); nnvm::NodeAttrs attrs; - op::TakeParam param; + op::TakeParam param = {}; NDArray* inputs[2]; if (args[0].type_code() != kNull) { diff --git a/src/api/operator/tensor/matrix_op.cc b/src/api/operator/tensor/matrix_op.cc index 4b18ef15094f..585c801b46b4 100644 --- a/src/api/operator/tensor/matrix_op.cc +++ b/src/api/operator/tensor/matrix_op.cc @@ -32,7 +32,7 @@ MXNET_REGISTER_API("_npi.clip").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; const nnvm::Op* op = Op::Get("_npi_clip"); nnvm::NodeAttrs attrs; - op::ClipParam param; + op::ClipParam param = {}; NDArray* inputs[1]; if (args[0].type_code() != kNull) { diff --git a/src/api/operator/ufunc_helper.cc b/src/api/operator/ufunc_helper.cc index 978e9d4840f7..927253150f9a 100644 --- a/src/api/operator/ufunc_helper.cc +++ b/src/api/operator/ufunc_helper.cc @@ -62,7 +62,7 @@ void UFuncHelper(NDArray* lhs, const nnvm::Op* op) { using namespace runtime; nnvm::NodeAttrs attrs; - op::NumpyBinaryScalarParam param; + op::NumpyBinaryScalarParam param = {}; param.scalar = rhs; param.is_int = true; attrs.op = op; @@ -87,7 +87,7 @@ void UFuncHelper(NDArray* lhs, const nnvm::Op* op) { using namespace runtime; nnvm::NodeAttrs attrs; - op::NumpyBinaryScalarParam param; + op::NumpyBinaryScalarParam param = {}; param.scalar = rhs; param.is_int = false; attrs.op = op; @@ -112,7 +112,7 @@ void UFuncHelper(int64_t lhs, const nnvm::Op* op) { using namespace runtime; nnvm::NodeAttrs attrs; - op::NumpyBinaryScalarParam param; + op::NumpyBinaryScalarParam param = {}; param.scalar = lhs; param.is_int = true; attrs.op = op; @@ -137,7 +137,7 @@ void UFuncHelper(double lhs, const nnvm::Op* op) { using namespace runtime; nnvm::NodeAttrs attrs; - op::NumpyBinaryScalarParam 
param; + op::NumpyBinaryScalarParam param = {}; param.scalar = lhs; param.is_int = false; attrs.op = op; diff --git a/src/c_api/c_api_profile.cc b/src/c_api/c_api_profile.cc index 47f6328b4de7..1358d0ba8ada 100644 --- a/src/c_api/c_api_profile.cc +++ b/src/c_api/c_api_profile.cc @@ -250,7 +250,7 @@ int MXSetProcessProfilerConfig(int num_params, CHECK_NOTNULL(vals[i]); kwargs.emplace_back(std::make_pair(keys[i], vals[i])); } - ProfileConfigParam param; + ProfileConfigParam param = {}; param.Init(kwargs); if (static_cast(param.profile_process) == ProfileProcess::kServer) { std::ostringstream os; From b54a3dc26305cf0975c76907607e099f045ffd28 Mon Sep 17 00:00:00 2001 From: mozga-intel Date: Wed, 3 Nov 2021 13:42:42 +0100 Subject: [PATCH 2/3] Add spaces --- src/api/operator/numpy_extension/npx_softmax_op.cc | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/api/operator/numpy_extension/npx_softmax_op.cc b/src/api/operator/numpy_extension/npx_softmax_op.cc index 03a167f101e9..44a776fd82c4 100644 --- a/src/api/operator/numpy_extension/npx_softmax_op.cc +++ b/src/api/operator/numpy_extension/npx_softmax_op.cc @@ -33,8 +33,8 @@ MXNET_REGISTER_API("_npx.softmax") using namespace runtime; nnvm::NodeAttrs attrs; static const nnvm::Op* op = Op::Get("_npx_softmax"); - op::SoftmaxParam param = {}; - int args_size = args.size(); + op::SoftmaxParam param = {}; + int args_size = args.size(); // inputs int num_inputs = args_size - 4; std::vector inputs; @@ -87,7 +87,7 @@ MXNET_REGISTER_API("_npx.log_softmax") using namespace runtime; nnvm::NodeAttrs attrs; static const nnvm::Op* op = Op::Get("_npx_log_softmax"); - op::SoftmaxParam param = {}; + op::SoftmaxParam param = {}; int args_size = args.size(); // inputs @@ -141,7 +141,7 @@ MXNET_REGISTER_API("_npx.masked_softmax") .set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; nnvm::NodeAttrs attrs; - static const nnvm::Op* op = Op::Get("_npx_masked_softmax"); + 
static const nnvm::Op* op = Op::Get("_npx_masked_softmax"); op::MaskedSoftmaxParam param = {}; // inputs @@ -185,7 +185,7 @@ MXNET_REGISTER_API("_npx.masked_log_softmax") .set_body([](runtime::MXNetArgs args, runtime::MXNetRetValue* ret) { using namespace runtime; nnvm::NodeAttrs attrs; - static const nnvm::Op* op = Op::Get("_npx_masked_log_softmax"); + static const nnvm::Op* op = Op::Get("_npx_masked_log_softmax"); op::MaskedSoftmaxParam param = {}; // inputs From 8e2fee70f6a3a99d9452f64bfbe9fe802e5e86cb Mon Sep 17 00:00:00 2001 From: mozga-intel Date: Thu, 4 Nov 2021 16:51:14 +0100 Subject: [PATCH 3/3] Fix, rebase comment --- src/api/operator/numpy/np_init_op.cc | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/src/api/operator/numpy/np_init_op.cc b/src/api/operator/numpy/np_init_op.cc index 3b203c6cfe44..5fed60463377 100644 --- a/src/api/operator/numpy/np_init_op.cc +++ b/src/api/operator/numpy/np_init_op.cc @@ -380,11 +380,7 @@ MXNET_REGISTER_API("_npi.full").set_body([](runtime::MXNetArgs args, runtime::MX using namespace runtime; const nnvm::Op* op = Op::Get("_npi_full"); nnvm::NodeAttrs attrs; -<<<<<<< HEAD - op::NumpyInitOpWithScalarParam param; -======= - op::InitOpWithScalarParam param = {}; ->>>>>>> 119a2314f (Zero intialization to avoid error message on a Centos) + op::NumpyInitOpWithScalarParam param = {}; if (args[0].type_code() == kDLInt) { param.shape = TShape(1, args[0].operator int64_t()); } else {