diff --git a/src/operator/nn/activation-inl.h b/src/operator/nn/activation-inl.h
index 5e701a205e1e..d8da30b7263a 100644
--- a/src/operator/nn/activation-inl.h
+++ b/src/operator/nn/activation-inl.h
@@ -101,10 +101,10 @@ void ActivationBackward(const OpContext &ctx, const TBlob &out_grad,
 
 template<typename xpu>
 void ActivationCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                       const OpContext& ctx,
+                       const std::vector<TBlob>& inputs,
+                       const std::vector<OpReqType>& req,
+                       const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
   const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
@@ -134,10 +134,10 @@ void ActivationCompute(const nnvm::NodeAttrs& attrs,
 
 template<typename xpu>
 void ActivationGradCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                           const OpContext& ctx,
+                           const std::vector<TBlob>& inputs,
+                           const std::vector<OpReqType>& req,
+                           const std::vector<TBlob>& outputs) {
 #if MXNET_USE_CUDNN == 1
   CHECK_EQ(inputs.size(), 3U);
 #else
diff --git a/src/operator/nn/batch_norm-inl.h b/src/operator/nn/batch_norm-inl.h
index b229290dd3a8..4838570bda97 100644
--- a/src/operator/nn/batch_norm-inl.h
+++ b/src/operator/nn/batch_norm-inl.h
@@ -110,10 +110,10 @@ class BatchNormOp {
    * \sa OpReqType, OpContext
    */
   void Forward(const OpContext &ctx,
-    const std::vector<TBlob> &in_data,
-    const std::vector<OpReqType> &req,
-    const std::vector<TBlob> &out_data,
-    const std::vector<TBlob> &aux_states) {
+               const std::vector<TBlob> &in_data,
+               const std::vector<OpReqType> &req,
+               const std::vector<TBlob> &out_data,
+               const std::vector<TBlob> &aux_states) {
     using namespace mshadow;
     using namespace mshadow::expr;
 
@@ -160,12 +160,12 @@ class BatchNormOp {
    * \sa OperatorProperty, OpReqType, OpContext
    */
   void Backward(const OpContext &ctx,
-    const std::vector<TBlob> &out_grad,
-    const std::vector<TBlob> &in_data,
-    const std::vector<TBlob> &out_data,
-    const std::vector<OpReqType> &req,
-    const std::vector<TBlob> &in_grad,
-    const std::vector<TBlob> &aux_states) {
+                const std::vector<TBlob> &out_grad,
+                const std::vector<TBlob> &in_data,
+                const std::vector<TBlob> &out_data,
+                const std::vector<OpReqType> &req,
+                const std::vector<TBlob> &in_grad,
+                const std::vector<TBlob> &aux_states) {
     CHECK_EQ(out_grad.size(), param_.output_mean_var ? 3U : 1U);
     CHECK_EQ(in_data.size(), 3U);
     CHECK_EQ(out_data.size(), 3U);
@@ -222,9 +222,9 @@ static BatchNormOp &GetBatchNormOp(const BatchNormParam& pa
 
 template<typename xpu>
 void BatchNormCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                      const OpContext& ctx, const std::vector<TBlob>& inputs,
+                      const std::vector<OpReqType>& req,
+                      const std::vector<TBlob>& outputs) {
   const BatchNormParam& param = nnvm::get<BatchNormParam>(attrs.parsed);
   CHECK_EQ(inputs.size(), 5U);
   std::vector<TBlob> in_data(inputs.begin(), inputs.begin() + 3);
@@ -237,9 +237,9 @@ void BatchNormCompute(const nnvm::NodeAttrs& attrs,
 
 template<typename xpu>
 void BatchNormGradCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                          const OpContext& ctx, const std::vector<TBlob>& inputs,
+                          const std::vector<OpReqType>& req,
+                          const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 11U);
   const BatchNormParam& param = nnvm::get<BatchNormParam>(attrs.parsed);
   std::vector<TBlob> out_grad(inputs.begin(),
diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index 9ce4febd3eef..298de204a53f 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -318,7 +318,8 @@ void BatchNormOp::DoBackward(mshadow::Stream *,
 
 DMLC_REGISTER_PARAMETER(BatchNormParam);
 
 static bool BatchNormShape(const nnvm::NodeAttrs& attrs,
-    std::vector<TShape> *in_shape, std::vector<TShape> *out_shape) {
+                           std::vector<TShape> *in_shape,
+                           std::vector<TShape> *out_shape) {
   const BatchNormParam& param = nnvm::get<BatchNormParam>(attrs.parsed);
   using namespace mshadow;
   CHECK_EQ(in_shape->size(), 5U) << "Input:[data, gamma, beta, MovingMean, MovingVar]";
@@ -357,7 +358,7 @@ static inline std::vector<std::string> ListOutputs() {
 }
 
 static bool BatchNormType(const nnvm::NodeAttrs& attrs,
-    std::vector<int> *in_type, std::vector<int> *out_type) {
+                          std::vector<int> *in_type, std::vector<int> *out_type) {
   using namespace mshadow;
   CHECK_GE(in_type->size(), 1U);
   const int dtype = (*in_type)[0];
diff --git a/src/operator/nn/convolution-inl.h b/src/operator/nn/convolution-inl.h
index 99dad0cf5544..7614b840e24b 100644
--- a/src/operator/nn/convolution-inl.h
+++ b/src/operator/nn/convolution-inl.h
@@ -161,9 +161,9 @@ class ConvolutionOp {
   }
 
   void Forward(const OpContext &ctx,
-    const std::vector<TBlob> &in_data,
-    const std::vector<OpReqType> &req,
-    const std::vector<TBlob> &out_data) {
+               const std::vector<TBlob> &in_data,
+               const std::vector<OpReqType> &req,
+               const std::vector<TBlob> &out_data) {
     using namespace mshadow;
     using namespace mshadow::expr;
     CHECK_EQ(req[conv::kOut], kWriteTo);
@@ -233,10 +233,10 @@ class ConvolutionOp {
   }
 
   void Backward(const OpContext &ctx,
-    const std::vector<TBlob>& out_grad,
-    const std::vector<TBlob>& in_data,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& in_grad) {
+                const std::vector<TBlob>& out_grad,
+                const std::vector<TBlob>& in_data,
+                const std::vector<OpReqType>& req,
+                const std::vector<TBlob>& in_grad) {
     using namespace mshadow;
     using namespace mshadow::expr;
     CHECK_EQ(out_grad.size(), 1U);
@@ -387,9 +387,9 @@ class ConvolutionOp {
 
 template<typename xpu>
 void ConvolutionCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                        const OpContext& ctx, const std::vector<TBlob>& inputs,
+                        const std::vector<OpReqType>& req,
+                        const std::vector<TBlob>& outputs) {
   const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
   MSHADOW_REAL_TYPE_SWITCH(inputs[conv::kData].type_flag_, DType, {
     static thread_local ConvolutionOp<xpu, DType> op;
@@ -400,9 +400,9 @@ void ConvolutionCompute(const nnvm::NodeAttrs& attrs,
 
 template<typename xpu>
 void ConvolutionGradCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                            const OpContext& ctx, const std::vector<TBlob>& inputs,
+                            const std::vector<OpReqType>& req,
+                            const std::vector<TBlob>& outputs) {
   const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
   std::vector<TBlob> in_data(inputs.begin() + 1, inputs.end());
   const TBlob &out_grad = inputs[0];
diff --git a/src/operator/nn/convolution.cc b/src/operator/nn/convolution.cc
index 2bd50b15a395..9cc0914e1f01 100644
--- a/src/operator/nn/convolution.cc
+++ b/src/operator/nn/convolution.cc
@@ -52,7 +52,8 @@ static inline std::vector<std::string> ListArguments(const ConvolutionParam& par
 }
 
 static bool ConvolutionShape(const nnvm::NodeAttrs& attrs,
-    std::vector<TShape> *in_shape, std::vector<TShape> *out_shape) {
+                             std::vector<TShape> *in_shape,
+                             std::vector<TShape> *out_shape) {
   using namespace mshadow;
   const ConvolutionParam& param_ = nnvm::get<ConvolutionParam>(attrs.parsed);
   if (!param_.no_bias) {
@@ -241,7 +242,7 @@ static bool ConvolutionShape(const nnvm::NodeAttrs& attrs,
 }
 
 static bool ConvolutionType(const nnvm::NodeAttrs& attrs,
-    std::vector<int> *in_type, std::vector<int> *out_type) {
+                            std::vector<int> *in_type, std::vector<int> *out_type) {
   const ConvolutionParam& param_ = nnvm::get<ConvolutionParam>(attrs.parsed);
   CHECK_GE(in_type->size(), 1U);
   int dtype = (*in_type)[0];
diff --git a/src/operator/nn/deconvolution-inl.h b/src/operator/nn/deconvolution-inl.h
index ecd76fd8a165..5daab76f1f12 100644
--- a/src/operator/nn/deconvolution-inl.h
+++ b/src/operator/nn/deconvolution-inl.h
@@ -202,9 +202,9 @@ class DeconvolutionOp {
   }
 
   void Forward(const OpContext &ctx,
-    const std::vector<TBlob> &in_data,
-    const std::vector<OpReqType> &req,
-    const std::vector<TBlob> &out_data) {
+               const std::vector<TBlob> &in_data,
+               const std::vector<OpReqType> &req,
+               const std::vector<TBlob> &out_data) {
     using namespace mshadow;
     using namespace mshadow::expr;
 
@@ -309,10 +309,10 @@ class DeconvolutionOp {
   }
 
   void Backward(const OpContext &ctx,
-    const std::vector<TBlob> &out_grad,
-    const std::vector<TBlob> &in_data,
-    const std::vector<OpReqType> &req,
-    const std::vector<TBlob> &in_grad) {
+                const std::vector<TBlob> &out_grad,
+                const std::vector<TBlob> &in_data,
+                const std::vector<OpReqType> &req,
+                const std::vector<TBlob> &in_grad) {
     using namespace mshadow;
     using namespace mshadow::expr;
     // TODO(bing): check the BLAS Handle, be careful
@@ -453,9 +453,9 @@ class DeconvolutionOp {
 
 template<typename xpu>
 void _DeconvolutionCompute(const DeconvolutionParam& param,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                           const OpContext& ctx, const std::vector<TBlob>& inputs,
+                           const std::vector<OpReqType>& req,
+                           const std::vector<TBlob>& outputs) {
   MSHADOW_REAL_TYPE_SWITCH(inputs[deconv::kData].type_flag_, DType, {
     static thread_local DeconvolutionOp<xpu, DType> op;
     op.Init(param);
@@ -465,18 +465,18 @@ void _DeconvolutionCompute(const DeconvolutionParam& param,
 
 template<typename xpu>
 void DeconvolutionCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                          const OpContext& ctx, const std::vector<TBlob>& inputs,
+                          const std::vector<OpReqType>& req,
+                          const std::vector<TBlob>& outputs) {
   const DeconvolutionParam& param = nnvm::get<DeconvolutionParam>(attrs.parsed);
   _DeconvolutionCompute<xpu>(param, ctx, inputs, req, outputs);
 }
 
 template<typename xpu>
 void _DeconvolutionGradCompute(const DeconvolutionParam& param,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                               const OpContext& ctx, const std::vector<TBlob>& inputs,
+                               const std::vector<OpReqType>& req,
+                               const std::vector<TBlob>& outputs) {
   std::vector<TBlob> in_data(inputs.begin() + 1, inputs.end());
   const TBlob &out_grad = inputs[0];
   const std::vector<TBlob> &in_grad = outputs;
@@ -491,9 +491,9 @@ void _DeconvolutionGradCompute(const DeconvolutionParam& param,
 
 template<typename xpu>
 void DeconvolutionGradCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                              const OpContext& ctx, const std::vector<TBlob>& inputs,
+                              const std::vector<OpReqType>& req,
+                              const std::vector<TBlob>& outputs) {
   const DeconvolutionParam& param = nnvm::get<DeconvolutionParam>(attrs.parsed);
   _DeconvolutionGradCompute<xpu>(param, ctx, inputs, req, outputs);
 }
diff --git a/src/operator/nn/deconvolution.cc b/src/operator/nn/deconvolution.cc
index eb958154baa7..3dd3f9f013a0 100644
--- a/src/operator/nn/deconvolution.cc
+++ b/src/operator/nn/deconvolution.cc
@@ -30,7 +30,8 @@ namespace mxnet {
 namespace op {
 
 static bool DeconvolutionShape(const nnvm::NodeAttrs& attrs,
-    std::vector<TShape> *in_shape, std::vector<TShape> *out_shape) {
+                               std::vector<TShape> *in_shape,
+                               std::vector<TShape> *out_shape) {
   const DeconvolutionParam& param_ = nnvm::get<DeconvolutionParam>(attrs.parsed);
 #if MXNET_USE_CUDNN == 0
   if (param_.kernel.ndim() != 2) {
@@ -236,7 +237,7 @@ static inline std::vector<std::string> ListArguments(const DeconvolutionParam& p
 }
 
 static bool DeconvolutionType(const nnvm::NodeAttrs& attrs,
-    std::vector<int> *in_type, std::vector<int> *out_type) {
+                              std::vector<int> *in_type, std::vector<int> *out_type) {
   const DeconvolutionParam& param_ = nnvm::get<DeconvolutionParam>(attrs.parsed);
   CHECK_GE(in_type->size(), 1U);
   int dtype = (*in_type)[0];
diff --git a/src/operator/nn/deconvolution.cu b/src/operator/nn/deconvolution.cu
index 0c2e160cf696..2812e4f46e12 100644
--- a/src/operator/nn/deconvolution.cu
+++ b/src/operator/nn/deconvolution.cu
@@ -41,9 +41,11 @@ static DeconvolutionOp &get_op(const DeconvolutionParam& param) {
 
 template<typename DType>
 static CuDNNDeconvolutionOp<DType> &get_cudnn_op(const DeconvolutionParam& param,
-    int forward_compute_type, int backward_compute_type,
-    const std::vector<TShape>& in_shape, const std::vector<TShape>& out_shape,
-    const Context& ctx, bool backward) {
+                                                 int forward_compute_type,
+                                                 int backward_compute_type,
+                                                 const std::vector<TShape>& in_shape,
+                                                 const std::vector<TShape>& out_shape,
+                                                 const Context& ctx, bool backward) {
   // Convolution forward has to be called before backward for this operator.
   // So we can't make this operator thread local. backward might be called
   // in another thread.
@@ -55,9 +57,10 @@ static CuDNNDeconvolutionOp<DType> &get_cudnn_op(const DeconvolutionParam& param
 
 template<>
 void DeconvolutionCompute<gpu>(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                               const OpContext& ctx,
+                               const std::vector<TBlob>& inputs,
+                               const std::vector<OpReqType>& req,
+                               const std::vector<TBlob>& outputs) {
   const DeconvolutionParam& param = nnvm::get<DeconvolutionParam>(attrs.parsed);
   int dtype = inputs[0].type_flag_;
   // If 1D deconvolution, use MXNet implementation
@@ -98,9 +101,10 @@ void DeconvolutionCompute<gpu>(const nnvm::NodeAttrs& attrs,
 
 template<>
 void DeconvolutionGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                                   const OpContext& ctx,
+                                   const std::vector<TBlob>& inputs,
+                                   const std::vector<OpReqType>& req,
+                                   const std::vector<TBlob>& outputs) {
   const DeconvolutionParam& param = nnvm::get<DeconvolutionParam>(attrs.parsed);
   std::vector<TBlob> in_data(inputs.begin() + 1, inputs.end());
   const TBlob &out_grad = inputs[0];
diff --git a/src/operator/nn/dropout-inl.h b/src/operator/nn/dropout-inl.h
index 4a9228f9a14e..222b0694824c 100644
--- a/src/operator/nn/dropout-inl.h
+++ b/src/operator/nn/dropout-inl.h
@@ -101,7 +101,7 @@ class DropoutOp {
   }
 
   void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data,
-    const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) {
+               const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) {
     using namespace mshadow;
     using namespace mshadow::expr;
     CHECK_EQ(in_data.size(), 1U);
@@ -136,7 +136,8 @@
   }
 
   void Backward(const OpContext &ctx, const TBlob &out_grad,
-    const TBlob &out_data_mask, const OpReqType &req, const TBlob &in_grad) {
+                const TBlob &out_data_mask, const OpReqType &req,
+                const TBlob &in_grad) {
     using namespace mshadow;
     using namespace mshadow::expr;
     Stream<xpu> *s = ctx.get_stream<xpu>();
@@ -169,10 +170,10 @@ class DropoutOp {
 
 template<typename xpu>
 void DropoutCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                    const OpContext& ctx,
+                    const std::vector<TBlob>& inputs,
+                    const std::vector<OpReqType>& req,
+                    const std::vector<TBlob>& outputs) {
   const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed);
   MSHADOW_REAL_TYPE_SWITCH(inputs[0].type_flag_, DType, {
     static thread_local DropoutOp<xpu, DType> op;
@@ -183,10 +184,10 @@ void DropoutCompute(const nnvm::NodeAttrs& attrs,
 
 template<typename xpu>
 void DropoutGradCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                        const OpContext& ctx,
+                        const std::vector<TBlob>& inputs,
+                        const std::vector<OpReqType>& req,
+                        const std::vector<TBlob>& outputs) {
   const DropoutParam& param = nnvm::get<DropoutParam>(attrs.parsed);
   CHECK_EQ(inputs.size(), 2U);
   CHECK_EQ(outputs.size(), 1);
diff --git a/src/operator/nn/fully_connected-inl.h b/src/operator/nn/fully_connected-inl.h
index 07965c354930..73312c7dec68 100644
--- a/src/operator/nn/fully_connected-inl.h
+++ b/src/operator/nn/fully_connected-inl.h
@@ -73,7 +73,7 @@ class FullyConnectedOp {
   }
 
   void Forward(const OpContext &ctx, const std::vector<TBlob> &in_data,
-    const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) {
+               const std::vector<OpReqType> &req, const std::vector<TBlob> &out_data) {
     using namespace mshadow;
     using namespace mshadow::expr;
     if (req[fullc::kOut] == kNullOp) return;
@@ -113,8 +113,8 @@ class FullyConnectedOp {
   }
 
   void Backward(const OpContext &ctx, const std::vector<TBlob> &out_grad,
-    const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req,
-    const std::vector<TBlob> &in_grad) {
+                const std::vector<TBlob> &in_data, const std::vector<OpReqType> &req,
+                const std::vector<TBlob> &in_grad) {
     using namespace mshadow;
     using namespace mshadow::expr;
     // TODO(bing): check the BLAS Handle, be careful
@@ -175,10 +175,10 @@ class FullyConnectedOp {
 
 template<typename xpu>
 void FullyConnectedCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                           const OpContext& ctx,
+                           const std::vector<TBlob>& inputs,
+                           const std::vector<OpReqType>& req,
+                           const std::vector<TBlob>& outputs) {
   const FullyConnectedParam& param = nnvm::get<FullyConnectedParam>(attrs.parsed);
   uint32_t in_expected = param.no_bias ? 2 : 3;
   CHECK_EQ(inputs.size(), in_expected);
@@ -205,10 +205,10 @@ void FullyConnectedCompute(const nnvm::NodeAttrs& attrs,
 
 template<typename xpu>
 void FullyConnectedGradCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                               const OpContext& ctx,
+                               const std::vector<TBlob>& inputs,
+                               const std::vector<OpReqType>& req,
+                               const std::vector<TBlob>& outputs) {
   const FullyConnectedParam& param = nnvm::get<FullyConnectedParam>(attrs.parsed);
   uint32_t out_expected = param.no_bias ? 2 : 3;
   CHECK_EQ(inputs.size(), 3U);
diff --git a/src/operator/nn/fully_connected.cc b/src/operator/nn/fully_connected.cc
index 6524fbe349f9..cc475e04dd44 100644
--- a/src/operator/nn/fully_connected.cc
+++ b/src/operator/nn/fully_connected.cc
@@ -31,7 +31,8 @@ namespace mxnet {
 namespace op {
 
 static bool FullyConnectedShape(const nnvm::NodeAttrs& attrs,
-    std::vector<TShape> *in_shape, std::vector<TShape> *out_shape) {
+                                std::vector<TShape> *in_shape,
+                                std::vector<TShape> *out_shape) {
   const FullyConnectedParam& param = nnvm::get<FullyConnectedParam>(attrs.parsed);
   using namespace mshadow;
   if (!param.no_bias) {
@@ -71,7 +72,7 @@ static bool FullyConnectedShape(const nnvm::NodeAttrs& attrs,
 }
 
 static bool FullyConnectedType(const nnvm::NodeAttrs& attrs,
-    std::vector<int> *in_type, std::vector<int> *out_type) {
+                               std::vector<int> *in_type, std::vector<int> *out_type) {
   CHECK_GE(in_type->size(), 1U);
   return ElemwiseAttr<int, type_is_none, type_assign, true, type_string>(
       attrs, in_type, out_type, -1);
diff --git a/src/operator/nn/fully_connected.cu b/src/operator/nn/fully_connected.cu
index 81bc1a75aa58..7637865f2472 100644
--- a/src/operator/nn/fully_connected.cu
+++ b/src/operator/nn/fully_connected.cu
@@ -28,10 +28,10 @@ namespace op {
 
 template<>
 void FullyConnectedCompute<gpu>(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                                const OpContext& ctx,
+                                const std::vector<TBlob>& inputs,
+                                const std::vector<OpReqType>& req,
+                                const std::vector<TBlob>& outputs) {
   const FullyConnectedParam& param = nnvm::get<FullyConnectedParam>(attrs.parsed);
   uint32_t in_expected = param.no_bias ? 2 : 3;
   CHECK_EQ(inputs.size(), in_expected);
@@ -46,10 +46,10 @@ void FullyConnectedCompute<gpu>(const nnvm::NodeAttrs& attrs,
 
 template<>
 void FullyConnectedGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                                    const OpContext& ctx,
+                                    const std::vector<TBlob>& inputs,
+                                    const std::vector<OpReqType>& req,
+                                    const std::vector<TBlob>& outputs) {
   const FullyConnectedParam& param = nnvm::get<FullyConnectedParam>(attrs.parsed);
   uint32_t out_expected = param.no_bias ? 2 : 3;
   CHECK_EQ(inputs.size(), 3U);
diff --git a/src/operator/nn/pooling-inl.h b/src/operator/nn/pooling-inl.h
index e559ac1e41b9..448fb54b05a6 100644
--- a/src/operator/nn/pooling-inl.h
+++ b/src/operator/nn/pooling-inl.h
@@ -88,7 +88,7 @@ class PoolingOp {
   }
 
   void Forward(const OpContext& ctx, const TBlob& in_data,
-    const OpReqType& req, const TBlob& out_data) {
+               const OpReqType& req, const TBlob& out_data) {
     using namespace mshadow;
     Stream<xpu> *s = ctx.get_stream<xpu>();
     const TShape& ishape = in_data.shape_;
@@ -103,8 +103,8 @@ class PoolingOp {
   }
 
   void Backward(const OpContext& ctx, const TBlob& out_grad,
-    const TBlob& in_data, const TBlob& out_data,
-    const OpReqType& req, const TBlob& in_grad) {
+                const TBlob& in_data, const TBlob& out_data,
+                const OpReqType& req, const TBlob& in_grad) {
     using namespace mshadow;
     Stream<xpu> *s = ctx.get_stream<xpu>();
     const TShape& ishape = in_data.shape_;
@@ -132,10 +132,10 @@ PoolingOp &GetPoolingOp(const PoolingParam &param) {
 
 template<typename xpu>
 void PoolingCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                    const OpContext& ctx,
+                    const std::vector<TBlob>& inputs,
+                    const std::vector<OpReqType>& req,
+                    const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
   const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
@@ -152,10 +152,10 @@ void PoolingCompute(const nnvm::NodeAttrs& attrs,
 
 template<typename xpu>
 void PoolingGradCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                        const OpContext& ctx,
+                        const std::vector<TBlob>& inputs,
+                        const std::vector<OpReqType>& req,
+                        const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 3U);
   CHECK_EQ(outputs.size(), 1U);
   CHECK_EQ(req.size(), 1U);
diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc
index 3c30e1924323..41ace3cecae0 100644
--- a/src/operator/nn/pooling.cc
+++ b/src/operator/nn/pooling.cc
@@ -60,7 +60,7 @@ static void PoolingParamParser(nnvm::NodeAttrs* attrs) {
 }
 
 static bool PoolingShape(const nnvm::NodeAttrs& attrs,
-    std::vector<TShape> *in_shape, std::vector<TShape> *out_shape) {
+                         std::vector<TShape> *in_shape, std::vector<TShape> *out_shape) {
   const PoolingParam& param_ = nnvm::get<PoolingParam>(attrs.parsed);
   CHECK_EQ(in_shape->size(), 1U);
   const TShape &dshape = (*in_shape)[0];
diff --git a/src/operator/nn/pooling.cu b/src/operator/nn/pooling.cu
index 4d5c68f7ca6b..24aa4178b3c7 100644
--- a/src/operator/nn/pooling.cu
+++ b/src/operator/nn/pooling.cu
@@ -43,10 +43,10 @@ static CuDNNPoolingOp &GetCuDNNPoolingOp(const PoolingParam &param) {
 
 template<>
 void PoolingCompute<gpu>(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                         const OpContext& ctx,
+                         const std::vector<TBlob>& inputs,
+                         const std::vector<OpReqType>& req,
+                         const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
   const PoolingParam& param = nnvm::get<PoolingParam>(attrs.parsed);
@@ -80,10 +80,10 @@ void PoolingCompute<gpu>(const nnvm::NodeAttrs& attrs,
 
 template<>
 void PoolingGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                             const OpContext& ctx,
+                             const std::vector<TBlob>& inputs,
+                             const std::vector<OpReqType>& req,
+                             const std::vector<TBlob>& outputs) {
   CHECK_EQ(inputs.size(), 3U);
   CHECK_EQ(outputs.size(), 1U);
   CHECK_EQ(req.size(), 1U);
diff --git a/src/operator/nn/softmax_activation-inl.h b/src/operator/nn/softmax_activation-inl.h
index 5d0e937e218d..5b91b6f79e98 100644
--- a/src/operator/nn/softmax_activation-inl.h
+++ b/src/operator/nn/softmax_activation-inl.h
@@ -73,7 +73,7 @@ class SoftmaxActivationOp {
   }
 
   void Forward(const OpContext &ctx, const TBlob &in_data,
-    const OpReqType &req, const TBlob &out_data) {
+               const OpReqType &req, const TBlob &out_data) {
     using namespace mshadow;
     using namespace mshadow::expr;
     Stream<xpu> *s = ctx.get_stream<xpu>();
@@ -94,7 +94,7 @@
   }
 
   void Backward(const OpContext &ctx, const TBlob &out_grad,
-    const TBlob &out_data, const OpReqType &req, const TBlob &in_grad) {
+                const TBlob &out_data, const OpReqType &req, const TBlob &in_grad) {
     using namespace mshadow;
     using namespace mshadow::expr;
     // Use 3d tensor for both mode -> {instance, channel}. Get shapes
@@ -126,10 +126,10 @@ class SoftmaxActivationOp {
 
 template<typename xpu>
 void SoftmaxActivationCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                              const OpContext& ctx,
+                              const std::vector<TBlob>& inputs,
+                              const std::vector<OpReqType>& req,
+                              const std::vector<TBlob>& outputs) {
   const SoftmaxActivationParam& param = nnvm::get<SoftmaxActivationParam>(attrs.parsed);
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
@@ -141,10 +141,10 @@ void SoftmaxActivationCompute(const nnvm::NodeAttrs& attrs,
 
 template<typename xpu>
 void SoftmaxActivationGradCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                                  const OpContext& ctx,
+                                  const std::vector<TBlob>& inputs,
+                                  const std::vector<OpReqType>& req,
+                                  const std::vector<TBlob>& outputs) {
   const SoftmaxActivationParam& param = nnvm::get<SoftmaxActivationParam>(attrs.parsed);
   CHECK_EQ(inputs.size(), 2U);
   CHECK_EQ(outputs.size(), 1);
diff --git a/src/operator/nn/softmax_activation.cu b/src/operator/nn/softmax_activation.cu
index 9aba20ece514..8e6e787f8072 100644
--- a/src/operator/nn/softmax_activation.cu
+++ b/src/operator/nn/softmax_activation.cu
@@ -34,10 +34,10 @@ namespace op {
 
 template<>
 void SoftmaxActivationCompute<gpu>(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                                   const OpContext& ctx,
+                                   const std::vector<TBlob>& inputs,
+                                   const std::vector<OpReqType>& req,
+                                   const std::vector<TBlob>& outputs) {
   const SoftmaxActivationParam& param = nnvm::get<SoftmaxActivationParam>(attrs.parsed);
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
@@ -55,10 +55,10 @@ void SoftmaxActivationCompute<gpu>(const nnvm::NodeAttrs& attrs,
 
 template<>
 void SoftmaxActivationGradCompute<gpu>(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx,
-    const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                                       const OpContext& ctx,
+                                       const std::vector<TBlob>& inputs,
+                                       const std::vector<OpReqType>& req,
+                                       const std::vector<TBlob>& outputs) {
   const SoftmaxActivationParam& param = nnvm::get<SoftmaxActivationParam>(attrs.parsed);
   CHECK_EQ(inputs.size(), 2U);
   CHECK_EQ(outputs.size(), 1);
diff --git a/src/operator/nn/upsampling-inl.h b/src/operator/nn/upsampling-inl.h
index 91254dad9046..6ce33fcca8cb 100644
--- a/src/operator/nn/upsampling-inl.h
+++ b/src/operator/nn/upsampling-inl.h
@@ -90,9 +90,9 @@ class UpSamplingNearestOp {
   }
 
   void Forward(const OpContext &ctx,
-    const std::vector<TBlob> &in_data,
-    const std::vector<OpReqType> &req,
-    const std::vector<TBlob> &out_data) {
+               const std::vector<TBlob> &in_data,
+               const std::vector<OpReqType> &req,
+               const std::vector<TBlob> &out_data) {
     using namespace mshadow;
     using namespace mshadow::expr;
     CHECK_EQ(in_data.size(), static_cast<size_t>(param_.num_args));
@@ -126,8 +126,8 @@
   }
 
   void Backward(const OpContext &ctx, const TBlob &out_grad,
-    const std::vector<OpReqType> &req,
-    const std::vector<TBlob> &in_grad) {
+                const std::vector<OpReqType> &req,
+                const std::vector<TBlob> &in_grad) {
     using namespace mshadow;
     using namespace mshadow::expr;
     CHECK_EQ(in_grad.size(), static_cast<size_t>(param_.num_args));
@@ -198,9 +198,9 @@ static inline DeconvolutionParam GetDeconvolutionParam(const UpSamplingParam& pa
 
 template<typename xpu>
 void UpSamplingCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                       const OpContext& ctx, const std::vector<TBlob>& inputs,
+                       const std::vector<OpReqType>& req,
+                       const std::vector<TBlob>& outputs) {
   const UpSamplingParam& param = nnvm::get<UpSamplingParam>(attrs.parsed);
   if (param.sample_type == up_enum::kNearest) {
     MSHADOW_REAL_TYPE_SWITCH(inputs[deconv::kData].type_flag_, DType, {
@@ -218,9 +218,9 @@ void UpSamplingCompute(const nnvm::NodeAttrs& attrs,
 
 template<typename xpu>
 void UpSamplingGradCompute(const nnvm::NodeAttrs& attrs,
-    const OpContext& ctx, const std::vector<TBlob>& inputs,
-    const std::vector<OpReqType>& req,
-    const std::vector<TBlob>& outputs) {
+                           const OpContext& ctx, const std::vector<TBlob>& inputs,
+                           const std::vector<OpReqType>& req,
+                           const std::vector<TBlob>& outputs) {
  const UpSamplingParam& param = nnvm::get<UpSamplingParam>(attrs.parsed);
  if (param.sample_type == up_enum::kNearest) {
    MSHADOW_REAL_TYPE_SWITCH(inputs[deconv::kData].type_flag_, DType, {
diff --git a/src/operator/nn/upsampling.cc b/src/operator/nn/upsampling.cc
index 87316a939718..44b619ac9516 100644
--- a/src/operator/nn/upsampling.cc
+++ b/src/operator/nn/upsampling.cc
@@ -32,7 +32,7 @@ namespace mxnet {
 namespace op {
 
 static bool UpSamplingShape(const nnvm::NodeAttrs& attrs,
-    std::vector<TShape> *in_shape, std::vector<TShape> *out_shape) {
+                            std::vector<TShape> *in_shape, std::vector<TShape> *out_shape) {
   const UpSamplingParam& param_ = nnvm::get<UpSamplingParam>(attrs.parsed);
   CHECK_GE(in_shape->size(), 1U);
   const TShape &dshape = (*in_shape)[0];
@@ -87,7 +87,7 @@ static inline std::vector<std::string> ListArguments(const UpSamplingParam& para
 }
 
 static bool UpSamplingType(const nnvm::NodeAttrs& attrs,
-    std::vector<int> *in_type, std::vector<int> *out_type) {
+                           std::vector<int> *in_type, std::vector<int> *out_type) {
   const UpSamplingParam& param = nnvm::get<UpSamplingParam>(attrs.parsed);
   CHECK_GE(in_type->size(), 1U);
   int dtype = (*in_type)[0];