diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc
index d646ec4d6f28..ba44ebd4ed4d 100644
--- a/src/operator/nn/activation.cc
+++ b/src/operator/nn/activation.cc
@@ -91,7 +91,7 @@ void ActivationGradComputeExCPU(const nnvm::NodeAttrs& attrs,
   const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
   bool relu = param.act_type == activation::kReLU;
   CHECK_EQ(inputs.size(), relu ? 2U : 3U);
-  if (SupportMKLDNN(inputs[0]) && ctx.need_grad) {
+  if (SupportMKLDNN(inputs[0])) {
     MKLDNN_OPCHECK_INIT(true, outputs.size(), inputs, outputs);
     // XXX: for y = relu(x), y is passed as "in_data" to Backward()
     MKLDNNActivationBackward(attrs, ctx, inputs[0], relu ? inputs[1] : inputs[2], req[0],
diff --git a/src/operator/nn/batch_norm.cc b/src/operator/nn/batch_norm.cc
index ea6d1b3470d7..6254a1e18662 100644
--- a/src/operator/nn/batch_norm.cc
+++ b/src/operator/nn/batch_norm.cc
@@ -421,8 +421,7 @@ void BatchNormGradComputeExCPU(const nnvm::NodeAttrs &attrs,
   TShape shape = inputs[0].shape();
   // MKLDNN batchnorm only works well on the special MKLDNN layout.
   if (SupportMKLDNNBN(inputs[0], param)
-      && (inputs[3].IsMKLDNNData() || inputs[0].IsMKLDNNData())
-      && ctx.need_grad) {
+      && (inputs[3].IsMKLDNNData() || inputs[0].IsMKLDNNData())) {
     std::vector<NDArray> out_grad(1);
     std::vector<NDArray> out_data(3);
     std::vector<NDArray> in_data(3);
diff --git a/src/operator/nn/deconvolution.cc b/src/operator/nn/deconvolution.cc
index c86c31ddf2c8..039c732c831d 100644
--- a/src/operator/nn/deconvolution.cc
+++ b/src/operator/nn/deconvolution.cc
@@ -312,7 +312,7 @@ static void DeconvolutionGradComputeExCPU(const nnvm::NodeAttrs& attrs,
                                           const std::vector<OpReqType>& req,
                                           const std::vector<NDArray>& outputs) {
   const DeconvolutionParam& param = nnvm::get<DeconvolutionParam>(attrs.parsed);
-  if (SupportMKLDNNDeconv(param, inputs[0]) && ctx.need_grad) {
+  if (SupportMKLDNNDeconv(param, inputs[0])) {
     MKLDNN_OPCHECK_INIT(true, outputs.size(), inputs, outputs);
     MKLDNNDeconvolutionBackward(attrs, ctx, inputs, req, outputs);
     MKLDNN_OPCHECK_RUN(DeconvolutionGradCompute<cpu>, attrs, ctx, inputs, req,
diff --git a/src/operator/nn/fully_connected.cc b/src/operator/nn/fully_connected.cc
index 9d68dc2f818f..a178b2759bf9 100644
--- a/src/operator/nn/fully_connected.cc
+++ b/src/operator/nn/fully_connected.cc
@@ -141,7 +141,7 @@ void FullyConnectedGradComputeExCPU(const nnvm::NodeAttrs& attrs,
                                     const std::vector<NDArray> &inputs,
                                     const std::vector<OpReqType> &req,
                                     const std::vector<NDArray> &outputs) {
-  if (SupportMKLDNN(inputs[0]) && ctx.need_grad) {
+  if (SupportMKLDNN(inputs[0])) {
     MKLDNN_OPCHECK_INIT(true, outputs.size(), inputs, outputs);
     MKLDNNFCBackward(attrs, ctx, inputs, req, outputs);
     MKLDNN_OPCHECK_RUN(FullyConnectedGradCompute<cpu>, attrs, ctx, inputs, req,
diff --git a/src/operator/nn/lrn.cc b/src/operator/nn/lrn.cc
index 49eff2ad6c71..020cb479acc6 100644
--- a/src/operator/nn/lrn.cc
+++ b/src/operator/nn/lrn.cc
@@ -133,7 +133,7 @@ void LRNGradComputeExCPU(const nnvm::NodeAttrs &attrs,
   const NDArray &in_data = inputs[1];
   const NDArray &in_grad = outputs[0];
 
-  if (SupportMKLDNN(inputs[0]) && ctx.need_grad) {
+  if (SupportMKLDNN(inputs[0])) {
     MKLDNN_OPCHECK_INIT(true, outputs.size(), inputs, outputs);
     MKLDNNLRNBackward(ctx, param, out_grad, in_data, req[0], in_grad);
     MKLDNN_OPCHECK_RUN(LRNGradCompute<cpu>, attrs, ctx, inputs, req, outputs);
diff --git a/src/operator/nn/pooling.cc b/src/operator/nn/pooling.cc
index d94684fb377e..611568807a9a 100644
--- a/src/operator/nn/pooling.cc
+++ b/src/operator/nn/pooling.cc
@@ -270,8 +270,7 @@ void PoolingGradComputeExCPU(const nnvm::NodeAttrs &attrs, const OpContext &ctx,
                              const std::vector<NDArray> &outputs) {
   const PoolingParam &param = nnvm::get<PoolingParam>(attrs.parsed);
   if (SupportMKLDNN(inputs[0])
-      && SupportMKLDNNPooling(param, inputs[0].shape())
-      && ctx.need_grad) {
+      && SupportMKLDNNPooling(param, inputs[0].shape())) {
     const NDArray &out_grad = inputs[0];
     const NDArray *workspace = nullptr;
     const NDArray *in_data = nullptr;
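
Note: every hunk above removes the same ctx.need_grad guard, so the MKLDNN backward kernel is now selected purely from whether the input supports the MKLDNN path, not from whether gradients were requested. For reference, a minimal C++ sketch of the dispatch shape these six functions share; SomeOp is a hypothetical stand-in for the touched operators, and the FallBackCompute else-branch reflects the surrounding MXNet code that the hunks truncate, not lines in this diff.

// Sketch only: SomeOp is hypothetical; signature and macros follow the
// functions shown in the hunks above.
void SomeOpGradComputeExCPU(const nnvm::NodeAttrs &attrs,
                            const OpContext &ctx,
                            const std::vector<NDArray> &inputs,
                            const std::vector<OpReqType> &req,
                            const std::vector<NDArray> &outputs) {
  // After this patch, dispatch depends only on layout/dtype support;
  // ctx.need_grad is no longer consulted.
  if (SupportMKLDNN(inputs[0])) {
    // The OPCHECK macros cross-check the MKLDNN result against the default
    // CPU kernel when the MXNET_MKLDNN_DEBUG environment variable is set.
    MKLDNN_OPCHECK_INIT(true, outputs.size(), inputs, outputs);
    MKLDNNSomeOpBackward(attrs, ctx, inputs, req, outputs);
    MKLDNN_OPCHECK_RUN(SomeOpGradCompute<cpu>, attrs, ctx, inputs, req, outputs);
    return;
  }
  // Inputs the MKLDNN path cannot handle fall back to the stock CPU kernel.
  FallBackCompute(SomeOpGradCompute<cpu>, attrs, ctx, inputs, req, outputs);
}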