From 980f4a28ecfc4268165f10130d76f2b7b9248718 Mon Sep 17 00:00:00 2001
From: Pedro Larroy
Date: Thu, 29 Nov 2018 14:41:42 +0100
Subject: [PATCH] Use activation in GPU if we are using CUDNN and not MKLDNN as
 it's happening right now

---
 src/operator/nn/activation.cc | 20 +++++++++-----------
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/src/operator/nn/activation.cc b/src/operator/nn/activation.cc
index c026d1f6853b..d5afccb45e11 100644
--- a/src/operator/nn/activation.cc
+++ b/src/operator/nn/activation.cc
@@ -27,10 +27,10 @@
 #include "./activation-inl.h"
 #include "../mshadow_op.h"
 #include "../tensor/elemwise_unary_op.h"
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
 #include "./mkldnn/mkldnn_base-inl.h"
 #include "./mkldnn/mkldnn_ops-inl.h"
-#endif  // MXNET_USE_MKLDNN
+#endif  // MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
 #include "../operator_common.h"
 #include "../../common/utils.h"
 
@@ -149,7 +149,7 @@ struct ActivationGrad {
   }
 };
 
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
 static void ActivationComputeExCPU(const nnvm::NodeAttrs& attrs,
                                    const OpContext& ctx,
                                    const std::vector<NDArray>& inputs,
@@ -184,9 +184,7 @@ void ActivationGradComputeExCPU(const nnvm::NodeAttrs& attrs,
   }
   FallBackCompute(ActivationGradComputeImpl<cpu>, attrs, ctx, inputs, req, outputs);
 }
-#endif
 
-#if MXNET_USE_MKLDNN == 1
 inline static bool ActivationStorageType(const nnvm::NodeAttrs& attrs,
                                          const int dev_mask,
                                          DispatchMode* dispatch_mode,
@@ -209,7 +207,7 @@ inline static bool BackwardActStorageType(const nnvm::NodeAttrs& attrs,
   return MKLDNNStorageType(attrs, dev_mask, SupportMKLDNNAct(param),
                           dispatch_mode, in_attrs, out_attrs);
 }
-#endif
+#endif  // MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
 
 
 MXNET_OPERATOR_REGISTER_UNARY(Activation)
@@ -225,7 +223,7 @@ The following activation functions are supported:
 
 )code" ADD_FILELINE)
 .set_attr_parser(ParamParser<ActivationParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
 .set_attr<FInferStorageType>("FInferStorageType", ActivationStorageType)
 #endif
 .set_attr<nnvm::FListOutputNames>("FListOutputNames",
@@ -233,7 +231,7 @@ The following activation functions are supported:
     return std::vector<std::string>{"output"};
 })
 .set_attr<FCompute>("FCompute<cpu>", ActivationCompute<cpu>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
 .set_attr<bool>("TIsMKLDNN", true)
 .set_attr<FComputeEx>("FComputeEx<cpu>", ActivationComputeExCPU)
 #endif
@@ -247,7 +245,7 @@ NNVM_REGISTER_OP(_backward_Activation)
 })
 .set_num_outputs(1)
 .set_attr<nnvm::TIsBackward>("TIsBackward", true)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
 .set_attr<FInferStorageType>("FInferStorageType", BackwardActStorageType)
 #endif
 .set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<-1, 1>)
@@ -255,13 +253,13 @@ NNVM_REGISTER_OP(_backward_Activation)
 .set_attr<nnvm::FInplaceOption>("FInplaceOption", [](const NodeAttrs& attrs){
   return std::vector<std::pair<int, int> >{{0, 0}};
 })
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
 .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
   return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
 })
 #endif
 .set_attr_parser(ParamParser<ActivationParam>)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
 .set_attr<bool>("TIsMKLDNN", true)
 .set_attr<FComputeEx>("FComputeEx<cpu>", ActivationGradComputeExCPU)
 #endif