Skip to content
This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Commit

Permalink
Use activation in GPU if we are using CUDNN and not MKLDNN as it's ha…
Browse files Browse the repository at this point in the history
…ppening right now
  • Loading branch information
larroy committed Nov 29, 2018
1 parent 992df3b commit 980f4a2
Showing 1 changed file with 9 additions and 11 deletions.
20 changes: 9 additions & 11 deletions src/operator/nn/activation.cc
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,10 @@
#include "./activation-inl.h"
#include "../mshadow_op.h"
#include "../tensor/elemwise_unary_op.h"
#if MXNET_USE_MKLDNN == 1
#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
#include "./mkldnn/mkldnn_base-inl.h"
#include "./mkldnn/mkldnn_ops-inl.h"
#endif // MXNET_USE_MKLDNN
#endif  // MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
#include "../operator_common.h"
#include "../../common/utils.h"

Expand Down Expand Up @@ -149,7 +149,7 @@ struct ActivationGrad {
}
};

#if MXNET_USE_MKLDNN == 1
#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
static void ActivationComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
Expand Down Expand Up @@ -184,9 +184,7 @@ void ActivationGradComputeExCPU(const nnvm::NodeAttrs& attrs,
}
FallBackCompute(ActivationGradComputeImpl<cpu>, attrs, ctx, inputs, req, outputs);
}
#endif

#if MXNET_USE_MKLDNN == 1
inline static bool ActivationStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
Expand All @@ -209,7 +207,7 @@ inline static bool BackwardActStorageType(const nnvm::NodeAttrs& attrs,
return MKLDNNStorageType(attrs, dev_mask, SupportMKLDNNAct(param),
dispatch_mode, in_attrs, out_attrs);
}
#endif
#endif // MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1


MXNET_OPERATOR_REGISTER_UNARY(Activation)
Expand All @@ -225,15 +223,15 @@ The following activation functions are supported:
)code" ADD_FILELINE)
.set_attr_parser(ParamParser<ActivationParam>)
#if MXNET_USE_MKLDNN == 1
#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
.set_attr<FInferStorageType>("FInferStorageType", ActivationStorageType)
#endif
.set_attr<nnvm::FListOutputNames>("FListOutputNames",
[](const NodeAttrs& attrs) {
return std::vector<std::string>{"output"};
})
.set_attr<FCompute>("FCompute<cpu>", ActivationCompute<cpu>)
#if MXNET_USE_MKLDNN == 1
#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
.set_attr<bool>("TIsMKLDNN", true)
.set_attr<FComputeEx>("FComputeEx<cpu>", ActivationComputeExCPU)
#endif
Expand All @@ -247,21 +245,21 @@ NNVM_REGISTER_OP(_backward_Activation)
})
.set_num_outputs(1)
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
#if MXNET_USE_MKLDNN == 1
#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
.set_attr<FInferStorageType>("FInferStorageType", BackwardActStorageType)
#endif
.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<-1, 1>)
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<-1, 1>)
.set_attr<nnvm::FInplaceOption>("FInplaceOption", [](const NodeAttrs& attrs){
return std::vector<std::pair<int, int> >{{0, 0}};
})
#if MXNET_USE_MKLDNN == 1
#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
#endif
.set_attr_parser(ParamParser<ActivationParam>)
#if MXNET_USE_MKLDNN == 1
#if MXNET_USE_CUDNN == 0 && MXNET_USE_MKLDNN == 1
.set_attr<bool>("TIsMKLDNN", true)
.set_attr<FComputeEx>("FComputeEx<cpu>", ActivationGradComputeExCPU)
#endif
Expand Down

0 comments on commit 980f4a2

Please sign in to comment.