This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Fall back when sparse arrays are passed to MKLDNN-enabled operators #11664

Merged
merged 24 commits into from Aug 24, 2018
Changes from all commits
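Every file in this diff follows the same recipe: the operator's hand-rolled DispatchMode selection is replaced by a call to the shared MKLDNNStorageType helper (see the mkldnn_base.cc hunk at the end), and both the storage-type function and its FInferStorageType registration move behind #if MXNET_USE_MKLDNN == 1. When the helper picks DispatchMode::kFComputeFallback, the executor converts non-default (sparse) inputs to dense NDArrays and runs the ordinary FCompute kernel; that conversion is the fall-back the PR title refers to.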
57 changes: 12 additions & 45 deletions src/operator/nn/activation.cc
@@ -31,6 +31,8 @@
#include "./mkldnn/mkldnn_base-inl.h"
#include "./mkldnn/mkldnn_ops-inl.h"
#endif // MXNET_USE_MKLDNN
#include "../operator_common.h"
#include "../../common/utils.h"

namespace mxnet {
namespace op {
@@ -101,74 +103,35 @@ void ActivationGradComputeExCPU(const nnvm::NodeAttrs& attrs,
}
#endif

#if MXNET_USE_MKLDNN == 1
inline static bool ActivationStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK_EQ(in_attrs->size(), 1);
CHECK_EQ(out_attrs->size(), 1);
bool ret = ElemwiseStorageType<1, 1, false, false, false>(attrs, dev_mask,
dispatch_mode,
in_attrs, out_attrs);
#if MXNET_USE_MKLDNN == 1
const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
*dispatch_mode = DispatchMode::kFComputeEx;
}
if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
*dispatch_mode = DispatchMode::kFComputeFallback;
return ret;
}
#endif
return ret;
return MKLDNNStorageType(attrs, dev_mask, SupportMKLDNNAct(param),
dispatch_mode, in_attrs, out_attrs);
}

inline static bool BackwardActStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
bool ret = false;
const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
#if (MXNET_USE_CUDNN == 1 || MXNET_USE_MKLDNN == 1)
if (param.act_type != activation::kReLU) {
CHECK_EQ(in_attrs->size(), 3U);
ret = ElemwiseStorageType<3, 1, false, false, false>(attrs, dev_mask,
dispatch_mode,
in_attrs, out_attrs);
} else {
// for ReLU activation, the backward pass only needs ograd and output
CHECK_EQ(in_attrs->size(), 2U);
ret = ElemwiseStorageType<2, 1, false, false, false>(attrs, dev_mask,
dispatch_mode,
in_attrs, out_attrs);
}
#else
if (param.act_type == activation::kSoftSign) {
CHECK_EQ(in_attrs->size(), 3U);
ret = ElemwiseStorageType<3, 1, false, false, false>(attrs, dev_mask,
dispatch_mode,
in_attrs, out_attrs);
} else {
CHECK_EQ(in_attrs->size(), 2U);
ret = ElemwiseStorageType<2, 1, false, false, false>(attrs, dev_mask,
dispatch_mode,
in_attrs, out_attrs);
}
#endif
CHECK_EQ(out_attrs->size(), 1U);
#if MXNET_USE_MKLDNN == 1
if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
*dispatch_mode = DispatchMode::kFComputeEx;
}
if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
*dispatch_mode = DispatchMode::kFComputeFallback;
return ret;
}
#endif
return ret;
return MKLDNNStorageType(attrs, dev_mask, SupportMKLDNNAct(param),
dispatch_mode, in_attrs, out_attrs);
}
#endif

MXNET_OPERATOR_REGISTER_UNARY(Activation)
.describe(R"code(Applies an activation function element-wise to the input.
@@ -183,7 +146,9 @@ The following activation functions are supported:

)code" ADD_FILELINE)
.set_attr_parser(ParamParser<ActivationParam>)
#if MXNET_USE_MKLDNN == 1
.set_attr<FInferStorageType>("FInferStorageType", ActivationStorageType)
#endif
.set_attr<nnvm::FListOutputNames>("FListOutputNames",
[](const NodeAttrs& attrs) {
return std::vector<std::string>{"output"};
@@ -204,7 +169,9 @@ NNVM_REGISTER_OP(_backward_Activation)
})
.set_num_outputs(1)
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
#if MXNET_USE_MKLDNN == 1
.set_attr<FInferStorageType>("FInferStorageType", BackwardActStorageType)
#endif
.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<3, 1>)
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<3, 1>)
.set_attr<nnvm::FInplaceOption>("FInplaceOption", [](const NodeAttrs& attrs){
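Assembled from the hunk above, the forward storage-type function now reads as below (the backward variant differs only in its input checks); this is a reconstruction for readability, since the diff interleaves old and new lines:

#if MXNET_USE_MKLDNN == 1
inline static bool ActivationStorageType(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         DispatchMode* dispatch_mode,
                                         std::vector<int> *in_attrs,
                                         std::vector<int> *out_attrs) {
  CHECK_EQ(in_attrs->size(), 1);
  CHECK_EQ(out_attrs->size(), 1);
  const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
  // The per-operator support check rides in as the helper's third argument;
  // the env switch, the sparse fall-back, and the dense default all live in
  // MKLDNNStorageType.
  return MKLDNNStorageType(attrs, dev_mask, SupportMKLDNNAct(param),
                           dispatch_mode, in_attrs, out_attrs);
}
#endif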
3 changes: 2 additions & 1 deletion src/operator/nn/batch_norm.cc
@@ -27,6 +27,7 @@
#include "batch_norm-inl.h"
#include <nnvm/op_attr_types.h>
#include "../elemwise_op_common.h"
#include "../operator_common.h"
#if MXNET_USE_MKLDNN == 1
#include "./mkldnn/mkldnn_batch_norm-inl.h"
#endif
@@ -544,7 +545,7 @@ Both *mean* and *var* returns a scalar by treating the input as a vector.

Assume the input has size *k* on axis 1, then both ``gamma`` and ``beta``
have shape *(k,)*. If ``output_mean_var`` is set to be true, then outputs both ``data_mean`` and
the inverse of ``data_var``, which are needed for the backward pass. Note that gradient of these
two outputs are blocked.

Besides the inputs and the outputs, this operator accepts two auxiliary
43 changes: 17 additions & 26 deletions src/operator/nn/convolution.cc
@@ -26,11 +26,14 @@

#include "./convolution-inl.h"
#include "../elemwise_op_common.h"
#include "./mkldnn/mkldnn_ops-inl.h"
#include "./mkldnn/mkldnn_base-inl.h"
#include "../operator_common.h"
#if MXNET_USE_NNPACK == 1
#include "../nnpack/nnpack_pooling-inl.h"
#endif // MXNET_USE_NNPACK
#if MXNET_USE_MKLDNN == 1
#include "./mkldnn/mkldnn_base-inl.h"
#include "./mkldnn/mkldnn_ops-inl.h"
#endif // MXNET_USE_MKLDNN

namespace mxnet {
namespace op {
@@ -288,27 +291,19 @@ static bool ConvolutionType(const nnvm::NodeAttrs& attrs,
return true;
}

#if MXNET_USE_MKLDNN == 1
inline static bool ConvStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
std::vector<int>* in_attrs,
std::vector<int>* out_attrs) {
const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
uint32_t in_expected = param.no_bias ? 2 : 3;
CHECK_EQ(in_attrs->size(), in_expected);
CHECK_EQ(out_attrs->size(), 1);

DispatchMode wanted_mode;
#if MXNET_USE_MKLDNN == 1
if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
wanted_mode = DispatchMode::kFComputeFallback;
else if (dev_mask == mshadow::cpu::kDevMask)
wanted_mode = DispatchMode::kFComputeEx;
else
#endif
wanted_mode = DispatchMode::kFCompute;
return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs,
out_attrs);
}

inline static bool BackwardConvStorageType(const nnvm::NodeAttrs& attrs,
@@ -322,18 +317,10 @@ inline static bool BackwardConvStorageType(const nnvm::NodeAttrs& attrs,
CHECK_EQ(in_attrs->size(), in_expected);
CHECK_EQ(out_attrs->size(), out_expected);

DispatchMode wanted_mode;
#if MXNET_USE_MKLDNN == 1
if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
wanted_mode = DispatchMode::kFComputeFallback;
else if (dev_mask == mshadow::cpu::kDevMask)
wanted_mode = DispatchMode::kFComputeEx;
else
#endif
wanted_mode = DispatchMode::kFCompute;
return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs,
out_attrs);
}
#endif

void ConvolutionParamParser(nnvm::NodeAttrs* attrs) {
using namespace mshadow;
@@ -492,7 +479,9 @@ There are other options to tune the performance.
})
.set_attr<nnvm::FInferShape>("FInferShape", ConvolutionShape)
.set_attr<nnvm::FInferType>("FInferType", ConvolutionType)
#if MXNET_USE_MKLDNN == 1
.set_attr<FInferStorageType>("FInferStorageType", ConvStorageType)
#endif
.set_attr<FCompute>("FCompute<cpu>", ConvolutionCompute<cpu>)
#if MXNET_USE_MKLDNN == 1
.set_attr<FComputeEx>("FComputeEx<cpu>", ConvolutionComputeExCPU)
@@ -512,7 +501,9 @@ NNVM_REGISTER_OP(_backward_Convolution)
return params.no_bias ? 2 : 3;
})
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
#if MXNET_USE_MKLDNN == 1
.set_attr<FInferStorageType>("FInferStorageType", BackwardConvStorageType)
#endif
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
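Convolution passes a hard-coded true as the helper's support flag: on CPU with MKL-DNN compiled in and enabled it always prefers the kFComputeEx path, where Activation gated it on SupportMKLDNNAct(param). Both convolution hunks above therefore reduce to the same one-liner:

  return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs,
                           out_attrs);

Deconvolution and LRN below follow the identical pattern.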
36 changes: 13 additions & 23 deletions src/operator/nn/deconvolution.cc
@@ -25,8 +25,12 @@
*/

#include "./deconvolution-inl.h"
#include "../operator_common.h"
#include "../../common/utils.h"
#if MXNET_USE_MKLDNN == 1
#include "./mkldnn/mkldnn_ops-inl.h"
#include "./mkldnn/mkldnn_base-inl.h"
#endif

namespace mxnet {
namespace op {
@@ -256,6 +260,7 @@ static bool DeconvolutionType(const nnvm::NodeAttrs& attrs,
return true;
}

#if MXNET_USE_MKLDNN == 1
inline static bool DeconvStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
@@ -266,17 +271,8 @@ inline static bool DeconvStorageType(const nnvm::NodeAttrs& attrs,
CHECK_EQ(in_attrs->size(), in_expected);
CHECK_EQ(out_attrs->size(), 1);

DispatchMode wanted_mode;
#if MXNET_USE_MKLDNN == 1
if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
wanted_mode = DispatchMode::kFComputeFallback;
else if (dev_mask == mshadow::cpu::kDevMask)
wanted_mode = DispatchMode::kFComputeEx;
else
#endif
wanted_mode = DispatchMode::kFCompute;
return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs,
out_attrs);
}

inline static bool BackwardDeconvStorageType(const nnvm::NodeAttrs& attrs,
@@ -289,20 +285,10 @@ inline static bool BackwardDeconvStorageType(const nnvm::NodeAttrs& attrs,
CHECK_EQ(in_attrs->size(), param.no_bias ? 3U : 4U);
CHECK_EQ(out_attrs->size(), out_expected);

DispatchMode wanted_mode;
#if MXNET_USE_MKLDNN == 1
if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
wanted_mode = DispatchMode::kFComputeFallback;
else if (dev_mask == mshadow::cpu::kDevMask)
wanted_mode = DispatchMode::kFComputeEx;
else
#endif
wanted_mode = DispatchMode::kFCompute;
return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs,
out_attrs);
}

#if MXNET_USE_MKLDNN == 1
static void DeconvolutionComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
@@ -419,7 +405,9 @@ NNVM_REGISTER_OP(Deconvolution)
})
.set_attr<nnvm::FInferShape>("FInferShape", DeconvolutionShape)
.set_attr<nnvm::FInferType>("FInferType", DeconvolutionType)
#if MXNET_USE_MKLDNN == 1
.set_attr<FInferStorageType>("FInferStorageType", DeconvStorageType)
#endif
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
@@ -440,7 +428,9 @@ NNVM_REGISTER_OP(_backward_Deconvolution)
return params.no_bias ? 2 : 3;
})
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
#if MXNET_USE_MKLDNN == 1
.set_attr<FInferStorageType>("FInferStorageType", BackwardDeconvStorageType)
#endif
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
36 changes: 12 additions & 24 deletions src/operator/nn/lrn.cc
@@ -28,6 +28,7 @@
#include "../operator_common.h"
#if MXNET_USE_MKLDNN == 1
#include "./mkldnn/mkldnn_lrn-inl.h"
#include "./mkldnn/mkldnn_base-inl.h"
#endif

namespace mxnet {
@@ -81,24 +82,16 @@ struct LRNGrad {
}
};

#if MXNET_USE_MKLDNN == 1
bool LRNForwardInferStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK(!in_attrs->empty());
#if MXNET_USE_MKLDNN == 1
if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeFallback);
} else if (dev_mask == mshadow::cpu::kDevMask) {
return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
#endif
storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
return true;

return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs,
out_attrs);
}

bool LRNBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
@@ -107,20 +100,11 @@ bool LRNBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
CHECK(!in_attrs->empty());
#if MXNET_USE_MKLDNN == 1
if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet()) {
return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeFallback);
} else if (dev_mask == mshadow::cpu::kDevMask) {
return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
#endif
return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);

return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs,
out_attrs);
}

#if MXNET_USE_MKLDNN == 1
void LRNComputeExCPU(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
@@ -183,7 +167,9 @@ number of kernels in the layer.
.set_attr_parser(ParamParser<LRNParam>)
.set_attr<nnvm::FInferShape>("FInferShape", LRNShape)
.set_attr<nnvm::FInferType>("FInferType", LRNType)
#if MXNET_USE_MKLDNN == 1
.set_attr<FInferStorageType>("FInferStorageType", LRNForwardInferStorageType)
#endif
.set_attr<nnvm::FListInputNames>("FListInputNames",
[](const NodeAttrs& attrs) {
return std::vector<std::string>{"data"};
@@ -203,7 +189,9 @@ NNVM_REGISTER_OP(_backward_LRN)
NNVM_REGISTER_OP(_backward_LRN)
.set_num_outputs(1)
.set_attr_parser(ParamParser<LRNParam>)
#if MXNET_USE_MKLDNN == 1
.set_attr<FInferStorageType>("FInferStorageType", LRNBackwardInferStorageType)
#endif
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
#if MXNET_USE_MKLDNN == 1
.set_attr<FComputeEx>("FComputeEx<cpu>", LRNGradComputeExCPU)
4 changes: 3 additions & 1 deletion src/operator/nn/mkldnn/mkldnn_base.cc
@@ -536,7 +536,9 @@ bool MKLDNNStorageType(const nnvm::NodeAttrs &attrs,

DispatchMode wanted_mode;
#if MXNET_USE_MKLDNN == 1
if (dev_mask == mshadow::cpu::kDevMask && support_mkldnn)
if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
wanted_mode = DispatchMode::kFComputeFallback;
else if (dev_mask == mshadow::cpu::kDevMask && support_mkldnn)
wanted_mode = DispatchMode::kFComputeEx;
else
#endif
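
Putting the hunk back together, the helper's dispatch choice after this patch is the block below. The signature is completed from the call sites in the other files, and the lines before and after the hunk are not part of this diff, so the argument checks and the trailing storage_type_assign are assumptions, patterned on the call sites this PR deletes:

bool MKLDNNStorageType(const nnvm::NodeAttrs &attrs,
                       const int dev_mask,
                       bool support_mkldnn,
                       DispatchMode *dispatch_mode,
                       std::vector<int> *in_attrs,
                       std::vector<int> *out_attrs) {
  // ... input/output attribute checks elided (outside the hunk) ...
  DispatchMode wanted_mode;
#if MXNET_USE_MKLDNN == 1
  if (dev_mask == mshadow::cpu::kDevMask && !MKLDNNEnvSet())
    // Runtime kill-switch: MKL-DNN disabled, densify and use plain FCompute.
    wanted_mode = DispatchMode::kFComputeFallback;
  else if (dev_mask == mshadow::cpu::kDevMask && support_mkldnn)
    // CPU and the operator supports MKL-DNN: take the FComputeEx path.
    wanted_mode = DispatchMode::kFComputeEx;
  else
#endif
    wanted_mode = DispatchMode::kFCompute;
  // Assumed tail, mirroring the deleted per-operator call sites:
  return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                             dispatch_mode, wanted_mode);
}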