This repository has been archived by the owner on Nov 17, 2023. It is now read-only.

Fallback Amend
This is the final fix for the fallback problem.
luobao-intel committed Jul 12, 2018
1 parent 2e68c96 commit fd1266a
Showing 7 changed files with 270 additions and 70 deletions.
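
Every file in this commit applies the same fix to its storage-type inference: choose the wanted DispatchMode (the MKLDNN kFComputeEx path on CPU where supported, plain kFCompute otherwise), claim default storage only when every input actually uses default storage, and otherwise register a fallback dispatch instead of asserting success. What follows is a minimal sketch of that pattern, not code from the commit; HypotheticalStorageType is an illustrative name, and the helpers (storage_type_assign, dispatch_fallback, common::ContainsOnlyStorage) are the ones visible in the diffs below.

// Sketch only: the fallback-aware storage-type inference pattern this commit
// installs in each operator. HypotheticalStorageType is a made-up name.
inline static bool HypotheticalStorageType(const nnvm::NodeAttrs& attrs,
                                           const int dev_mask,
                                           DispatchMode* dispatch_mode,
                                           std::vector<int>* in_attrs,
                                           std::vector<int>* out_attrs) {
  // Prefer the MKLDNN kFComputeEx path on CPU builds that have it.
  DispatchMode wanted_mode;
#if MXNET_USE_MKLDNN == 1
  if (dev_mask == mshadow::cpu::kDevMask)
    wanted_mode = DispatchMode::kFComputeEx;
  else
#endif
    wanted_mode = DispatchMode::kFCompute;

  // Claim the wanted mode only when every input uses default (dense) storage.
  bool dispatched = false;
  if (common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
    dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
                                         dispatch_mode, wanted_mode);
  }
  // Otherwise fall back rather than returning true unconditionally, which is
  // what the removed code did.
  if (!dispatched) {
    dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
  }
  return dispatched;
}

Each diff below is this pattern specialized to one operator, plus the operator_common.h and common/utils.h includes the helpers need.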
48 changes: 24 additions & 24 deletions src/operator/nn/activation.cc
@@ -31,6 +31,8 @@
 #include "./mkldnn/mkldnn_base-inl.h"
 #include "./mkldnn/mkldnn_ops-inl.h"
 #endif  // MXNET_USE_MKLDNN
+#include "../operator_common.h"
+#include "../../common/utils.h"
 
 namespace mxnet {
 namespace op {
@@ -100,16 +102,16 @@ inline static bool ActivationStorageType(const nnvm::NodeAttrs& attrs,
                                          std::vector<int> *out_attrs) {
   CHECK_EQ(in_attrs->size(), 1);
   CHECK_EQ(out_attrs->size(), 1);
-  bool ret = ElemwiseStorageType<1, 1, false, false, false>(attrs, dev_mask,
-                                                            dispatch_mode,
-                                                            in_attrs, out_attrs);
 #if MXNET_USE_MKLDNN == 1
   const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
-    *dispatch_mode = DispatchMode::kFComputeEx;
-  }
+  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param))
+    return ElemwiseStorageType<1, 1, false, false, false>(
+        attrs, dev_mask, dispatch_mode, in_attrs, out_attrs);
+  else
+    return op::dispatch_fallback(out_attrs, dispatch_mode);
 #endif
-  return ret;
+  return ElemwiseStorageType<1, 1, false, false, false>(
+      attrs, dev_mask, dispatch_mode, in_attrs, out_attrs);
 }
 
 inline static bool BackwardActStorageType(const nnvm::NodeAttrs& attrs,
@@ -120,30 +122,28 @@ inline static bool BackwardActStorageType(const nnvm::NodeAttrs& attrs,
   bool ret = false;
 #if (MXNET_USE_CUDNN == 1 || MXNET_USE_MKLDNN == 1)
   const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
-  if (param.act_type != activation::kReLU) {
-    CHECK_EQ(in_attrs->size(), 3U);
-    ret = ElemwiseStorageType<3, 1, false, false, false>(attrs, dev_mask,
-                                                         dispatch_mode,
-                                                         in_attrs, out_attrs);
-  } else {
-    // for ReLU activation, the backward pass only needs ograd and output
-    CHECK_EQ(in_attrs->size(), 2U);
-    ret = ElemwiseStorageType<2, 1, false, false, false>(attrs, dev_mask,
-                                                         dispatch_mode,
-                                                         in_attrs, out_attrs);
-  }
+  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param))
+    if (param.act_type != activation::kReLU) {
+      CHECK_EQ(in_attrs->size(), 3U);
+      ret = ElemwiseStorageType<3, 1, false, false, false>(attrs, dev_mask,
+                                                           dispatch_mode,
+                                                           in_attrs, out_attrs);
+    } else {
+      // for ReLU activation, the backward pass only needs ograd and output
+      CHECK_EQ(in_attrs->size(), 2U);
+      ret = ElemwiseStorageType<2, 1, false, false, false>(attrs, dev_mask,
+                                                           dispatch_mode,
+                                                           in_attrs, out_attrs);
+    }
+  else
+    ret = op::dispatch_fallback(out_attrs, dispatch_mode);
 #else
   CHECK_EQ(in_attrs->size(), 2U);
   ret = ElemwiseStorageType<2, 1, false, false, false>(attrs, dev_mask,
                                                        dispatch_mode,
                                                        in_attrs, out_attrs);
 #endif
   CHECK_EQ(out_attrs->size(), 1U);
-#if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
-    *dispatch_mode = DispatchMode::kFComputeEx;
-  }
-#endif
   return ret;
 }

25 changes: 21 additions & 4 deletions src/operator/nn/convolution.cc
@@ -25,6 +25,8 @@
  */
 
 #include "./convolution-inl.h"
+#include "../operator_common.h"
+#include "../../common/utils.h"
 #include "../elemwise_op_common.h"
 #include "./mkldnn/mkldnn_ops-inl.h"
 #include "./mkldnn/mkldnn_base-inl.h"
@@ -305,8 +307,16 @@ inline static bool ConvStorageType(const nnvm::NodeAttrs& attrs,
   else
 #endif
     wanted_mode = DispatchMode::kFCompute;
-  return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                             dispatch_mode, wanted_mode);
+
+  bool dispatched = false;
+  if (common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
+    dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                                         dispatch_mode, wanted_mode);
+  }
+  if (!dispatched) {
+    dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+  }
+  return dispatched;
 }
 
 inline static bool BackwardConvStorageType(const nnvm::NodeAttrs& attrs,
@@ -327,8 +337,15 @@ inline static bool BackwardConvStorageType(const nnvm::NodeAttrs& attrs,
   else
 #endif
     wanted_mode = DispatchMode::kFCompute;
-  return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                             dispatch_mode, wanted_mode);
+  bool dispatched = false;
+  if (common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
+    dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                                         dispatch_mode, wanted_mode);
+  }
+  if (!dispatched) {
+    dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+  }
+  return dispatched;
 }
 
 void ConvolutionParamParser(nnvm::NodeAttrs* attrs) {
26 changes: 22 additions & 4 deletions src/operator/nn/deconvolution.cc
@@ -27,6 +27,8 @@
 #include "./deconvolution-inl.h"
 #include "./mkldnn/mkldnn_ops-inl.h"
 #include "./mkldnn/mkldnn_base-inl.h"
+#include "../operator_common.h"
+#include "../../common/utils.h"
 
 namespace mxnet {
 namespace op {
@@ -273,8 +275,16 @@ inline static bool DeconvStorageType(const nnvm::NodeAttrs& attrs,
   else
 #endif
     wanted_mode = DispatchMode::kFCompute;
-  return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                             dispatch_mode, wanted_mode);
+
+  bool dispatched = false;
+  if (common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
+    dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                                         dispatch_mode, wanted_mode);
+  }
+  if (!dispatched) {
+    dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+  }
+  return dispatched;
 }
 
 inline static bool BackwardDeconvStorageType(const nnvm::NodeAttrs& attrs,
@@ -294,8 +304,16 @@ inline static bool BackwardDeconvStorageType(const nnvm::NodeAttrs& attrs,
   else
 #endif
     wanted_mode = DispatchMode::kFCompute;
-  return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                             dispatch_mode, wanted_mode);
+
+  bool dispatched = false;
+  if (common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
+    dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                                         dispatch_mode, wanted_mode);
+  }
+  if (!dispatched) {
+    dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+  }
+  return dispatched;
 }
 
 #if MXNET_USE_MKLDNN == 1
47 changes: 31 additions & 16 deletions src/operator/nn/lrn.cc
@@ -26,6 +26,7 @@
 
 #include "./lrn-inl.h"
 #include "../operator_common.h"
+#include "../../common/utils.h"
 #if MXNET_USE_MKLDNN == 1
 #include "./mkldnn/mkldnn_lrn-inl.h"
 #endif
@@ -87,16 +88,23 @@ bool LRNForwardInferStorageType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int> *in_attrs,
                                 std::vector<int> *out_attrs) {
   CHECK(!in_attrs->empty());
+  DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
-    storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                        dispatch_mode, DispatchMode::kFComputeEx);
-    return true;
-  }
+  if (dev_mask == mshadow::cpu::kDevMask)
+    wanted_mode = DispatchMode::kFComputeEx;
+  else
 #endif
-  storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                      dispatch_mode, DispatchMode::kFCompute);
-  return true;
+    wanted_mode = DispatchMode::kFCompute;
+
+  bool dispatched = false;
+  if (common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
+    dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                                         dispatch_mode, wanted_mode);
+  }
+  if (!dispatched) {
+    dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+  }
+  return dispatched;
 }
 
 bool LRNBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
@@ -105,16 +113,23 @@ bool LRNBackwardInferStorageType(const nnvm::NodeAttrs& attrs,
                                 std::vector<int> *in_attrs,
                                 std::vector<int> *out_attrs) {
   CHECK(!in_attrs->empty());
+  DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
-    storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                        dispatch_mode, DispatchMode::kFComputeEx);
-    return true;
-  }
+  if (dev_mask == mshadow::cpu::kDevMask)
+    wanted_mode = DispatchMode::kFComputeEx;
+  else
 #endif
-  storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                      dispatch_mode, DispatchMode::kFCompute);
-  return true;
+    wanted_mode = DispatchMode::kFCompute;
+
+  bool dispatched = false;
+  if (common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
+    dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                                         dispatch_mode, wanted_mode);
+  }
+  if (!dispatched) {
+    dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+  }
+  return dispatched;
 }
 
 #if MXNET_USE_MKLDNN == 1
45 changes: 32 additions & 13 deletions src/operator/nn/pooling.cc
@@ -25,6 +25,8 @@
  */
 #include "../elemwise_op_common.h"
 #include "./pooling-inl.h"
+#include "../operator_common.h"
+#include "../../common/utils.h"
 #if MXNET_USE_NNPACK == 1
 #include "../nnpack/nnpack_pooling-inl.h"
 #endif  // MXNET_USE_NNPACK
@@ -276,18 +278,26 @@ inline static bool PoolingStorageType(const nnvm::NodeAttrs &attrs,
                                       std::vector<int> *in_attrs,
                                       std::vector<int> *out_attrs) {
   CHECK_EQ(in_attrs->size(), 1);
+
+  DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
   const PoolingParam &param = nnvm::get<PoolingParam>(attrs.parsed);
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
-    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                               dispatch_mode, DispatchMode::kFComputeEx);
-  }
+  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param))
+    wanted_mode = DispatchMode::kFComputeEx;
+  else
 #else
   CHECK_EQ(out_attrs->size(), 1);
 #endif
-  return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                             dispatch_mode, DispatchMode::kFCompute);
+    wanted_mode = DispatchMode::kFCompute;
+
+  bool dispatched = false;
+  if (common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
+    dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                                         dispatch_mode, wanted_mode);
+  }
+  if (!dispatched) {
+    dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+  }
+  return dispatched;
 }
 
 inline static bool BackwardPoolingStorageType(const nnvm::NodeAttrs &attrs,
@@ -299,16 +309,25 @@ inline static bool BackwardPoolingStorageType(const nnvm::NodeAttrs &attrs,
   CHECK_EQ(in_attrs->size(), GetNumBackInputs(param));
   CHECK_EQ(out_attrs->size(), 1);
 
+  DispatchMode wanted_mode;
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
-    return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                               dispatch_mode, DispatchMode::kFComputeEx);
-  }
+  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param))
+    wanted_mode = DispatchMode::kFComputeEx;
+  else
 #else
   CHECK_EQ(in_attrs->size(), 3);
 #endif
-  return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
-                             dispatch_mode, DispatchMode::kFCompute);
+    wanted_mode = DispatchMode::kFCompute;
+
+  bool dispatched = false;
+  if (common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
+    dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                                         dispatch_mode, wanted_mode);
+  }
+  if (!dispatched) {
+    dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+  }
+  return dispatched;
 }
 
 DMLC_REGISTER_PARAMETER(PoolingParam);
19 changes: 10 additions & 9 deletions src/operator/nn/softmax.cc
@@ -27,7 +27,8 @@
 #include "../tensor/elemwise_binary_op.h"
 #include "mkldnn/mkldnn_base-inl.h"
 #include "mkldnn/mkldnn_ops-inl.h"
-#include "../../operator_common.h"
+#include "../operator_common.h"
+#include "../../common/utils.h"
 
 namespace mxnet {
 namespace op {
@@ -50,6 +51,7 @@ static void SoftmaxComputeExCPU(const nnvm::NodeAttrs& attrs,
   FallBackCompute(SoftmaxCompute<cpu, mxnet_op::softmax_fwd>, attrs, ctx,
                   inputs, req, outputs);
 }
+#endif
 
 inline static bool SoftmaxStorageType(const nnvm::NodeAttrs& attrs,
                                       const int dev_mask,
@@ -69,16 +71,15 @@ inline static bool SoftmaxStorageType(const nnvm::NodeAttrs& attrs,
     wanted_mode = DispatchMode::kFCompute;
 
   bool dispatched = false;
-  if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)){
-    dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage, dispatch_mode, wanted_mode);
-  }
-  if (!dispatched){
-    dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
-  }
-
+  if (common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
+    dispatched = op::storage_type_assign(out_attrs, mxnet::kDefaultStorage,
+                                         dispatch_mode, wanted_mode);
+  }
+  if (!dispatched) {
+    dispatched = op::dispatch_fallback(out_attrs, dispatch_mode);
+  }
   return dispatched;
 }
-#endif
 
 MXNET_OPERATOR_REGISTER_UNARY(softmax)
 .describe(R"code(Applies the softmax function.
