Commit
Limit MKLDNN ops being used.
zheng-da committed Dec 9, 2017
1 parent 53eec60 commit 75e2ae5
Showing 5 changed files with 52 additions and 14 deletions.
10 changes: 8 additions & 2 deletions src/operator/nn/activation.cc
@@ -98,7 +98,10 @@ inline static bool ActivationStorageType(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(out_attrs->size(), 1);
   const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
+  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)
+      // There is no reason to use MKLDNN activation if the input isn't in
+      // MKLDNN format.
+      && in_attrs->at(0) == kMKLDNNStorage) {
     *dispatch_mode = DispatchMode::kFComputeEx;
     (*out_attrs)[0] = kMKLDNNStorage;
     return true;
@@ -121,7 +124,10 @@ inline static bool backward_ActStorageType(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(out_attrs->size(), 1U);
   const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)) {
+  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param)
+      // There is no reason to use MKLDNN activation if the input isn't in
+      // MKLDNN format.
+      && in_attrs->at(0) == kMKLDNNStorage) {
     *dispatch_mode = DispatchMode::kFComputeEx;
     (*out_attrs)[0] = kMKLDNNStorage;
     return true;
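Taken together, the two activation hunks gate the MKLDNN path on the input's storage type. Below is a minimal sketch of what the forward storage-type function looks like after this change; the non-MKLDNN fallback is an assumption, since the diff truncates the function before that branch.

```cpp
// Sketch of ActivationStorageType after this commit. The fallback branch is an
// assumption: the diff does not show the non-MKLDNN path.
inline static bool ActivationStorageType(const nnvm::NodeAttrs& attrs,
                                         const int dev_mask,
                                         DispatchMode* dispatch_mode,
                                         std::vector<int>* in_attrs,
                                         std::vector<int>* out_attrs) {
  CHECK_EQ(in_attrs->size(), 1);
  CHECK_EQ(out_attrs->size(), 1);
  const ActivationParam& param = nnvm::get<ActivationParam>(attrs.parsed);
#if MXNET_USE_MKLDNN == 1
  // Use the MKLDNN kernel only when running on CPU, the activation type is
  // supported by MKLDNN, and the input is already in MKLDNN format; otherwise
  // the format conversion would outweigh any gain from the MKLDNN kernel.
  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNAct(param) &&
      in_attrs->at(0) == kMKLDNNStorage) {
    *dispatch_mode = DispatchMode::kFComputeEx;
    (*out_attrs)[0] = kMKLDNNStorage;
    return true;
  }
#endif
  // Assumed fallback: plain FCompute on default-storage tensors.
  *dispatch_mode = DispatchMode::kFCompute;
  (*out_attrs)[0] = kDefaultStorage;
  return true;
}
```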
22 changes: 16 additions & 6 deletions src/operator/nn/convolution.cc
@@ -293,17 +293,22 @@ static bool ConvolutionType(const nnvm::NodeAttrs& attrs,
 }
 
 inline static bool ConvStorageType(const nnvm::NodeAttrs& attrs,
-    const int dev_mask,
-    DispatchMode* dispatch_mode,
-    std::vector<int> *in_attrs,
-    std::vector<int> *out_attrs) {
+                                   const int dev_mask,
+                                   DispatchMode* dispatch_mode,
+                                   std::vector<int> *in_attrs,
+                                   std::vector<int> *out_attrs) {
   const ConvolutionParam& param = nnvm::get<ConvolutionParam>(attrs.parsed);
   uint32_t in_expected = param.no_bias ? 2 : 3;
   CHECK_EQ(in_attrs->size(), in_expected);
   CHECK_EQ(out_attrs->size(), 1);
 
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
+  if (dev_mask == mshadow::cpu::kDevMask
+      // We should allow MKLDNN conv to apply to the default storage as well.
+      // Even with format conversion, MKLDNN conv should still be faster than
+      // the native implementation.
+      && (in_attrs->at(0) == kMKLDNNStorage
+          || in_attrs->at(0) == kDefaultStorage)) {
     *dispatch_mode = DispatchMode::kFComputeEx;
     (*out_attrs)[0] = kMKLDNNStorage;
     return true;
@@ -326,7 +331,12 @@ inline static bool backward_ConvStorageType(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(out_attrs->size(), out_expected);
 
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
+  if (dev_mask == mshadow::cpu::kDevMask
+      // We should allow MKLDNN conv to apply to the default storage as well.
+      // Even with format conversion, MKLDNN conv should still be faster than
+      // the native implementation.
+      && (in_attrs->at(0) == kMKLDNNStorage
+          || in_attrs->at(0) == kDefaultStorage)) {
     *dispatch_mode = DispatchMode::kFComputeEx;
     for (size_t i = 0; i < out_attrs->size(); i++)
       (*out_attrs)[i] = kMKLDNNStorage;
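Convolution takes the opposite policy from activation: MKLDNN conv is expected to beat the native implementation even when the input first has to be converted from the default format, so both storage types are routed to the MKLDNN path. A hedged sketch of that check factored into a standalone predicate (the helper name is illustrative, not part of this commit):

```cpp
// Hypothetical helper, not in this commit: decides whether convolution's
// storage-type inference should take the MKLDNN path. Default-storage inputs
// are accepted too, since MKLDNN conv is expected to win even after paying
// for the format conversion.
inline bool ConvPrefersMKLDNN(const int dev_mask,
                              const std::vector<int>& in_attrs) {
  return dev_mask == mshadow::cpu::kDevMask &&
         (in_attrs.at(0) == kMKLDNNStorage || in_attrs.at(0) == kDefaultStorage);
}
```

The deconvolution hunks below apply the same rule; the comments there still say "conv", but the reasoning carries over.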
14 changes: 12 additions & 2 deletions src/operator/nn/deconvolution.cc
@@ -267,7 +267,12 @@ inline static bool DeconvStorageType(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(out_attrs->size(), 1);
 
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
+  if (dev_mask == mshadow::cpu::kDevMask
+      // We should allow MKLDNN conv to apply to the default storage as well.
+      // Even with format conversion, MKLDNN conv should still be faster than
+      // the native implementation.
+      && (in_attrs->at(0) == kMKLDNNStorage
+          || in_attrs->at(0) == kDefaultStorage)) {
     *dispatch_mode = DispatchMode::kFComputeEx;
     (*out_attrs)[0] = kMKLDNNStorage;
     return true;
@@ -293,7 +298,12 @@ inline static bool backward_DeconvStorageType(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(out_attrs->size(), out_expected);
 
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
+  if (dev_mask == mshadow::cpu::kDevMask
+      // We should allow MKLDNN conv to apply to the default storage as well.
+      // Even with format conversion, MKLDNN conv should still be faster than
+      // the native implementation.
+      && (in_attrs->at(0) == kMKLDNNStorage
+          || in_attrs->at(0) == kDefaultStorage)) {
     *dispatch_mode = DispatchMode::kFComputeEx;
     for (size_t i = 0; i < out_attrs->size(); i++)
       (*out_attrs)[i] = kMKLDNNStorage;
10 changes: 8 additions & 2 deletions src/operator/nn/fully_connected.cc
@@ -138,7 +138,10 @@ inline static bool FCStorageType(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(out_attrs->size(), 1);
 
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
+  // The native implementation uses BLAS. It shouldn't be slower than MKLDNN
+  // FC. If the input data has the default format, there is format conversion
+  // overhead as well.
+  if (dev_mask == mshadow::cpu::kDevMask && in_attrs->at(0) == kMKLDNNStorage) {
     *dispatch_mode = DispatchMode::kFComputeEx;
     (*out_attrs)[0] = kMKLDNNStorage;
     return true;
@@ -160,7 +163,10 @@ inline static bool backward_FCStorageType(const nnvm::NodeAttrs& attrs,
   CHECK_EQ(out_attrs->size(), out_expected);
 
 #if MXNET_USE_MKLDNN == 1
-  if (dev_mask == mshadow::cpu::kDevMask) {
+  // The native implementation uses BLAS. It shouldn't be slower than MKLDNN
+  // FC. If the input data has the default format, there is format conversion
+  // overhead as well.
+  if (dev_mask == mshadow::cpu::kDevMask && in_attrs->at(0) == kMKLDNNStorage) {
     *dispatch_mode = DispatchMode::kFComputeEx;
     for (size_t i = 0; i < out_attrs->size(); i++)
       (*out_attrs)[i] = kMKLDNNStorage;
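Fully connected is gated like activation, but for a different reason: the native kernel already uses BLAS, so MKLDNN only pays off when the input is already in MKLDNN format and no conversion is needed. A hedged sketch of that gate (illustrative helper name, not part of this commit):

```cpp
// Hypothetical helper, not in this commit: the FC storage-type inference only
// chooses MKLDNN when the input already carries MKLDNN storage. For
// default-format inputs, the BLAS-backed native kernel is at least as fast
// and avoids the format conversion.
inline bool FCPrefersMKLDNN(const int dev_mask,
                            const std::vector<int>& in_attrs) {
  return dev_mask == mshadow::cpu::kDevMask && in_attrs.at(0) == kMKLDNNStorage;
}
```

The pooling hunks below use the same input-format gate as activation, with the additional SupportMKLDNNPooling(param) check.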
10 changes: 8 additions & 2 deletions src/operator/nn/pooling.cc
@@ -300,7 +300,10 @@ inline static bool PoolingStorageType(const nnvm::NodeAttrs &attrs,

 #if MXNET_USE_MKLDNN == 1
   const PoolingParam &param = nnvm::get<PoolingParam>(attrs.parsed);
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
+  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)
+      // There is no reason to use MKLDNN pooling if the input isn't in
+      // MKLDNN format.
+      && in_attrs->at(0) == kMKLDNNStorage) {
     *dispatch_mode = DispatchMode::kFComputeEx;
     for (size_t i = 0; i < out_attrs->size(); i++)
       (*out_attrs)[i] = kMKLDNNStorage;
@@ -322,7 +325,10 @@ inline static bool backward_PoolingStorageType(const nnvm::NodeAttrs &attrs,

 #if MXNET_USE_MKLDNN == 1
   const PoolingParam &param = nnvm::get<PoolingParam>(attrs.parsed);
-  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)) {
+  if (dev_mask == mshadow::cpu::kDevMask && SupportMKLDNNPooling(param)
+      // There is no reason to use MKLDNN pooling if the input isn't in
+      // MKLDNN format.
+      && in_attrs->at(0) == kMKLDNNStorage) {
     *dispatch_mode = DispatchMode::kFComputeEx;
     for (size_t i = 0; i < out_attrs->size(); i++)
       (*out_attrs)[i] = kMKLDNNStorage;
