[mkldnn-v1.0] Enable mkldnn cpp-test, copy op, concat op (#16503)
* [mkldnn-v1.0] Enable mkldnn test, copy op, concat op

Exclude gpu topology via MXNET_USE_CUDA

nit

default format

Remove whitespace

* Unix-GPU Tensor-RT build timeout, re-trigger CI
zixuanweeei authored and pengzhao-intel committed Oct 17, 2019
1 parent b3e02b1 commit b5cdabe
Showing 10 changed files with 242 additions and 238 deletions.
24 changes: 12 additions & 12 deletions src/operator/nn/concat.cc
@@ -203,7 +203,7 @@ inline static bool ConcatForwardInferStorageType(const nnvm::NodeAttrs& attrs,
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFComputeEx);
}
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
@@ -214,7 +214,7 @@ inline static bool ConcatForwardInferStorageType(const nnvm::NodeAttrs& attrs,
#if MXNET_USE_MKLDNN == 100
if (!MKLDNNEnvSet())
*dispatch_mode = DispatchMode::kFComputeFallback;
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
return dispatched;
}

@@ -232,12 +232,12 @@ inline static bool BackwardConcatStorageType(const nnvm::NodeAttrs& attrs,
&& param.dim > 0)
wanted_mode = DispatchMode::kFComputeEx;
else
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
wanted_mode = DispatchMode::kFCompute;
#if MXNET_USE_MKLDNN == 100
if (!MKLDNNEnvSet())
wanted_mode = DispatchMode::kFComputeFallback;
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
return storage_type_assign(out_attrs, mxnet::kDefaultStorage,
dispatch_mode, wanted_mode);
}
@@ -254,7 +254,7 @@ bool SupportMKLDNNConcat(const std::vector<NDArray> &arrs) {
}
return true;
}
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
static void ConcatComputeExCPU(const nnvm::NodeAttrs& attrs,
const OpContext& op_ctx,
const std::vector<NDArray>& inputs,
@@ -274,7 +274,7 @@ static void ConcatComputeExCPU(const nnvm::NodeAttrs& attrs,
MKLDNN_OPCHECK_RUN(ConcatCompute<cpu>, attrs, op_ctx, inputs, req, outputs);
} else if (common::ContainsOnlyStorage(inputs, kDefaultStorage)) {
FallBackCompute(ConcatCompute<cpu>, attrs, op_ctx, inputs, req, outputs);
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
} else {
LogUnimplementedOp(attrs, op_ctx, inputs, req, outputs);
}
@@ -294,7 +294,7 @@ static void ConcatGradComputeExCPU(const nnvm::NodeAttrs& attrs,
}
FallBackCompute(ConcatGradCompute<cpu>, attrs, ctx, inputs, req, outputs);
}
-#endif
+#endif  // MXNET_USE_MKLDNN == 100

struct ConcatGrad {
const char *op_name;
@@ -306,7 +306,7 @@ struct ConcatGrad {
for (size_t i = 0; i < n->inputs.size(); i++) {
heads.push_back(n->inputs[i]);
}
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
return MakeGradNode(op_name, n, heads, n->attrs.dict);
}
};
@@ -386,7 +386,7 @@ Example::
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr<bool>("TIsMKLDNN", true)
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
CONCAT_FORWARD_ATTRS
.set_attr<mxnet::FInferShape>("FInferShape", ConcatShape)
.add_argument("data", "NDArray-or-Symbol[]", "List of arrays to concatenate")
@@ -402,13 +402,13 @@ NNVM_REGISTER_OP(_backward_Concat)
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
.set_attr<nnvm::TIsBackward>("TIsBackward", true)
.set_attr<FInferStorageType>("FInferStorageType", BackwardConcatStorageType)
#if MXNET_USE_MKLDNN == 100
.set_attr<bool>("TIsMKLDNN", true)
.set_attr<FComputeEx>("FComputeEx<cpu>", ConcatGradComputeExCPU)
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
.set_attr<FCompute>("FCompute<cpu>", ConcatGradCompute<cpu>);

// _rnn_param_concat is a custom concat op with specialized infer_shape,
@@ -420,7 +420,7 @@ NNVM_REGISTER_OP(_rnn_param_concat)
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
CONCAT_FORWARD_ATTRS
.set_attr<mxnet::FInferShape>("FInferShape", RNNParamConcatShape)
.add_argument("data", "NDArray-or-Symbol[]", "List of arrays to concatenate")
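Note on the pattern in these hunks: each storage-type inference function first picks DispatchMode::kFComputeEx when MKLDNN can take the inputs (default storage, and for the backward pass a positive concat axis), falls back to kFCompute otherwise, and finally downgrades to kFComputeFallback when MKLDNN is disabled at runtime. Below is a minimal standalone sketch of that flow; DispatchMode is a simplified stand-in for the MXNet enum, and MKLDNNEnvSet() is assumed to read the MXNET_MKLDNN_ENABLED environment variable as in mkldnn_base-inl.h, so treat the names as illustrative rather than the real API.

// Minimal standalone sketch of the env-gated dispatch used in the hunks above.
#include <cstdlib>
#include <cstring>
#include <iostream>

enum class DispatchMode { kFCompute, kFComputeEx, kFComputeFallback };

// MKLDNN kernels stay enabled unless the user exports MXNET_MKLDNN_ENABLED=0
// (assumption based on mkldnn_base-inl.h).
bool MKLDNNEnvSet() {
  const char* v = std::getenv("MXNET_MKLDNN_ENABLED");
  return v == nullptr || std::strcmp(v, "0") != 0;
}

DispatchMode ChooseDispatch(bool default_storage, int concat_dim) {
  // Prefer the MKLDNN kernel when inputs are default storage and the
  // concat axis is positive, mirroring BackwardConcatStorageType above.
  DispatchMode mode = (default_storage && concat_dim > 0)
                          ? DispatchMode::kFComputeEx
                          : DispatchMode::kFCompute;
  // The runtime kill switch that the guards above add in each infer function.
  if (!MKLDNNEnvSet()) mode = DispatchMode::kFComputeFallback;
  return mode;
}

int main() {
  std::cout << static_cast<int>(ChooseDispatch(true, 1)) << "\n";
}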
2 changes: 1 addition & 1 deletion src/operator/nn/mkldnn/mkldnn_concat.cc
@@ -101,4 +101,4 @@ void MKLDNNConcatBackward(const nnvm::NodeAttrs& attrs, const OpContext &ctx,

} // namespace op
} // namespace mxnet
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
16 changes: 8 additions & 8 deletions src/operator/tensor/elemwise_unary_op_basic.cc
@@ -204,7 +204,7 @@ static void CopyEx(const nnvm::NodeAttrs& attrs,
const std::vector<NDArray>& outputs) {
CHECK_EQ(inputs.size(), 1U);
CHECK_EQ(outputs.size(), 1U);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_MKLDNN == 100
const auto in_stype = inputs[0].storage_type();
const auto out_stype = outputs[0].storage_type();
if (inputs[0].IsMKLDNNData()) {
@@ -217,7 +217,7 @@ static void CopyEx(const nnvm::NodeAttrs& attrs,
FallBackCompute(UnaryOp::IdentityCompute<cpu>, attrs, ctx, inputs, req, outputs);
return;
}
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
UnaryOp::IdentityComputeEx<cpu>(attrs, ctx, inputs, req, outputs);
}

@@ -230,15 +230,15 @@ static inline bool CopyStorageType(const nnvm::NodeAttrs& attrs,
CHECK_EQ(out_attrs->size(), 1);
bool ret = ElemwiseStorageType<1, 1, false, true, true>(attrs, dev_mask, dispatch_mode,
in_attrs, out_attrs);
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_MKLDNN == 100
// We have to make sure all inputs are default layouts. Otherwise, we might
// want to fallback.
if (dev_mask == mshadow::cpu::kDevMask
&& in_attrs->at(0) == kDefaultStorage
&& out_attrs->at(0) == kDefaultStorage) {
*dispatch_mode = DispatchMode::kFComputeEx;
}
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
return ret;
}

@@ -248,12 +248,12 @@ MXNET_OPERATOR_REGISTER_UNARY(_copy)
.set_attr<FInferStorageType>("FInferStorageType", CopyStorageType)
.set_attr<FCompute>("FCompute<cpu>", UnaryOp::IdentityCompute<cpu>)
.set_attr<FComputeEx>("FComputeEx<cpu>", CopyEx)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_MKLDNN == 100
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
})
.set_attr<bool>("TIsMKLDNN", true)
-#endif
+#endif  // MXNET_USE_MKLDNN == 100
.set_attr<nnvm::FInplaceIdentity>("FInplaceIdentity",
[](const NodeAttrs& attrs){
return std::vector<bool>{true};
@@ -271,11 +271,11 @@ NNVM_REGISTER_OP(_backward_copy)
.set_attr<FInferStorageType>("FInferStorageType", CopyStorageType)
.set_attr<FCompute>("FCompute<cpu>", UnaryOp::IdentityCompute<cpu>)
.set_attr<FComputeEx>("FComputeEx<cpu>", CopyEx)
-#if MXNET_USE_MKLDNN == 1
+#if MXNET_USE_MKLDNN == 100
.set_attr<bool>("TIsMKLDNN", true)
.set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
-})
+})  // MXNET_USE_MKLDNN == 100
#endif
.set_attr<nnvm::FInplaceIdentity>("FInplaceIdentity",
[](const NodeAttrs& attrs){
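Note on the guard values in this file: the hunks flip #if MXNET_USE_MKLDNN == 1 to == 100 because the v1.0 port keys its code paths on the value 100 (the legacy integration used 1), and the trailing comments added to the closing lines make the long conditional blocks easier to match up by eye. A minimal illustration of the convention follows; the #define here is for demonstration only, since in MXNet the build system sets this macro, not user code.

// The value encodes which MKL-DNN integration is compiled in: 1 for the
// legacy API, 100 for v1.0 (inferred from the hunks above).
#define MXNET_USE_MKLDNN 100

#if MXNET_USE_MKLDNN == 100
// v1.0-only code path, e.g. registering TIsMKLDNN/FComputeEx attributes.
#endif  // MXNET_USE_MKLDNN == 100

int main() { return 0; }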
2 changes: 1 addition & 1 deletion tests/cpp/include/test_core_op.h
@@ -79,7 +79,7 @@ class CoreOpExecutor : public test::op::OperatorDataInitializer<DType>
keys.emplace_back(i_iter->first.c_str());
values.emplace_back(i_iter->second.c_str());
}
-return imperative::ParseAttrs(op, op->num_inputs, count, &keys[0], &values[0]);
+return imperative::ParseAttrs(op, op->num_inputs, count, keys.data(), values.data());
}

/*!
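Note on the test-harness change: when an operator is created with no attributes, keys and values are empty vectors, and &keys[0] indexes a nonexistent element, which is undefined behavior in C++, while std::vector::data() is well-defined even on an empty vector. A short self-contained demonstration of the distinction (variable names here are illustrative, not from the harness):

// data() is safe on an empty std::vector; &v[0] is not.
#include <iostream>
#include <vector>

int main() {
  std::vector<const char*> keys;  // empty, e.g. an op invoked with no attrs
  const char** p = keys.data();   // OK: well-defined even when keys is empty
  // const char** q = &keys[0];   // undefined behavior when keys.empty()
  std::cout << (p == nullptr ? "null pointer" : "non-null pointer") << "\n";
}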
