diff --git a/src/operator/nn/concat.cc b/src/operator/nn/concat.cc index 9e016bf884f2..c75bc8d29609 100644 --- a/src/operator/nn/concat.cc +++ b/src/operator/nn/concat.cc @@ -196,14 +196,14 @@ inline static bool ConcatForwardInferStorageType(const nnvm::NodeAttrs& attrs, dispatched = storage_type_assign(&out_stype, kCSRStorage, dispatch_mode, DispatchMode::kFComputeEx); } -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 if (!dispatched && dev_mask == mshadow::cpu::kDevMask && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage) && param.dim > 0) { dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFComputeEx); } -#endif +#endif // MXNET_USE_MKLDNN == 100 if (!dispatched && common::ContainsOnlyStorage(*in_attrs, kDefaultStorage)) { dispatched = storage_type_assign(&out_stype, kDefaultStorage, dispatch_mode, DispatchMode::kFCompute); @@ -211,10 +211,10 @@ inline static bool ConcatForwardInferStorageType(const nnvm::NodeAttrs& attrs, if (!dispatched) { dispatched = dispatch_fallback(out_attrs, dispatch_mode); } -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 if (!MKLDNNEnvSet()) *dispatch_mode = DispatchMode::kFComputeFallback; -#endif +#endif // MXNET_USE_MKLDNN == 100 return dispatched; } @@ -224,7 +224,7 @@ inline static bool BackwardConcatStorageType(const nnvm::NodeAttrs& attrs, std::vector *in_attrs, std::vector *out_attrs) { DispatchMode wanted_mode; -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 const ConcatParam& param = nnvm::get(attrs.parsed); CHECK_EQ(out_attrs->size(), in_attrs->size() - 1); if (dev_mask == mshadow::cpu::kDevMask @@ -232,16 +232,16 @@ inline static bool BackwardConcatStorageType(const nnvm::NodeAttrs& attrs, && param.dim > 0) wanted_mode = DispatchMode::kFComputeEx; else -#endif +#endif // MXNET_USE_MKLDNN == 100 wanted_mode = DispatchMode::kFCompute; -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 if (!MKLDNNEnvSet()) wanted_mode = DispatchMode::kFComputeFallback; -#endif +#endif // MXNET_USE_MKLDNN == 100 return storage_type_assign(out_attrs, mxnet::kDefaultStorage, dispatch_mode, wanted_mode); } -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 bool SupportMKLDNNConcat(const std::vector &arrs) { for (auto &arr : arrs) { if (arr.IsView()) return false; @@ -249,12 +249,12 @@ bool SupportMKLDNNConcat(const std::vector &arrs) { // DO not support zero-size tensors. 
if (arr.shape().Size() == 0) return false; int ndim = arr.shape().ndim(); - const int mkldnn_ndims = arr.GetMKLDNNData()->get_primitive_desc().desc().data.ndims; + const int mkldnn_ndims = arr.GetMKLDNNData()->get_desc().data.ndims; if (!(ndim == 2 || ndim == 4) || ndim != mkldnn_ndims) return false; } return true; } -#endif +#endif // MXNET_USE_MKLDNN == 100 static void ConcatComputeExCPU(const nnvm::NodeAttrs& attrs, const OpContext& op_ctx, const std::vector& inputs, @@ -267,20 +267,20 @@ static void ConcatComputeExCPU(const nnvm::NodeAttrs& attrs, if (common::ContainsOnlyStorage(inputs, kCSRStorage) && outputs[0].storage_type() == kCSRStorage) { ConcatCSRImpl(attrs, op_ctx, inputs, req, outputs); -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 } else if (SupportMKLDNNConcat(inputs)) { MKLDNN_OPCHECK_INIT(false, outputs.size(), inputs, outputs); MKLDNNConcatForward(attrs, op_ctx, inputs, req, outputs); MKLDNN_OPCHECK_RUN(ConcatCompute, attrs, op_ctx, inputs, req, outputs); } else if (common::ContainsOnlyStorage(inputs, kDefaultStorage)) { FallBackCompute(ConcatCompute, attrs, op_ctx, inputs, req, outputs); -#endif +#endif // MXNET_USE_MKLDNN == 100 } else { LogUnimplementedOp(attrs, op_ctx, inputs, req, outputs); } } -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 static void ConcatGradComputeExCPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector& inputs, @@ -294,7 +294,7 @@ static void ConcatGradComputeExCPU(const nnvm::NodeAttrs& attrs, } FallBackCompute(ConcatGradCompute, attrs, ctx, inputs, req, outputs); } -#endif +#endif // MXNET_USE_MKLDNN == 100 struct ConcatGrad { const char *op_name; @@ -302,11 +302,11 @@ struct ConcatGrad { const std::vector& ograds) const { CHECK_EQ(ograds.size(), 1); std::vector heads(ograds.begin(), ograds.end()); -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 for (size_t i = 0; i < n->inputs.size(); i++) { heads.push_back(n->inputs[i]); } -#endif +#endif // MXNET_USE_MKLDNN == 100 return MakeGradNode(op_name, n, heads, n->attrs.dict); } }; @@ -381,12 +381,12 @@ Example:: [ 5., 5., 8., 8.]] )code" ADD_FILELINE) -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) .set_attr("TIsMKLDNN", true) -#endif +#endif // MXNET_USE_MKLDNN == 100 CONCAT_FORWARD_ATTRS .set_attr("FInferShape", ConcatShape) .add_argument("data", "NDArray-or-Symbol[]", "List of arrays to concatenate") @@ -398,17 +398,17 @@ NNVM_REGISTER_OP(_backward_Concat) return params.num_args; }) .set_attr_parser(ParamParser) -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) -#endif +#endif // MXNET_USE_MKLDNN == 100 .set_attr("TIsBackward", true) .set_attr("FInferStorageType", BackwardConcatStorageType) -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 .set_attr("TIsMKLDNN", true) .set_attr("FComputeEx", ConcatGradComputeExCPU) -#endif +#endif // MXNET_USE_MKLDNN == 100 .set_attr("FCompute", ConcatGradCompute); // _rnn_param_concat is a custom concat op with specialized infer_shape, @@ -416,11 +416,11 @@ NNVM_REGISTER_OP(_backward_Concat) // unknown shape that can be inferred from output shape. 
NNVM_REGISTER_OP(_rnn_param_concat) .add_alias("_npi_rnn_param_concat") -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) -#endif +#endif // MXNET_USE_MKLDNN == 100 CONCAT_FORWARD_ATTRS .set_attr("FInferShape", RNNParamConcatShape) .add_argument("data", "NDArray-or-Symbol[]", "List of arrays to concatenate") diff --git a/src/operator/nn/mkldnn/mkldnn_concat.cc b/src/operator/nn/mkldnn/mkldnn_concat.cc index c8843a575945..b89c7f56daca 100644 --- a/src/operator/nn/mkldnn/mkldnn_concat.cc +++ b/src/operator/nn/mkldnn/mkldnn_concat.cc @@ -101,4 +101,4 @@ void MKLDNNConcatBackward(const nnvm::NodeAttrs& attrs, const OpContext &ctx, } // namespace op } // namespace mxnet -#endif +#endif // MXNET_USE_MKLDNN == 100 diff --git a/src/operator/tensor/elemwise_unary_op_basic.cc b/src/operator/tensor/elemwise_unary_op_basic.cc index 7c35c44305a8..ddcee3159447 100644 --- a/src/operator/tensor/elemwise_unary_op_basic.cc +++ b/src/operator/tensor/elemwise_unary_op_basic.cc @@ -204,7 +204,7 @@ static void CopyEx(const nnvm::NodeAttrs& attrs, const std::vector& outputs) { CHECK_EQ(inputs.size(), 1U); CHECK_EQ(outputs.size(), 1U); -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 const auto in_stype = inputs[0].storage_type(); const auto out_stype = outputs[0].storage_type(); if (inputs[0].IsMKLDNNData()) { @@ -217,7 +217,7 @@ static void CopyEx(const nnvm::NodeAttrs& attrs, FallBackCompute(UnaryOp::IdentityCompute, attrs, ctx, inputs, req, outputs); return; } -#endif +#endif // MXNET_USE_MKLDNN == 100 UnaryOp::IdentityComputeEx(attrs, ctx, inputs, req, outputs); } @@ -230,7 +230,7 @@ static inline bool CopyStorageType(const nnvm::NodeAttrs& attrs, CHECK_EQ(out_attrs->size(), 1); bool ret = ElemwiseStorageType<1, 1, false, true, true>(attrs, dev_mask, dispatch_mode, in_attrs, out_attrs); -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 // We have to make sure all inputs are default layouts. Otherwise, we might // want to fallback. 
if (dev_mask == mshadow::cpu::kDevMask @@ -238,7 +238,7 @@ static inline bool CopyStorageType(const nnvm::NodeAttrs& attrs, && out_attrs->at(0) == kDefaultStorage) { *dispatch_mode = DispatchMode::kFComputeEx; } -#endif +#endif // MXNET_USE_MKLDNN == 100 return ret; } @@ -248,12 +248,12 @@ MXNET_OPERATOR_REGISTER_UNARY(_copy) .set_attr("FInferStorageType", CopyStorageType) .set_attr("FCompute", UnaryOp::IdentityCompute) .set_attr("FComputeEx", CopyEx) -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; }) .set_attr("TIsMKLDNN", true) -#endif +#endif // MXNET_USE_MKLDNN == 100 .set_attr("FInplaceIdentity", [](const NodeAttrs& attrs){ return std::vector{true}; @@ -271,11 +271,11 @@ NNVM_REGISTER_OP(_backward_copy) .set_attr("FInferStorageType", CopyStorageType) .set_attr("FCompute", UnaryOp::IdentityCompute) .set_attr("FComputeEx", CopyEx) -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 .set_attr("TIsMKLDNN", true) .set_attr("FResourceRequest", [](const NodeAttrs& n) { return std::vector{ResourceRequest::kTempSpace}; -}) +}) // MXNET_USE_MKLDNN == 100 #endif .set_attr("FInplaceIdentity", [](const NodeAttrs& attrs){ diff --git a/tests/cpp/include/test_core_op.h b/tests/cpp/include/test_core_op.h index 87df39a2754d..286496108128 100644 --- a/tests/cpp/include/test_core_op.h +++ b/tests/cpp/include/test_core_op.h @@ -79,7 +79,7 @@ class CoreOpExecutor : public test::op::OperatorDataInitializer keys.emplace_back(i_iter->first.c_str()); values.emplace_back(i_iter->second.c_str()); } - return imperative::ParseAttrs(op, op->num_inputs, count, &keys[0], &values[0]); + return imperative::ParseAttrs(op, op->num_inputs, count, keys.data(), values.data()); } /*! 
diff --git a/tests/cpp/include/test_mkldnn.h b/tests/cpp/include/test_mkldnn.h index f1682772a14a..f425ba0fa1df 100644 --- a/tests/cpp/include/test_mkldnn.h +++ b/tests/cpp/include/test_mkldnn.h @@ -26,7 +26,7 @@ #ifndef TEST_MKLDNN_H_ #define TEST_MKLDNN_H_ -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 #include #include @@ -37,29 +37,29 @@ using namespace mxnet; -inline static mkldnn::memory::primitive_desc GetMemPD(const mxnet::TShape s, int dtype, - mkldnn::memory::format format) { +inline static mkldnn::memory::desc GetMemDesc(const mxnet::TShape s, const int dtype, + const mkldnn::memory::format_tag format_tag) { mkldnn::memory::dims dims(s.ndim()); for (size_t i = 0; i < dims.size(); i++) dims[i] = s[i]; - mkldnn::memory::desc desc{dims, get_mkldnn_type(dtype), format}; - return mkldnn::memory::primitive_desc(desc, CpuEngine::Get()->get_engine()); + mkldnn::memory::desc desc{dims, get_mkldnn_type(dtype), format_tag}; + return desc; } -inline static mkldnn::memory::primitive_desc GetExpandedMemPD( - mkldnn::memory::primitive_desc pd, float scale, int dim = 0) { - CHECK(dim < pd.desc().data.ndims) << "dimension cannot be larger than total dimensions of input"; - mxnet::TShape s(pd.desc().data.ndims, -1); - for (size_t i = 0; i < pd.desc().data.ndims; i++) - s[i] = pd.desc().data.dims[i]; - s[dim] = static_cast(s[dim] * scale); - return GetMemPD(s, mshadow::DataType::kFlag, - static_cast(pd.desc().data.format)); +inline static mkldnn::memory::desc GetExpandedMemDesc( + mkldnn::memory::desc md, const float scale, const int dim = 0) { + CHECK(dim < md.data.ndims) << "dimension cannot be larger than total dimensions of input"; + mxnet::TShape s(md.data.ndims, -1); + for (size_t i = 0; i < md.data.ndims; i++) + s[i] = md.data.dims[i]; + s[dim] = static_cast(s[dim] * scale); + return GetMemDesc(s, mshadow::DataType::kFlag, + static_cast(GetDefaultFormat(md))); } struct TestArrayShapes { std::vector shapes; - std::vector pds; + std::vector mds; }; // Init arrays with the default layout. @@ -78,17 +78,17 @@ inline static void InitDefaultArray(NDArray *arr, bool is_rand = false) { // Init arrays with the specified layout. -inline static void InitMKLDNNArray(NDArray *arr, const mkldnn::memory::primitive_desc &pd, - bool is_rand = false) { +inline static void InitMKLDNNArray(NDArray *arr, const mkldnn::memory::desc &desc, + bool is_rand = false) { InitDefaultArray(arr, is_rand); - arr->MKLDNNDataReorderAsync(pd); + arr->MKLDNNDataReorderAsync(desc); arr->WaitToRead(); } -inline static bool IsSameShape(mkldnn::memory::primitive_desc pd, mxnet::TShape shape) { - if (pd.desc().data.ndims != shape.ndim()) return false; +inline static bool IsSameShape(const mkldnn::memory::desc &desc, const mxnet::TShape &shape) { + if (desc.data.ndims != shape.ndim()) return false; for (size_t i = 0; i < shape.ndim(); i++) - if (pd.desc().data.dims[i] != shape[i]) return false; + if (desc.data.dims[i] != shape[i]) return false; return true; } @@ -97,81 +97,81 @@ inline static bool IsSameShape(mkldnn::memory::primitive_desc pd, mxnet::TShape // it's specific for certain array shapes. It covers at least one special format // for each of the formats: nchw, oihw, goihw. // To test the logic of the code in NDArray, these formats should be enough. 
-inline static std::vector GetMKLDNNFormat(size_t num_dims, int dtype) { +inline static std::vector GetMKLDNNFormat(size_t num_dims, int dtype) { if (num_dims == 4) { mkldnn::memory::dims data_dims{1, 3, 224, 224}; mkldnn::memory::desc data_md{data_dims, get_mkldnn_type(dtype), - mkldnn::memory::format::any}; + mkldnn::memory::format_tag::any}; mkldnn::memory::dims weight_dims{96, 3, 11, 11}; mkldnn::memory::desc weight_md{weight_dims, get_mkldnn_type(dtype), - mkldnn::memory::format::any}; + mkldnn::memory::format_tag::any}; mkldnn::memory::dims output_dims{1, 96, 54, 54}; mkldnn::memory::desc out_md{output_dims, get_mkldnn_type(dtype), - mkldnn::memory::format::any}; + mkldnn::memory::format_tag::any}; mkldnn::memory::dims strides{4, 4}; mkldnn::memory::dims padding{0, 0}; mkldnn::convolution_forward::desc desc(mkldnn::prop_kind::forward_training, mkldnn::algorithm::convolution_direct, data_md, weight_md, out_md, strides, - padding, padding, mkldnn::padding_kind::zero); + padding, padding); mkldnn::convolution_forward::primitive_desc pd(desc, CpuEngine::Get()->get_engine()); - while (pd.dst_primitive_desc().get_size() != GetMemDescSize(out_md) || - pd.src_primitive_desc().get_size() != GetMemDescSize(data_md) || - pd.weights_primitive_desc().get_size() != GetMemDescSize(weight_md)) { + while (pd.dst_desc().get_size() != GetMemDescSize(out_md) || + pd.src_desc().get_size() != GetMemDescSize(data_md) || + pd.weights_desc().get_size() != GetMemDescSize(weight_md)) { CHECK(pd.next_impl()) << "No implementation"; } - std::vector ret(1); - ret[0] = static_cast(pd.dst_primitive_desc().desc().data.format); - printf("format: %d \n", ret[0]); + std::vector ret(1); + ret[0] = static_cast(GetDefaultFormat(pd.dst_desc())); + printf("format: %d \n", static_cast(ret[0])); return ret; } else if (num_dims == 5) { mkldnn::memory::dims data_dims{1, 32, 112, 112}; mkldnn::memory::desc data_md{data_dims, get_mkldnn_type(dtype), - mkldnn::memory::format::any}; + mkldnn::memory::format_tag::any}; mkldnn::memory::dims weight_dims{32, 1, 1, 3, 3}; mkldnn::memory::desc weight_md{weight_dims, get_mkldnn_type(dtype), - mkldnn::memory::format::any}; + mkldnn::memory::format_tag::any}; mkldnn::memory::dims output_dims{1, 32, 112, 112}; mkldnn::memory::desc out_md{output_dims, get_mkldnn_type(dtype), - mkldnn::memory::format::any}; + mkldnn::memory::format_tag::any}; mkldnn::memory::dims strides{1, 1}; mkldnn::memory::dims padding{1, 1}; mkldnn::convolution_forward::desc desc(mkldnn::prop_kind::forward_training, mkldnn::algorithm::convolution_direct, data_md, weight_md, out_md, strides, - padding, padding, mkldnn::padding_kind::zero); + padding, padding); mkldnn::convolution_forward::primitive_desc pd(desc, CpuEngine::Get()->get_engine()); - while (pd.dst_primitive_desc().get_size() != GetMemDescSize(out_md) || - pd.src_primitive_desc().get_size() != GetMemDescSize(data_md) || - pd.weights_primitive_desc().get_size() != GetMemDescSize(weight_md)) { + while (pd.dst_desc().get_size() != GetMemDescSize(out_md) || + pd.src_desc().get_size() != GetMemDescSize(data_md) || + pd.weights_desc().get_size() != GetMemDescSize(weight_md)) { CHECK(pd.next_impl()) << "No implementation"; } - std::vector ret(1); - ret[0] = static_cast(pd.weights_primitive_desc().desc().data.format); - printf("format: %d\n", ret[0]); + std::vector ret(1); + ret[0] = static_cast(GetDefaultFormat(pd.weights_desc())); + printf("format: %d\n", static_cast(ret[0])); return ret; } else { - return std::vector(); + return std::vector(); } } inline static 
TestArrayShapes GetTestArrayShapes(bool spatial_data_format = false) { int dtype = mshadow::DataType::kFlag; mxnet::ShapeVector shapes; - std::vector pds; + std::vector mds; { // 1D mxnet::TShape s(1, -1); s[0] = 279936; shapes.push_back(s); - pds.push_back(GetMemPD(s, dtype, mkldnn::memory::format::x)); + mds.push_back(GetMemDesc(s, dtype, mkldnn::memory::format_tag::x)); s[0] = 34848; shapes.push_back(s); - pds.push_back(GetMemPD(s, dtype, mkldnn::memory::format::x)); + mds.push_back(GetMemDesc(s, dtype, mkldnn::memory::format_tag::x)); } { // 2D @@ -179,27 +179,27 @@ inline static TestArrayShapes GetTestArrayShapes(bool spatial_data_format = fals s[0] = 96; s[1] = 2916; shapes.push_back(s); - pds.push_back(GetMemPD(s, dtype, mkldnn::memory::format::nc)); + mds.push_back(GetMemDesc(s, dtype, mkldnn::memory::format_tag::nc)); s[0] = 96; s[1] = 363; shapes.push_back(s); - pds.push_back(GetMemPD(s, dtype, mkldnn::memory::format::nc)); + mds.push_back(GetMemDesc(s, dtype, mkldnn::memory::format_tag::nc)); } { // 4D mxnet::TShape s1(4, -1); s1[0] = 10; s1[1] = 96; s1[2] = 54; s1[3] = 54; shapes.push_back(s1); - pds.push_back(GetMemPD(s1, dtype, mkldnn::memory::format::nchw)); + mds.push_back(GetMemDesc(s1, dtype, mkldnn::memory::format_tag::nchw)); mxnet::TShape s2(4, -1); s2[0] = 96; s2[1] = 3; s2[2] = 11; s2[3] = 11; shapes.push_back(s2); - pds.push_back(GetMemPD(s2, dtype, mkldnn::memory::format::oihw)); + mds.push_back(GetMemDesc(s2, dtype, mkldnn::memory::format_tag::oihw)); - std::vector formats = GetMKLDNNFormat(4, dtype); + std::vector formats = GetMKLDNNFormat(4, dtype); if (!spatial_data_format) { - pds.push_back(GetMemPD(s1, dtype, formats[0])); + mds.push_back(GetMemDesc(s1, dtype, formats[0])); } } { @@ -207,17 +207,17 @@ inline static TestArrayShapes GetTestArrayShapes(bool spatial_data_format = fals mxnet::TShape s(5, -1); s[0] = 96; s[1] = 1; s[2] = 3; s[3] = 11; s[4] = 11; shapes.push_back(s); - pds.push_back(GetMemPD(s, dtype, mkldnn::memory::format::goihw)); + mds.push_back(GetMemDesc(s, dtype, mkldnn::memory::format_tag::goihw)); - std::vector formats = GetMKLDNNFormat(5, dtype); + std::vector formats = GetMKLDNNFormat(5, dtype); if (!spatial_data_format) { - pds.push_back(GetMemPD(s, dtype, formats[0])); + mds.push_back(GetMemDesc(s, dtype, formats[0])); } } TestArrayShapes ret; ret.shapes = shapes; - ret.pds = pds; + ret.mds = mds; return ret; } @@ -266,7 +266,7 @@ inline NDArray CreateKernelNDArray(mxnet::TShape kernel, int num_filters, mxnet: target_shape[3] = kernel[1]; int dtype = mshadow::DataType::kFlag; NDArray arr(target_shape, Context()); - auto pd = GetMemPD(target_shape, dtype, mkldnn::memory::format::nchw); + auto pd = GetMemDesc(target_shape, dtype, mkldnn::memory::format_tag::nchw); InitMKLDNNArray(&arr, pd); return arr; } @@ -274,7 +274,7 @@ inline NDArray CreateKernelNDArray(mxnet::TShape kernel, int num_filters, mxnet: inline NDArray CreateBiasNDArray(mxnet::TShape target_shape) { int dtype = mshadow::DataType::kFlag; NDArray arr(target_shape, Context()); - auto pd = GetMemPD(target_shape, dtype, mkldnn::memory::format::x); + auto pd = GetMemDesc(target_shape, dtype, mkldnn::memory::format_tag::x); InitMKLDNNArray(&arr, pd); return arr; } @@ -333,10 +333,10 @@ inline std::vector GetTestInputArrays( std::vector scale = {1}, bool spatial_data_format = false) { TestArrayShapes tas = GetTestArrayShapes(spatial_data_format); std::vector shapes = tas.shapes; - std::vector pds = tas.pds; + std::vector mds = tas.mds; std::vector in_arrs; - std::string desc; + 
std::string desc_str; int slice_amount = scale[0]; for (auto shape : shapes) { @@ -362,60 +362,60 @@ inline std::vector GetTestInputArrays( } - for (auto pd : pds) { + for (auto md : mds) { for (size_t dim = 0; dim < scale.size(); ++dim) { // preserve if matching layout else just expand on 0 dim - if (shape.ndim() == pd.desc().data.ndims) - pd = GetExpandedMemPD(pd, scale[dim], dim); + if (shape.ndim() == md.data.ndims) + md = GetExpandedMemDesc(md, scale[dim], dim); else - pd = GetExpandedMemPD(pd, scale[dim]); + md = GetExpandedMemDesc(md, scale[dim]); } - if (shape.Size() != pd.get_size() / sizeof(mshadow::default_real_t)) + if (shape.Size() != md.get_size() / sizeof(mshadow::default_real_t)) continue; // Type 2, 3. arr = NDArray(shape, Context()); - if (shape.ndim() == pd.desc().data.ndims && IsSameShape(pd, shape) + if (shape.ndim() == md.data.ndims && IsSameShape(md, shape) && types & ArrayTypes::MKLDNN) { - desc = "MKLDNN NDArray"; - InitMKLDNNArray(&arr, pd, rand); - in_arrs.emplace_back(arr, desc); - } else if (shape.ndim() == pd.desc().data.ndims && !IsSameShape(pd, shape) + desc_str = "MKLDNN NDArray"; + InitMKLDNNArray(&arr, md, rand); + in_arrs.emplace_back(arr, desc_str); + } else if (shape.ndim() == md.data.ndims && !IsSameShape(md, shape) && types & ArrayTypes::MKLDNNDiffShape) { - desc = "MKLDNN NDArray with different shape"; - InitMKLDNNArray(&arr, pd, rand); - in_arrs.emplace_back(arr, desc); - } else if (shape.ndim() != pd.desc().data.ndims && types & ArrayTypes::MKLDNNDiffDim) { + desc_str = "MKLDNN NDArray with different shape"; + InitMKLDNNArray(&arr, md, rand); + in_arrs.emplace_back(arr, desc_str); + } else if (shape.ndim() != md.data.ndims && types & ArrayTypes::MKLDNNDiffDim) { std::stringstream ss; ss << "MKLDNN NDArray with different dim " << - shape.ndim() << "/" << pd.desc().data.ndims; - desc = ss.str(); - InitMKLDNNArray(&arr, pd, rand); - in_arrs.emplace_back(arr, desc); + shape.ndim() << "/" << md.data.ndims; + desc_str = ss.str(); + InitMKLDNNArray(&arr, md, rand); + in_arrs.emplace_back(arr, desc_str); } // Type 5, 6. 
arr = NDArray(shape, Context()); - if (shape.ndim() == pd.desc().data.ndims && IsSameShape(pd, shape) + if (shape.ndim() == md.data.ndims && IsSameShape(md, shape) && types & ArrayTypes::MKLDNNReshaped) { - desc = "Reshaped MKLDNN NDArray"; - InitMKLDNNArray(&arr, pd, rand); - in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc); - } else if (shape.ndim() == pd.desc().data.ndims && !IsSameShape(pd, shape) + desc_str = "Reshaped MKLDNN NDArray"; + InitMKLDNNArray(&arr, md, rand); + in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc_str); + } else if (shape.ndim() == md.data.ndims && !IsSameShape(md, shape) && types & ArrayTypes::MKLDNNReshapedDiffShape) { - desc = "Reshaped MKLDNN NDArray with different shape"; - InitMKLDNNArray(&arr, pd, rand); - in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc); - } else if (shape.ndim() != pd.desc().data.ndims + desc_str = "Reshaped MKLDNN NDArray with different shape"; + InitMKLDNNArray(&arr, md, rand); + in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc_str); + } else if (shape.ndim() != md.data.ndims && types & ArrayTypes::MKLDNNReshapedDiffDim) { std::stringstream ss; ss << "MKLDNN NDArray with different dim " << - shape.ndim() << "/" << pd.desc().data.ndims; - desc = ss.str(); - InitMKLDNNArray(&arr, pd, rand); - in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc); + shape.ndim() << "/" << md.data.ndims; + desc_str = ss.str(); + InitMKLDNNArray(&arr, md, rand); + in_arrs.emplace_back(arr.Slice(slice_amount, arr.shape()[0] - slice_amount), desc_str); } } } @@ -444,7 +444,7 @@ inline std::vector GetTestInputArrays( */ inline std::vector GetTestOutputArrays( const mxnet::TShape &shp, - const std::vector &pds, + const std::vector &mds, std::vectorscale = {1}, bool rand = true, int types = ArrayTypes::All) { mxnet::TShape shape = shp; @@ -452,7 +452,7 @@ inline std::vector GetTestOutputArrays( shape[dim] = static_cast(shape[dim] * scale[dim]); std::vector in_arrs; - std::string desc; + std::string desc_str; // Type 1. NDArray arr(shape, Context()); @@ -500,30 +500,30 @@ inline std::vector GetTestOutputArrays( in_arrs.emplace_back(arr3.Slice(1, shape[0] + 1), "Reused+Reshaped NDArray"); } - for (auto pd : pds) { - if (shape.Size() != pd.get_size() / sizeof(mshadow::default_real_t)) + for (auto md : mds) { + if (shape.Size() != md.get_size() / sizeof(mshadow::default_real_t)) continue; - if (scale.size() > pd.desc().data.ndims) + if (scale.size() > md.data.ndims) continue; for (int dim = 0; dim < scale.size(); dim++) - pd = GetExpandedMemPD(pd, scale[dim]); + md = GetExpandedMemDesc(md, scale[dim]); // Type 2, 3. 
arr = NDArray(shape, Context()); - desc = "MKLDNN NDArray"; - if (shape.ndim() != pd.desc().data.ndims) { + desc_str = "MKLDNN NDArray"; + if (shape.ndim() != md.data.ndims) { std::stringstream ss; ss << "MKLDNN NDArray with different memory layout " - << shape.ndim() << "/" << pd.desc().data.ndims; - desc = ss.str(); + << shape.ndim() << "/" << md.data.ndims; + desc_str = ss.str(); } - if ((types & ArrayTypes::MKLDNN && shape.ndim() == pd.desc().data.ndims) || - (types & ArrayTypes::MKLDNNDiffDim && shape.ndim() != pd.desc().data.ndims)) { - in_arrs.emplace_back(arr, desc); - InitMKLDNNArray(&in_arrs.back().arr, pd, rand); + if ((types & ArrayTypes::MKLDNN && shape.ndim() == md.data.ndims) || + (types & ArrayTypes::MKLDNNDiffDim && shape.ndim() != md.data.ndims)) { + in_arrs.emplace_back(arr, desc_str); + InitMKLDNNArray(&in_arrs.back().arr, md, rand); } // Type 8, 9. @@ -532,18 +532,18 @@ inline std::vector GetTestOutputArrays( s[0] = shape.Size(); NDArray arr = NDArray(s, Context()); arr = arr.AsArray(shape, arr.dtype()); - InitMKLDNNArray(&arr, pd, rand); - desc = "Reused MKLDNN NDArray"; - if (shape.ndim() != pd.desc().data.ndims) { + InitMKLDNNArray(&arr, md, rand); + desc_str = "Reused MKLDNN NDArray"; + if (shape.ndim() != md.data.ndims) { std::stringstream ss; ss << "Reused MKLDNN NDArray with different memory layout " - << shape.ndim() << "/" << pd.desc().data.ndims; - desc = ss.str(); + << shape.ndim() << "/" << md.data.ndims; + desc_str = ss.str(); } - if ((types & ArrayTypes::MKLDNNReused && shape.ndim() == pd.desc().data.ndims) || - (types & ArrayTypes::MKLDNNReusedDiffDim && shape.ndim() != pd.desc().data.ndims)) { - in_arrs.emplace_back(arr, desc); + if ((types & ArrayTypes::MKLDNNReused && shape.ndim() == md.data.ndims) || + (types & ArrayTypes::MKLDNNReusedDiffDim && shape.ndim() != md.data.ndims)) { + in_arrs.emplace_back(arr, desc_str); } } return in_arrs; @@ -581,9 +581,9 @@ using VerifyFunc = std::function &in_arrs, const std::vector &out_arrs)>; inline void VerifyAddRequest(const std::vector &in_arrs, - const std::vector &original_outputs, - const std::vector &new_outputs, - VerifyFunc verify_fn) { + const std::vector &original_outputs, + const std::vector &new_outputs, + VerifyFunc verify_fn) { CHECK(original_outputs.size() == new_outputs.size()); std::vector tmp_outputs; NDArray tmp; @@ -596,7 +596,7 @@ inline void VerifyAddRequest(const std::vector &in_arrs, } inline void VerifyCopyResult(const std::vector &in_arrs, - const std::vector &out_arrs) { + const std::vector &out_arrs) { NDArray tmp1 = in_arrs[0]->Reorder2Default(); NDArray tmp2 = out_arrs[0]->Reorder2Default(); EXPECT_EQ(tmp1.shape().Size(), tmp2.shape().Size()); @@ -607,7 +607,7 @@ inline void VerifyCopyResult(const std::vector &in_arrs, } inline void VerifySumResult(const std::vector &in_arrs, - const std::vector &out_arrs) { + const std::vector &out_arrs) { NDArray in1 = in_arrs[0]->Reorder2Default(); NDArray in2 = in_arrs[1]->Reorder2Default(); NDArray out = out_arrs[0]->Reorder2Default(); @@ -621,5 +621,5 @@ inline void VerifySumResult(const std::vector &in_arrs, ASSERT_EQ(d1[i] + d2[i], o[i]); } -#endif // MXNET_USE_MKLDNN +#endif // MXNET_USE_MKLDNN == 100 #endif // TEST_MKLDNN_H_ diff --git a/tests/cpp/include/test_op.h b/tests/cpp/include/test_op.h index 67d98c4457e1..172c162e6f15 100644 --- a/tests/cpp/include/test_op.h +++ b/tests/cpp/include/test_op.h @@ -153,9 +153,9 @@ struct OpInfo { /*! \brief The operator data */ std::shared_ptr< OperatorExecutor > executor_; /*! 
\brief The operator prop class */ - std::shared_ptr prop_; + std::shared_ptr prop_; /*! \brief The input type(s) */ - std::vector in_type_; + std::vector in_type_; }; /*! \brief Pair of op info objects, generally for validating ops against each other */ diff --git a/tests/cpp/kvstore/gpu_topology_test.cc b/tests/cpp/kvstore/gpu_topology_test.cc index 29afb16bdc5b..d26894c21ea7 100644 --- a/tests/cpp/kvstore/gpu_topology_test.cc +++ b/tests/cpp/kvstore/gpu_topology_test.cc @@ -23,6 +23,8 @@ * \brief gpu topology tests */ +#if MXNET_USE_CUDA + #include #include #include @@ -670,3 +672,5 @@ TEST(GpuTopology, TestKernighanLin2) { << " not equal neither: " << 0 << " nor: " << P.size() << "."; } + +#endif // MXNET_USE_CUDA diff --git a/tests/cpp/operator/batchnorm_test.cc b/tests/cpp/operator/batchnorm_test.cc index ed0e70b831f1..770940dc3775 100644 --- a/tests/cpp/operator/batchnorm_test.cc +++ b/tests/cpp/operator/batchnorm_test.cc @@ -710,12 +710,12 @@ static constexpr size_t CYCLE_COUNT = 3; template static test::op::OpInfoPair testForwardAndBackward( - const bool isGPU1, - const bool isGPU2, - const mxnet::TShape &inputShape, - const test::op::kwargs_t& kwargs, - const size_t count = 1, - const size_t cycleCount = CYCLE_COUNT) { + const bool isGPU1, + const bool isGPU2, + const mxnet::TShape &inputShape, + const test::op::kwargs_t& kwargs, + const size_t count = 1, + const size_t cycleCount = CYCLE_COUNT) { test::op::OpInfo info_1 = TestBatchNormOperatorForward(isGPU1, inputShape, kwargs, count); @@ -1014,14 +1014,14 @@ TEST(BATCH_NORM, TestTiming_2D) { } MSHADOW_REAL_TYPE_SWITCH_EX( mshadow::kFloat32, DType, AccReal, { -#if MXNET_USE_MKLDNN +#if MXNET_USE_MKLDNN == 100 // MKL timingTest>( "MKL BatchNormProp 2D", false, false, blank_kwargs_nocudnn, 2, THISCOUNT); -#endif +#endif // MXNET_USE_MKLDNN == 100 // CPU test::ScopeSet disableMKL(&mxnet::op::batchnorm::disable_mkl, true); timingTest>( diff --git a/tests/cpp/operator/mkldnn_operator_test.cc b/tests/cpp/operator/mkldnn_operator_test.cc index 961785dcfc87..69ddcba79700 100644 --- a/tests/cpp/operator/mkldnn_operator_test.cc +++ b/tests/cpp/operator/mkldnn_operator_test.cc @@ -23,7 +23,7 @@ * \author Alex Zai */ -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 #include #include @@ -458,7 +458,7 @@ void VerifyConcatResult(const std::vector &in_arrs, } void VerifyConcatBackwardsResult(const std::vector &in_arrs, - const std::vector &out_arrs) { + const std::vector &out_arrs) { // in_arrs is larger array, out_arr is ammler int num_inputs = out_arrs.size(); int input_size = out_arrs[0]->shape().Size(); @@ -491,7 +491,7 @@ void TestOp(const OpAttrs &attrs, VerifyFunc verify_fn) { std::vector dispatches = attrs.dispatches; TestArrayShapes tas = GetTestArrayShapes(); - std::vector pds = tas.pds; + std::vector mds = tas.mds; if (attrs.requests.find(OpReqType::kWriteTo) != attrs.requests.end()) { std::vector in_arrs = GetTestInputArrays(); @@ -499,7 +499,7 @@ void TestOp(const OpAttrs &attrs, VerifyFunc verify_fn) { for (auto &dispatch : dispatches) { std::vector> out_arrs(attrs.num_outputs); for (int i = 0; i < attrs.num_outputs; i++) - out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), pds); + out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), mds); for (int i = 0; i < attrs.num_inputs; i++) inputs[i] = &in_arr.arr; for (size_t output_i = 0; output_i < out_arrs[0].size(); output_i++) { @@ -549,7 +549,7 @@ void TestOp(const OpAttrs &attrs, VerifyFunc verify_fn) { for (auto &in_arr : in_arrs) { for (auto &dispatch : dispatches) { 
for (int i = 0; i < attrs.num_outputs; i++) - out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), pds); + out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), mds); for (size_t i = 0; i < attrs.num_inputs; i++) inputs[i] = &in_arr.arr; for (size_t output_i = 0; output_i < out_arrs[0].size(); output_i++) { @@ -573,14 +573,14 @@ void TestOp(const OpAttrs &attrs, VerifyFunc verify_fn) { } void TestConcatOp(const OpAttrs &attrs, VerifyFunc verify_fn, - bool backwards = false) { + bool backwards = false) { std::vector inputs(attrs.num_inputs); std::vector outputs(attrs.num_outputs); std::vector req(attrs.num_outputs); std::vector dispatches = attrs.dispatches; TestArrayShapes tas = GetTestArrayShapes(); - std::vector pds = tas.pds; + std::vector mds = tas.mds; std::vector in_arrs = GetTestInputArrays(); @@ -611,7 +611,7 @@ void TestConcatOp(const OpAttrs &attrs, VerifyFunc verify_fn, scale_vector[i] = 1; scale_vector[dim] = scale; for (int i = 0; i < attrs.num_outputs; i++) - out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), pds, scale_vector); + out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), mds, scale_vector); for (int i = 0; i < attrs.num_inputs; i++) inputs[i] = &in_arr.arr; @@ -678,7 +678,7 @@ void TestOpEx(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs) { std::vector req(forward_attrs.num_outputs); TestArrayShapes tas = GetTestArrayShapes(); - std::vector pds = tas.pds; + std::vector mds = tas.mds; std::vector in_arrs = GetTestInputArrays(forward_attrs.input_types, true); std::vector> out_arrs(forward_attrs.num_outputs); @@ -695,9 +695,9 @@ void TestOpEx(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs) { for (int i = 0; i < forward_attrs.num_outputs; i++) { out_arrs[i] = - GetTestOutputArrays(in_arr.arr.shape(), pds, {1}, forward_attrs.output_types); + GetTestOutputArrays(in_arr.arr.shape(), mds, {1}, forward_attrs.output_types); ex_out_arrs[i] = - GetTestOutputArrays(in_arr.arr.shape(), pds, {1}, forward_attrs.output_types); + GetTestOutputArrays(in_arr.arr.shape(), mds, {1}, forward_attrs.output_types); } for (int i = 0; i < forward_attrs.num_inputs; i++) @@ -806,7 +806,7 @@ void TestOpExBNBackward(const OpAttrs &forward_attrs, Context(), backwards_attrs.attrs, backwards_input, backwards_ex_outputs, backwards_req, DispatchMode::kFComputeEx, mxnet::OpStatePtr()); Engine::Get()->WaitForAll(); - AssertEqual(backwards_outputs, backwards_ex_outputs); + AssertEqual(backwards_outputs, backwards_ex_outputs, 1e-4, 1e-2); } } @@ -821,7 +821,7 @@ void TestOpExBN(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs) { std::vector req(forward_attrs.num_outputs); TestArrayShapes tas = GetTestArrayShapes(); - std::vector pds = tas.pds; + std::vector mds = tas.mds; std::vector in_arrs = GetTestInputArrays(forward_attrs.input_types, false); std::vector> out_arrs(forward_attrs.num_outputs); @@ -837,9 +837,9 @@ void TestOpExBN(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs) { continue; for (int i = 0; i < forward_attrs.num_outputs; i++) { out_arrs[i] = - GetTestOutputArrays(in_arr.arr.shape(), pds, {1}, true, forward_attrs.output_types); + GetTestOutputArrays(in_arr.arr.shape(), mds, {1}, true, forward_attrs.output_types); ex_out_arrs[i] = - GetTestOutputArrays(in_arr.arr.shape(), pds, {1}, true, forward_attrs.output_types); + GetTestOutputArrays(in_arr.arr.shape(), mds, {1}, true, forward_attrs.output_types); } for (size_t output_i = 0; output_i < out_arrs[0].size(); output_i++) { inputs_buffer.clear(); @@ -867,11 +867,11 @@ void 
TestOpExBN(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs) { Context(), forward_attrs.attrs, inputs2, ex_outputs, req, DispatchMode::kFComputeEx, mxnet::OpStatePtr()); Engine::Get()->WaitForAll(); - AssertEqual(outputs, ex_outputs); + AssertEqual(outputs, ex_outputs, 1e-04, 1e-02); if (!backwards_attrs.requests.empty()) { TestOpExBNBackward(forward_attrs, backwards_attrs, OpReqType::kWriteTo, - inputs, outputs, in_arr, &out_arrs[0][output_i]); + inputs, outputs, in_arr, &out_arrs[0][output_i]); } } } @@ -900,7 +900,7 @@ void TestFullyConnectedOp(const OpAttrs &forward_attrs, const OpAttrs &backwards std::vector back_req(backwards_attrs.num_outputs); TestArrayShapes tas = GetTestArrayShapes(); - std::vector pds = tas.pds; + std::vector mds = tas.mds; std::vector in_arrs = GetTestInputArrays(forward_attrs.input_types, true); std::vector> out_arrs(forward_attrs.num_outputs); @@ -937,9 +937,9 @@ void TestFullyConnectedOp(const OpAttrs &forward_attrs, const OpAttrs &backwards for (int i = 0; i < forward_attrs.num_outputs; i++) { out_arrs[i] = - GetTestOutputArrays(out_shape, pds, {1}, forward_attrs.output_types); + GetTestOutputArrays(out_shape, mds, {1}, forward_attrs.output_types); ex_out_arrs[i] = - GetTestOutputArrays(out_shape, pds, {1}, forward_attrs.output_types); + GetTestOutputArrays(out_shape, mds, {1}, forward_attrs.output_types); } for (size_t output_i = 0; output_i < out_arrs[0].size(); output_i++) { @@ -1014,7 +1014,7 @@ void TestConvOp(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs, std::vector dispatches = forward_attrs.dispatches; TestArrayShapes tas = GetTestArrayShapes(); - std::vector pds = tas.pds; + std::vector mds = tas.mds; P param; param.Init(forward_attrs.attrs.dict); @@ -1050,9 +1050,9 @@ void TestConvOp(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs, scale_vector[3] = scale; for (size_t i = 0; i < forward_attrs.num_outputs; ++i) { - out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), pds, + out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), mds, scale_vector, true, forward_attrs.output_types); - ex_out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), pds, + ex_out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), mds, scale_vector, true, forward_attrs.output_types); } NDArray ndkernel = CreateKernelNDArray(kernel, num_filter, in_arr.arr.shape(), is_deconv); @@ -1140,7 +1140,7 @@ void TestPoolingOp(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs) std::vector dispatches = forward_attrs.dispatches; TestArrayShapes tas = GetTestArrayShapes(); - std::vector pds = tas.pds; + std::vector mds = tas.mds; mxnet::op::PoolingParam param; param.Init(forward_attrs.attrs.dict); @@ -1160,7 +1160,7 @@ void TestPoolingOp(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs) if (input_shape.ndim() != kernel.ndim() + 2) continue; // cannot pool if ndarray and mkldnn memory have different ndim - if (in_arr.arr.IsView() || in_arr.arr.GetMKLDNNData()->get_primitive_desc().desc().data.ndims + if (in_arr.arr.IsView() || in_arr.arr.GetMKLDNNData()->get_desc().data.ndims != in_arr.arr.shape().ndim()) continue; std::vector scale_vector(in_arr.arr.shape().ndim()); @@ -1173,8 +1173,8 @@ void TestPoolingOp(const OpAttrs &forward_attrs, const OpAttrs &backwards_attrs) static_cast(input_shape[i]); } for (int i = 0; i < forward_attrs.num_outputs; i++) { - out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), pds, scale_vector); - ex_out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), pds, scale_vector); + out_arrs[i] = 
GetTestOutputArrays(in_arr.arr.shape(), mds, scale_vector); + ex_out_arrs[i] = GetTestOutputArrays(in_arr.arr.shape(), mds, scale_vector); } for (int i = 0; i < forward_attrs.num_inputs; i++) @@ -1353,4 +1353,4 @@ TEST(IMPERATIVE, BNOp) { TestOpExBN(forward_attrs, backwards_attrs); } -#endif +#endif // MXNET_USE_MKLDNN == 100 diff --git a/tests/cpp/operator/mkldnn_test.cc b/tests/cpp/operator/mkldnn_test.cc index ab624e3a3c44..a7b02cc3689e 100644 --- a/tests/cpp/operator/mkldnn_test.cc +++ b/tests/cpp/operator/mkldnn_test.cc @@ -23,7 +23,7 @@ * \author Da Zheng */ -#if MXNET_USE_MKLDNN == 1 +#if MXNET_USE_MKLDNN == 100 #include #include @@ -88,10 +88,10 @@ TEST(MKLDNN_UTIL_FUNC, AlignMem) { } static void VerifyDefMem(const mkldnn::memory &mem) { - mkldnn::memory::primitive_desc pd = mem.get_primitive_desc(); + mkldnn::memory::desc desc = mem.get_desc(); mshadow::default_real_t *data = static_cast(mem.get_data_handle()); - size_t size = pd.get_size() / sizeof(mshadow::default_real_t); + size_t size = desc.get_size() / sizeof(mshadow::default_real_t); size_t num_same = 0; for (int i = 0; i < size; i++) num_same += data[i] == static_cast(i % 100 - 50); @@ -100,29 +100,30 @@ static void VerifyDefMem(const mkldnn::memory &mem) { TEST(MKLDNN_UTIL_FUNC, MemFormat) { // Check whether the number of format is correct. - CHECK_EQ(mkldnn_format_last, 158); - CHECK_EQ(mkldnn_nchw, 7); - CHECK_EQ(mkldnn_oihw, 17); + CHECK_EQ(mkldnn_format_tag_last, 131); + CHECK_EQ(mkldnn_nchw, 5); + CHECK_EQ(mkldnn_oihw, 5); } static void VerifyMem(const mkldnn::memory &mem) { - mkldnn::memory::primitive_desc pd = mem.get_primitive_desc(); + mkldnn::memory::desc desc = mem.get_desc(); + mkldnn::memory::dims dims(desc.data.ndims); + for (size_t i = 0; i < dims.size(); i++) + dims[i] = desc.data.dims[i]; + mkldnn::memory::desc new_desc{dims, + static_cast(desc.data.data_type), + static_cast(GetDefaultFormat(desc))}; - if (pd.desc().data.format == GetDefaultFormat(pd.desc())) { + if (desc == new_desc) { VerifyDefMem(mem); } else { - mkldnn::memory::dims dims(pd.desc().data.ndims); - for (size_t i = 0; i < dims.size(); i++) - dims[i] = pd.desc().data.dims[i]; - mkldnn::memory::desc desc{dims, - static_cast(pd.desc().data.data_type), - static_cast(GetDefaultFormat(pd.desc()))}; - mkldnn::memory::primitive_desc new_pd(desc, CpuEngine::Get()->get_engine()); - mkldnn::memory new_mem(new_pd); + mkldnn::memory* src_mem = const_cast(&mem); + mkldnn::memory new_mem(new_desc, CpuEngine::Get()->get_engine()); - std::vector net; - net.push_back(mkldnn::reorder(mem, new_mem)); - mkldnn::stream(mkldnn::stream::kind::eager).submit(net).wait(); + mkldnn::stream s(CpuEngine::Get()->get_engine()); + mkldnn::reorder(*src_mem, new_mem) + .execute(s, *src_mem, new_mem); + VerifyDefMem(new_mem); } } @@ -130,23 +131,23 @@ static void VerifyMem(const mkldnn::memory &mem) { TEST(MKLDNN_NDArray, GetDataReorder) { TestArrayShapes tas = GetTestArrayShapes(); mxnet::ShapeVector shapes = tas.shapes; - std::vector pds = tas.pds; + std::vector mds = tas.mds; // Reorder from the default to any other layout. 
for (auto s : shapes) { NDArray arr(s, Context()); InitDefaultArray(&arr); - for (auto pd : pds) { - if (s.Size() == pd.get_size() / sizeof(mshadow::default_real_t)) { - const mkldnn::memory *mem = arr.GetMKLDNNDataReorder(pd); + for (auto md : mds) { + if (s.Size() == md.get_size() / sizeof(mshadow::default_real_t)) { + const mkldnn::memory *mem = arr.GetMKLDNNDataReorder(md); printf("reorder from ("); for (size_t i = 0; i < s.ndim(); i++) printf("%ld, ", s[i]); printf(") to ("); - for (int i = 0; i < pd.desc().data.ndims; i++) - printf("%d, ", pd.desc().data.dims[i]); - printf("), format: %d\n", pd.desc().data.format); + for (int i = 0; i < md.data.ndims; i++) + printf("%ld, ", md.data.dims[i]); + printf("), format: %d\n", static_cast(GetDefaultFormat(md))); MKLDNNStream::Get()->Submit(false); VerifyMem(*mem); MKLDNNStream::Get()->Cleanup(); @@ -156,8 +157,8 @@ TEST(MKLDNN_NDArray, GetDataReorder) { // Reorder from a special layout to another layout. for (auto s : shapes) { - for (auto from_pd : pds) { - if (from_pd.get_size() / sizeof(mshadow::default_real_t) == s.Size()) { + for (auto md : mds) { + if (md.get_size() / sizeof(mshadow::default_real_t) == s.Size()) { NDArray arr(s, Context()); // There is possibility that the dimensions of an NDArray doesn't match // with the MKLDNN memory inside. @@ -165,21 +166,20 @@ TEST(MKLDNN_NDArray, GetDataReorder) { for (size_t i = 0; i < s.ndim(); i++) printf("%ld, ", s[i]); printf(") with MKLDNN memory ("); - for (int i = 0; i < from_pd.desc().data.ndims; i++) - printf("%d, ", from_pd.desc().data.dims[i]); - printf("), format: %d\n", from_pd.desc().data.format); - InitMKLDNNArray(&arr, from_pd); - for (auto to_pd : pds) { - if (to_pd.get_size() / sizeof(mshadow::default_real_t) == s.Size()) { - const mkldnn::memory *mem = arr.GetMKLDNNDataReorder(to_pd); + for (int i = 0; i < md.data.ndims; i++) + printf("%ld, ", md.data.dims[i]); + printf("), format: %d\n", static_cast(GetDefaultFormat(md))); + InitMKLDNNArray(&arr, md); + for (auto to_md : mds) { + if (to_md.get_size() / sizeof(mshadow::default_real_t) == s.Size()) { + const mkldnn::memory *mem = arr.GetMKLDNNDataReorder(to_md); printf("reorder from ("); for (size_t i = 0; i < s.ndim(); i++) printf("%ld, ", s[i]); - printf("), format: %d to (", - arr.GetMKLDNNData()->get_primitive_desc().desc().data.format); - for (int i = 0; i < to_pd.desc().data.ndims; i++) - printf("%d, ", to_pd.desc().data.dims[i]); - printf("), format: %d\n", to_pd.desc().data.format); + printf("), format: %d to (", static_cast(GetDefaultFormat(to_md))); + for (int i = 0; i < to_md.data.ndims; i++) + printf("%ld, ", to_md.data.dims[i]); + printf("), format: %d\n", static_cast(GetDefaultFormat(to_md))); MKLDNNStream::Get()->Submit(false); VerifyMem(*mem); MKLDNNStream::Get()->Cleanup(); @@ -194,7 +194,7 @@ TEST(MKLDNN_BASE, MKLDNNSum) { std::vector in_arrs = GetTestInputArrays(); std::vector in_arrs2 = GetTestInputArrays(ArrayTypes::All, true); TestArrayShapes tas = GetTestArrayShapes(); - std::vector pds = tas.pds; + std::vector mds = tas.mds; for (int i = 0; i < in_arrs.size(); i++) { auto in_arr = in_arrs[i]; @@ -204,7 +204,7 @@ TEST(MKLDNN_BASE, MKLDNNSum) { if (in_arr.arr.IsMKLDNNData() && in_arr.arr.IsView()) { continue; } - std::vector out_arrs = GetTestOutputArrays(in_arr.arr.shape(), pds); + std::vector out_arrs = GetTestOutputArrays(in_arr.arr.shape(), mds); for (auto &out_arr : out_arrs) { auto in_mem1 = in_arr.arr.GetMKLDNNData(); auto in_mem2 = in_arr2.arr.GetMKLDNNData(); @@ -232,7 +232,7 @@ TEST(MKLDNN_BASE, 
MKLDNNSum) { NDArrayAttrs orig_arr(in_arr.arr.Copy(in_arr.arr.ctx()), "In Place Copy"); orig_arr.arr.WaitToRead(); PrintVerifyMsg(orig_arr, in_arr); - InitMKLDNNArray(&orig_arr.arr, input_mem->get_primitive_desc()); + InitMKLDNNArray(&orig_arr.arr, input_mem->get_desc()); orig_arr.arr.CopyFrom(*input_mem); op::MKLDNNSum(*input_mem, *input_mem2, *input_mem); MKLDNNStream::Get()->Submit(); @@ -244,7 +244,7 @@ TEST(MKLDNN_BASE, CreateMKLDNNMem) { std::vector in_arrs = GetTestInputArrays(); std::vector in_arrs2 = GetTestInputArrays(ArrayTypes::All, true); TestArrayShapes tas = GetTestArrayShapes(); - std::vector pds = tas.pds; + std::vector mds = tas.mds; MKLDNNStream *stream = MKLDNNStream::Get(); // kWriteTo @@ -256,7 +256,7 @@ TEST(MKLDNN_BASE, CreateMKLDNNMem) { if (in_arr.arr.IsMKLDNNData() && in_arr.arr.IsView()) { continue; } - std::vector out_arrs = GetTestOutputArrays(in_arr.arr.shape(), pds); + std::vector out_arrs = GetTestOutputArrays(in_arr.arr.shape(), mds); for (auto &out_arr : out_arrs) { auto in_mem = in_arr.arr.GetMKLDNNData(); auto in_mem2 = in_arr2.arr.GetMKLDNNData(); @@ -264,7 +264,7 @@ TEST(MKLDNN_BASE, CreateMKLDNNMem) { orig_output.WaitToRead(); PrintVerifyMsg(in_arr, out_arr); auto out_mem = out_arr.arr.GetMKLDNNData(); - auto output_mem_t = CreateMKLDNNMem(out_arr.arr, out_mem->get_primitive_desc(), kWriteTo); + auto output_mem_t = CreateMKLDNNMem(out_arr.arr, out_mem->get_desc(), kWriteTo); op::MKLDNNSum(*in_mem, *in_mem2, *output_mem_t.second); CommitOutput(out_arr.arr, output_mem_t); stream->Submit(); @@ -286,10 +286,10 @@ TEST(MKLDNN_BASE, CreateMKLDNNMem) { NDArrayAttrs orig_arr(in_arr.arr.Copy(in_arr.arr.ctx()), "In Place Copy"); orig_arr.arr.WaitToRead(); PrintVerifyMsg(orig_arr, in_arr); - InitMKLDNNArray(&orig_arr.arr, input_mem->get_primitive_desc()); + InitMKLDNNArray(&orig_arr.arr, input_mem->get_desc()); orig_arr.arr.CopyFrom(*input_mem); auto output_mem_t = CreateMKLDNNMem(in_arr.arr, - input_mem->get_primitive_desc(), kWriteInplace, &in_arr.arr); + input_mem->get_desc(), kWriteInplace, &in_arr.arr); op::MKLDNNSum(*input_mem, *input_mem2, *output_mem_t.second); CommitOutput(in_arr.arr, output_mem_t); stream->Submit(); @@ -305,7 +305,7 @@ TEST(MKLDNN_BASE, CreateMKLDNNMem) { if (in_arr.arr.IsMKLDNNData() && in_arr.arr.IsView()) { continue; } - std::vector out_arrs = GetTestOutputArrays(in_arr.arr.shape(), pds); + std::vector out_arrs = GetTestOutputArrays(in_arr.arr.shape(), mds); for (auto &out_arr : out_arrs) { auto in_mem = in_arr.arr.GetMKLDNNData(); auto in_mem2 = in_arr2.arr.GetMKLDNNData(); @@ -313,7 +313,7 @@ TEST(MKLDNN_BASE, CreateMKLDNNMem) { orig_output.WaitToRead(); PrintVerifyMsg(in_arr, out_arr); auto out_mem = out_arr.arr.GetMKLDNNData(); - auto output_mem_t = CreateMKLDNNMem(out_arr.arr, out_mem->get_primitive_desc(), kAddTo); + auto output_mem_t = CreateMKLDNNMem(out_arr.arr, out_mem->get_desc(), kAddTo); op::MKLDNNSum(*in_mem, *in_mem2, *output_mem_t.second); CommitOutput(out_arr.arr, output_mem_t); stream->Submit(); @@ -336,9 +336,9 @@ TEST(MKLDNN_BASE, CreateMKLDNNMem) { NDArrayAttrs orig_arr(in_arr.arr.Copy(in_arr.arr.ctx()), "In Place Copy"); orig_arr.arr.WaitToRead(); PrintVerifyMsg(orig_arr, in_arr); - InitMKLDNNArray(&orig_arr.arr, input_mem->get_primitive_desc()); + InitMKLDNNArray(&orig_arr.arr, input_mem->get_desc()); orig_arr.arr.CopyFrom(*input_mem); - auto output_mem_t = CreateMKLDNNMem(in_arr.arr, input_mem->get_primitive_desc(), kNullOp); + auto output_mem_t = CreateMKLDNNMem(in_arr.arr, input_mem->get_desc(), kNullOp); 
op::MKLDNNSum(*input_mem, *input_mem2, *output_mem_t.second); CommitOutput(in_arr.arr, output_mem_t); stream->Submit(); @@ -373,8 +373,8 @@ TEST(MKLDNN_NDArray, GetTestInputArraysConcat) { TEST(MKLDNN_NDArray, GetTestOutputArraysConcat) { auto shapes_pds = GetTestArrayShapes(); - std::vector shapes; shapes = shapes_pds.shapes; - std::vector pds = shapes_pds.pds; + std::vector shapes = shapes_pds.shapes; + std::vector mds = shapes_pds.mds; for (auto &shape : shapes) { for (int dim = 0; dim < 5; dim++) { for (int num_inputs = 2; num_inputs < 5; num_inputs++) { @@ -386,7 +386,7 @@ TEST(MKLDNN_NDArray, GetTestOutputArraysConcat) { for (int i = 0; i < shape.ndim(); i++) scale_vector[i] = 1; scale_vector[dim] = num_inputs; - auto output_arrs = GetTestOutputArrays(shape, pds, scale_vector); + auto output_arrs = GetTestOutputArrays(shape, mds, scale_vector); for (auto &out_arr : output_arrs) { auto out_shape = out_arr.arr.shape(); EXPECT_EQ(shape.Size() * num_inputs, out_arr.arr.shape().Size()); @@ -399,13 +399,13 @@ TEST(MKLDNN_NDArray, GetTestOutputArraysConcat) { TEST(MKLDNN_NDArray, CopyFrom) { TestArrayShapes tas = GetTestArrayShapes(); - std::vector pds = tas.pds; + std::vector mds = tas.mds; std::vector in_arrs = GetTestInputArrays(); for (auto &in_arr : in_arrs) { if (in_arr.arr.IsMKLDNNData() && in_arr.arr.IsView()) continue; - std::vector out_arrs = GetTestOutputArrays(in_arr.arr.shape(), pds); + std::vector out_arrs = GetTestOutputArrays(in_arr.arr.shape(), mds); for (auto &out_arr : out_arrs) { const mkldnn::memory *mem = in_arr.arr.GetMKLDNNData(); out_arr.arr.CopyFrom(*mem); @@ -417,4 +417,4 @@ TEST(MKLDNN_NDArray, CopyFrom) { } } -#endif +#endif // MXNET_USE_MKLDNN == 100
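For readers not familiar with the MKL-DNN 1.0 API that the `MXNET_USE_MKLDNN == 100` guards select, below is a minimal standalone sketch of the patterns this patch migrates to: `memory::desc` replacing `memory::primitive_desc`, `memory::format_tag` replacing `memory::format`, `get_desc()` replacing `get_primitive_desc().desc()`, the dropped `padding_kind` argument, and explicit stream-based execution. It is not MXNet code; the shapes, buffers, and `main()` wrapper are illustrative assumptions, and only the API calls mirror the changes in the diff.

```cpp
// Hedged sketch of the MKL-DNN 1.0 API used by this patch (assumes MKL-DNN 1.0 headers).
#include <mkldnn.hpp>

#include <vector>

int main() {
  mkldnn::engine eng(mkldnn::engine::kind::cpu, 0);
  mkldnn::stream strm(eng);

  // memory::primitive_desc is gone in 1.0: a memory::desc plus an engine is
  // enough, and memory::format is replaced by memory::format_tag.
  mkldnn::memory::dims dims = {1, 3, 224, 224};
  mkldnn::memory::desc nchw_md(dims, mkldnn::memory::data_type::f32,
                               mkldnn::memory::format_tag::nchw);
  mkldnn::memory::desc blocked_md(dims, mkldnn::memory::data_type::f32,
                                  mkldnn::memory::format_tag::nChw8c);

  std::vector<float> src_buf(nchw_md.get_size() / sizeof(float), 1.0f);
  mkldnn::memory src(nchw_md, eng, src_buf.data());  // user-provided buffer
  mkldnn::memory dst(blocked_md, eng);                // library-allocated buffer

  // Queries that used to go through get_primitive_desc().desc() now use
  // get_desc() directly, as in SupportMKLDNNConcat above.
  const int ndims = src.get_desc().data.ndims;
  (void)ndims;

  // Primitives take an explicit stream; this replaces the old
  // stream(kind::eager).submit(net).wait() pattern.
  mkldnn::reorder(src, dst).execute(strm, src, dst);
  strm.wait();

  // Convolution descriptors lose the padding_kind argument, and the primitive
  // descriptor exposes plain *_desc() queries instead of *_primitive_desc().
  mkldnn::memory::desc s_md(dims, mkldnn::memory::data_type::f32,
                            mkldnn::memory::format_tag::any);
  mkldnn::memory::desc w_md({8, 3, 3, 3}, mkldnn::memory::data_type::f32,
                            mkldnn::memory::format_tag::any);
  mkldnn::memory::desc d_md({1, 8, 224, 224}, mkldnn::memory::data_type::f32,
                            mkldnn::memory::format_tag::any);
  mkldnn::convolution_forward::desc conv_d(
      mkldnn::prop_kind::forward_training,
      mkldnn::algorithm::convolution_direct,
      s_md, w_md, d_md, /*strides=*/{1, 1}, /*padding_l=*/{1, 1}, /*padding_r=*/{1, 1});
  mkldnn::convolution_forward::primitive_desc conv_pd(conv_d, eng);
  size_t dst_bytes = conv_pd.dst_desc().get_size();  // was dst_primitive_desc().get_size()
  (void)dst_bytes;
  return 0;
}
```

In the MXNet code itself the engine comes from `CpuEngine::Get()->get_engine()`, and the same descriptor-based pattern is what `GetMemDesc`, `InitMKLDNNArray`, and the reorder in `VerifyMem` above rely on.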