From e09c119b35d9bcf54dbcf4ce3cd4c14b27dee81c Mon Sep 17 00:00:00 2001
From: y
Date: Tue, 31 Aug 2021 08:54:52 +0000
Subject: [PATCH 1/2] Add oneDNN support for npx.reshape and np.reshape

---
 src/operator/nn/mkldnn/mkldnn_base-inl.h     |  3 +-
 src/operator/nn/mkldnn/mkldnn_convolution.cc |  2 +-
 src/operator/nn/mkldnn/mkldnn_reshape.cc     | 16 ++++---
 src/operator/numpy/np_matrix_op.cc           |  8 ++++
 .../mkldnn/mkldnn_quantized_flatten.cc       |  6 ++-
 src/operator/tensor/matrix_op-inl.h          | 13 ++++++
 src/operator/tensor/matrix_op.cc             | 42 ++++++++++++-------
 7 files changed, 64 insertions(+), 26 deletions(-)

diff --git a/src/operator/nn/mkldnn/mkldnn_base-inl.h b/src/operator/nn/mkldnn/mkldnn_base-inl.h
index 278075fe929f..88e54cee34cd 100644
--- a/src/operator/nn/mkldnn/mkldnn_base-inl.h
+++ b/src/operator/nn/mkldnn/mkldnn_base-inl.h
@@ -156,7 +156,7 @@ static inline bool SupportMKLDNN(int dtype, const mxnet::TShape& shape) {
          (ndim == 1 || ndim == 2 || ndim == 4);
 }
 
-static inline bool SupportMKLDNNQuantize(int dtype) {
+static inline bool IsMKLDNNType(int dtype) {
   return dtype == mshadow::kFloat32 || dtype == mshadow::kInt8 || dtype == mshadow::kUint8 ||
          dtype == mshadow::kBfloat16;
 }
@@ -218,6 +218,7 @@ bool SupportMKLDNNSoftmaxOutput(const SoftmaxOutputParam& param);
 bool SupportMKLDNNTranspose(const TransposeParam& param, const NDArray& data);
 bool SupportMKLDNNBatchDot(const std::vector<NDArray>& inputs, const NDArray& output);
 bool SupportMKLDNNLayerNorm(const LayerNormParam& param, const std::vector<NDArray>& inputs);
+bool SupportMKLDNNReshape(const NDArray& input, const NDArray& output);
 }  // namespace op
 
 static int GetTypeSize(int dtype) {
diff --git a/src/operator/nn/mkldnn/mkldnn_convolution.cc b/src/operator/nn/mkldnn/mkldnn_convolution.cc
index 7180ebddf645..f059c5ade50f 100644
--- a/src/operator/nn/mkldnn/mkldnn_convolution.cc
+++ b/src/operator/nn/mkldnn/mkldnn_convolution.cc
@@ -39,7 +39,7 @@ DMLC_REGISTER_PARAMETER(MKLDNNConvParam);
 bool SupportMKLDNNConv(const ConvolutionParam& params, const NDArray& input) {
   if ((params.kernel.ndim() != 1) && (params.kernel.ndim() != 2) && (params.kernel.ndim() != 3))
     return false;
-  return SupportMKLDNNQuantize(input.dtype()) &&
+  return IsMKLDNNType(input.dtype()) &&
          ((input.shape().ndim() == 3) || (input.shape().ndim() == 4) ||
           (input.shape().ndim() == 5));
 }
diff --git a/src/operator/nn/mkldnn/mkldnn_reshape.cc b/src/operator/nn/mkldnn/mkldnn_reshape.cc
index 48e7d36a0b1b..b3fa3ce58ed9 100644
--- a/src/operator/nn/mkldnn/mkldnn_reshape.cc
+++ b/src/operator/nn/mkldnn/mkldnn_reshape.cc
@@ -33,6 +33,13 @@
 namespace mxnet {
 namespace op {
 
+bool SupportMKLDNNReshape(const NDArray& input, const NDArray& output) {
+  const int input_ndims = input.shape().ndim();
+  const int output_ndims = output.shape().ndim();
+  return input_ndims >= 1 && input_ndims <= 6 && output_ndims >= 1 && output_ndims <= 6 &&
+         IsMKLDNNType(input.dtype()) && input.shape().Size() > 0;
+}
+
 MKLDNNReshapeFwd::MKLDNNReshapeFwd(const OpReqType& req,
                                    const NDArray& input,
                                    const NDArray& output) {
@@ -121,15 +128,6 @@ void MKLDNNReshapeForward(const nnvm::NodeAttrs& attrs,
                           const NDArray& input,
                           const OpReqType& req,
                           const NDArray& output) {
-  // For mkldnn non-supported input, it shouldn't hold mkldnn memory, so let's simply fallback to
-  // naive implement.
-  const int input_ndims = input.shape().ndim();
-  if ((input_ndims < 1 || input_ndims > 4) || !SupportMKLDNNQuantize(input.dtype())) {
-    if (req != kWriteInplace) {
-      FallBackCompute(UnaryOp::IdentityCompute<cpu>, attrs, ctx, {input}, {req}, {output});
-    }
-    return;
-  }
   if (req == kNullOp)
     return;
   CHECK_NE(req, kAddTo) << "kAddTo is not supported yet";
diff --git a/src/operator/numpy/np_matrix_op.cc b/src/operator/numpy/np_matrix_op.cc
index 53f4f7b433a4..c0d318caf2bd 100644
--- a/src/operator/numpy/np_matrix_op.cc
+++ b/src/operator/numpy/np_matrix_op.cc
@@ -380,6 +380,14 @@ NNVM_REGISTER_OP(_npx_reshape)
     .set_attr<nnvm::FInferType>("FInferType", ElemwiseType<1, 1>)
     .set_attr<nnvm::FGradient>("FGradient", ElemwiseGradUseNone{"_backward_reshape"})
     .set_attr<FCompute>("FCompute<cpu>", UnaryOp::IdentityCompute<cpu>)
+#if MXNET_USE_ONEDNN == 1
+    .set_attr<bool>("TIsMKLDNN", true)
+    .set_attr<FComputeEx>("FComputeEx<cpu>", ReshapeComputeExCPU)
+    .set_attr<FInferStorageType>("FInferStorageType", ReshapeStorageType)
+    .set_attr<FResourceRequest>("FResourceRequest", [](const NodeAttrs& n) {
+      return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};
+    })
+#endif
     .set_attr<nnvm::FInplaceOption>("FInplaceOption",
                                     [](const NodeAttrs& attrs) {
                                       return std::vector<std::pair<int, int> >{{0, 0}};
diff --git a/src/operator/quantization/mkldnn/mkldnn_quantized_flatten.cc b/src/operator/quantization/mkldnn/mkldnn_quantized_flatten.cc
index 18d386c61322..4d6f024d2c01 100644
--- a/src/operator/quantization/mkldnn/mkldnn_quantized_flatten.cc
+++ b/src/operator/quantization/mkldnn/mkldnn_quantized_flatten.cc
@@ -45,7 +45,11 @@ static void MKLDNNQuantizedFlattenForward(const nnvm::NodeAttrs& attrs,
                                           const std::vector<NDArray>& inputs,
                                           const std::vector<OpReqType>& req,
                                           const std::vector<NDArray>& outputs) {
-  MKLDNNRun(MKLDNNReshapeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+  if (SupportMKLDNNReshape(inputs[0], outputs[0])) {
+    MKLDNNRun(MKLDNNReshapeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+  } else {
+    FallBackCompute(UnaryOp::IdentityCompute<cpu>, attrs, ctx, inputs, req, outputs);
+  }
   outputs[1].data().dptr<float>()[0] = inputs[1].data().dptr<float>()[0];
   outputs[2].data().dptr<float>()[0] = inputs[2].data().dptr<float>()[0];
 }
diff --git a/src/operator/tensor/matrix_op-inl.h b/src/operator/tensor/matrix_op-inl.h
index 697d85e8eb1d..a515c018b4e8 100644
--- a/src/operator/tensor/matrix_op-inl.h
+++ b/src/operator/tensor/matrix_op-inl.h
@@ -80,6 +80,19 @@ struct ReshapeParam : public dmlc::Parameter<ReshapeParam> {
   }
 };
 
+#if MXNET_USE_ONEDNN == 1
+bool ReshapeStorageType(const nnvm::NodeAttrs& attrs,
+                        const int dev_mask,
+                        DispatchMode* dispatch_mode,
+                        std::vector<int>* in_attrs,
+                        std::vector<int>* out_attrs);
+void ReshapeComputeExCPU(const nnvm::NodeAttrs& attrs,
+                         const OpContext& ctx,
+                         const std::vector<NDArray>& inputs,
+                         const std::vector<OpReqType>& req,
+                         const std::vector<NDArray>& outputs);
+#endif  // MXNET_USE_ONEDNN == 1
+
 template <typename IType>
 inline mxnet::TShape InferReshapeShape(const mxnet::Tuple<IType>& shape,
                                        const mxnet::TShape& dshape,
diff --git a/src/operator/tensor/matrix_op.cc b/src/operator/tensor/matrix_op.cc
index 958e9a3e35b9..ae00753da4ab 100644
--- a/src/operator/tensor/matrix_op.cc
+++ b/src/operator/tensor/matrix_op.cc
@@ -26,8 +26,9 @@
 #include "./matrix_op-inl.h"
 #include "./elemwise_unary_op.h"
 #if MXNET_USE_ONEDNN == 1
-#include "../nn/mkldnn/mkldnn_ops-inl.h"
 #include "../nn/mkldnn/mkldnn_base-inl.h"
+#include "../nn/mkldnn/mkldnn_ops-inl.h"
+#include "../nn/mkldnn/mkldnn_reshape-inl.h"
 #include "../nn/mkldnn/mkldnn_slice-inl.h"
 #endif
 
@@ -114,24 +115,29 @@ DMLC_REGISTER_PARAMETER(DepthToSpaceParam);
 DMLC_REGISTER_PARAMETER(SplitParam);
 
 #if MXNET_USE_ONEDNN == 1
-static void ReshapeComputeExCPU(const nnvm::NodeAttrs& attrs,
-                                const OpContext& ctx,
-                                const std::vector<NDArray>& inputs,
-                                const std::vector<OpReqType>& req,
-                                const std::vector<NDArray>& outputs) {
+void ReshapeComputeExCPU(const nnvm::NodeAttrs& attrs,
+                         const OpContext& ctx,
+                         const std::vector<NDArray>& inputs,
+                         const std::vector<OpReqType>& req,
+                         const std::vector<NDArray>& outputs) {
   CHECK_EQ(inputs.size(), 1U);
   CHECK_EQ(outputs.size(), 1U);
   // If the input is in MKLDNN format and MKLDNN supports
   // the data type and the shape, convert it to the
   // output format and shape.
-  MKLDNNRun(MKLDNNReshapeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+
+  if (SupportMKLDNNReshape(inputs[0], outputs[0])) {
+    MKLDNNRun(MKLDNNReshapeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+  } else {
+    FallBackCompute(UnaryOp::IdentityCompute<cpu>, attrs, ctx, inputs, req, outputs);
+  }
 }
 
-inline static bool ReshapeStorageType(const nnvm::NodeAttrs& attrs,
-                                      const int dev_mask,
-                                      DispatchMode* dispatch_mode,
-                                      std::vector<int>* in_attrs,
-                                      std::vector<int>* out_attrs) {
+bool ReshapeStorageType(const nnvm::NodeAttrs& attrs,
+                        const int dev_mask,
+                        DispatchMode* dispatch_mode,
+                        std::vector<int>* in_attrs,
+                        std::vector<int>* out_attrs) {
   CHECK_EQ(in_attrs->size(), 1U);
   CHECK_EQ(out_attrs->size(), 1U);
   return MKLDNNStorageType(attrs, dev_mask, true, dispatch_mode, in_attrs, out_attrs);
@@ -228,7 +234,11 @@ static void FlattenEx(const nnvm::NodeAttrs& attrs,
   // If the input is in MKLDNN format and MKLDNN supports
   // the data type and the shape, convert it to the
   // output format and shape.
-  MKLDNNRun(MKLDNNReshapeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+  if (SupportMKLDNNReshape(inputs[0], outputs[0])) {
+    MKLDNNRun(MKLDNNReshapeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+  } else {
+    FallBackCompute(UnaryOp::IdentityCompute<cpu>, attrs, ctx, inputs, req, outputs);
+  }
 }
 
 static inline bool FlattenStorageType(const nnvm::NodeAttrs& attrs,
@@ -394,7 +404,11 @@ static void ExpandDimEx(const nnvm::NodeAttrs& attrs,
   // If the input is in MKLDNN format and MKLDNN supports
   // the data type and the shape, convert it to the
   // output format and shape.
-  MKLDNNRun(MKLDNNReshapeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+  if (SupportMKLDNNReshape(inputs[0], outputs[0])) {
+    MKLDNNRun(MKLDNNReshapeForward, attrs, ctx, inputs[0], req[0], outputs[0]);
+  } else {
+    FallBackCompute(UnaryOp::IdentityCompute<cpu>, attrs, ctx, inputs, req, outputs);
+  }
 }
 
 inline static bool ExpandDimStorageType(const nnvm::NodeAttrs& attrs,

From 6cfdf0d473766bde4385a8b1c9482a4fb5c3be4f Mon Sep 17 00:00:00 2001
From: y
Date: Thu, 2 Sep 2021 16:40:07 +0000
Subject: [PATCH 2/2] Fix SupportMKLDNN function for Convolution and Reshape

---
 src/operator/nn/mkldnn/mkldnn_convolution.cc | 5 ++---
 src/operator/nn/mkldnn/mkldnn_reshape.cc     | 4 ++--
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/src/operator/nn/mkldnn/mkldnn_convolution.cc b/src/operator/nn/mkldnn/mkldnn_convolution.cc
index f059c5ade50f..ef2c57e4a9b8 100644
--- a/src/operator/nn/mkldnn/mkldnn_convolution.cc
+++ b/src/operator/nn/mkldnn/mkldnn_convolution.cc
@@ -37,11 +37,10 @@ namespace op {
 DMLC_REGISTER_PARAMETER(MKLDNNConvParam);
 
 bool SupportMKLDNNConv(const ConvolutionParam& params, const NDArray& input) {
-  if ((params.kernel.ndim() != 1) && (params.kernel.ndim() != 2) && (params.kernel.ndim() != 3))
+  if (params.kernel.ndim() > 3 || params.kernel.ndim() == 0)
     return false;
   return IsMKLDNNType(input.dtype()) &&
-         ((input.shape().ndim() == 3) || (input.shape().ndim() == 4) ||
-          (input.shape().ndim() == 5));
+         input.shape().ndim() >= 3 && input.shape().ndim() <= 5;
 }
 
 std::shared_ptr<MKLDNNConvForward> GetConvFwdImpl(
diff --git a/src/operator/nn/mkldnn/mkldnn_reshape.cc b/src/operator/nn/mkldnn/mkldnn_reshape.cc
index b3fa3ce58ed9..99d64efa148a 100644
--- a/src/operator/nn/mkldnn/mkldnn_reshape.cc
+++ b/src/operator/nn/mkldnn/mkldnn_reshape.cc
@@ -36,8 +36,8 @@ namespace op {
 bool SupportMKLDNNReshape(const NDArray& input, const NDArray& output) {
   const int input_ndims = input.shape().ndim();
   const int output_ndims = output.shape().ndim();
-  return input_ndims >= 1 && input_ndims <= 6 && output_ndims >= 1 && output_ndims <= 6 &&
-         IsMKLDNNType(input.dtype()) && input.shape().Size() > 0;
+  return input.shape().Size() > 0 && input_ndims >= 1 && input_ndims <= 6 && output_ndims >= 1 &&
+         output_ndims <= 6 && IsMKLDNNType(input.dtype());
 }
 
 MKLDNNReshapeFwd::MKLDNNReshapeFwd(const OpReqType& req,
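
Note for reviewers: below is a minimal standalone sketch, not MXNet code, that
mirrors the dispatch rule these two patches converge on: take the oneDNN
reshape path only for a non-empty tensor with 1 to 6 dimensions on both sides
and a oneDNN-supported data type, otherwise fall back. The enum values and
helper names (DType, IsOneDNNType, SupportsOneDNNReshape) are hypothetical
stand-ins for mshadow's dtype constants and the real IsMKLDNNType /
SupportMKLDNNReshape pair.

// Standalone illustration of the support check added by these patches.
#include <cstdint>
#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

// Hypothetical stand-ins for mshadow's dtype enum values.
enum DType { kFloat32, kFloat64, kBfloat16, kInt8, kUint8 };

// Mirrors IsMKLDNNType: oneDNN handles fp32, s8, u8 and bf16.
bool IsOneDNNType(DType dtype) {
  return dtype == kFloat32 || dtype == kInt8 || dtype == kUint8 || dtype == kBfloat16;
}

// Mirrors SupportMKLDNNReshape after patch 2: non-empty tensor,
// 1-6 dims on both input and output, supported data type.
bool SupportsOneDNNReshape(const std::vector<int64_t>& in_shape,
                           const std::vector<int64_t>& out_shape,
                           DType dtype) {
  const int64_t size = std::accumulate(in_shape.begin(), in_shape.end(), int64_t{1},
                                       std::multiplies<int64_t>());
  return size > 0 && in_shape.size() >= 1 && in_shape.size() <= 6 &&
         out_shape.size() >= 1 && out_shape.size() <= 6 && IsOneDNNType(dtype);
}

int main() {
  // (2, 3, 4) -> (6, 4) in fp32 takes the oneDNN path: prints 1.
  std::cout << SupportsOneDNNReshape({2, 3, 4}, {6, 4}, kFloat32) << '\n';
  // fp64 is not a oneDNN type, so the operator would fall back: prints 0.
  std::cout << SupportsOneDNNReshape({2, 3, 4}, {6, 4}, kFloat64) << '\n';
  // A 7-D output exceeds the 6-D limit, also a fallback: prints 0.
  std::cout << SupportsOneDNNReshape({64}, {1, 1, 1, 1, 1, 2, 32}, kFloat32) << '\n';
  return 0;
}

In the real operators (ReshapeComputeExCPU, FlattenEx, ExpandDimEx and the
quantized flatten), a failed check routes through FallBackCompute with
UnaryOp::IdentityCompute<cpu>, so the reshape still happens, just without
oneDNN reorder primitives.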