diff --git a/paddle/fluid/inference/tensorrt/pir/generic_plugin.cu b/paddle/fluid/inference/tensorrt/pir/generic_plugin.cu
index aabaec54a611c9..ca4049adac3432 100644
--- a/paddle/fluid/inference/tensorrt/pir/generic_plugin.cu
+++ b/paddle/fluid/inference/tensorrt/pir/generic_plugin.cu
@@ -704,8 +704,14 @@ int GenericPlugin::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
       phi_kernel_contexts_[data_type]->EmplaceBackAttr(
           attrs_map_[t].dyn_cast<::pir::FloatAttribute>().data());
     } else if (attr_type_name == "pir::DoubleAttribute") {
-      phi_kernel_contexts_[data_type]->EmplaceBackAttr(
-          attrs_map_[t].dyn_cast<::pir::DoubleAttribute>().data());
+      if (attrs_map_[t].type_id() == ::pir::FloatAttribute::type_id()) {
+        const auto val = attrs_map_[t].dyn_cast<::pir::FloatAttribute>().data();
+        phi_kernel_contexts_[data_type]->EmplaceBackAttr(
+            static_cast<double>(val));
+      } else {
+        phi_kernel_contexts_[data_type]->EmplaceBackAttr(
+            attrs_map_[t].dyn_cast<::pir::DoubleAttribute>().data());
+      }
     } else if (attr_type_name == "pir::BoolAttribute") {
       phi_kernel_contexts_[data_type]->EmplaceBackAttr(
           attrs_map_[t].dyn_cast<::pir::BoolAttribute>().data());
diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc
index d6eece67f88a5e..36d42444e8f17b 100644
--- a/paddle/fluid/ir_adaptor/translator/op_translator.cc
+++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc
@@ -4031,6 +4031,43 @@ struct LogitOpTranscriber : public OpTranscriber {
   }
 };
 
+struct Pad3dOpTranscriber : public OpTranscriber {
+  pir::AttributeMap TranslateOpAttribute(
+      pir::IrContext* ctx,
+      const std::string& normalized_op_name,
+      const OpAttributeInfoList& op_attr_infos,
+      const OpDesc& op_desc) override {
+    auto& attribute_translator = AttributeTranslator::instance();
+    auto& op_normalizer = OpNameNormalizer::instance();
+    pir::AttributeMap attribute_map = {};
+
+    for (const auto& info : op_attr_infos) {
+      auto legacy_attr_name =
+          op_normalizer.GetLegacyAttrName(op_desc.Type(), info.name);
+      VLOG(10) << "[op: " << op_desc.Type()
+               << "][attr] from: " << legacy_attr_name << " to: " << info.name;
+      if (op_desc.HasAttr(legacy_attr_name)) {
+        paddle::framework::Attribute legacy_attr =
+            op_desc.GetAttr(legacy_attr_name);
+        VLOG(10) << "attribute in " << op_desc.Type()
+                 << " name: " << legacy_attr_name << " " << legacy_attr.index();
+        pir::Attribute new_attr =
+            attribute_translator(info.type_name, legacy_attr);
+        if (info.name == "pad_value") {
+          new_attr = pir::DoubleAttribute::get(
+              ctx,
+              static_cast<double>(
+                  new_attr.dyn_cast<pir::FloatAttribute>().data()));
+        }
+        attribute_map[info.name] = new_attr;
+      } else {
+        this->HandleNonexistentAttribute(ctx, &attribute_map, info);
+      }
+    }
+    return attribute_map;
+  }
+};
+
 OpTranslator::OpTranslator() {
   pir::IrContext* ctx = pir::IrContext::Instance();
   ctx->GetOrRegisterDialect<paddle::dialect::OperatorDialect>();
@@ -4149,5 +4186,7 @@ OpTranslator::OpTranslator() {
   special_handlers["softplus_grad"] = SoftPlusOpTranscriber();
   special_handlers["logit"] = LogitOpTranscriber();
   special_handlers["logit_grad"] = LogitOpTranscriber();
+  special_handlers["pad3d"] = Pad3dOpTranscriber();
+  special_handlers["pad3d_grad"] = Pad3dOpTranscriber();
 }
 }  // namespace paddle::translator
diff --git a/paddle/fluid/pir/serialize_deserialize/patch/0.yaml b/paddle/fluid/pir/serialize_deserialize/patch/0.yaml
index cf04e810a71c15..ddeef1eaec8842 100644
--- a/paddle/fluid/pir/serialize_deserialize/patch/0.yaml
+++ b/paddle/fluid/pir/serialize_deserialize/patch/0.yaml
@@ -31,3 +31,8 @@ op_patches:
       - action : modify_attr
         object : eps
         type : pir::DoubleAttribute
+  - op_name : pd_op.pad3d
+    actions:
+      - action : modify_attr
+        object : pad_value
+        type : pir::DoubleAttribute
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index 47b2aa30d2e1ae..d2b0a0683ddb91 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -3428,7 +3428,7 @@ void PadInferMeta(const MetaTensor& input,
 void Pad3dInferMeta(const MetaTensor& x,
                     const IntArray& paddings_int_array,
                     const std::string& mode,
-                    float value,
+                    double value,
                     const std::string& data_format,
                     MetaTensor* out,
                     MetaConfig config) {
diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h
index cc6bb467f0808c..7063acacd97b83 100644
--- a/paddle/phi/infermeta/unary.h
+++ b/paddle/phi/infermeta/unary.h
@@ -559,7 +559,7 @@ PADDLE_API void PadInferMeta(const MetaTensor& input,
 PADDLE_API void Pad3dInferMeta(const MetaTensor& x,
                                const IntArray& paddings,
                                const std::string& mode,
-                               float value,
+                               double value,
                                const std::string& data_format,
                                MetaTensor* out,
                                MetaConfig config = MetaConfig());
diff --git a/paddle/phi/kernels/cpu/pad3d_grad_kernel.cc b/paddle/phi/kernels/cpu/pad3d_grad_kernel.cc
index 83ab7d3838aa29..8d7abe0fd6d089 100644
--- a/paddle/phi/kernels/cpu/pad3d_grad_kernel.cc
+++ b/paddle/phi/kernels/cpu/pad3d_grad_kernel.cc
@@ -364,7 +364,7 @@ void Pad3dGradKernel(const Context& dev_ctx,
                      const DenseTensor& out_grad,
                      const IntArray& paddings,
                      const std::string& mode,
-                     float pad_value UNUSED,
+                     double pad_value UNUSED,
                      const std::string& data_format,
                      DenseTensor* x_grad) {
   std::vector<int64_t> pads = paddings.GetData();
diff --git a/paddle/phi/kernels/cpu/pad3d_kernel.cc b/paddle/phi/kernels/cpu/pad3d_kernel.cc
index 6a9f63c6249e64..5a77f822798493 100644
--- a/paddle/phi/kernels/cpu/pad3d_kernel.cc
+++ b/paddle/phi/kernels/cpu/pad3d_kernel.cc
@@ -381,7 +381,7 @@ void Pad3dKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const IntArray& paddings,
                  const std::string& mode,
-                 float pad_value,
+                 double pad_value,
                  const std::string& data_format,
                  DenseTensor* out) {
   T value = static_cast<T>(pad_value);
diff --git a/paddle/phi/kernels/gpu/pad3d_grad_kernel.cu b/paddle/phi/kernels/gpu/pad3d_grad_kernel.cu
index 18d2f16e4677a6..c902c2cbf3a622 100644
--- a/paddle/phi/kernels/gpu/pad3d_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/pad3d_grad_kernel.cu
@@ -343,7 +343,7 @@ void Pad3dGradKernel(const Context& dev_ctx,
                      const DenseTensor& out_grad,
                      const IntArray& paddings,
                      const std::string& mode,
-                     float pad_value,
+                     double pad_value,
                      const std::string& data_format,
                      DenseTensor* x_grad) {
   std::vector<int64_t> pads = paddings.GetData();
diff --git a/paddle/phi/kernels/gpu/pad3d_kernel.cu b/paddle/phi/kernels/gpu/pad3d_kernel.cu
index aaacfd735cdc9e..64dd54359dfc43 100644
--- a/paddle/phi/kernels/gpu/pad3d_kernel.cu
+++ b/paddle/phi/kernels/gpu/pad3d_kernel.cu
@@ -334,7 +334,7 @@ void Pad3dKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const IntArray& paddings,
                  const std::string& mode,
-                 float pad_value,
+                 double pad_value,
                  const std::string& data_format,
                  DenseTensor* out) {
   std::vector<int64_t> pads = paddings.GetData();
diff --git a/paddle/phi/kernels/onednn/pad3d_kernel.cc b/paddle/phi/kernels/onednn/pad3d_kernel.cc
index 9429a7e83a77e1..97bd4b120c1001 100644
--- a/paddle/phi/kernels/onednn/pad3d_kernel.cc
+++ b/paddle/phi/kernels/onednn/pad3d_kernel.cc
@@ -52,7 +52,7 @@ void Pad3dKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const IntArray& paddings,
                  const std::string& mode UNUSED,
-                 float pad_value,
+                 double pad_value,
                  const std::string& data_format UNUSED,
                  DenseTensor* out) {
   PadOpKernel<T, Context>(dev_ctx, x, paddings.GetData(), pad_value, out);
diff --git a/paddle/phi/kernels/onednn/pad_kernel_impl.h b/paddle/phi/kernels/onednn/pad_kernel_impl.h
index 0c360e1dabbc31..02e97839b0271a 100644
--- a/paddle/phi/kernels/onednn/pad_kernel_impl.h
+++ b/paddle/phi/kernels/onednn/pad_kernel_impl.h
@@ -107,7 +107,7 @@ template <typename T, typename Context>
 void PadOpKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const std::vector<int64_t>& paddings,
-                 float pad_value,
+                 double pad_value,
                  DenseTensor* out) {
   const auto& onednn_engine = dev_ctx.GetEngine();
   auto& astream = OneDNNContext::tls().get_stream();
diff --git a/paddle/phi/kernels/pad3d_grad_kernel.h b/paddle/phi/kernels/pad3d_grad_kernel.h
index bbad50f4d83bd4..17b466aa76f9f3 100644
--- a/paddle/phi/kernels/pad3d_grad_kernel.h
+++ b/paddle/phi/kernels/pad3d_grad_kernel.h
@@ -25,7 +25,7 @@ void Pad3dGradKernel(const Context& dev_ctx,
                      const DenseTensor& out_grad,
                      const IntArray& paddings,
                      const std::string& mode,
-                     float pad_value,
+                     double pad_value,
                      const std::string& data_format,
                      DenseTensor* x_grad);
 
diff --git a/paddle/phi/kernels/pad3d_kernel.h b/paddle/phi/kernels/pad3d_kernel.h
index 1589ff854ec23d..f49156b3b1dab9 100644
--- a/paddle/phi/kernels/pad3d_kernel.h
+++ b/paddle/phi/kernels/pad3d_kernel.h
@@ -24,7 +24,7 @@ void Pad3dKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const IntArray& paddings,
                  const std::string& mode,
-                 float pad_value,
+                 double pad_value,
                  const std::string& data_format,
                  DenseTensor* out);
 
diff --git a/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc b/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc
index c0ec47b722fb98..97f0e1f1025323 100644
--- a/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc
+++ b/paddle/phi/kernels/xpu/pad3d_grad_kernel.cc
@@ -26,7 +26,7 @@ void Pad3dGradKernel(const Context& dev_ctx,
                      const DenseTensor& out_grad,
                      const IntArray& paddings,
                      const std::string& mode,
-                     float pad_value,
+                     double pad_value,
                      const std::string& data_format,
                      DenseTensor* x_grad) {
   T value = static_cast<T>(pad_value);
diff --git a/paddle/phi/kernels/xpu/pad3d_kernel.cc b/paddle/phi/kernels/xpu/pad3d_kernel.cc
index 00c7c03da02402..451c756337e72f 100644
--- a/paddle/phi/kernels/xpu/pad3d_kernel.cc
+++ b/paddle/phi/kernels/xpu/pad3d_kernel.cc
@@ -26,7 +26,7 @@ void Pad3dKernel(const Context& dev_ctx,
                  const DenseTensor& x,
                  const IntArray& paddings,
                  const std::string& mode,
-                 float pad_value,
+                 double pad_value,
                  const std::string& data_format,
                  DenseTensor* out) {
   std::vector<int64_t> pads = paddings.GetData();
diff --git a/paddle/phi/ops/yaml/backward.yaml b/paddle/phi/ops/yaml/backward.yaml
index 4cb36dc44e88fd..71ab336e370284 100644
--- a/paddle/phi/ops/yaml/backward.yaml
+++ b/paddle/phi/ops/yaml/backward.yaml
@@ -2583,8 +2583,8 @@
     composite: p_norm_grad(x, out, out_grad, porder, axis, epsilon, keepdim, asvector, x_grad)
 
 - backward_op : pad3d_double_grad
-  forward : pad3d_grad(Tensor x, Tensor grad_out, IntArray paddings, str mode="constant", float pad_value=0.0, str data_format="NCDHW") -> Tensor(grad_x)
-  args : (Tensor grad_x_grad, IntArray paddings, str mode, float pad_value, str data_format)
+  forward : pad3d_grad(Tensor x, Tensor grad_out, IntArray paddings, str mode="constant", double pad_value=0.0, str data_format="NCDHW") -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, IntArray paddings, str mode, double pad_value, str data_format)
   output : Tensor(grad_out_grad)
   infer_meta :
     func : Pad3dInferMeta
@@ -2592,8 +2592,8 @@
     func : pad3d
 
 - backward_op : pad3d_grad
-  forward : pad3d(Tensor x, IntArray paddings, str mode="constant", float pad_value=0.0, str data_format="NCDHW") -> Tensor(out)
-  args : (Tensor x, Tensor out_grad, IntArray paddings, str mode, float pad_value, str data_format)
+  forward : pad3d(Tensor x, IntArray paddings, str mode="constant", double pad_value=0.0, str data_format="NCDHW") -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, IntArray paddings, str mode, double pad_value, str data_format)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
diff --git a/paddle/phi/ops/yaml/legacy/backward_exclude.yaml b/paddle/phi/ops/yaml/legacy/backward_exclude.yaml
index 1d22d7235c582c..d6ccde71dcb711 100644
--- a/paddle/phi/ops/yaml/legacy/backward_exclude.yaml
+++ b/paddle/phi/ops/yaml/legacy/backward_exclude.yaml
@@ -65,3 +65,5 @@
 - unpool_grad
 - unsqueeze_grad
 - logit_grad
+- pad3d_grad
+- pad3d_double_grad
diff --git a/paddle/phi/ops/yaml/legacy/ops_exclude.yaml b/paddle/phi/ops/yaml/legacy/ops_exclude.yaml
index bcd1041fbed7a8..e5ee856c1aca0b 100644
--- a/paddle/phi/ops/yaml/legacy/ops_exclude.yaml
+++ b/paddle/phi/ops/yaml/legacy/ops_exclude.yaml
@@ -100,3 +100,4 @@
 - zeros
 - zeros_like
 - logit
+- pad3d
diff --git a/paddle/phi/ops/yaml/legacy/static_backward.yaml b/paddle/phi/ops/yaml/legacy/static_backward.yaml
index 82e596cc967649..c17f9a702e24cf 100755
--- a/paddle/phi/ops/yaml/legacy/static_backward.yaml
+++ b/paddle/phi/ops/yaml/legacy/static_backward.yaml
@@ -403,6 +403,27 @@
   kernel :
     func : norm_grad
 
+- backward_op : pad3d_double_grad
+  forward : pad3d_grad(Tensor x, Tensor grad_out, IntArray paddings, str mode="constant", float pad_value=0.0, str data_format="NCDHW") -> Tensor(grad_x)
+  args : (Tensor grad_x_grad, IntArray paddings, str mode, float pad_value, str data_format)
+  output : Tensor(grad_out_grad)
+  infer_meta :
+    func : Pad3dInferMeta
+  kernel :
+    func : pad3d
+
+- backward_op : pad3d_grad
+  forward : pad3d(Tensor x, IntArray paddings, str mode="constant", float pad_value=0.0, str data_format="NCDHW") -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, IntArray paddings, str mode, float pad_value, str data_format)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param: [x]
+  kernel :
+    func : pad3d_grad
+  no_need_buffer : x
+  backward : pad3d_double_grad
+
 - backward_op : pool2d_double_grad
   forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x)
   args : (Tensor grad_x_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
diff --git a/paddle/phi/ops/yaml/legacy/static_ops.yaml b/paddle/phi/ops/yaml/legacy/static_ops.yaml
index a202e525fed277..f4ad43b4c6b054 100755
--- a/paddle/phi/ops/yaml/legacy/static_ops.yaml
+++ b/paddle/phi/ops/yaml/legacy/static_ops.yaml
@@ -724,6 +724,16 @@
     data_type : x
   traits : paddle::dialect::ForwardOnlyTrait
 
+- op : pad3d
+  args : (Tensor x, IntArray paddings, str mode = "constant", float pad_value = 0.0, str data_format = "NCDHW")
+  output : Tensor(out)
+  infer_meta :
+    func : Pad3dInferMeta
+  kernel :
+    func : pad3d
+  backward : pad3d_grad
+  interfaces : paddle::dialect::InferSymbolicShapeInterface
+
 - op : pool2d
   args : (Tensor x, IntArray kernel_size, int[] strides = {1,1}, int[] paddings = {0,0}, bool ceil_mode = false, bool exclusive = true, str data_format = "NCHW", str pooling_type = "", bool global_pooling = false, bool adaptive = false, str padding_algorithm = "EXPLICIT", bool use_cudnn = false)
   output : Tensor(out)
diff --git a/paddle/phi/ops/yaml/ops.yaml b/paddle/phi/ops/yaml/ops.yaml
index c47b3657726c20..0251fb0e653a23 100644
--- a/paddle/phi/ops/yaml/ops.yaml
+++ b/paddle/phi/ops/yaml/ops.yaml
@@ -4109,7 +4109,7 @@
   interfaces : paddle::dialect::InferSymbolicShapeInterface
 
 - op : pad3d
-  args : (Tensor x, IntArray paddings, str mode = "constant", float pad_value = 0.0, str data_format = "NCDHW")
+  args : (Tensor x, IntArray paddings, str mode = "constant", double pad_value = 0.0, str data_format = "NCDHW")
   output : Tensor(out)
   infer_meta :
     func : Pad3dInferMeta
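
Illustration (not part of the patch): a minimal, standalone sketch of the numeric effect the float-to-double change presumably targets. A pad_value that was stored as a 32-bit float keeps the float's rounding error even after it is widened with static_cast<double>, as the plugin and translator above now do, whereas a value carried as a double from the start does not. The program below is hypothetical and independent of Paddle; it only demonstrates that difference.

#include <cstdio>

int main() {
  const float stored_as_float = 0.1f;   // legacy float-typed pad_value
  const double stored_as_double = 0.1;  // pad_value carried as double end to end

  // Widening the float, as the FloatAttribute -> DoubleAttribute promotion does,
  // preserves the float's rounding error; it cannot recover the lost precision.
  const double widened = static_cast<double>(stored_as_float);

  std::printf("widened float : %.17g\n", widened);           // prints 0.10000000149011612
  std::printf("native double : %.17g\n", stored_as_double);  // prints 0.10000000000000001
  return 0;
}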