10 changes: 8 additions & 2 deletions paddle/fluid/inference/tensorrt/pir/generic_plugin.cu
@@ -704,8 +704,14 @@ int GenericPlugin::enqueue(const nvinfer1::PluginTensorDesc* input_desc,
phi_kernel_contexts_[data_type]->EmplaceBackAttr(
attrs_map_[t].dyn_cast<::pir::FloatAttribute>().data());
} else if (attr_type_name == "pir::DoubleAttribute") {
phi_kernel_contexts_[data_type]->EmplaceBackAttr(
attrs_map_[t].dyn_cast<::pir::DoubleAttribute>().data());
if (attrs_map_[t].type_id() == ::pir::FloatAttribute::type_id()) {
const auto val = attrs_map_[t].dyn_cast<::pir::FloatAttribute>().data();
phi_kernel_contexts_[data_type]->EmplaceBackAttr(
static_cast<double>(val));
} else {
phi_kernel_contexts_[data_type]->EmplaceBackAttr(
attrs_map_[t].dyn_cast<::pir::DoubleAttribute>().data());
}
} else if (attr_type_name == "pir::BoolAttribute") {
phi_kernel_contexts_[data_type]->EmplaceBackAttr(
attrs_map_[t].dyn_cast<::pir::BoolAttribute>().data());
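The plugin hunk above guards against programs serialized before this change, where `pad_value` may still be stored as a `pir::FloatAttribute` even though the op now declares the attribute as `double`: the stored attribute's `type_id()` is checked first, and a float payload is widened with `static_cast<double>` instead of being `dyn_cast` to `pir::DoubleAttribute`. A minimal standalone sketch of the same fallback, using `std::variant` in place of the real pir attribute types (the names here are illustrative, not Paddle's API):

```cpp
#include <iostream>
#include <variant>

// Stand-in for a serialized attribute that may hold either precision.
using Attr = std::variant<float, double>;

// Return the attribute as double, widening a legacy float payload if needed.
double AsDouble(const Attr& attr) {
  if (std::holds_alternative<float>(attr)) {
    // Legacy program: the attribute was written as float; widen it.
    return static_cast<double>(std::get<float>(attr));
  }
  return std::get<double>(attr);
}

int main() {
  Attr legacy = 1.5f;  // attribute from an old serialized program
  Attr current = 2.5;  // attribute written after the double migration
  std::cout << AsDouble(legacy) << " " << AsDouble(current) << "\n";  // 1.5 2.5
  return 0;
}
```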
39 changes: 39 additions & 0 deletions paddle/fluid/ir_adaptor/translator/op_translator.cc
@@ -4031,6 +4031,43 @@ struct LogitOpTranscriber : public OpTranscriber {
}
};

struct Pad3dOpTranscriber : public OpTranscriber {
pir::AttributeMap TranslateOpAttribute(
pir::IrContext* ctx,
const std::string& normalized_op_name,
const OpAttributeInfoList& op_attr_infos,
const OpDesc& op_desc) override {
auto& attribute_translator = AttributeTranslator::instance();
auto& op_normalizer = OpNameNormalizer::instance();
pir::AttributeMap attribute_map = {};

for (const auto& info : op_attr_infos) {
auto legacy_attr_name =
op_normalizer.GetLegacyAttrName(op_desc.Type(), info.name);
VLOG(10) << "[op: " << op_desc.Type()
<< "][attr] from: " << legacy_attr_name << " to: " << info.name;
if (op_desc.HasAttr(legacy_attr_name)) {
paddle::framework::Attribute legacy_attr =
op_desc.GetAttr(legacy_attr_name);
VLOG(10) << "attribute in " << op_desc.Type()
<< " name: " << legacy_attr_name << " " << legacy_attr.index();
pir::Attribute new_attr =
attribute_translator(info.type_name, legacy_attr);
if (info.name == "pad_value") {
new_attr = pir::DoubleAttribute::get(
ctx,
static_cast<double>(
new_attr.dyn_cast<pir::FloatAttribute>().data()));
}
attribute_map[info.name] = new_attr;
} else {
this->HandleNonexistentAttribute(ctx, &attribute_map, info);
}
}
return attribute_map;
}
};

OpTranslator::OpTranslator() {
pir::IrContext* ctx = pir::IrContext::Instance();
ctx->GetOrRegisterDialect<paddle::dialect::OperatorDialect>();
@@ -4149,5 +4186,7 @@ OpTranslator::OpTranslator() {
special_handlers["softplus_grad"] = SoftPlusOpTranscriber();
special_handlers["logit"] = LogitOpTranscriber();
special_handlers["logit_grad"] = LogitOpTranscriber();
special_handlers["pad3d"] = Pad3dOpTranscriber();
special_handlers["pad3d_grad"] = Pad3dOpTranscriber();
}
} // namespace paddle::translator
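Both `pad3d` and `pad3d_grad` are routed to the same `Pad3dOpTranscriber`, which only special-cases `pad_value` (rebuilding it as `pir::DoubleAttribute`) and translates every other attribute as before. A standalone sketch of that registry-plus-fixup pattern, again with `std::variant` standing in for the real pir attribute types (illustrative names, not the actual OpTranslator API):

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <variant>

// Illustrative stand-ins for translated attributes (not Paddle's pir types).
using Attr = std::variant<float, double>;
using AttrMap = std::unordered_map<std::string, Attr>;
using Fixup = std::function<void(AttrMap&)>;

// Mirror of the pad_value special case: rebuild a float attribute as double.
void WidenPadValue(AttrMap& attrs) {
  auto it = attrs.find("pad_value");
  if (it != attrs.end() && std::holds_alternative<float>(it->second)) {
    it->second = static_cast<double>(std::get<float>(it->second));
  }
}

int main() {
  // The forward and the grad op share one handler, matching the two
  // special_handlers registrations in the hunk above.
  std::unordered_map<std::string, Fixup> special_handlers;
  special_handlers["pad3d"] = WidenPadValue;
  special_handlers["pad3d_grad"] = WidenPadValue;

  AttrMap attrs = {{"pad_value", Attr{1.0f}}};
  special_handlers["pad3d"](attrs);
  std::cout << std::get<double>(attrs.at("pad_value")) << "\n";  // prints 1
  return 0;
}
```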
5 changes: 5 additions & 0 deletions paddle/fluid/pir/serialize_deserialize/patch/0.yaml
@@ -31,3 +31,8 @@ op_patches:
- action : modify_attr
object : eps
type : pir::DoubleAttribute
- op_name : pd_op.pad3d
actions:
- action : modify_attr
object : pad_value
type : pir::DoubleAttribute
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/unary.cc
@@ -3428,7 +3428,7 @@ void PadInferMeta(const MetaTensor& input,
void Pad3dInferMeta(const MetaTensor& x,
const IntArray& paddings_int_array,
const std::string& mode,
float value,
double value,
const std::string& data_format,
MetaTensor* out,
MetaConfig config) {
2 changes: 1 addition & 1 deletion paddle/phi/infermeta/unary.h
@@ -559,7 +559,7 @@ PADDLE_API void PadInferMeta(const MetaTensor& input,
PADDLE_API void Pad3dInferMeta(const MetaTensor& x,
const IntArray& paddings,
const std::string& mode,
float value,
double value,
const std::string& data_format,
MetaTensor* out,
MetaConfig config = MetaConfig());
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/pad3d_grad_kernel.cc
@@ -364,7 +364,7 @@ void Pad3dGradKernel(const Context& dev_ctx,
const DenseTensor& out_grad,
const IntArray& paddings,
const std::string& mode,
float pad_value UNUSED,
double pad_value UNUSED,
const std::string& data_format,
DenseTensor* x_grad) {
std::vector<int64_t> pads = paddings.GetData();
2 changes: 1 addition & 1 deletion paddle/phi/kernels/cpu/pad3d_kernel.cc
@@ -381,7 +381,7 @@ void Pad3dKernel(const Context& dev_ctx,
const DenseTensor& x,
const IntArray& paddings,
const std::string& mode,
float pad_value,
double pad_value,
const std::string& data_format,
DenseTensor* out) {
T value = static_cast<T>(pad_value);
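The kernel changes themselves are signature-only: as the CPU kernel shows (`T value = static_cast<T>(pad_value);`), the now-double attribute is immediately narrowed to the tensor's element type before it is used, so existing float tensors pad exactly as before and only the attribute's storage precision changes. A minimal standalone sketch of that pattern (not the real phi kernel):

```cpp
#include <iostream>
#include <vector>

// The pad value arrives as double but is narrowed to the element type T
// before it is written, mirroring the kernels in this PR.
template <typename T>
std::vector<T> FillConstantPad(std::size_t n, double pad_value) {
  T value = static_cast<T>(pad_value);  // same cast as in the kernels above
  return std::vector<T>(n, value);
}

int main() {
  auto f32 = FillConstantPad<float>(4, 1.5);   // narrowed to float
  auto f64 = FillConstantPad<double>(4, 1.5);  // keeps full double precision
  std::cout << f32[0] << " " << f64[0] << "\n";  // 1.5 1.5
  return 0;
}
```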
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/pad3d_grad_kernel.cu
@@ -343,7 +343,7 @@ void Pad3dGradKernel(const Context& dev_ctx,
const DenseTensor& out_grad,
const IntArray& paddings,
const std::string& mode,
float pad_value,
double pad_value,
const std::string& data_format,
DenseTensor* x_grad) {
std::vector<int64_t> pads = paddings.GetData();
2 changes: 1 addition & 1 deletion paddle/phi/kernels/gpu/pad3d_kernel.cu
@@ -334,7 +334,7 @@ void Pad3dKernel(const Context& dev_ctx,
const DenseTensor& x,
const IntArray& paddings,
const std::string& mode,
float pad_value,
double pad_value,
const std::string& data_format,
DenseTensor* out) {
std::vector<int64_t> pads = paddings.GetData();
2 changes: 1 addition & 1 deletion paddle/phi/kernels/onednn/pad3d_kernel.cc
@@ -52,7 +52,7 @@ void Pad3dKernel(const Context& dev_ctx,
const DenseTensor& x,
const IntArray& paddings,
const std::string& mode UNUSED,
float pad_value,
double pad_value,
const std::string& data_format UNUSED,
DenseTensor* out) {
PadOpKernel<T, Context>(dev_ctx, x, paddings.GetData(), pad_value, out);
2 changes: 1 addition & 1 deletion paddle/phi/kernels/onednn/pad_kernel_impl.h
@@ -107,7 +107,7 @@ template <typename T, typename Context>
void PadOpKernel(const Context& dev_ctx,
const DenseTensor& x,
const std::vector<int64_t>& paddings,
float pad_value,
double pad_value,
DenseTensor* out) {
const auto& onednn_engine = dev_ctx.GetEngine();
auto& astream = OneDNNContext::tls().get_stream();
2 changes: 1 addition & 1 deletion paddle/phi/kernels/pad3d_grad_kernel.h
@@ -25,7 +25,7 @@ void Pad3dGradKernel(const Context& dev_ctx,
const DenseTensor& out_grad,
const IntArray& paddings,
const std::string& mode,
float pad_value,
double pad_value,
const std::string& data_format,
DenseTensor* x_grad);

2 changes: 1 addition & 1 deletion paddle/phi/kernels/pad3d_kernel.h
@@ -24,7 +24,7 @@ void Pad3dKernel(const Context& dev_ctx,
const DenseTensor& x,
const IntArray& paddings,
const std::string& mode,
float pad_value,
double pad_value,
const std::string& data_format,
DenseTensor* out);

2 changes: 1 addition & 1 deletion paddle/phi/kernels/xpu/pad3d_grad_kernel.cc
@@ -26,7 +26,7 @@ void Pad3dGradKernel(const Context& dev_ctx,
const DenseTensor& out_grad,
const IntArray& paddings,
const std::string& mode,
float pad_value,
double pad_value,
const std::string& data_format,
DenseTensor* x_grad) {
T value = static_cast<T>(pad_value);
2 changes: 1 addition & 1 deletion paddle/phi/kernels/xpu/pad3d_kernel.cc
@@ -26,7 +26,7 @@ void Pad3dKernel(const Context& dev_ctx,
const DenseTensor& x,
const IntArray& paddings,
const std::string& mode,
float pad_value,
double pad_value,
const std::string& data_format,
DenseTensor* out) {
std::vector<int64_t> pads = paddings.GetData();
8 changes: 4 additions & 4 deletions paddle/phi/ops/yaml/backward.yaml
@@ -2583,17 +2583,17 @@
composite: p_norm_grad(x, out, out_grad, porder, axis, epsilon, keepdim, asvector, x_grad)

- backward_op : pad3d_double_grad
forward : pad3d_grad(Tensor x, Tensor grad_out, IntArray paddings, str mode="constant", float pad_value=0.0, str data_format="NCDHW") -> Tensor(grad_x)
args : (Tensor grad_x_grad, IntArray paddings, str mode, float pad_value, str data_format)
forward : pad3d_grad(Tensor x, Tensor grad_out, IntArray paddings, str mode="constant", double pad_value=0.0, str data_format="NCDHW") -> Tensor(grad_x)
args : (Tensor grad_x_grad, IntArray paddings, str mode, double pad_value, str data_format)
output : Tensor(grad_out_grad)
infer_meta :
func : Pad3dInferMeta
kernel :
func : pad3d

- backward_op : pad3d_grad
forward : pad3d(Tensor x, IntArray paddings, str mode="constant", float pad_value=0.0, str data_format="NCDHW") -> Tensor(out)
args : (Tensor x, Tensor out_grad, IntArray paddings, str mode, float pad_value, str data_format)
forward : pad3d(Tensor x, IntArray paddings, str mode="constant", double pad_value=0.0, str data_format="NCDHW") -> Tensor(out)
args : (Tensor x, Tensor out_grad, IntArray paddings, str mode, double pad_value, str data_format)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
2 changes: 2 additions & 0 deletions paddle/phi/ops/yaml/legacy/backward_exclude.yaml
@@ -65,3 +65,5 @@
- unpool_grad
- unsqueeze_grad
- logit_grad
- pad3d_grad
- pad3d_double_grad
1 change: 1 addition & 0 deletions paddle/phi/ops/yaml/legacy/ops_exclude.yaml
@@ -100,3 +100,4 @@
- zeros
- zeros_like
- logit
- pad3d
21 changes: 21 additions & 0 deletions paddle/phi/ops/yaml/legacy/static_backward.yaml
@@ -403,6 +403,27 @@
kernel :
func : norm_grad

- backward_op : pad3d_double_grad
forward : pad3d_grad(Tensor x, Tensor grad_out, IntArray paddings, str mode="constant", float pad_value=0.0, str data_format="NCDHW") -> Tensor(grad_x)
args : (Tensor grad_x_grad, IntArray paddings, str mode, float pad_value, str data_format)
output : Tensor(grad_out_grad)
infer_meta :
func : Pad3dInferMeta
kernel :
func : pad3d

- backward_op : pad3d_grad
forward : pad3d(Tensor x, IntArray paddings, str mode="constant", float pad_value=0.0, str data_format="NCDHW") -> Tensor(out)
args : (Tensor x, Tensor out_grad, IntArray paddings, str mode, float pad_value, str data_format)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : pad3d_grad
no_need_buffer : x
backward : pad3d_double_grad

- backward_op : pool2d_double_grad
forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x)
args : (Tensor grad_x_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
10 changes: 10 additions & 0 deletions paddle/phi/ops/yaml/legacy/static_ops.yaml
@@ -724,6 +724,16 @@
data_type : x
traits : paddle::dialect::ForwardOnlyTrait

- op : pad3d
args : (Tensor x, IntArray paddings, str mode = "constant", float pad_value = 0.0, str data_format = "NCDHW")
output : Tensor(out)
infer_meta :
func : Pad3dInferMeta
kernel :
func : pad3d
backward : pad3d_grad
interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : pool2d
args : (Tensor x, IntArray kernel_size, int[] strides = {1,1}, int[] paddings = {0,0}, bool ceil_mode = false, bool exclusive = true, str data_format = "NCHW", str pooling_type = "", bool global_pooling = false, bool adaptive = false, str padding_algorithm = "EXPLICIT", bool use_cudnn = false)
output : Tensor(out)
2 changes: 1 addition & 1 deletion paddle/phi/ops/yaml/ops.yaml
@@ -4109,7 +4109,7 @@
interfaces : paddle::dialect::InferSymbolicShapeInterface

- op : pad3d
args : (Tensor x, IntArray paddings, str mode = "constant", float pad_value = 0.0, str data_format = "NCDHW")
args : (Tensor x, IntArray paddings, str mode = "constant", double pad_value = 0.0, str data_format = "NCDHW")
output : Tensor(out)
infer_meta :
func : Pad3dInferMeta