From a17ec4a040209ad0cfc50bcdbcef2ad0935f50f3 Mon Sep 17 00:00:00 2001 From: cmcandy Date: Sun, 17 Mar 2024 02:05:11 +0800 Subject: [PATCH 1/6] [PIR] Fix partial sum --- .../ir_adaptor/translator/op_translator.cc | 15 +++++ .../pir/dialect/op_generator/ops_api_gen.py | 1 + paddle/fluid/pir/dialect/operator/ir/ops.yaml | 10 ++++ .../pir/dialect/operator/ir/ops_backward.yaml | 10 ++++ .../fluid/pir/dialect/operator/utils/utils.cc | 2 + paddle/phi/api/yaml/op_compat.yaml | 4 ++ paddle/phi/infermeta/backward.cc | 10 ++++ paddle/phi/infermeta/backward.h | 3 + paddle/phi/infermeta/unary.cc | 60 +++++++++++++++++++ paddle/phi/infermeta/unary.h | 7 +++ 10 files changed, 122 insertions(+) diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc index 6a7e8a4dd5b44..38921c2ba8f07 100644 --- a/paddle/fluid/ir_adaptor/translator/op_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc @@ -2851,6 +2851,20 @@ struct FusedElemwiseAddActivationGradOpTranscriber } }; +struct PartialSumOpTranscriber : public OpTranscriber { + pir::OpInfo LookUpOpInfo(pir::IrContext* ctx, + const OpDesc& op_desc) override { + std::string target_op_name = "pd_op.partial_sum"; + const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); + if (!op_info) { + IR_THROW( + "Op partial_sum should have corresponding OpInfo " + "pd_op.partial_sum"); + } + return op_info; + } +}; + struct MatrixRankOpTranscriber : public OpTranscriber { pir::OpInfo LookUpOpInfo(pir::IrContext* ctx, const OpDesc& op_desc) override { @@ -3182,6 +3196,7 @@ OpTranslator::OpTranslator() { special_handlers["slice"] = SliceOpTranscriber(); special_handlers["split"] = SplitOpTranscriber(); special_handlers["sum"] = AddNOpTranscriber(); + special_handlers["partial_sum"] = PartialSumOpTranscriber(); special_handlers["tril_triu"] = TrilAndTriuOpTranscriber(); special_handlers["tril_triu_grad"] = TrilAndTriuGradOpTranscriber(); special_handlers["matmul"] = LegacyMatmulOpTranscriber(); diff --git a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py index 5a7c117974187..0b30184ce55c5 100644 --- a/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py +++ b/paddle/fluid/pir/dialect/op_generator/ops_api_gen.py @@ -153,6 +153,7 @@ 'lars_momentum', 'lars_momentum_', 'max_pool2d_v2', + 'partial_sum', 'random_routing', 'recv_v2', 'rnn_', diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index d4b9c2a2baff6..82d526fb43917 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -1555,6 +1555,16 @@ backward : sum_grad interfaces : paddle::dialect::InferSymbolicShapeInterface +- op : partial_sum + args : (Tensor[] x, int start_index = 0, int length = -1) + output : Tensor(out) + infer_meta : + func : PartialSumInferMeta + kernel : + func : partial_sum + data_type : x + backward : partial_sum_grad + - op : swish args : (Tensor x) output : Tensor(out) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml b/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml index 7b3068a8ab6c9..68a5d9ee9c506 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml @@ -846,6 +846,16 @@ no_need_buffer : x backward : sum_double_grad +- backward_op : partial_sum_grad + forward : partial_sum (Tensor[] x, int start_index = 0, int length = -1) -> Tensor(out) + 
args : (Tensor[] x, Tensor out_grad, int start_index, int length) + output : Tensor[](x_grad){x.size()} + infer_meta : + func : PartialSumGradInferMeta + param : [x] + kernel : + func : partial_sum_grad + - backward_op : swish_grad forward : swish (Tensor x) -> Tensor(out) args : (Tensor x, Tensor out_grad) diff --git a/paddle/fluid/pir/dialect/operator/utils/utils.cc b/paddle/fluid/pir/dialect/operator/utils/utils.cc index 367d731cfe604..96eecc8d68466 100644 --- a/paddle/fluid/pir/dialect/operator/utils/utils.cc +++ b/paddle/fluid/pir/dialect/operator/utils/utils.cc @@ -73,6 +73,8 @@ const std::unordered_set LegacyOpList = { MatchMatrixTensorGradOp::name(), NceOp::name(), NceGradOp::name(), + PartialSumOp::name(), + PartialSumGradOp::name(), LrnOp::name(), LrnGradOp::name(), MovingAverageAbsMaxScaleOp::name(), diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index ce5b70516a8e0..bdb29041e5eea 100755 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -2471,6 +2471,10 @@ - op : partial_sum backward : partial_sum_grad + inputs : + x : X + outputs : + out : Out extra : attrs : [bool use_mkldnn = false] diff --git a/paddle/phi/infermeta/backward.cc b/paddle/phi/infermeta/backward.cc index 9f66d0ec3a9f5..8818887f535d9 100644 --- a/paddle/phi/infermeta/backward.cc +++ b/paddle/phi/infermeta/backward.cc @@ -876,6 +876,16 @@ void NceGradInferMeta(const MetaTensor& input, } } +void PartialSumGradInferMeta(const std::vector& xs, + std::vector x_grads) { + auto input_num = xs.size(); + for(size_t i=0; i< input_num; i++){ + auto x_dims = xs[i]->dims(); + x_grads[i]->set_dims(x_dims); + x_grads[i]->set_dtype(xs[i]->dtype()); + } +} + void NllLossGradInferMeta(const MetaTensor& x, const MetaTensor& label, const MetaTensor& weight, diff --git a/paddle/phi/infermeta/backward.h b/paddle/phi/infermeta/backward.h index bde9c57ff245a..b7a11a85deeaa 100644 --- a/paddle/phi/infermeta/backward.h +++ b/paddle/phi/infermeta/backward.h @@ -372,6 +372,9 @@ void NanmedianGradInferMeta(const MetaTensor& x, bool keep_dim, MetaTensor* x_grad); +void PartialSumGradInferMeta(const std::vector& xs, + std::vector x_grads); + void NceGradInferMeta(const MetaTensor& input, const MetaTensor& bias, const MetaTensor& weight, diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc index 3433d17cf50a3..8457881f9fd88 100644 --- a/paddle/phi/infermeta/unary.cc +++ b/paddle/phi/infermeta/unary.cc @@ -4453,6 +4453,66 @@ void SumInferMeta(const MetaTensor& x, SumRawInferMeta(x, axis, keep_dim, reduce_all, dtype, out, config); } +void PartialSumInferMeta(const std::vector& xs, + int start_index, + int length, + MetaTensor* out, + MetaConfig config) { + int64_t batch_size = -1; + int64_t input_len = -1; + + auto inputs_num = xs.size(); + PADDLE_ENFORCE_GT(inputs_num, + 0, + phi::errors::InvalidArgument( + "ShapeError: Input tensors count should > 0. 
But " + "received inputs' length is 0.")); + + // Only support two dimensions now, should be extended later + // when length is -1, need make sure all dimensions to be added are the same + for (size_t i = 0; i < inputs_num; i++) { + auto x_dim = xs[i]->dims(); + VLOG(1) << "inputs_dims:" << x_dim; + + PADDLE_ENFORCE_EQ( + x_dim.size(), + 2, + phi::errors::InvalidArgument("Only support two dimensions input now.")); + + if (i == 0) { + batch_size = x_dim[0]; + input_len = x_dim[1]; + } else { + // each tensor's dim must eq + PADDLE_ENFORCE_EQ(x_dim[0], + batch_size, + phi::errors::InvalidArgument( + "The batch size of all inputs must be same")); + PADDLE_ENFORCE_EQ(x_dim[1], + input_len, + phi::errors::InvalidArgument( + "The input len of all inputs must be same")); + } + } + PADDLE_ENFORCE_GT( + input_len, + start_index, + phi::errors::OutOfRange("start_index must be less than input len")); + if (length > 0) { + PADDLE_ENFORCE_GE(input_len, + start_index + length, + phi::errors::OutOfRange( + "start_index + length is larger than input length")); + } + + std::vector out_dims(2); + out_dims[0] = batch_size; + out_dims[1] = (length == -1) ? input_len - start_index : length; + DDim out_dim = common::make_ddim(out_dims); + out->set_dims(out_dim); + out->set_dtype(xs[0]->dtype()); +} + void SvdInferMeta(const MetaTensor& x, bool full_matrices, MetaTensor* u, diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h index 9171c58b3b94a..4a64e645cc3bf 100644 --- a/paddle/phi/infermeta/unary.h +++ b/paddle/phi/infermeta/unary.h @@ -697,6 +697,13 @@ void SumRawInferMeta(const MetaTensor& x, MetaTensor* out, MetaConfig config = MetaConfig()); +void PartialSumInferMeta(const std::vector& xs, + int start_index, + int length, + MetaTensor* out, + MetaConfig config = MetaConfig()); + + void SvdInferMeta(const MetaTensor& x, bool full_matrices, MetaTensor* u, From 3a772177dd533469aafe4a4210fdd12aebd03d89 Mon Sep 17 00:00:00 2001 From: cmcandy Date: Sun, 17 Mar 2024 11:55:02 +0800 Subject: [PATCH 2/6] [PIR] add partial sum to white list --- test/white_list/pir_op_test_white_list | 1 + 1 file changed, 1 insertion(+) diff --git a/test/white_list/pir_op_test_white_list b/test/white_list/pir_op_test_white_list index dfa901c0ca126..9d712b41bda90 100644 --- a/test/white_list/pir_op_test_white_list +++ b/test/white_list/pir_op_test_white_list @@ -200,6 +200,7 @@ test_one_hot_v2_op test_one_hot_v2_op_static_build test_overlap_add_op test_pad3d_op +test_partial_sum_op test_pass_quantization test_pixel_shuffle_op test_poisson_op From 987eb2484cb32f72ddf68cd112668c708de39b65 Mon Sep 17 00:00:00 2001 From: cmcandy Date: Sun, 17 Mar 2024 15:39:39 +0800 Subject: [PATCH 3/6] format --- .../ir_adaptor/translator/op_translator.cc | 789 ++++++++++-------- paddle/fluid/pir/dialect/operator/ir/ops.yaml | 20 +- .../pir/dialect/operator/ir/ops_backward.yaml | 20 +- paddle/phi/infermeta/backward.cc | 4 +- paddle/phi/infermeta/unary.cc | 1 - paddle/phi/infermeta/unary.h | 1 - 6 files changed, 467 insertions(+), 368 deletions(-) diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc index 38921c2ba8f07..a57de9801bf04 100644 --- a/paddle/fluid/ir_adaptor/translator/op_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc @@ -198,9 +198,11 @@ inline pir::Operation* InsertFullOperationForAttributeInput( inline pir::Operation* InsertFullArrayOperationForAttributeInput( pir::IrContext* ctx, pir::Block* block, pir::Attribute attr) { - 
IR_ENFORCE(attr.isa(), - "Encounter non IntArray type when trying to insert IntArray " - "mutable attribute"); + PADDLE_ENFORCE( + attr.isa(), + platform::errors::InvalidArgument( + "Encounter non IntArray type when trying to insert IntArray " + "mutable attribute")); phi::IntArray int_array = attr.dyn_cast().data(); pir::Builder builder(ctx, block); dialect::FullIntArrayOp full_int_array_op = @@ -284,9 +286,10 @@ pir::OpInfo OpTranscriber::LookUpOpInfo(pir::IrContext* ctx, << target_op_name; auto op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW("Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op %d should have corresponding OpInfo %d", + op_desc.Type(), + target_op_name)); } if (!paddle::dialect::HaveOpToMultiKernelsMap( @@ -313,20 +316,23 @@ pir::OpInfo OpTranscriber::LookUpOpInfo(pir::IrContext* ctx, std::string legacy_input_name = op_normalizer.GetLegacyArgName(op_desc.Type(), info.name); auto legacy_input_vars = op_desc.Input(legacy_input_name, true); - IR_ENFORCE(legacy_input_vars.size() <= 1, - "Do not support duplicable tensor input, when op have multi " - "kernels. OP is %s", - op_desc.Type()); + PADDLE_ENFORCE( + legacy_input_vars.size() <= 1, + platform::errors::InvalidArgument( + "Do not support duplicable tensor input, when op have multi " + "kernels. OP is %s", + op_desc.Type())); if (legacy_input_vars.empty()) { need_inputs_sig.emplace_back(""); continue; } VarDesc* var = op_desc.Block()->FindVarRecursive(legacy_input_vars[0]); - IR_ENFORCE(var != nullptr, - "[op:%s] Input %s should not be null", - op_desc.Type(), - legacy_input_vars[0]); + PADDLE_ENFORCE( + var != nullptr, + platform::errors::InvalidArgument("[op:%s] Input %s should not be null", + op_desc.Type(), + legacy_input_vars[0])); if (var->GetType() == paddle::framework::proto::VarType::LOD_TENSOR) { need_inputs_sig.emplace_back("dense"); @@ -334,9 +340,10 @@ pir::OpInfo OpTranscriber::LookUpOpInfo(pir::IrContext* ctx, paddle::framework::proto::VarType::SELECTED_ROWS) { need_inputs_sig.emplace_back("selected_rows"); } else { - IR_THROW("Op %d only support dense tensor and selected_rows, but not %d", - op_desc.Type(), - var->GetType()); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op %d only support dense tensor and selected_rows, but not %d", + op_desc.Type(), + var->GetType())); } } @@ -364,19 +371,21 @@ pir::OpInfo OpTranscriber::LookUpOpInfo(pir::IrContext* ctx, } } - IR_ENFORCE(!target_op_name.empty(), - "Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name); + PADDLE_ENFORCE(!target_op_name.empty(), + platform::errors::InvalidArgument( + "Op %d should have corresponding OpInfo %d", + op_desc.Type(), + target_op_name)); target_op_name = GetPrefix(ctx, op_desc) + target_op_name; if (IsInplace(op_desc) && *target_op_name.rbegin() != '_') { target_op_name += "_"; } if (!op_info) { - IR_THROW("Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op %d should have corresponding OpInfo %d", + op_desc.Type(), + target_op_name)); } return op_info; @@ -429,9 +438,10 @@ pir::Value OpTranscriber::GetAttributeAsInput(pir::IrContext* ctx, op_normalizer.GetLegacyAttrName(op_desc.Type(), input_info.name); if (!op_desc.HasAttr(legacy_attr_name)) { - IR_THROW("Op %s arg %s should not be zero size", - op_desc.Type(), - legacy_attr_name); + PADDLE_THROW( + phi::errors::InvalidArgument("Op %s arg %s should not be zero 
size", + op_desc.Type(), + legacy_attr_name)); } paddle::framework::Attribute legacy_attr = op_desc.GetAttr(legacy_attr_name); VLOG(10) << "[" << op_desc.Type() << "][attribute]" @@ -532,10 +542,11 @@ std::vector OpTranscriber::GenerateOperationInput( // Vector if (legacy_input_vars.size() == 1) { VarDesc* var = op_desc.Block()->FindVarRecursive(legacy_input_vars[0]); - IR_ENFORCE(var != nullptr, - "[op:%s] Input %s should not be null", - op_desc.Type(), - legacy_input_vars[0]); + PADDLE_ENFORCE(var != nullptr, + platform::errors::InvalidArgument( + "[op:%s] Input %s should not be null", + op_desc.Type(), + legacy_input_vars[0])); if (var->GetType() == paddle::framework::proto::VarType::LOD_TENSOR_ARRAY) { is_vector = false; @@ -544,15 +555,17 @@ std::vector OpTranscriber::GenerateOperationInput( // if src type is Tensor if (!is_vector) { - IR_ENFORCE(legacy_input_vars.size() == 1u, - "Input %s not found when parsing op %s", - info.name, - op_desc.Type()); - IR_ENFORCE(param_map->count(legacy_input_vars[0]), - "Input [%s: %s] of op [%s] not found in param map", - info.name, - legacy_input_vars[0], - op_desc.Type()); + PADDLE_ENFORCE(legacy_input_vars.size() == 1u, + platform::errors::InvalidArgument( + "Input %s not found when parsing op %s", + info.name, + op_desc.Type())); + PADDLE_ENFORCE(param_map->count(legacy_input_vars[0]), + platform::errors::InvalidArgument( + "Input [%s: %s] of op [%s] not found in param map", + info.name, + legacy_input_vars[0], + op_desc.Type())); auto defining_info = (*param_map)[legacy_input_vars[0]]; op_inputs.push_back(defining_info.value); @@ -593,10 +606,11 @@ OpTranscriber::GenerateOperationOutput(pir::IrContext* ctx, VLOG(10) << "[output translating]" << "[" << op_desc.Type() << "] optional " << info.name << " :" << info.type_name << " " << legacy_output_name; - IR_ENFORCE(info.optional, - "Op %s arg %s should be optional if it can be empty", - op_desc.Type(), - legacy_output_name); + PADDLE_ENFORCE(info.optional, + platform::errors::InvalidArgument( + "Op %s arg %s should be optional if it can be empty", + op_desc.Type(), + legacy_output_name)); op_output_types.emplace_back(nullptr); continue; } @@ -613,10 +627,11 @@ OpTranscriber::GenerateOperationOutput(pir::IrContext* ctx, // Vector if (legacy_output_vars.size() == 1) { VarDesc* var = block->FindVarRecursive(legacy_output_vars[0]); - IR_ENFORCE(var != nullptr, - "[op:%s] Output %s should not be null", - op_desc.Type(), - legacy_output_vars[0]); + PADDLE_ENFORCE(var != nullptr, + platform::errors::InvalidArgument( + "[op:%s] Output %s should not be null", + op_desc.Type(), + legacy_output_vars[0])); if (var->GetType() == paddle::framework::proto::VarType::LOD_TENSOR_ARRAY) { pir::Type translated_var_type = @@ -640,10 +655,11 @@ OpTranscriber::GenerateOperationOutput(pir::IrContext* ctx, auto& var_name = legacy_output_vars[0]; VarDesc* var = block->FindVarRecursive(var_name); - IR_ENFORCE(var != nullptr, - "[op:%s] Output %s should not be null", - op_desc.Type(), - var_name); + PADDLE_ENFORCE(var != nullptr, + platform::errors::InvalidArgument( + "[op:%s] Output %s should not be null", + op_desc.Type(), + var_name)); VLOG(10) << "[output translating]" << "[" << op_desc.Type() << "]" << info.name << " var: " << var_name << " type: " << var->GetType(); @@ -669,10 +685,11 @@ OpTranscriber::GenerateOperationOutput(pir::IrContext* ctx, continue; } VarDesc* var = block->FindVarRecursive(var_name); - IR_ENFORCE(var != nullptr, - "[op:%s] Output %s should not be null", - op_desc.Type(), - var_name); + 
PADDLE_ENFORCE(var != nullptr, + platform::errors::InvalidArgument( + "[op:%s] Output %s should not be null", + op_desc.Type(), + var_name)); VLOG(10) << "[output translating]" << "[" << op_desc.Type() << "]" << info.name << " var: " << var_name << " type: " << var->GetType(); @@ -842,13 +859,15 @@ struct AssignOpTranscriber : public OpTranscriber { const OpDesc& op_desc) override { std::string target_op_name; - IR_ENFORCE( - op_desc.HasInput("X"), "op %s should have input `X`", op_desc.Type()); + PADDLE_ENFORCE(op_desc.HasInput("X"), + platform::errors::InvalidArgument( + "op %s should have input `X`", op_desc.Type())); const auto& input_vars = op_desc.Input("X"); - IR_ENFORCE(input_vars.size() == 1, - "op %s should have one input `X`, but got %d.", - op_desc.Type(), - input_vars.size()); + PADDLE_ENFORCE(input_vars.size() == 1, + platform::errors::InvalidArgument( + "op %s should have one input `X`, but got %d.", + op_desc.Type(), + input_vars.size())); const auto* input_var = op_desc.Block()->FindVarRecursive(input_vars[0]); if (input_var->GetType() == framework::proto::VarType::LOD_TENSOR_ARRAY) { target_op_name = dialect::AssignArray_Op::name(); @@ -858,7 +877,8 @@ struct AssignOpTranscriber : public OpTranscriber { const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW("Op assign should have corresponding OpInfo %s", target_op_name); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op assign should have corresponding OpInfo %s", target_op_name)); } return op_info; @@ -935,9 +955,9 @@ struct AssignValueOpTranscriber : public OpTranscriber { std::string target_op_name = "pd_op.assign_value"; const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op assign_value should have corresponding OpInfo " - "pd_op.assign_value"); + "pd_op.assign_value")); } return op_info; @@ -968,7 +988,8 @@ struct AssignValueOpTranscriber : public OpTranscriber { if (op_desc.HasAttr("shape")) { legacy_attr = op_desc.GetAttr("shape"); } else { - IR_THROW("Op assign_value should have attribute `shape` but not find"); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op assign_value should have attribute `shape` but not find")); } pir::Attribute attr_shape = attribute_translator(attr_info_maps.at("shape").type_name, legacy_attr); @@ -977,7 +998,8 @@ struct AssignValueOpTranscriber : public OpTranscriber { if (op_desc.HasAttr("dtype")) { legacy_attr = op_desc.GetAttr("dtype"); } else { - IR_THROW("Op assign_value should have attribute `dtype` but not find"); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op assign_value should have attribute `dtype` but not find")); } pir::Attribute attr_dtype = attribute_translator(attr_info_maps.at("dtype").type_name, legacy_attr); @@ -1005,10 +1027,11 @@ struct AssignValueOpTranscriber : public OpTranscriber { } } - IR_ENFORCE( + PADDLE_ENFORCE( attribute_map.find("values") != attribute_map.end(), - "Op assign_value should have attribute `**_values` or `values` but " - "not find"); + platform::errors::InvalidArgument( + "Op assign_value should have attribute `**_values` or `values` but " + "not find")); TranslateOpDistAttribute(op_desc, &attribute_map); @@ -1056,16 +1079,18 @@ pir::Value TranslateDropOutStateIn(pir::IrContext* ctx, // `DropoutState` is a tensor VarDesc* dropout_state = op_desc.Block()->FindVarRecursive(legacy_output_vars[0]); - IR_ENFORCE(dropout_state != nullptr, - "[op:%s] Output %s should not be null", - op_desc.Type(), - 
legacy_output_vars[0]); + PADDLE_ENFORCE( + dropout_state != nullptr, + platform::errors::InvalidArgument("[op:%s] Output %s should not be null", + op_desc.Type(), + legacy_output_vars[0])); auto& type_translator = TypeTranslator::instance(); pir::Type translated_var_type = type_translator[dropout_state->GetType()](ctx, *dropout_state); - IR_ENFORCE( + PADDLE_ENFORCE( translated_var_type.isa(), - "Unexpected: Rnn Op's output DropoutState should be a DenseTensor"); + platform::errors::InvalidArgument( + "Unexpected: Rnn Op's output DropoutState should be a DenseTensor")); auto tensor_type = translated_var_type.dyn_cast(); pir::Builder builder(ctx, block); @@ -1116,9 +1141,10 @@ struct EmbeddingGradOpTranscriber : public OpTranscriber { << target_op_name; auto op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW("Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op %d should have corresponding OpInfo %d", + op_desc.Type(), + target_op_name)); } return op_info; @@ -1194,7 +1220,9 @@ struct SplitOpTranscriber : public OpTranscriber { std::vector op_inputs; // process first input auto x_input_vars = op_desc.Input("X"); - IR_ENFORCE(x_input_vars.size() == 1, "x input of split MUST be a tensor"); + PADDLE_ENFORCE( + x_input_vars.size() == 1, + platform::errors::InvalidArgument("x input of split MUST be a tensor")); auto x_defining_info = (*param_map)[x_input_vars[0]]; op_inputs.push_back(x_defining_info.value); @@ -1224,8 +1252,9 @@ struct SplitOpTranscriber : public OpTranscriber { !op_desc.Input("AxisTensor").empty()) { // get axis from input auto axis_var_list = op_desc.Input("AxisTensor"); - IR_ENFORCE(axis_var_list.size() == 1, - "axis tensor input of split MUST be a tensor"); + PADDLE_ENFORCE(axis_var_list.size() == 1, + platform::errors::InvalidArgument( + "axis tensor input of split MUST be a tensor")); auto axis_defining_info = (*param_map)[axis_var_list[0]]; op_inputs.push_back(axis_defining_info.value); } else { @@ -1283,8 +1312,9 @@ struct SplitOpTranscriber : public OpTranscriber { const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW("Op assign_value should have corresponding OpInfo %s.", - target_op_name); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op assign_value should have corresponding OpInfo %s.", + target_op_name)); } return op_info; @@ -1375,7 +1405,8 @@ struct AddNOpTranscriber : public OpTranscriber { const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW("Op add_n should have corresponding OpInfo %s", target_op_name); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op add_n should have corresponding OpInfo %s", target_op_name)); } return op_info; @@ -1394,9 +1425,9 @@ struct TrilAndTriuOpTranscriber : public OpTranscriber { } const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op tril_triu should have corresponding OpInfo pd_op.tril or " - "pd_op.triu."); + "pd_op.triu.")); } return op_info; @@ -1415,10 +1446,10 @@ struct TrilAndTriuGradOpTranscriber : public OpTranscriber { } const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op tril_triu_grad should have corresponding OpInfo pd_op.tril_grad " "or " - "pd_op.triu_grad."); + "pd_op.triu_grad.")); } return op_info; @@ -1432,27 +1463,32 @@ ValueInfo 
GetTensorInfoByVarName(const OpDesc& op_desc, const std::vector& names, TranslationContext* param_map, const std::string& var_name) { - IR_ENFORCE(names.size() == 1, - "Expected op[%s]'s input %s has only 1 variable, but got %d", - op_desc.Type(), - var_name, - names.size()); + PADDLE_ENFORCE( + names.size() == 1, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s has only 1 variable, but got %d", + op_desc.Type(), + var_name, + names.size())); const auto& name = names[0]; - IR_ENFORCE(param_map->count(name) > 0, - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - name); + PADDLE_ENFORCE( + param_map->count(name) > 0, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s has been parsed", op_desc.Type(), name)); const auto& defining_info = param_map->at(name); pir::Value value = defining_info.value; - IR_ENFORCE( - value, "Expected op[%s]'s input %s is not null", op_desc.Type(), name); + PADDLE_ENFORCE( + value != nullptr, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s is not null", op_desc.Type(), name)); const pir::Type& type = value.type(); - IR_ENFORCE(type.isa(), - "Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - name, - type); + PADDLE_ENFORCE(type.isa(), + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s is DenseTensor but got %s", + op_desc.Type(), + name, + type)); dialect::DenseTensorType tensor_type = type.dyn_cast(); @@ -1480,9 +1516,10 @@ struct MulOpTranscriber : public OpTranscriber { const std::string& target_op_name = paddle::dialect::MatmulOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW("Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op %d should have corresponding OpInfo %d", + op_desc.Type(), + target_op_name)); } return op_info; } @@ -1517,24 +1554,28 @@ struct MulOpTranscriber : public OpTranscriber { const auto& [x_shape, x_tensor_type, x_value] = x_info; - IR_ENFORCE(x_num_col_dims <= static_cast(x_shape.size()), - "Expected op[%s]'s attr `x_num_col_dims` less than or equal to " - "dim of input X %s, but got %d", - op_desc.Type(), - x_shape.size(), - x_num_col_dims); + PADDLE_ENFORCE( + x_num_col_dims <= static_cast(x_shape.size()), + platform::errors::InvalidArgument( + "Expected op[%s]'s attr `x_num_col_dims` less than or equal to " + "dim of input X %s, but got %d", + op_desc.Type(), + x_shape.size(), + x_num_col_dims)); ValueInfo y_info = GetTensorInfoByVarName( op_desc, op_desc.Input("Y", true), param_map, "Y"); const auto& [y_shape, y_tensor_type, y_value] = y_info; - IR_ENFORCE(y_num_col_dims <= static_cast(y_shape.size()), - "Expected op[%s]'s attr `y_num_col_dims` less than or equal to " - "dim of input Y %s, but got %d", - op_desc.Type(), - y_shape.size(), - y_num_col_dims); + PADDLE_ENFORCE( + y_num_col_dims <= static_cast(y_shape.size()), + platform::errors::InvalidArgument( + "Expected op[%s]'s attr `y_num_col_dims` less than or equal to " + "dim of input Y %s, but got %d", + op_desc.Type(), + y_shape.size(), + y_num_col_dims)); pir::Builder builder(ctx, block); @@ -1649,9 +1690,10 @@ struct MulGradOpTranscriber : public OpTranscriber { << target_op_name; const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW("Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op %d should have corresponding OpInfo 
%d", + op_desc.Type(), + target_op_name)); } return op_info; } @@ -1686,24 +1728,27 @@ struct MulGradOpTranscriber : public OpTranscriber { const auto& [x_shape, x_tensor_type, x_value] = x_info; - IR_ENFORCE(x_num_col_dims <= static_cast(x_shape.size()), - "Expected op[%s]'s attr `x_num_col_dims` less than or equal to " - "dim of input X %s, but got %d", - op_desc.Type(), - x_shape.size(), - x_num_col_dims); + PADDLE_ENFORCE( + x_num_col_dims <= static_cast(x_shape.size()), + platform::errors::InvalidArgument( + "Expected op[%s]'s attr `x_num_col_dims` less than or equal to " + "dim of input X %s, but got %d", + op_desc.Type(), + x_shape.size(), + x_num_col_dims)); ValueInfo y_info = GetTensorInfoByVarName( op_desc, op_desc.Input("Y", true), param_map, "Y"); const auto& [y_shape, y_tensor_type, y_value] = y_info; - IR_ENFORCE(y_num_col_dims <= static_cast(y_shape.size()), - "Expected op[%s]'s attr `y_num_col_dims` less than or equal to " - "dim of input Y %s, but got %d", - op_desc.Type(), - y_shape.size(), - y_num_col_dims); + PADDLE_ENFORCE(y_num_col_dims <= static_cast(y_shape.size()), + platform::errors::InvalidArgument( + "Expected op[%s]'s attr `y_num_col_dims` less than or " + "equal to dim of input Y %s, but got %d", + op_desc.Type(), + y_shape.size(), + y_num_col_dims)); ValueInfo out_grad_info = GetTensorInfoByVarName( op_desc, op_desc.Input("Out@GRAD", true), param_map, "Out@GRAD"); @@ -1781,16 +1826,19 @@ struct MulGradOpTranscriber : public OpTranscriber { auto gradReshape = [&](const std::string& var_name) { const auto& grad_output = op_desc.Output(var_name); - IR_ENFORCE(grad_output.size() == 1, - "Expected op[%s]'s output %s has only 1 variable, but got %d", - op_desc.Type(), - var_name, - grad_output.size()); + PADDLE_ENFORCE( + grad_output.size() == 1, + platform::errors::InvalidArgument( + "Expected op[%s]'s output %s has only 1 variable, but got %d", + op_desc.Type(), + var_name, + grad_output.size())); const auto& grad_var_name = grad_output[0]; auto idx_iter = arg_to_idx.find(grad_var_name); if (idx_iter == arg_to_idx.end()) { - IR_THROW("op[%s] should have got its %s", op_desc.Type(), var_name); + PADDLE_THROW(phi::errors::InvalidArgument( + "op[%s] should have got its %s", op_desc.Type(), var_name)); } auto [idx_in_op, idx_in_vec] = idx_iter->second; VLOG(10) << "[output recording]" @@ -1799,26 +1847,29 @@ struct MulGradOpTranscriber : public OpTranscriber { VarDesc* var_desc = op_desc.Block()->FindVarRecursive( op_desc.Input(var_name.substr(0, 1))[0]); - IR_ENFORCE(var_desc != nullptr, - "[op:%s] Input %s should not be null", - op_desc.Type(), - var_name.substr(0, 1)); + PADDLE_ENFORCE(var_desc != nullptr, + platform::errors::InvalidArgument( + "[op:%s] Input %s should not be null", + op_desc.Type(), + var_name.substr(0, 1))); std::vector shape = var_desc->GetShape(); DenseTensorTypeStorage::Dim dim = common::make_ddim(shape); pir::Value value_res = operation->result(idx_in_op); auto reshape_op = builder.Build(value_res, shape); - IR_ENFORCE(value_res, - "Expected op[%s]'s input %s is not null", - op_desc.Type(), - grad_var_name); + PADDLE_ENFORCE(value_res != nullptr, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s is not null", + op_desc.Type(), + grad_var_name)); pir::Type grad_type = value_res.type(); - IR_ENFORCE(grad_type.isa(), - "Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - grad_var_name, - grad_type); + PADDLE_ENFORCE(grad_type.isa(), + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s is 
DenseTensor but got %s", + op_desc.Type(), + grad_var_name, + grad_type)); dialect::DenseTensorType grad_tensor_type = grad_type.dyn_cast(); @@ -1844,7 +1895,8 @@ struct FillConstant2FullTranscriber : public OpTranscriber { const OpDesc& op_desc) override { const auto& op_info = ctx->GetRegisteredOpInfo(dialect::FullOp::name()); if (!op_info) { - IR_THROW("Op fill_constant should have corresponding OpInfo pd_op.full"); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op fill_constant should have corresponding OpInfo pd_op.full")); } return op_info; @@ -1925,9 +1977,9 @@ struct FillConstant2FullWithTensorTranscriber : public OpTranscriber { const OpDesc& op_desc) override { const auto& op_info = ctx->GetRegisteredOpInfo("pd_op.full_with_tensor"); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op fill_constant should have corresponding OpInfo " - "pd_op.full_with_tensor"); + "pd_op.full_with_tensor")); } return op_info; @@ -2026,16 +2078,18 @@ struct SelectInputOpTranscriber : public OpTranscriber { std::vector op_inputs = {}; auto Mask_name = op_desc.Input("Mask")[0]; auto& Input_name = op_desc.Input("X"); - IR_ENFORCE(param_map->count(Mask_name) > 0, - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - Mask_name); + PADDLE_ENFORCE(param_map->count(Mask_name) > 0, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + Mask_name)); op_inputs.push_back(param_map->at(Mask_name).value); for (auto in_name : Input_name) { - IR_ENFORCE(param_map->count(in_name) > 0, - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - in_name); + PADDLE_ENFORCE(param_map->count(in_name) > 0, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + in_name)); op_inputs.push_back(param_map->at(in_name).value); } @@ -2073,7 +2127,7 @@ struct SelectInputOpTranscriber : public OpTranscriber { 0, undefined_prefix.size()) == undefined_prefix) { // do nothing } else { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "select_input only support same type or DenseTensorType with " "only different dim, but get dtype:[%s, %s], layout:[%s, %s], " "lod:[%s, %s], offset:[%s, %s].", @@ -2084,7 +2138,7 @@ struct SelectInputOpTranscriber : public OpTranscriber { tensor1.lod(), tensor2.lod(), tensor1.offset(), - tensor2.offset()); + tensor2.offset())); } auto undefined_var_type = tensor1; @@ -2094,11 +2148,13 @@ struct SelectInputOpTranscriber : public OpTranscriber { } auto undefine_value = op_inputs[1 + undefined_var_index]; - IR_ENFORCE( + PADDLE_ENFORCE( undefine_value.defining_op()->isa(), - "undefined_var %s should be generated by assign_value, but got %s", - Input_name[undefined_var_index], - undefine_value.defining_op()); + platform::errors::InvalidArgument( + "undefined_var %s should be generated by assign_value, but got " + "%s", + Input_name[undefined_var_index], + undefine_value.defining_op())); undefine_value.set_type(target_var_type); undefine_value.defining_op()->set_attribute( @@ -2135,11 +2191,11 @@ struct SelectInputOpTranscriber : public OpTranscriber { tensor1.lod(), tensor1.offset())); } else { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "select_input only support same type or DenseTensorType with only " "different dim, now is %s != %s.", input1, - input2); + input2)); } pir::Operation* operation = pir::Operation::Create( @@ -2163,15 +2219,17 @@ struct SelectOutputOpTranscriber : public OpTranscriber { std::vector op_inputs = {}; auto 
Mask_name = op_desc.Input("Mask")[0]; auto& Input_name = op_desc.Input("X")[0]; - IR_ENFORCE(param_map->count(Mask_name) > 0, - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - Mask_name); + PADDLE_ENFORCE(param_map->count(Mask_name) > 0, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + Mask_name)); op_inputs.push_back(param_map->at(Mask_name).value); - IR_ENFORCE(param_map->count(Input_name) > 0, - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - Input_name); + PADDLE_ENFORCE(param_map->count(Input_name) > 0, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + Input_name)); op_inputs.push_back(param_map->at(Input_name).value); pir::AttributeMap attribute_map; @@ -2180,8 +2238,9 @@ struct SelectOutputOpTranscriber : public OpTranscriber { OpOutputMapping arg_to_idx; OpOutputTypeList op_output_types; auto Out_names = op_desc.Output("Out"); - IR_ENFORCE(Out_names.size() == 2, - "Expected SelectOutput's output size is 2."); + PADDLE_ENFORCE(Out_names.size() == 2, + platform::errors::InvalidArgument( + "Expected SelectOutput's output size is 2.")); for (size_t idx = 0; idx < Out_names.size(); idx++) { VarDesc* var = op_desc.Block()->FindVarRecursive(Out_names[idx]); arg_to_idx[var->Name()] = {idx, 0}; @@ -2210,23 +2269,28 @@ pir::Value TranslateNumClassesForOneHot(pir::IrContext* ctx, if (op_desc.HasInput(legacy_tensor_name) && !op_desc.Input(legacy_tensor_name).empty()) { legacy_vars = op_desc.Input(legacy_tensor_name); - IR_ENFORCE(legacy_vars.size() == 1, - "depth_tensor input of one hot MUST be a tensor"); + PADDLE_ENFORCE(legacy_vars.size() == 1, + platform::errors::InvalidArgument( + "depth_tensor input of one hot MUST be a tensor")); auto var_name = legacy_vars[0]; - IR_ENFORCE(legacy_vars.size() == 1, - "depth_tensor input of one hot MUST be a tensor"); - IR_ENFORCE(param_map->count(legacy_vars[0]), - "%s should be existed in one_hot_v2 as input depth_tensor.", - legacy_vars[0]); + PADDLE_ENFORCE(legacy_vars.size() == 1, + platform::errors::InvalidArgument( + "depth_tensor input of one hot MUST be a tensor")); + PADDLE_ENFORCE( + param_map->count(legacy_vars[0]), + platform::errors::InvalidArgument( + "%s should be existed in one_hot_v2 as input depth_tensor.", + legacy_vars[0])); auto defining_info = param_map->at(legacy_vars[0]); return defining_info.value; } auto& attribute_translator = AttributeTranslator::instance(); if (!op_desc.HasAttr(legacy_attr_name)) { - IR_THROW("Op %s arg %s should not be zero size", - op_desc.Type(), - legacy_attr_name); + PADDLE_THROW( + phi::errors::InvalidArgument("Op %s arg %s should not be zero size", + op_desc.Type(), + legacy_attr_name)); } paddle::framework::Attribute legacy_attr = op_desc.GetAttr(legacy_attr_name); VLOG(10) << "[" << op_desc.Type() << "][attribute]" @@ -2251,14 +2315,16 @@ struct OneHotTranscriber : public OpTranscriber { pir::Attribute TranslateDtypeForArange(pir::IrContext* ctx, const OpDesc& op_desc, const OpAttributeInfo& attr_info) { - IR_ENFORCE(op_desc.Input("Start").size() == 1, - "[op:%s] Input [Start]'s size should be equal to 1", - op_desc.Type()); + PADDLE_ENFORCE( + op_desc.Input("Start").size() == 1, + platform::errors::InvalidArgument( + "[op:%s] Input [Start]'s size should be equal to 1", op_desc.Type())); auto var_desc = op_desc.Block()->FindVarRecursive(op_desc.Input("Start")[0]); - IR_ENFORCE(var_desc != nullptr, - "[op:%s] Input %s should not be null", - op_desc.Type(), 
- op_desc.Input("Start")[0]); + PADDLE_ENFORCE( + var_desc != nullptr, + platform::errors::InvalidArgument("[op:%s] Input %s should not be null", + op_desc.Type(), + op_desc.Input("Start")[0])); auto start_proto_dtype = var_desc->GetDataType(); auto start_phi_dtype = phi::TransToPhiDataType(start_proto_dtype); auto dtype_attr = @@ -2322,15 +2388,18 @@ struct ElementwiseTranscriber : public OpTranscriber { } auto x_names = op_desc.Input("X", true); - IR_ENFORCE(x_names.size() == 1, - "Expected op[%s]'s input X has only 1 variable, but got %d", - op_desc.Type(), - x_names.size()); + PADDLE_ENFORCE( + x_names.size() == 1, + platform::errors::InvalidArgument( + "Expected op[%s]'s input X has only 1 variable, but got %d", + op_desc.Type(), + x_names.size())); auto x_name = x_names[0]; - IR_ENFORCE(param_map->count(x_name) > 0, - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - x_name); + PADDLE_ENFORCE(param_map->count(x_name) > 0, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + x_name)); auto x_defining_info = param_map->at(x_name); if (x_defining_info.generated_by_vector) { InsertSliceOperationForTarget( @@ -2338,30 +2407,34 @@ struct ElementwiseTranscriber : public OpTranscriber { x_defining_info = param_map->at(x_name); } pir::Value x_value = x_defining_info.value; - IR_ENFORCE(x_value, - "Expected op[%s]'s input %s is not null", - op_desc.Type(), - x_name); + PADDLE_ENFORCE( + x_value != nullptr, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s is not null", op_desc.Type(), x_name)); pir::Type x_type = x_value.type(); - IR_ENFORCE(x_type.isa(), - "Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - x_name, - x_type); + PADDLE_ENFORCE(x_type.isa(), + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s is DenseTensor but got %s", + op_desc.Type(), + x_name, + x_type)); dialect::DenseTensorType x_tensor_type = x_type.dyn_cast(); std::vector x_shape = common::vectorize(x_tensor_type.dims()); auto y_names = op_desc.Input("Y", true); - IR_ENFORCE(y_names.size() == 1, - "Expected op[%s]'s input Y has only 1 variable, but got %d", - op_desc.Type(), - y_names.size()); + PADDLE_ENFORCE( + y_names.size() == 1, + platform::errors::InvalidArgument( + "Expected op[%s]'s input Y has only 1 variable, but got %d", + op_desc.Type(), + y_names.size())); auto y_name = y_names[0]; - IR_ENFORCE(param_map->count(y_name) > 0, - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - y_name); + PADDLE_ENFORCE(param_map->count(y_name) > 0, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + y_name)); auto y_defining_info = param_map->at(y_name); if (y_defining_info.generated_by_vector) { InsertSliceOperationForTarget( @@ -2369,16 +2442,17 @@ struct ElementwiseTranscriber : public OpTranscriber { y_defining_info = param_map->at(y_name); } pir::Value y_value = y_defining_info.value; - IR_ENFORCE(y_value, - "Expected op[%s]'s input %s is not null", - op_desc.Type(), - y_name); + PADDLE_ENFORCE( + y_value != nullptr, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s is not null", op_desc.Type(), y_name)); pir::Type y_type = y_value.type(); - IR_ENFORCE(y_type.isa(), - "Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - y_name, - y_type); + PADDLE_ENFORCE(y_type.isa(), + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s is DenseTensor but got %s", + op_desc.Type(), + y_name, + 
y_type)); dialect::DenseTensorType y_tensor_type = y_type.dyn_cast(); std::vector y_shape = common::vectorize(y_tensor_type.dims()); @@ -2392,11 +2466,13 @@ struct ElementwiseTranscriber : public OpTranscriber { // x.rank=y.rank return {x_value, y_value}; } - IR_ENFORCE(append_size > 0, - "Expected op[%s] have append size > 0 with axis=%d but got %d", - op_desc.Type(), - axis, - append_size); + PADDLE_ENFORCE( + append_size > 0, + platform::errors::InvalidArgument( + "Expected op[%s] have append size > 0 with axis=%d but got %d", + op_desc.Type(), + axis, + append_size)); pir::Builder builder(ctx, block); pir::Value y_new; @@ -2438,9 +2514,9 @@ struct GradAddOpTranscriber : public ElementwiseTranscriber { } const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op assign_value should have corresponding OpInfo " - "pd_op.assign_value_"); + "pd_op.assign_value_")); } return op_info; @@ -2465,16 +2541,18 @@ struct ElementwiseGradTranscriber : public OpTranscriber { if (y_grad_output.size() < 1) { return; } - IR_ENFORCE( + PADDLE_ENFORCE( y_grad_output.size() == 1, - "Expected op[%s]'s output Y@GRAD has only 1 variable, but got %d", - op_desc.Type(), - y_grad_output.size()); + platform::errors::InvalidArgument( + "Expected op[%s]'s output Y@GRAD has only 1 variable, but got %d", + op_desc.Type(), + y_grad_output.size())); const auto& y_grad_var_name = y_grad_output[0]; auto idx_iter = arg_to_idx.find(y_grad_var_name); if (idx_iter == arg_to_idx.end()) { - IR_THROW("op[%s] should have got its y_grad", op_desc.Type()); + PADDLE_THROW(phi::errors::InvalidArgument( + "op[%s] should have got its y_grad", op_desc.Type())); } auto [idx_in_op, idx_in_vec] = idx_iter->second; VLOG(10) << "[output recording]" @@ -2483,22 +2561,24 @@ struct ElementwiseGradTranscriber : public OpTranscriber { auto y_names = op_desc.Input("Y", true); auto y_name = y_names[0]; - IR_ENFORCE(param_map->count(y_name) > 0, - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - y_name); + PADDLE_ENFORCE(param_map->count(y_name) > 0, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + y_name)); auto y_defining_info = param_map->at(y_name); pir::Value y_value = y_defining_info.value; - IR_ENFORCE(y_value, - "Expected op[%s]'s input %s is not null", - op_desc.Type(), - y_name); + PADDLE_ENFORCE( + y_value != nullptr, + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s is not null", op_desc.Type(), y_name)); pir::Type y_type = y_value.type(); - IR_ENFORCE(y_type.isa(), - "Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - y_name, - y_type); + PADDLE_ENFORCE(y_type.isa(), + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s is DenseTensor but got %s", + op_desc.Type(), + y_name, + y_type)); dialect::DenseTensorType y_tensor_type = y_type.dyn_cast(); @@ -2506,11 +2586,12 @@ struct ElementwiseGradTranscriber : public OpTranscriber { // if y_grad' shape is same with y, we don't need a reshape pir::Type y_grad_type = value.type(); - IR_ENFORCE(y_grad_type.isa(), - "Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - y_grad_var_name, - y_grad_type); + PADDLE_ENFORCE(y_grad_type.isa(), + platform::errors::InvalidArgument( + "Expected op[%s]'s input %s is DenseTensor but got %s", + op_desc.Type(), + y_grad_var_name, + y_grad_type)); dialect::DenseTensorType y_grad_tensor_type = y_grad_type.dyn_cast(); if 
(y_grad_tensor_type.dims() == y_tensor_type.dims()) { @@ -2537,9 +2618,10 @@ struct SetValueOpTranscriber : public OpTranscriber { op_normalizer.GetLegacyAttrName(op_desc.Type(), input_info.name); if (!op_desc.HasAttr(legacy_attr_name)) { - IR_THROW("Op %s arg %s should not be zero size", - op_desc.Type(), - legacy_attr_name); + PADDLE_THROW( + phi::errors::InvalidArgument("Op %s arg %s should not be zero size", + op_desc.Type(), + legacy_attr_name)); } framework::Attribute legacy_attr = op_desc.GetAttr(legacy_attr_name); VLOG(10) << "[" << op_desc.Type() << "][attribute]" @@ -2559,9 +2641,9 @@ struct SetValueWithTensorOpTranscriber : public SetValueOpTranscriber { std::string target_op_name = dialect::SetValueWithTensorOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op set_value should have corresponding OpInfo " - "pd_op.set_value_with_tensor"); + "pd_op.set_value_with_tensor")); } return op_info; @@ -2579,13 +2661,15 @@ struct SetValueWithTensorOpTranscriber : public SetValueOpTranscriber { const OpInputInfo& info, pir::Block* block) -> pir::Value { std::vector legacy_input_vars; - IR_ENFORCE(op_desc.HasInput("ValueTensor"), - "[set_value] should have ValueTensor"); + PADDLE_ENFORCE(op_desc.HasInput("ValueTensor"), + platform::errors::InvalidArgument( + "[set_value] should have ValueTensor")); legacy_input_vars = op_desc.Input("ValueTensor", true); - IR_ENFORCE( + PADDLE_ENFORCE( legacy_input_vars.size() == 1u, - "[set_value][ValueTensor] should only have 1 variable, but got %d", - legacy_input_vars.size()); + platform::errors::InvalidArgument("[set_value][ValueTensor] should " + "only have 1 variable, but got %d", + legacy_input_vars.size())); auto var_name = legacy_input_vars[0]; auto defining_info = (*param_map)[var_name]; if (defining_info.generated_by_vector) { @@ -2604,9 +2688,9 @@ struct SetValueGradOpTranscriber : public SetValueWithTensorOpTranscriber { std::string target_op_name = dialect::SetValueWithTensorGradOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op set_value_grad should have corresponding OpInfo " - "pd_op.set_value_with_tensor_grad"); + "pd_op.set_value_with_tensor_grad")); } return op_info; @@ -2681,10 +2765,11 @@ struct FusedFeedForwardOpTranscriber : public OpTranscriber { ctx, param_map, op_desc, operation, arg_to_idx); if (op_desc.HasOutput("Out")) { const auto& output_vars = op_desc.Output("Out"); - IR_ENFORCE(output_vars.size() == 1, - "Expected op[%s]'s Out has only 1 var but got %s", - op_desc.Type(), - output_vars.size()); + PADDLE_ENFORCE(output_vars.size() == 1, + platform::errors::InvalidArgument( + "Expected op[%s]'s Out has only 1 var but got %s", + op_desc.Type(), + output_vars.size())); auto output_var = output_vars[0]; auto fused_feedforward_op = operation->dyn_cast(); @@ -2700,9 +2785,9 @@ struct ShareBufferOpTranscriber : public OpTranscriber { std::string target_op_name = dialect::ShareDataOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op share_buffer should have corresponding OpInfo " - "pd_op.share_data"); + "pd_op.share_data")); } return op_info; @@ -2724,10 +2809,10 @@ struct RandIntOpTranscriber : public OpTranscriber { const auto& legacy_output_vars = op_desc.Output(legacy_output_name); auto& var_name = legacy_output_vars[0]; VarDesc* 
var = block->FindVarRecursive(var_name); - IR_ENFORCE(var != nullptr, - "[op:%s] Output %s should not be null", - op_desc.Type(), - var_name); + PADDLE_ENFORCE( + var != nullptr, + platform::errors::InvalidArgument( + "[op:%s] Output %s should not be null", op_desc.Type(), var_name)); int dtype_attr_val = PADDLE_GET_CONST(int, op_desc.GetAttr("dtype")); paddle::framework::proto::VarType::Type var_type = @@ -2842,9 +2927,9 @@ struct FusedElemwiseAddActivationGradOpTranscriber const OpDesc& op_desc) override { const auto inter_out_grad = op_desc.Output("IntermediateOut@GRAD"); if (inter_out_grad.size() > 0) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "pd_op.fused_elemwise_add_activation_grad doesn't have " - "Intermediate_out_grad output"); + "Intermediate_out_grad output")); } return OpTranscriber::LookUpOpInfo(ctx, op_desc); @@ -2857,9 +2942,9 @@ struct PartialSumOpTranscriber : public OpTranscriber { std::string target_op_name = "pd_op.partial_sum"; const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op partial_sum should have corresponding OpInfo " - "pd_op.partial_sum"); + "pd_op.partial_sum")); } return op_info; } @@ -2876,10 +2961,10 @@ struct MatrixRankOpTranscriber : public OpTranscriber { } const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op matrix_rank should have corresponding OpInfo pd_op.matrix_rank " "or " - "pd_op.matrix_rank_tol."); + "pd_op.matrix_rank_tol.")); } return op_info; } @@ -2891,9 +2976,9 @@ struct LodArrayLengthOpTranscriber : public OpTranscriber { std::string target_op_name = dialect::ArrayLengthOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op lod_array_length should have corresponding OpInfo " - "pd_op.array_length"); + "pd_op.array_length")); } return op_info; @@ -2911,17 +2996,21 @@ struct LodArrayLengthOpTranscriber : public OpTranscriber { const OpInputInfo& info, pir::Block* block) -> pir::Value { VLOG(10) << "[" << op_desc.Type() << "][input `array`]"; - IR_ENFORCE(op_desc.HasInput("X"), - "Op lod_array_length should have input `X` but not found"); + PADDLE_ENFORCE( + op_desc.HasInput("X"), + platform::errors::InvalidArgument( + "Op lod_array_length should have input `X` but not found")); const auto& vars = op_desc.Input("X"); - IR_ENFORCE(vars.size() == 1, - "Input `X` should be one variable %s", - op_desc.Type()); + PADDLE_ENFORCE( + vars.size() == 1, + platform::errors::InvalidArgument( + "Input `X` should be one variable %s", op_desc.Type())); VLOG(10) << "[" << op_desc.Type() << "][input `x`] from " << vars[0]; const VarDesc* var_desc = op_desc.Block()->FindVarRecursive(vars[0]); - IR_ENFORCE(var_desc != nullptr, - "VarDesc `%s` should be exist in legacy program", - vars[0]); + PADDLE_ENFORCE( + var_desc != nullptr, + platform::errors::InvalidArgument( + "VarDesc `%s` should be exist in legacy program", vars[0])); auto defining_value = pir::Value(nullptr); if (param_map->count(var_desc->Name())) { VLOG(10) << "[" << op_desc.Type() << "][input `x`] var: " << vars[0] @@ -2944,9 +3033,9 @@ struct WriteArrayOpTranscriber : public OpTranscriber { std::string target_op_name = dialect::ArrayWrite_Op::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op write_to_array 
should have corresponding OpInfo " - "pd_op.array_write_"); + "pd_op.array_write_")); } return op_info; @@ -2964,17 +3053,21 @@ struct WriteArrayOpTranscriber : public OpTranscriber { const OpInputInfo& info, pir::Block* block) -> pir::Value { VLOG(10) << "[" << op_desc.Type() << "][input `array`]"; - IR_ENFORCE(op_desc.HasOutput("Out"), - "Op write_to_array should have output `Out` but not found"); + PADDLE_ENFORCE( + op_desc.HasOutput("Out"), + platform::errors::InvalidArgument( + "Op write_to_array should have output `Out` but not found")); const auto& vars = op_desc.Output("Out"); - IR_ENFORCE(vars.size() == 1, - "Output `Out` should be one variable %s", - op_desc.Type()); + PADDLE_ENFORCE( + vars.size() == 1, + platform::errors::InvalidArgument( + "Output `Out` should be one variable %s", op_desc.Type())); VLOG(10) << "[" << op_desc.Type() << "][input `array`] from " << vars[0]; const VarDesc* var_desc = op_desc.Block()->FindVarRecursive(vars[0]); - IR_ENFORCE(var_desc != nullptr, - "VarDesc `%s` should be exist in legacy program", - vars[0]); + PADDLE_ENFORCE( + var_desc != nullptr, + platform::errors::InvalidArgument( + "VarDesc `%s` should be exist in legacy program", vars[0])); auto defining_value = pir::Value(nullptr); if (param_map->count(var_desc->Name())) { VLOG(10) << "[" << op_desc.Type() << "][input `array`] var: " << vars[0] @@ -2997,9 +3090,9 @@ struct ReadArrayOpTranscriber : public OpTranscriber { std::string target_op_name = dialect::ArrayReadOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op read_from_array should have corresponding OpInfo " - "pd_op.read_array"); + "pd_op.read_array")); } return op_info; @@ -3011,30 +3104,33 @@ struct SliceOpTranscriber : public OpTranscriber { const OpDesc& op_desc) override { std::string target_op_name = dialect::SliceOp::name(); - IR_ENFORCE(op_desc.HasInput("Input"), - "op %s should have input `Input`", - op_desc.Type()); + PADDLE_ENFORCE(op_desc.HasInput("Input"), + platform::errors::InvalidArgument( + "op %s should have input `Input`", op_desc.Type())); const auto& input_vars = op_desc.Input("Input"); - IR_ENFORCE(input_vars.size() == 1, - "op %s should have one input `Input`, but got %d.", - op_desc.Type(), - input_vars.size()); + PADDLE_ENFORCE(input_vars.size() == 1, + platform::errors::InvalidArgument( + "op %s should have one input `Input`, but got %d.", + op_desc.Type(), + input_vars.size())); const auto* input_var = op_desc.Block()->FindVarRecursive(input_vars[0]); if (input_var->GetType() == framework::proto::VarType::LOD_TENSOR_ARRAY) { - IR_ENFORCE(op_desc.HasOutput("Out"), - "op %s should have input `Out`", - op_desc.Type()); + PADDLE_ENFORCE(op_desc.HasOutput("Out"), + platform::errors::InvalidArgument( + "op %s should have input `Out`", op_desc.Type())); const auto& output_vars = op_desc.Output("Out"); - IR_ENFORCE(output_vars.size() == 1, - "op %s should have one input `Out`, but got %d.", - op_desc.Type(), - output_vars.size()); + PADDLE_ENFORCE(output_vars.size() == 1, + platform::errors::InvalidArgument( + "op %s should have one input `Out`, but got %d.", + op_desc.Type(), + output_vars.size())); const auto* output_var = op_desc.Block()->FindVarRecursive(output_vars[0]); - IR_ENFORCE(output_var != nullptr, - "op %s should have non-empty output `%s`.", - op_desc.Type(), - output_vars[0]); + PADDLE_ENFORCE(output_var != nullptr, + platform::errors::InvalidArgument( + "op %s should have non-empty output `%s`.", + 
op_desc.Type(), + output_vars[0])); if (output_var->GetType() == framework::proto::VarType::LOD_TENSOR) { target_op_name = dialect::SliceArrayDenseOp::name(); @@ -3045,7 +3141,8 @@ struct SliceOpTranscriber : public OpTranscriber { const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW("Op slice should have corresponding OpInfo %s", target_op_name); + PADDLE_THROW(phi::errors::InvalidArgument( + "Op slice should have corresponding OpInfo %s", target_op_name)); } return op_info; @@ -3062,10 +3159,11 @@ struct LegacyMatmulOpTranscriber : public OpTranscriber { } float v = PADDLE_GET_CONST(float, op_desc.GetAttr(attr_name)); if (abs(v - expected_value) > 1e-6f) { - IR_THROW("Expected op[%s]'s attr %s is not %f", - op_desc.Type(), - attr_name, - v); + PADDLE_THROW( + phi::errors::InvalidArgument("Expected op[%s]'s attr %s is not %f", + op_desc.Type(), + attr_name, + v)); } }; @@ -3076,9 +3174,9 @@ struct LegacyMatmulOpTranscriber : public OpTranscriber { std::string target_op_name = dialect::MatmulOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - IR_THROW( + PADDLE_THROW(phi::errors::InvalidArgument( "Op read_from_array should have corresponding OpInfo " - "pd_op.read_array"); + "pd_op.read_array")); } return op_info; @@ -3098,14 +3196,17 @@ struct LegacyMatmulOpTranscriber : public OpTranscriber { } const auto& output_vars = op_desc.Output("Out"); - IR_ENFORCE(output_vars.size() == 1, - "Expected op[%s]'s output `Out` has only 1 variable, but got %d", - op_desc.Type(), - output_vars.size()); + PADDLE_ENFORCE( + output_vars.size() == 1, + platform::errors::InvalidArgument( + "Expected op[%s]'s output `Out` has only 1 variable, but got %d", + op_desc.Type(), + output_vars.size())); auto idx_iter = arg_to_idx.find(output_vars[0]); if (idx_iter == arg_to_idx.end()) { - IR_THROW("op[%s] should have got its `Out`", op_desc.Type()); + PADDLE_THROW(phi::errors::InvalidArgument( + "op[%s] should have got its `Out`", op_desc.Type())); } auto [idx_in_op, idx_in_vec] = idx_iter->second; VLOG(10) << "[output recording]" diff --git a/paddle/fluid/pir/dialect/operator/ir/ops.yaml b/paddle/fluid/pir/dialect/operator/ir/ops.yaml index f794f80b65f39..1ce1f4d0c1989 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops.yaml +++ b/paddle/fluid/pir/dialect/operator/ir/ops.yaml @@ -1181,6 +1181,16 @@ func : partial_recv data_type : dtype +- op : partial_sum + args : (Tensor[] x, int start_index = 0, int length = -1) + output : Tensor(out) + infer_meta : + func : PartialSumInferMeta + kernel : + func : partial_sum + data_type : x + backward : partial_sum_grad + - op : pool2d args : (Tensor x, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) output : Tensor(out) @@ -1564,16 +1574,6 @@ backward : sum_grad interfaces : paddle::dialect::InferSymbolicShapeInterface -- op : partial_sum - args : (Tensor[] x, int start_index = 0, int length = -1) - output : Tensor(out) - infer_meta : - func : PartialSumInferMeta - kernel : - func : partial_sum - data_type : x - backward : partial_sum_grad - - op : swish args : (Tensor x) output : Tensor(out) diff --git a/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml b/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml index 68a5d9ee9c506..ff4a7cc356949 100644 --- a/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml +++ 
b/paddle/fluid/pir/dialect/operator/ir/ops_backward.yaml @@ -580,6 +580,16 @@ composite : pad_grad(x, out_grad, paddings, pad_value, x_grad) backward : pad_double_grad +- backward_op : partial_sum_grad + forward : partial_sum (Tensor[] x, int start_index = 0, int length = -1) -> Tensor(out) + args : (Tensor[] x, Tensor out_grad, int start_index, int length) + output : Tensor[](x_grad){x.size()} + infer_meta : + func : PartialSumGradInferMeta + param : [x] + kernel : + func : partial_sum_grad + - backward_op : pool2d_double_grad forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x) args : (Tensor x, Tensor grad_x_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) @@ -846,16 +856,6 @@ no_need_buffer : x backward : sum_double_grad -- backward_op : partial_sum_grad - forward : partial_sum (Tensor[] x, int start_index = 0, int length = -1) -> Tensor(out) - args : (Tensor[] x, Tensor out_grad, int start_index, int length) - output : Tensor[](x_grad){x.size()} - infer_meta : - func : PartialSumGradInferMeta - param : [x] - kernel : - func : partial_sum_grad - - backward_op : swish_grad forward : swish (Tensor x) -> Tensor(out) args : (Tensor x, Tensor out_grad) diff --git a/paddle/phi/infermeta/backward.cc b/paddle/phi/infermeta/backward.cc index 8818887f535d9..2a309755123af 100644 --- a/paddle/phi/infermeta/backward.cc +++ b/paddle/phi/infermeta/backward.cc @@ -879,11 +879,11 @@ void NceGradInferMeta(const MetaTensor& input, void PartialSumGradInferMeta(const std::vector& xs, std::vector x_grads) { auto input_num = xs.size(); - for(size_t i=0; i< input_num; i++){ + for (size_t i = 0; i < input_num; i++) { auto x_dims = xs[i]->dims(); x_grads[i]->set_dims(x_dims); x_grads[i]->set_dtype(xs[i]->dtype()); - } + } } void NllLossGradInferMeta(const MetaTensor& x, diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc index 2307f7e840677..df7d071a5ddb7 100644 --- a/paddle/phi/infermeta/unary.cc +++ b/paddle/phi/infermeta/unary.cc @@ -4472,7 +4472,6 @@ void PartialSumInferMeta(const std::vector& xs, // when length is -1, need make sure all dimensions to be added are the same for (size_t i = 0; i < inputs_num; i++) { auto x_dim = xs[i]->dims(); - VLOG(1) << "inputs_dims:" << x_dim; PADDLE_ENFORCE_EQ( x_dim.size(), diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h index 9c638d97c4d2b..bd19499705d82 100644 --- a/paddle/phi/infermeta/unary.h +++ b/paddle/phi/infermeta/unary.h @@ -703,7 +703,6 @@ void PartialSumInferMeta(const std::vector& xs, MetaTensor* out, MetaConfig config = MetaConfig()); - void SvdInferMeta(const MetaTensor& x, bool full_matrices, MetaTensor* u, From 005653ac4a315c62aeeb077afaf865b921ddb25f Mon Sep 17 00:00:00 2001 From: cmcandy Date: Sun, 17 Mar 2024 15:43:54 +0800 Subject: [PATCH 4/6] format --- .../ir_adaptor/translator/op_translator.cc | 785 ++++++++---------- 1 file changed, 342 insertions(+), 443 deletions(-) diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc index a57de9801bf04..b630eb2e6bc50 100644 --- a/paddle/fluid/ir_adaptor/translator/op_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc @@ -198,11 +198,9 @@ 
inline pir::Operation* InsertFullOperationForAttributeInput( inline pir::Operation* InsertFullArrayOperationForAttributeInput( pir::IrContext* ctx, pir::Block* block, pir::Attribute attr) { - PADDLE_ENFORCE( - attr.isa(), - platform::errors::InvalidArgument( - "Encounter non IntArray type when trying to insert IntArray " - "mutable attribute")); + IR_ENFORCE(attr.isa(), + "Encounter non IntArray type when trying to insert IntArray " + "mutable attribute"); phi::IntArray int_array = attr.dyn_cast().data(); pir::Builder builder(ctx, block); dialect::FullIntArrayOp full_int_array_op = @@ -286,10 +284,9 @@ pir::OpInfo OpTranscriber::LookUpOpInfo(pir::IrContext* ctx, << target_op_name; auto op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name)); + IR_THROW("Op %d should have corresponding OpInfo %d", + op_desc.Type(), + target_op_name); } if (!paddle::dialect::HaveOpToMultiKernelsMap( @@ -316,23 +313,20 @@ pir::OpInfo OpTranscriber::LookUpOpInfo(pir::IrContext* ctx, std::string legacy_input_name = op_normalizer.GetLegacyArgName(op_desc.Type(), info.name); auto legacy_input_vars = op_desc.Input(legacy_input_name, true); - PADDLE_ENFORCE( - legacy_input_vars.size() <= 1, - platform::errors::InvalidArgument( - "Do not support duplicable tensor input, when op have multi " - "kernels. OP is %s", - op_desc.Type())); + IR_ENFORCE(legacy_input_vars.size() <= 1, + "Do not support duplicable tensor input, when op have multi " + "kernels. OP is %s", + op_desc.Type()); if (legacy_input_vars.empty()) { need_inputs_sig.emplace_back(""); continue; } VarDesc* var = op_desc.Block()->FindVarRecursive(legacy_input_vars[0]); - PADDLE_ENFORCE( - var != nullptr, - platform::errors::InvalidArgument("[op:%s] Input %s should not be null", - op_desc.Type(), - legacy_input_vars[0])); + IR_ENFORCE(var != nullptr, + "[op:%s] Input %s should not be null", + op_desc.Type(), + legacy_input_vars[0]); if (var->GetType() == paddle::framework::proto::VarType::LOD_TENSOR) { need_inputs_sig.emplace_back("dense"); @@ -340,10 +334,9 @@ pir::OpInfo OpTranscriber::LookUpOpInfo(pir::IrContext* ctx, paddle::framework::proto::VarType::SELECTED_ROWS) { need_inputs_sig.emplace_back("selected_rows"); } else { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op %d only support dense tensor and selected_rows, but not %d", - op_desc.Type(), - var->GetType())); + IR_THROW("Op %d only support dense tensor and selected_rows, but not %d", + op_desc.Type(), + var->GetType()); } } @@ -371,21 +364,19 @@ pir::OpInfo OpTranscriber::LookUpOpInfo(pir::IrContext* ctx, } } - PADDLE_ENFORCE(!target_op_name.empty(), - platform::errors::InvalidArgument( - "Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name)); + IR_ENFORCE(!target_op_name.empty(), + "Op %d should have corresponding OpInfo %d", + op_desc.Type(), + target_op_name); target_op_name = GetPrefix(ctx, op_desc) + target_op_name; if (IsInplace(op_desc) && *target_op_name.rbegin() != '_') { target_op_name += "_"; } if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name)); + IR_THROW("Op %d should have corresponding OpInfo %d", + op_desc.Type(), + target_op_name); } return op_info; @@ -438,10 +429,9 @@ pir::Value OpTranscriber::GetAttributeAsInput(pir::IrContext* ctx, op_normalizer.GetLegacyAttrName(op_desc.Type(), input_info.name); if 
(!op_desc.HasAttr(legacy_attr_name)) { - PADDLE_THROW( - phi::errors::InvalidArgument("Op %s arg %s should not be zero size", - op_desc.Type(), - legacy_attr_name)); + IR_THROW("Op %s arg %s should not be zero size", + op_desc.Type(), + legacy_attr_name); } paddle::framework::Attribute legacy_attr = op_desc.GetAttr(legacy_attr_name); VLOG(10) << "[" << op_desc.Type() << "][attribute]" @@ -542,11 +532,10 @@ std::vector OpTranscriber::GenerateOperationInput( // Vector if (legacy_input_vars.size() == 1) { VarDesc* var = op_desc.Block()->FindVarRecursive(legacy_input_vars[0]); - PADDLE_ENFORCE(var != nullptr, - platform::errors::InvalidArgument( - "[op:%s] Input %s should not be null", - op_desc.Type(), - legacy_input_vars[0])); + IR_ENFORCE(var != nullptr, + "[op:%s] Input %s should not be null", + op_desc.Type(), + legacy_input_vars[0]); if (var->GetType() == paddle::framework::proto::VarType::LOD_TENSOR_ARRAY) { is_vector = false; @@ -555,17 +544,15 @@ std::vector OpTranscriber::GenerateOperationInput( // if src type is Tensor if (!is_vector) { - PADDLE_ENFORCE(legacy_input_vars.size() == 1u, - platform::errors::InvalidArgument( - "Input %s not found when parsing op %s", - info.name, - op_desc.Type())); - PADDLE_ENFORCE(param_map->count(legacy_input_vars[0]), - platform::errors::InvalidArgument( - "Input [%s: %s] of op [%s] not found in param map", - info.name, - legacy_input_vars[0], - op_desc.Type())); + IR_ENFORCE(legacy_input_vars.size() == 1u, + "Input %s not found when parsing op %s", + info.name, + op_desc.Type()); + IR_ENFORCE(param_map->count(legacy_input_vars[0]), + "Input [%s: %s] of op [%s] not found in param map", + info.name, + legacy_input_vars[0], + op_desc.Type()); auto defining_info = (*param_map)[legacy_input_vars[0]]; op_inputs.push_back(defining_info.value); @@ -606,11 +593,10 @@ OpTranscriber::GenerateOperationOutput(pir::IrContext* ctx, VLOG(10) << "[output translating]" << "[" << op_desc.Type() << "] optional " << info.name << " :" << info.type_name << " " << legacy_output_name; - PADDLE_ENFORCE(info.optional, - platform::errors::InvalidArgument( - "Op %s arg %s should be optional if it can be empty", - op_desc.Type(), - legacy_output_name)); + IR_ENFORCE(info.optional, + "Op %s arg %s should be optional if it can be empty", + op_desc.Type(), + legacy_output_name); op_output_types.emplace_back(nullptr); continue; } @@ -627,11 +613,10 @@ OpTranscriber::GenerateOperationOutput(pir::IrContext* ctx, // Vector if (legacy_output_vars.size() == 1) { VarDesc* var = block->FindVarRecursive(legacy_output_vars[0]); - PADDLE_ENFORCE(var != nullptr, - platform::errors::InvalidArgument( - "[op:%s] Output %s should not be null", - op_desc.Type(), - legacy_output_vars[0])); + IR_ENFORCE(var != nullptr, + "[op:%s] Output %s should not be null", + op_desc.Type(), + legacy_output_vars[0]); if (var->GetType() == paddle::framework::proto::VarType::LOD_TENSOR_ARRAY) { pir::Type translated_var_type = @@ -655,11 +640,10 @@ OpTranscriber::GenerateOperationOutput(pir::IrContext* ctx, auto& var_name = legacy_output_vars[0]; VarDesc* var = block->FindVarRecursive(var_name); - PADDLE_ENFORCE(var != nullptr, - platform::errors::InvalidArgument( - "[op:%s] Output %s should not be null", - op_desc.Type(), - var_name)); + IR_ENFORCE(var != nullptr, + "[op:%s] Output %s should not be null", + op_desc.Type(), + var_name); VLOG(10) << "[output translating]" << "[" << op_desc.Type() << "]" << info.name << " var: " << var_name << " type: " << var->GetType(); @@ -685,11 +669,10 @@ 
OpTranscriber::GenerateOperationOutput(pir::IrContext* ctx, continue; } VarDesc* var = block->FindVarRecursive(var_name); - PADDLE_ENFORCE(var != nullptr, - platform::errors::InvalidArgument( - "[op:%s] Output %s should not be null", - op_desc.Type(), - var_name)); + IR_ENFORCE(var != nullptr, + "[op:%s] Output %s should not be null", + op_desc.Type(), + var_name); VLOG(10) << "[output translating]" << "[" << op_desc.Type() << "]" << info.name << " var: " << var_name << " type: " << var->GetType(); @@ -859,15 +842,13 @@ struct AssignOpTranscriber : public OpTranscriber { const OpDesc& op_desc) override { std::string target_op_name; - PADDLE_ENFORCE(op_desc.HasInput("X"), - platform::errors::InvalidArgument( - "op %s should have input `X`", op_desc.Type())); + IR_ENFORCE( + op_desc.HasInput("X"), "op %s should have input `X`", op_desc.Type()); const auto& input_vars = op_desc.Input("X"); - PADDLE_ENFORCE(input_vars.size() == 1, - platform::errors::InvalidArgument( - "op %s should have one input `X`, but got %d.", - op_desc.Type(), - input_vars.size())); + IR_ENFORCE(input_vars.size() == 1, + "op %s should have one input `X`, but got %d.", + op_desc.Type(), + input_vars.size()); const auto* input_var = op_desc.Block()->FindVarRecursive(input_vars[0]); if (input_var->GetType() == framework::proto::VarType::LOD_TENSOR_ARRAY) { target_op_name = dialect::AssignArray_Op::name(); @@ -877,8 +858,7 @@ struct AssignOpTranscriber : public OpTranscriber { const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op assign should have corresponding OpInfo %s", target_op_name)); + IR_THROW("Op assign should have corresponding OpInfo %s", target_op_name); } return op_info; @@ -955,9 +935,9 @@ struct AssignValueOpTranscriber : public OpTranscriber { std::string target_op_name = "pd_op.assign_value"; const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op assign_value should have corresponding OpInfo " - "pd_op.assign_value")); + "pd_op.assign_value"); } return op_info; @@ -988,8 +968,7 @@ struct AssignValueOpTranscriber : public OpTranscriber { if (op_desc.HasAttr("shape")) { legacy_attr = op_desc.GetAttr("shape"); } else { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op assign_value should have attribute `shape` but not find")); + IR_THROW("Op assign_value should have attribute `shape` but not find"); } pir::Attribute attr_shape = attribute_translator(attr_info_maps.at("shape").type_name, legacy_attr); @@ -998,8 +977,7 @@ struct AssignValueOpTranscriber : public OpTranscriber { if (op_desc.HasAttr("dtype")) { legacy_attr = op_desc.GetAttr("dtype"); } else { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op assign_value should have attribute `dtype` but not find")); + IR_THROW("Op assign_value should have attribute `dtype` but not find"); } pir::Attribute attr_dtype = attribute_translator(attr_info_maps.at("dtype").type_name, legacy_attr); @@ -1027,11 +1005,10 @@ struct AssignValueOpTranscriber : public OpTranscriber { } } - PADDLE_ENFORCE( + IR_ENFORCE( attribute_map.find("values") != attribute_map.end(), - platform::errors::InvalidArgument( - "Op assign_value should have attribute `**_values` or `values` but " - "not find")); + "Op assign_value should have attribute `**_values` or `values` but " + "not find"); TranslateOpDistAttribute(op_desc, &attribute_map); @@ -1079,18 +1056,16 @@ pir::Value TranslateDropOutStateIn(pir::IrContext* ctx, // 
`DropoutState` is a tensor VarDesc* dropout_state = op_desc.Block()->FindVarRecursive(legacy_output_vars[0]); - PADDLE_ENFORCE( - dropout_state != nullptr, - platform::errors::InvalidArgument("[op:%s] Output %s should not be null", - op_desc.Type(), - legacy_output_vars[0])); + IR_ENFORCE(dropout_state != nullptr, + "[op:%s] Output %s should not be null", + op_desc.Type(), + legacy_output_vars[0]); auto& type_translator = TypeTranslator::instance(); pir::Type translated_var_type = type_translator[dropout_state->GetType()](ctx, *dropout_state); - PADDLE_ENFORCE( + IR_ENFORCE( translated_var_type.isa(), - platform::errors::InvalidArgument( - "Unexpected: Rnn Op's output DropoutState should be a DenseTensor")); + "Unexpected: Rnn Op's output DropoutState should be a DenseTensor"); auto tensor_type = translated_var_type.dyn_cast(); pir::Builder builder(ctx, block); @@ -1141,10 +1116,9 @@ struct EmbeddingGradOpTranscriber : public OpTranscriber { << target_op_name; auto op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name)); + IR_THROW("Op %d should have corresponding OpInfo %d", + op_desc.Type(), + target_op_name); } return op_info; @@ -1220,9 +1194,7 @@ struct SplitOpTranscriber : public OpTranscriber { std::vector op_inputs; // process first input auto x_input_vars = op_desc.Input("X"); - PADDLE_ENFORCE( - x_input_vars.size() == 1, - platform::errors::InvalidArgument("x input of split MUST be a tensor")); + IR_ENFORCE(x_input_vars.size() == 1, "x input of split MUST be a tensor"); auto x_defining_info = (*param_map)[x_input_vars[0]]; op_inputs.push_back(x_defining_info.value); @@ -1252,9 +1224,8 @@ struct SplitOpTranscriber : public OpTranscriber { !op_desc.Input("AxisTensor").empty()) { // get axis from input auto axis_var_list = op_desc.Input("AxisTensor"); - PADDLE_ENFORCE(axis_var_list.size() == 1, - platform::errors::InvalidArgument( - "axis tensor input of split MUST be a tensor")); + IR_ENFORCE(axis_var_list.size() == 1, + "axis tensor input of split MUST be a tensor"); auto axis_defining_info = (*param_map)[axis_var_list[0]]; op_inputs.push_back(axis_defining_info.value); } else { @@ -1312,9 +1283,8 @@ struct SplitOpTranscriber : public OpTranscriber { const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op assign_value should have corresponding OpInfo %s.", - target_op_name)); + IR_THROW("Op assign_value should have corresponding OpInfo %s.", + target_op_name); } return op_info; @@ -1405,8 +1375,7 @@ struct AddNOpTranscriber : public OpTranscriber { const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op add_n should have corresponding OpInfo %s", target_op_name)); + IR_THROW("Op add_n should have corresponding OpInfo %s", target_op_name); } return op_info; @@ -1425,9 +1394,9 @@ struct TrilAndTriuOpTranscriber : public OpTranscriber { } const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op tril_triu should have corresponding OpInfo pd_op.tril or " - "pd_op.triu.")); + "pd_op.triu."); } return op_info; @@ -1446,10 +1415,10 @@ struct TrilAndTriuGradOpTranscriber : public OpTranscriber { } const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + 
IR_THROW( "Op tril_triu_grad should have corresponding OpInfo pd_op.tril_grad " "or " - "pd_op.triu_grad.")); + "pd_op.triu_grad."); } return op_info; @@ -1463,32 +1432,27 @@ ValueInfo GetTensorInfoByVarName(const OpDesc& op_desc, const std::vector& names, TranslationContext* param_map, const std::string& var_name) { - PADDLE_ENFORCE( - names.size() == 1, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s has only 1 variable, but got %d", - op_desc.Type(), - var_name, - names.size())); + IR_ENFORCE(names.size() == 1, + "Expected op[%s]'s input %s has only 1 variable, but got %d", + op_desc.Type(), + var_name, + names.size()); const auto& name = names[0]; - PADDLE_ENFORCE( - param_map->count(name) > 0, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s has been parsed", op_desc.Type(), name)); + IR_ENFORCE(param_map->count(name) > 0, + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + name); const auto& defining_info = param_map->at(name); pir::Value value = defining_info.value; - PADDLE_ENFORCE( - value != nullptr, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s is not null", op_desc.Type(), name)); + IR_ENFORCE( + value, "Expected op[%s]'s input %s is not null", op_desc.Type(), name); const pir::Type& type = value.type(); - PADDLE_ENFORCE(type.isa(), - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - name, - type)); + IR_ENFORCE(type.isa(), + "Expected op[%s]'s input %s is DenseTensor but got %s", + op_desc.Type(), + name, + type); dialect::DenseTensorType tensor_type = type.dyn_cast(); @@ -1516,10 +1480,9 @@ struct MulOpTranscriber : public OpTranscriber { const std::string& target_op_name = paddle::dialect::MatmulOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name)); + IR_THROW("Op %d should have corresponding OpInfo %d", + op_desc.Type(), + target_op_name); } return op_info; } @@ -1554,28 +1517,24 @@ struct MulOpTranscriber : public OpTranscriber { const auto& [x_shape, x_tensor_type, x_value] = x_info; - PADDLE_ENFORCE( - x_num_col_dims <= static_cast(x_shape.size()), - platform::errors::InvalidArgument( - "Expected op[%s]'s attr `x_num_col_dims` less than or equal to " - "dim of input X %s, but got %d", - op_desc.Type(), - x_shape.size(), - x_num_col_dims)); + IR_ENFORCE(x_num_col_dims <= static_cast(x_shape.size()), + "Expected op[%s]'s attr `x_num_col_dims` less than or equal to " + "dim of input X %s, but got %d", + op_desc.Type(), + x_shape.size(), + x_num_col_dims); ValueInfo y_info = GetTensorInfoByVarName( op_desc, op_desc.Input("Y", true), param_map, "Y"); const auto& [y_shape, y_tensor_type, y_value] = y_info; - PADDLE_ENFORCE( - y_num_col_dims <= static_cast(y_shape.size()), - platform::errors::InvalidArgument( - "Expected op[%s]'s attr `y_num_col_dims` less than or equal to " - "dim of input Y %s, but got %d", - op_desc.Type(), - y_shape.size(), - y_num_col_dims)); + IR_ENFORCE(y_num_col_dims <= static_cast(y_shape.size()), + "Expected op[%s]'s attr `y_num_col_dims` less than or equal to " + "dim of input Y %s, but got %d", + op_desc.Type(), + y_shape.size(), + y_num_col_dims); pir::Builder builder(ctx, block); @@ -1690,10 +1649,9 @@ struct MulGradOpTranscriber : public OpTranscriber { << target_op_name; const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) 
{ - PADDLE_THROW(phi::errors::InvalidArgument( - "Op %d should have corresponding OpInfo %d", - op_desc.Type(), - target_op_name)); + IR_THROW("Op %d should have corresponding OpInfo %d", + op_desc.Type(), + target_op_name); } return op_info; } @@ -1728,27 +1686,24 @@ struct MulGradOpTranscriber : public OpTranscriber { const auto& [x_shape, x_tensor_type, x_value] = x_info; - PADDLE_ENFORCE( - x_num_col_dims <= static_cast(x_shape.size()), - platform::errors::InvalidArgument( - "Expected op[%s]'s attr `x_num_col_dims` less than or equal to " - "dim of input X %s, but got %d", - op_desc.Type(), - x_shape.size(), - x_num_col_dims)); + IR_ENFORCE(x_num_col_dims <= static_cast(x_shape.size()), + "Expected op[%s]'s attr `x_num_col_dims` less than or equal to " + "dim of input X %s, but got %d", + op_desc.Type(), + x_shape.size(), + x_num_col_dims); ValueInfo y_info = GetTensorInfoByVarName( op_desc, op_desc.Input("Y", true), param_map, "Y"); const auto& [y_shape, y_tensor_type, y_value] = y_info; - PADDLE_ENFORCE(y_num_col_dims <= static_cast(y_shape.size()), - platform::errors::InvalidArgument( - "Expected op[%s]'s attr `y_num_col_dims` less than or " - "equal to dim of input Y %s, but got %d", - op_desc.Type(), - y_shape.size(), - y_num_col_dims)); + IR_ENFORCE(y_num_col_dims <= static_cast(y_shape.size()), + "Expected op[%s]'s attr `y_num_col_dims` less than or equal to " + "dim of input Y %s, but got %d", + op_desc.Type(), + y_shape.size(), + y_num_col_dims); ValueInfo out_grad_info = GetTensorInfoByVarName( op_desc, op_desc.Input("Out@GRAD", true), param_map, "Out@GRAD"); @@ -1826,19 +1781,16 @@ struct MulGradOpTranscriber : public OpTranscriber { auto gradReshape = [&](const std::string& var_name) { const auto& grad_output = op_desc.Output(var_name); - PADDLE_ENFORCE( - grad_output.size() == 1, - platform::errors::InvalidArgument( - "Expected op[%s]'s output %s has only 1 variable, but got %d", - op_desc.Type(), - var_name, - grad_output.size())); + IR_ENFORCE(grad_output.size() == 1, + "Expected op[%s]'s output %s has only 1 variable, but got %d", + op_desc.Type(), + var_name, + grad_output.size()); const auto& grad_var_name = grad_output[0]; auto idx_iter = arg_to_idx.find(grad_var_name); if (idx_iter == arg_to_idx.end()) { - PADDLE_THROW(phi::errors::InvalidArgument( - "op[%s] should have got its %s", op_desc.Type(), var_name)); + IR_THROW("op[%s] should have got its %s", op_desc.Type(), var_name); } auto [idx_in_op, idx_in_vec] = idx_iter->second; VLOG(10) << "[output recording]" @@ -1847,29 +1799,26 @@ struct MulGradOpTranscriber : public OpTranscriber { VarDesc* var_desc = op_desc.Block()->FindVarRecursive( op_desc.Input(var_name.substr(0, 1))[0]); - PADDLE_ENFORCE(var_desc != nullptr, - platform::errors::InvalidArgument( - "[op:%s] Input %s should not be null", - op_desc.Type(), - var_name.substr(0, 1))); + IR_ENFORCE(var_desc != nullptr, + "[op:%s] Input %s should not be null", + op_desc.Type(), + var_name.substr(0, 1)); std::vector shape = var_desc->GetShape(); DenseTensorTypeStorage::Dim dim = common::make_ddim(shape); pir::Value value_res = operation->result(idx_in_op); auto reshape_op = builder.Build(value_res, shape); - PADDLE_ENFORCE(value_res != nullptr, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s is not null", - op_desc.Type(), - grad_var_name)); + IR_ENFORCE(value_res, + "Expected op[%s]'s input %s is not null", + op_desc.Type(), + grad_var_name); pir::Type grad_type = value_res.type(); - PADDLE_ENFORCE(grad_type.isa(), - 
platform::errors::InvalidArgument( - "Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - grad_var_name, - grad_type)); + IR_ENFORCE(grad_type.isa(), + "Expected op[%s]'s input %s is DenseTensor but got %s", + op_desc.Type(), + grad_var_name, + grad_type); dialect::DenseTensorType grad_tensor_type = grad_type.dyn_cast(); @@ -1895,8 +1844,7 @@ struct FillConstant2FullTranscriber : public OpTranscriber { const OpDesc& op_desc) override { const auto& op_info = ctx->GetRegisteredOpInfo(dialect::FullOp::name()); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op fill_constant should have corresponding OpInfo pd_op.full")); + IR_THROW("Op fill_constant should have corresponding OpInfo pd_op.full"); } return op_info; @@ -1977,9 +1925,9 @@ struct FillConstant2FullWithTensorTranscriber : public OpTranscriber { const OpDesc& op_desc) override { const auto& op_info = ctx->GetRegisteredOpInfo("pd_op.full_with_tensor"); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op fill_constant should have corresponding OpInfo " - "pd_op.full_with_tensor")); + "pd_op.full_with_tensor"); } return op_info; @@ -2078,18 +2026,16 @@ struct SelectInputOpTranscriber : public OpTranscriber { std::vector op_inputs = {}; auto Mask_name = op_desc.Input("Mask")[0]; auto& Input_name = op_desc.Input("X"); - PADDLE_ENFORCE(param_map->count(Mask_name) > 0, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - Mask_name)); + IR_ENFORCE(param_map->count(Mask_name) > 0, + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + Mask_name); op_inputs.push_back(param_map->at(Mask_name).value); for (auto in_name : Input_name) { - PADDLE_ENFORCE(param_map->count(in_name) > 0, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - in_name)); + IR_ENFORCE(param_map->count(in_name) > 0, + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + in_name); op_inputs.push_back(param_map->at(in_name).value); } @@ -2127,7 +2073,7 @@ struct SelectInputOpTranscriber : public OpTranscriber { 0, undefined_prefix.size()) == undefined_prefix) { // do nothing } else { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "select_input only support same type or DenseTensorType with " "only different dim, but get dtype:[%s, %s], layout:[%s, %s], " "lod:[%s, %s], offset:[%s, %s].", @@ -2138,7 +2084,7 @@ struct SelectInputOpTranscriber : public OpTranscriber { tensor1.lod(), tensor2.lod(), tensor1.offset(), - tensor2.offset())); + tensor2.offset()); } auto undefined_var_type = tensor1; @@ -2148,13 +2094,11 @@ struct SelectInputOpTranscriber : public OpTranscriber { } auto undefine_value = op_inputs[1 + undefined_var_index]; - PADDLE_ENFORCE( + IR_ENFORCE( undefine_value.defining_op()->isa(), - platform::errors::InvalidArgument( - "undefined_var %s should be generated by assign_value, but got " - "%s", - Input_name[undefined_var_index], - undefine_value.defining_op())); + "undefined_var %s should be generated by assign_value, but got %s", + Input_name[undefined_var_index], + undefine_value.defining_op()); undefine_value.set_type(target_var_type); undefine_value.defining_op()->set_attribute( @@ -2191,11 +2135,11 @@ struct SelectInputOpTranscriber : public OpTranscriber { tensor1.lod(), tensor1.offset())); } else { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "select_input only support same type or DenseTensorType with only " "different dim, now is %s != %s.", 
input1, - input2)); + input2); } pir::Operation* operation = pir::Operation::Create( @@ -2219,17 +2163,15 @@ struct SelectOutputOpTranscriber : public OpTranscriber { std::vector op_inputs = {}; auto Mask_name = op_desc.Input("Mask")[0]; auto& Input_name = op_desc.Input("X")[0]; - PADDLE_ENFORCE(param_map->count(Mask_name) > 0, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - Mask_name)); + IR_ENFORCE(param_map->count(Mask_name) > 0, + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + Mask_name); op_inputs.push_back(param_map->at(Mask_name).value); - PADDLE_ENFORCE(param_map->count(Input_name) > 0, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - Input_name)); + IR_ENFORCE(param_map->count(Input_name) > 0, + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + Input_name); op_inputs.push_back(param_map->at(Input_name).value); pir::AttributeMap attribute_map; @@ -2238,9 +2180,8 @@ struct SelectOutputOpTranscriber : public OpTranscriber { OpOutputMapping arg_to_idx; OpOutputTypeList op_output_types; auto Out_names = op_desc.Output("Out"); - PADDLE_ENFORCE(Out_names.size() == 2, - platform::errors::InvalidArgument( - "Expected SelectOutput's output size is 2.")); + IR_ENFORCE(Out_names.size() == 2, + "Expected SelectOutput's output size is 2."); for (size_t idx = 0; idx < Out_names.size(); idx++) { VarDesc* var = op_desc.Block()->FindVarRecursive(Out_names[idx]); arg_to_idx[var->Name()] = {idx, 0}; @@ -2269,28 +2210,23 @@ pir::Value TranslateNumClassesForOneHot(pir::IrContext* ctx, if (op_desc.HasInput(legacy_tensor_name) && !op_desc.Input(legacy_tensor_name).empty()) { legacy_vars = op_desc.Input(legacy_tensor_name); - PADDLE_ENFORCE(legacy_vars.size() == 1, - platform::errors::InvalidArgument( - "depth_tensor input of one hot MUST be a tensor")); + IR_ENFORCE(legacy_vars.size() == 1, + "depth_tensor input of one hot MUST be a tensor"); auto var_name = legacy_vars[0]; - PADDLE_ENFORCE(legacy_vars.size() == 1, - platform::errors::InvalidArgument( - "depth_tensor input of one hot MUST be a tensor")); - PADDLE_ENFORCE( - param_map->count(legacy_vars[0]), - platform::errors::InvalidArgument( - "%s should be existed in one_hot_v2 as input depth_tensor.", - legacy_vars[0])); + IR_ENFORCE(legacy_vars.size() == 1, + "depth_tensor input of one hot MUST be a tensor"); + IR_ENFORCE(param_map->count(legacy_vars[0]), + "%s should be existed in one_hot_v2 as input depth_tensor.", + legacy_vars[0]); auto defining_info = param_map->at(legacy_vars[0]); return defining_info.value; } auto& attribute_translator = AttributeTranslator::instance(); if (!op_desc.HasAttr(legacy_attr_name)) { - PADDLE_THROW( - phi::errors::InvalidArgument("Op %s arg %s should not be zero size", - op_desc.Type(), - legacy_attr_name)); + IR_THROW("Op %s arg %s should not be zero size", + op_desc.Type(), + legacy_attr_name); } paddle::framework::Attribute legacy_attr = op_desc.GetAttr(legacy_attr_name); VLOG(10) << "[" << op_desc.Type() << "][attribute]" @@ -2315,16 +2251,14 @@ struct OneHotTranscriber : public OpTranscriber { pir::Attribute TranslateDtypeForArange(pir::IrContext* ctx, const OpDesc& op_desc, const OpAttributeInfo& attr_info) { - PADDLE_ENFORCE( - op_desc.Input("Start").size() == 1, - platform::errors::InvalidArgument( - "[op:%s] Input [Start]'s size should be equal to 1", op_desc.Type())); + IR_ENFORCE(op_desc.Input("Start").size() == 1, + "[op:%s] Input [Start]'s size should be 
equal to 1", + op_desc.Type()); auto var_desc = op_desc.Block()->FindVarRecursive(op_desc.Input("Start")[0]); - PADDLE_ENFORCE( - var_desc != nullptr, - platform::errors::InvalidArgument("[op:%s] Input %s should not be null", - op_desc.Type(), - op_desc.Input("Start")[0])); + IR_ENFORCE(var_desc != nullptr, + "[op:%s] Input %s should not be null", + op_desc.Type(), + op_desc.Input("Start")[0]); auto start_proto_dtype = var_desc->GetDataType(); auto start_phi_dtype = phi::TransToPhiDataType(start_proto_dtype); auto dtype_attr = @@ -2388,18 +2322,15 @@ struct ElementwiseTranscriber : public OpTranscriber { } auto x_names = op_desc.Input("X", true); - PADDLE_ENFORCE( - x_names.size() == 1, - platform::errors::InvalidArgument( - "Expected op[%s]'s input X has only 1 variable, but got %d", - op_desc.Type(), - x_names.size())); + IR_ENFORCE(x_names.size() == 1, + "Expected op[%s]'s input X has only 1 variable, but got %d", + op_desc.Type(), + x_names.size()); auto x_name = x_names[0]; - PADDLE_ENFORCE(param_map->count(x_name) > 0, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - x_name)); + IR_ENFORCE(param_map->count(x_name) > 0, + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + x_name); auto x_defining_info = param_map->at(x_name); if (x_defining_info.generated_by_vector) { InsertSliceOperationForTarget( @@ -2407,34 +2338,30 @@ struct ElementwiseTranscriber : public OpTranscriber { x_defining_info = param_map->at(x_name); } pir::Value x_value = x_defining_info.value; - PADDLE_ENFORCE( - x_value != nullptr, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s is not null", op_desc.Type(), x_name)); + IR_ENFORCE(x_value, + "Expected op[%s]'s input %s is not null", + op_desc.Type(), + x_name); pir::Type x_type = x_value.type(); - PADDLE_ENFORCE(x_type.isa(), - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - x_name, - x_type)); + IR_ENFORCE(x_type.isa(), + "Expected op[%s]'s input %s is DenseTensor but got %s", + op_desc.Type(), + x_name, + x_type); dialect::DenseTensorType x_tensor_type = x_type.dyn_cast(); std::vector x_shape = common::vectorize(x_tensor_type.dims()); auto y_names = op_desc.Input("Y", true); - PADDLE_ENFORCE( - y_names.size() == 1, - platform::errors::InvalidArgument( - "Expected op[%s]'s input Y has only 1 variable, but got %d", - op_desc.Type(), - y_names.size())); + IR_ENFORCE(y_names.size() == 1, + "Expected op[%s]'s input Y has only 1 variable, but got %d", + op_desc.Type(), + y_names.size()); auto y_name = y_names[0]; - PADDLE_ENFORCE(param_map->count(y_name) > 0, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - y_name)); + IR_ENFORCE(param_map->count(y_name) > 0, + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + y_name); auto y_defining_info = param_map->at(y_name); if (y_defining_info.generated_by_vector) { InsertSliceOperationForTarget( @@ -2442,17 +2369,16 @@ struct ElementwiseTranscriber : public OpTranscriber { y_defining_info = param_map->at(y_name); } pir::Value y_value = y_defining_info.value; - PADDLE_ENFORCE( - y_value != nullptr, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s is not null", op_desc.Type(), y_name)); + IR_ENFORCE(y_value, + "Expected op[%s]'s input %s is not null", + op_desc.Type(), + y_name); pir::Type y_type = y_value.type(); - PADDLE_ENFORCE(y_type.isa(), - platform::errors::InvalidArgument( - 
"Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - y_name, - y_type)); + IR_ENFORCE(y_type.isa(), + "Expected op[%s]'s input %s is DenseTensor but got %s", + op_desc.Type(), + y_name, + y_type); dialect::DenseTensorType y_tensor_type = y_type.dyn_cast(); std::vector y_shape = common::vectorize(y_tensor_type.dims()); @@ -2466,13 +2392,11 @@ struct ElementwiseTranscriber : public OpTranscriber { // x.rank=y.rank return {x_value, y_value}; } - PADDLE_ENFORCE( - append_size > 0, - platform::errors::InvalidArgument( - "Expected op[%s] have append size > 0 with axis=%d but got %d", - op_desc.Type(), - axis, - append_size)); + IR_ENFORCE(append_size > 0, + "Expected op[%s] have append size > 0 with axis=%d but got %d", + op_desc.Type(), + axis, + append_size); pir::Builder builder(ctx, block); pir::Value y_new; @@ -2514,9 +2438,9 @@ struct GradAddOpTranscriber : public ElementwiseTranscriber { } const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op assign_value should have corresponding OpInfo " - "pd_op.assign_value_")); + "pd_op.assign_value_"); } return op_info; @@ -2541,18 +2465,16 @@ struct ElementwiseGradTranscriber : public OpTranscriber { if (y_grad_output.size() < 1) { return; } - PADDLE_ENFORCE( + IR_ENFORCE( y_grad_output.size() == 1, - platform::errors::InvalidArgument( - "Expected op[%s]'s output Y@GRAD has only 1 variable, but got %d", - op_desc.Type(), - y_grad_output.size())); + "Expected op[%s]'s output Y@GRAD has only 1 variable, but got %d", + op_desc.Type(), + y_grad_output.size()); const auto& y_grad_var_name = y_grad_output[0]; auto idx_iter = arg_to_idx.find(y_grad_var_name); if (idx_iter == arg_to_idx.end()) { - PADDLE_THROW(phi::errors::InvalidArgument( - "op[%s] should have got its y_grad", op_desc.Type())); + IR_THROW("op[%s] should have got its y_grad", op_desc.Type()); } auto [idx_in_op, idx_in_vec] = idx_iter->second; VLOG(10) << "[output recording]" @@ -2561,24 +2483,22 @@ struct ElementwiseGradTranscriber : public OpTranscriber { auto y_names = op_desc.Input("Y", true); auto y_name = y_names[0]; - PADDLE_ENFORCE(param_map->count(y_name) > 0, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s has been parsed", - op_desc.Type(), - y_name)); + IR_ENFORCE(param_map->count(y_name) > 0, + "Expected op[%s]'s input %s has been parsed", + op_desc.Type(), + y_name); auto y_defining_info = param_map->at(y_name); pir::Value y_value = y_defining_info.value; - PADDLE_ENFORCE( - y_value != nullptr, - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s is not null", op_desc.Type(), y_name)); + IR_ENFORCE(y_value, + "Expected op[%s]'s input %s is not null", + op_desc.Type(), + y_name); pir::Type y_type = y_value.type(); - PADDLE_ENFORCE(y_type.isa(), - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - y_name, - y_type)); + IR_ENFORCE(y_type.isa(), + "Expected op[%s]'s input %s is DenseTensor but got %s", + op_desc.Type(), + y_name, + y_type); dialect::DenseTensorType y_tensor_type = y_type.dyn_cast(); @@ -2586,12 +2506,11 @@ struct ElementwiseGradTranscriber : public OpTranscriber { // if y_grad' shape is same with y, we don't need a reshape pir::Type y_grad_type = value.type(); - PADDLE_ENFORCE(y_grad_type.isa(), - platform::errors::InvalidArgument( - "Expected op[%s]'s input %s is DenseTensor but got %s", - op_desc.Type(), - y_grad_var_name, - y_grad_type)); + 
IR_ENFORCE(y_grad_type.isa(), + "Expected op[%s]'s input %s is DenseTensor but got %s", + op_desc.Type(), + y_grad_var_name, + y_grad_type); dialect::DenseTensorType y_grad_tensor_type = y_grad_type.dyn_cast(); if (y_grad_tensor_type.dims() == y_tensor_type.dims()) { @@ -2618,10 +2537,9 @@ struct SetValueOpTranscriber : public OpTranscriber { op_normalizer.GetLegacyAttrName(op_desc.Type(), input_info.name); if (!op_desc.HasAttr(legacy_attr_name)) { - PADDLE_THROW( - phi::errors::InvalidArgument("Op %s arg %s should not be zero size", - op_desc.Type(), - legacy_attr_name)); + IR_THROW("Op %s arg %s should not be zero size", + op_desc.Type(), + legacy_attr_name); } framework::Attribute legacy_attr = op_desc.GetAttr(legacy_attr_name); VLOG(10) << "[" << op_desc.Type() << "][attribute]" @@ -2641,9 +2559,9 @@ struct SetValueWithTensorOpTranscriber : public SetValueOpTranscriber { std::string target_op_name = dialect::SetValueWithTensorOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op set_value should have corresponding OpInfo " - "pd_op.set_value_with_tensor")); + "pd_op.set_value_with_tensor"); } return op_info; @@ -2661,15 +2579,13 @@ struct SetValueWithTensorOpTranscriber : public SetValueOpTranscriber { const OpInputInfo& info, pir::Block* block) -> pir::Value { std::vector legacy_input_vars; - PADDLE_ENFORCE(op_desc.HasInput("ValueTensor"), - platform::errors::InvalidArgument( - "[set_value] should have ValueTensor")); + IR_ENFORCE(op_desc.HasInput("ValueTensor"), + "[set_value] should have ValueTensor"); legacy_input_vars = op_desc.Input("ValueTensor", true); - PADDLE_ENFORCE( + IR_ENFORCE( legacy_input_vars.size() == 1u, - platform::errors::InvalidArgument("[set_value][ValueTensor] should " - "only have 1 variable, but got %d", - legacy_input_vars.size())); + "[set_value][ValueTensor] should only have 1 variable, but got %d", + legacy_input_vars.size()); auto var_name = legacy_input_vars[0]; auto defining_info = (*param_map)[var_name]; if (defining_info.generated_by_vector) { @@ -2688,9 +2604,9 @@ struct SetValueGradOpTranscriber : public SetValueWithTensorOpTranscriber { std::string target_op_name = dialect::SetValueWithTensorGradOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op set_value_grad should have corresponding OpInfo " - "pd_op.set_value_with_tensor_grad")); + "pd_op.set_value_with_tensor_grad"); } return op_info; @@ -2765,11 +2681,10 @@ struct FusedFeedForwardOpTranscriber : public OpTranscriber { ctx, param_map, op_desc, operation, arg_to_idx); if (op_desc.HasOutput("Out")) { const auto& output_vars = op_desc.Output("Out"); - PADDLE_ENFORCE(output_vars.size() == 1, - platform::errors::InvalidArgument( - "Expected op[%s]'s Out has only 1 var but got %s", - op_desc.Type(), - output_vars.size())); + IR_ENFORCE(output_vars.size() == 1, + "Expected op[%s]'s Out has only 1 var but got %s", + op_desc.Type(), + output_vars.size()); auto output_var = output_vars[0]; auto fused_feedforward_op = operation->dyn_cast(); @@ -2785,9 +2700,9 @@ struct ShareBufferOpTranscriber : public OpTranscriber { std::string target_op_name = dialect::ShareDataOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op share_buffer should have corresponding OpInfo " - "pd_op.share_data")); + "pd_op.share_data"); 
} return op_info; @@ -2809,10 +2724,10 @@ struct RandIntOpTranscriber : public OpTranscriber { const auto& legacy_output_vars = op_desc.Output(legacy_output_name); auto& var_name = legacy_output_vars[0]; VarDesc* var = block->FindVarRecursive(var_name); - PADDLE_ENFORCE( - var != nullptr, - platform::errors::InvalidArgument( - "[op:%s] Output %s should not be null", op_desc.Type(), var_name)); + IR_ENFORCE(var != nullptr, + "[op:%s] Output %s should not be null", + op_desc.Type(), + var_name); int dtype_attr_val = PADDLE_GET_CONST(int, op_desc.GetAttr("dtype")); paddle::framework::proto::VarType::Type var_type = @@ -2927,9 +2842,9 @@ struct FusedElemwiseAddActivationGradOpTranscriber const OpDesc& op_desc) override { const auto inter_out_grad = op_desc.Output("IntermediateOut@GRAD"); if (inter_out_grad.size() > 0) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "pd_op.fused_elemwise_add_activation_grad doesn't have " - "Intermediate_out_grad output")); + "Intermediate_out_grad output"); } return OpTranscriber::LookUpOpInfo(ctx, op_desc); @@ -2961,10 +2876,10 @@ struct MatrixRankOpTranscriber : public OpTranscriber { } const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op matrix_rank should have corresponding OpInfo pd_op.matrix_rank " "or " - "pd_op.matrix_rank_tol.")); + "pd_op.matrix_rank_tol."); } return op_info; } @@ -2976,9 +2891,9 @@ struct LodArrayLengthOpTranscriber : public OpTranscriber { std::string target_op_name = dialect::ArrayLengthOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op lod_array_length should have corresponding OpInfo " - "pd_op.array_length")); + "pd_op.array_length"); } return op_info; @@ -2996,21 +2911,17 @@ struct LodArrayLengthOpTranscriber : public OpTranscriber { const OpInputInfo& info, pir::Block* block) -> pir::Value { VLOG(10) << "[" << op_desc.Type() << "][input `array`]"; - PADDLE_ENFORCE( - op_desc.HasInput("X"), - platform::errors::InvalidArgument( - "Op lod_array_length should have input `X` but not found")); + IR_ENFORCE(op_desc.HasInput("X"), + "Op lod_array_length should have input `X` but not found"); const auto& vars = op_desc.Input("X"); - PADDLE_ENFORCE( - vars.size() == 1, - platform::errors::InvalidArgument( - "Input `X` should be one variable %s", op_desc.Type())); + IR_ENFORCE(vars.size() == 1, + "Input `X` should be one variable %s", + op_desc.Type()); VLOG(10) << "[" << op_desc.Type() << "][input `x`] from " << vars[0]; const VarDesc* var_desc = op_desc.Block()->FindVarRecursive(vars[0]); - PADDLE_ENFORCE( - var_desc != nullptr, - platform::errors::InvalidArgument( - "VarDesc `%s` should be exist in legacy program", vars[0])); + IR_ENFORCE(var_desc != nullptr, + "VarDesc `%s` should be exist in legacy program", + vars[0]); auto defining_value = pir::Value(nullptr); if (param_map->count(var_desc->Name())) { VLOG(10) << "[" << op_desc.Type() << "][input `x`] var: " << vars[0] @@ -3033,9 +2944,9 @@ struct WriteArrayOpTranscriber : public OpTranscriber { std::string target_op_name = dialect::ArrayWrite_Op::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op write_to_array should have corresponding OpInfo " - "pd_op.array_write_")); + "pd_op.array_write_"); } return op_info; @@ -3053,21 +2964,17 @@ struct WriteArrayOpTranscriber : public 
OpTranscriber { const OpInputInfo& info, pir::Block* block) -> pir::Value { VLOG(10) << "[" << op_desc.Type() << "][input `array`]"; - PADDLE_ENFORCE( - op_desc.HasOutput("Out"), - platform::errors::InvalidArgument( - "Op write_to_array should have output `Out` but not found")); + IR_ENFORCE(op_desc.HasOutput("Out"), + "Op write_to_array should have output `Out` but not found"); const auto& vars = op_desc.Output("Out"); - PADDLE_ENFORCE( - vars.size() == 1, - platform::errors::InvalidArgument( - "Output `Out` should be one variable %s", op_desc.Type())); + IR_ENFORCE(vars.size() == 1, + "Output `Out` should be one variable %s", + op_desc.Type()); VLOG(10) << "[" << op_desc.Type() << "][input `array`] from " << vars[0]; const VarDesc* var_desc = op_desc.Block()->FindVarRecursive(vars[0]); - PADDLE_ENFORCE( - var_desc != nullptr, - platform::errors::InvalidArgument( - "VarDesc `%s` should be exist in legacy program", vars[0])); + IR_ENFORCE(var_desc != nullptr, + "VarDesc `%s` should be exist in legacy program", + vars[0]); auto defining_value = pir::Value(nullptr); if (param_map->count(var_desc->Name())) { VLOG(10) << "[" << op_desc.Type() << "][input `array`] var: " << vars[0] @@ -3090,9 +2997,9 @@ struct ReadArrayOpTranscriber : public OpTranscriber { std::string target_op_name = dialect::ArrayReadOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op read_from_array should have corresponding OpInfo " - "pd_op.read_array")); + "pd_op.read_array"); } return op_info; @@ -3104,33 +3011,30 @@ struct SliceOpTranscriber : public OpTranscriber { const OpDesc& op_desc) override { std::string target_op_name = dialect::SliceOp::name(); - PADDLE_ENFORCE(op_desc.HasInput("Input"), - platform::errors::InvalidArgument( - "op %s should have input `Input`", op_desc.Type())); + IR_ENFORCE(op_desc.HasInput("Input"), + "op %s should have input `Input`", + op_desc.Type()); const auto& input_vars = op_desc.Input("Input"); - PADDLE_ENFORCE(input_vars.size() == 1, - platform::errors::InvalidArgument( - "op %s should have one input `Input`, but got %d.", - op_desc.Type(), - input_vars.size())); + IR_ENFORCE(input_vars.size() == 1, + "op %s should have one input `Input`, but got %d.", + op_desc.Type(), + input_vars.size()); const auto* input_var = op_desc.Block()->FindVarRecursive(input_vars[0]); if (input_var->GetType() == framework::proto::VarType::LOD_TENSOR_ARRAY) { - PADDLE_ENFORCE(op_desc.HasOutput("Out"), - platform::errors::InvalidArgument( - "op %s should have input `Out`", op_desc.Type())); + IR_ENFORCE(op_desc.HasOutput("Out"), + "op %s should have input `Out`", + op_desc.Type()); const auto& output_vars = op_desc.Output("Out"); - PADDLE_ENFORCE(output_vars.size() == 1, - platform::errors::InvalidArgument( - "op %s should have one input `Out`, but got %d.", - op_desc.Type(), - output_vars.size())); + IR_ENFORCE(output_vars.size() == 1, + "op %s should have one input `Out`, but got %d.", + op_desc.Type(), + output_vars.size()); const auto* output_var = op_desc.Block()->FindVarRecursive(output_vars[0]); - PADDLE_ENFORCE(output_var != nullptr, - platform::errors::InvalidArgument( - "op %s should have non-empty output `%s`.", - op_desc.Type(), - output_vars[0])); + IR_ENFORCE(output_var != nullptr, + "op %s should have non-empty output `%s`.", + op_desc.Type(), + output_vars[0]); if (output_var->GetType() == framework::proto::VarType::LOD_TENSOR) { target_op_name = dialect::SliceArrayDenseOp::name(); @@ 
-3141,8 +3045,7 @@ struct SliceOpTranscriber : public OpTranscriber { const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op slice should have corresponding OpInfo %s", target_op_name)); + IR_THROW("Op slice should have corresponding OpInfo %s", target_op_name); } return op_info; @@ -3159,11 +3062,10 @@ struct LegacyMatmulOpTranscriber : public OpTranscriber { } float v = PADDLE_GET_CONST(float, op_desc.GetAttr(attr_name)); if (abs(v - expected_value) > 1e-6f) { - PADDLE_THROW( - phi::errors::InvalidArgument("Expected op[%s]'s attr %s is not %f", - op_desc.Type(), - attr_name, - v)); + IR_THROW("Expected op[%s]'s attr %s is not %f", + op_desc.Type(), + attr_name, + v); } }; @@ -3174,9 +3076,9 @@ struct LegacyMatmulOpTranscriber : public OpTranscriber { std::string target_op_name = dialect::MatmulOp::name(); const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( + IR_THROW( "Op read_from_array should have corresponding OpInfo " - "pd_op.read_array")); + "pd_op.read_array"); } return op_info; @@ -3196,17 +3098,14 @@ struct LegacyMatmulOpTranscriber : public OpTranscriber { } const auto& output_vars = op_desc.Output("Out"); - PADDLE_ENFORCE( - output_vars.size() == 1, - platform::errors::InvalidArgument( - "Expected op[%s]'s output `Out` has only 1 variable, but got %d", - op_desc.Type(), - output_vars.size())); + IR_ENFORCE(output_vars.size() == 1, + "Expected op[%s]'s output `Out` has only 1 variable, but got %d", + op_desc.Type(), + output_vars.size()); auto idx_iter = arg_to_idx.find(output_vars[0]); if (idx_iter == arg_to_idx.end()) { - PADDLE_THROW(phi::errors::InvalidArgument( - "op[%s] should have got its `Out`", op_desc.Type())); + IR_THROW("op[%s] should have got its `Out`", op_desc.Type()); } auto [idx_in_op, idx_in_vec] = idx_iter->second; VLOG(10) << "[output recording]" From b6248d59c6033009969dca3fee692ff347f15ce4 Mon Sep 17 00:00:00 2001 From: cmcamdy Date: Mon, 18 Mar 2024 16:28:20 +0800 Subject: [PATCH 5/6] fix optranslator --- .../fluid/ir_adaptor/translator/op_translator.cc | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/paddle/fluid/ir_adaptor/translator/op_translator.cc b/paddle/fluid/ir_adaptor/translator/op_translator.cc index b630eb2e6bc50..6a7e8a4dd5b44 100644 --- a/paddle/fluid/ir_adaptor/translator/op_translator.cc +++ b/paddle/fluid/ir_adaptor/translator/op_translator.cc @@ -2851,20 +2851,6 @@ struct FusedElemwiseAddActivationGradOpTranscriber } }; -struct PartialSumOpTranscriber : public OpTranscriber { - pir::OpInfo LookUpOpInfo(pir::IrContext* ctx, - const OpDesc& op_desc) override { - std::string target_op_name = "pd_op.partial_sum"; - const auto& op_info = ctx->GetRegisteredOpInfo(target_op_name); - if (!op_info) { - PADDLE_THROW(phi::errors::InvalidArgument( - "Op partial_sum should have corresponding OpInfo " - "pd_op.partial_sum")); - } - return op_info; - } -}; - struct MatrixRankOpTranscriber : public OpTranscriber { pir::OpInfo LookUpOpInfo(pir::IrContext* ctx, const OpDesc& op_desc) override { @@ -3196,7 +3182,6 @@ OpTranslator::OpTranslator() { special_handlers["slice"] = SliceOpTranscriber(); special_handlers["split"] = SplitOpTranscriber(); special_handlers["sum"] = AddNOpTranscriber(); - special_handlers["partial_sum"] = PartialSumOpTranscriber(); special_handlers["tril_triu"] = TrilAndTriuOpTranscriber(); special_handlers["tril_triu_grad"] = TrilAndTriuGradOpTranscriber(); 
special_handlers["matmul"] = LegacyMatmulOpTranscriber(); From 77fee5f724e7b7169fb32ba9370a0fd8d37c353c Mon Sep 17 00:00:00 2001 From: cmcandy Date: Tue, 19 Mar 2024 16:57:42 +0800 Subject: [PATCH 6/6] fix: add debug log --- paddle/phi/infermeta/unary.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc index df7d071a5ddb7..9ce7e7d430cf8 100644 --- a/paddle/phi/infermeta/unary.cc +++ b/paddle/phi/infermeta/unary.cc @@ -4468,6 +4468,10 @@ void PartialSumInferMeta(const std::vector<const MetaTensor*>& xs, "ShapeError: Input tensors count should > 0. But " "received inputs' length is 0.")); + if (inputs_num == 1) { + VLOG(3) << "Warning: partial_sum op has only one input, which may be useless"; + } + // Only support two dimensions now, should be extended later // when length is -1, need make sure all dimensions to be added are the same for (size_t i = 0; i < inputs_num; i++) {
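For context on the hunk above, here is a minimal, standalone sketch of the shape rule that PartialSumInferMeta is expected to enforce. It is not part of the patch: the 2-D {batch_size, input_len} input layout and the reading of start_index/length as a column slice summed element-wise across all inputs are assumptions drawn from the partial_sum kernel's documented behavior, and the function and variable names below are illustrative only.

#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

// Sketch only: expected output shape of partial_sum, assuming every input is
// a 2-D tensor of identical shape {batch_size, input_len} and the op sums the
// column range [start_index, start_index + length) across the input list.
std::pair<int64_t, int64_t> PartialSumOutDims(
    const std::vector<std::pair<int64_t, int64_t>>& in_dims,
    int start_index,
    int length) {
  assert(!in_dims.empty());
  const int64_t batch_size = in_dims[0].first;
  const int64_t input_len = in_dims[0].second;
  for (const auto& d : in_dims) {
    // all inputs must agree on both dimensions, mirroring the checks above
    assert(d.first == batch_size && d.second == input_len);
  }
  assert(start_index >= 0 && start_index < input_len);
  // length == -1 means "from start_index to the end of each row"
  const int64_t out_len = (length == -1) ? input_len - start_index : length;
  assert(start_index + out_len <= input_len);
  // the sum is taken across the input list, so the number of inputs does not
  // appear in the output shape
  return {batch_size, out_len};
}

Under that reading, the backward pass only needs to scatter out_grad back into the selected columns of each x_grad, which is consistent with PartialSumGradInferMeta simply copying each input's dims and dtype onto the corresponding x_grad.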