Commit

Merge branch 'develop' into error_80_81_82

Luohongzhige authored Aug 1, 2024
2 parents 3ae7f8e + 3ec3634 commit 4d2223b
Showing 1,613 changed files with 33,310 additions and 18,093 deletions.
28 changes: 14 additions & 14 deletions .github/CODEOWNERS
@@ -1,7 +1,7 @@
# This file is migrated from CI script, it's an effort of modernizing our dev infra.
# Code owners are expected to take responsibility for reviewing patches to the respective files.

-/CMakeLists.txt @wanghuancoder @Aurelius84 @XiaoguangHu01 @qili93
+/CMakeLists.txt @wanghuancoder @Aurelius84 @XiaoguangHu01
paddle/fluid/distributed/collective @sneaxiy @ForFishes
paddle/fluid/eager/autograd_meta.cc @JiabinYang @phlrain
paddle/fluid/eager/autograd_meta.h @JiabinYang @phlrain
@@ -12,20 +12,20 @@ paddle/fluid/eager/grad_node_info.h @JiabinYang @phlrain
paddle/fluid/eager/grad_tensor_holder.cc @JiabinYang @phlrain
paddle/fluid/eager/grad_tensor_holder.h @JiabinYang @phlrain
paddle/fluid/eager/tensor_wrapper.h @JiabinYang @phlrain
-paddle/fluid/framework/block_desc.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/details/op_registry.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/framework.proto @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/grad_op_desc_maker.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/ir/graph.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/ir/node.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/lod_tensor.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/op_desc.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/operator.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/scope.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/selected_rows.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
-paddle/fluid/framework/tensor.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
+paddle/fluid/framework/block_desc.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/details/op_registry.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/framework.proto @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/grad_op_desc_maker.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/ir/graph.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/ir/node.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/lod_tensor.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/op_desc.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/operator.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/scope.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/selected_rows.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
+paddle/fluid/framework/tensor.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
paddle/fluid/framework/unused_var_check.cc @zhiqiu @phlrain
-paddle/fluid/framework/var_desc.h @XiaoguangHu01 @zhiqiu @Xreki @qili93 @Aurelius84
+paddle/fluid/framework/var_desc.h @XiaoguangHu01 @zhiqiu @Xreki @Aurelius84
paddle/fluid/operators/distributed/send_recv.proto.in @gongweibao @seiriosPlus
paddle/fluid/prim/api/api.yaml @cxxly @xiaoguoguo626807 @Charles-hit @cyber-pioneer @JiabinYang
paddle/fluid/prim/api/composite_backward/composite_backward_api.h @cxxly @xiaoguoguo626807 @Charles-hit @cyber-pioneer @JiabinYang
2 changes: 1 addition & 1 deletion cmake/external/xpu.cmake
@@ -30,7 +30,7 @@ if(NOT DEFINED XPU_XRE_BASE_VERSION)
set(XPU_XRE_BASE_VERSION "4.32.0.1")
endif()
if(NOT DEFINED XPU_XHPC_BASE_DATE)
-  set(XPU_XHPC_BASE_DATE "20240712")
+  set(XPU_XHPC_BASE_DATE "20240730")
endif()
set(XPU_XCCL_BASE_VERSION "1.2.5")
if(NOT DEFINED XPU_XFT_BASE_VERSION)
2 changes: 1 addition & 1 deletion cmake/generic.cmake
@@ -1312,7 +1312,7 @@ function(math_library TARGET)
set(cc_srcs)
set(cu_srcs)
set(hip_srcs)
-  set(math_common_deps device_context framework_proto enforce)
+  set(math_common_deps device_context framework_proto phi common)
if(WITH_GPU)
if(${CMAKE_CUDA_COMPILER_VERSION} LESS 11.0)
list(APPEND math_common_deps cub)
2 changes: 1 addition & 1 deletion paddle/cinn/adt/adt.h
@@ -283,7 +283,7 @@ struct Ok final {
bool operator!=(const Ok&) const { return false; }
};

-#define ADT_TODO() PADDLE_THROW(phi::errors::Fatal("TODO"))
+#define ADT_TODO() PADDLE_THROW(::common::errors::Fatal("TODO"))

inline std::size_t hash_combine(std::size_t lhs, std::size_t rhs) {
return lhs ^= rhs + 0x9e3779b9 + (lhs << 6) + (lhs >> 2);
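The ADT_TODO hunk above is the template for most of the remaining CINN changes in this commit: the error object is now built from the global ::common::errors namespace instead of phi::errors, while the PADDLE_THROW call shape is unchanged. Below is a minimal, self-contained sketch of the pattern; the simplified PADDLE_THROW macro and Fatal type are stand-in assumptions for illustration, not Paddle's real definitions (which typically also record file/line context).

#include <stdexcept>
#include <string>

// Stand-in definitions for illustration only.
namespace common {
namespace errors {
struct Fatal : public std::runtime_error {
  explicit Fatal(const std::string& msg) : std::runtime_error(msg) {}
};
}  // namespace errors
}  // namespace common

#define PADDLE_THROW(err) throw(err)

// Old spelling (before this commit):
//   #define ADT_TODO() PADDLE_THROW(phi::errors::Fatal("TODO"))
// New spelling (this commit):
#define ADT_TODO() PADDLE_THROW(::common::errors::Fatal("TODO"))

int main() {
  try {
    ADT_TODO();
  } catch (const ::common::errors::Fatal&) {
    // The leading "::" forces lookup from the global namespace, so inside
    // nested namespaces such as cinn::adt the name cannot be captured by a
    // sibling "common" namespace.
    return 0;
  }
  return 1;
}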
8 changes: 4 additions & 4 deletions paddle/cinn/adt/anchor_sd_equation_context.cc
@@ -31,7 +31,7 @@ void GenerateScheduleMeshEquationsImpl(const List<ScheduleDim>& sched_dims,
PADDLE_ENFORCE_EQ(
input_iterators->size() == output_iterators->size(),
true,
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"The size of input iterators and output iterators should be equal, "
"but got input iterators size = %d, output iterators size = %d.",
input_iterators->size(),
@@ -53,7 +53,7 @@ void GenerateScheduleMeshEquationsImpl(
PADDLE_ENFORCE_EQ(
shape.value()->size() == output_iterators->size(),
true,
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"The size of shape and output iterators should be equal, but got "
"shape size = %d, output iterators size = %d.",
shape.value()->size(),
@@ -75,7 +75,7 @@ void GenerateScheduleMeshEquationsImpl(
const auto& [sched_mesh, perm] = sched_transpose.tuple();
PADDLE_ENFORCE_EQ(GetOutputRank(sched_mesh) == output_iterators->size(),
true,
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"The size of output iterators should be equal to the "
"rank of the schedule mesh, but got output iterators "
"size = %d, rank of the schedule mesh = %d.",
@@ -99,7 +99,7 @@ void GenerateScheduleMeshEquationsImpl(
const auto& [sched_mesh, _] = sched_padding.tuple();
PADDLE_ENFORCE_EQ(GetOutputRank(sched_mesh) == output_iterators->size(),
true,
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"The size of output iterators should be equal to the "
"rank of the schedule mesh, but got output iterators "
"size = %d, rank of the schedule mesh = %d.",
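All four hunks in this file touch only the error-factory argument of PADDLE_ENFORCE_EQ; the macro's calling convention is unchanged: the two values under comparison (here a boolean predicate and true), then an error object carrying a printf-style message. A hedged sketch of that convention follows; the simplified macro and the formatting constructor are assumptions for illustration, not Paddle's actual implementation.

#include <cstdio>
#include <stdexcept>
#include <string>

// Stand-ins for illustration; Paddle's real PADDLE_ENFORCE_EQ also records
// file/line and pretty-prints both operands.
namespace common {
namespace errors {
class InvalidArgument : public std::invalid_argument {
 public:
  template <typename... Args>
  explicit InvalidArgument(const char* fmt, Args... args)
      : std::invalid_argument(Format(fmt, args...)) {}

 private:
  template <typename... Args>
  static std::string Format(const char* fmt, Args... args) {
    char buf[512];
    std::snprintf(buf, sizeof(buf), fmt, args...);
    return buf;
  }
};
}  // namespace errors
}  // namespace common

#define PADDLE_ENFORCE_EQ(lhs, rhs, err) \
  do {                                   \
    if ((lhs) != (rhs)) throw (err);     \
  } while (0)

int main() {
  std::size_t in = 3, out = 4;
  try {
    // Boolean form, as in anchor_sd_equation_context.cc; the two-value
    // form PADDLE_ENFORCE_EQ(in, out, ...) appears in other hunks.
    PADDLE_ENFORCE_EQ(
        in == out,
        true,
        ::common::errors::InvalidArgument(
            "The size of input iterators and output iterators should be "
            "equal, but got input iterators size = %zu, output iterators "
            "size = %zu.",
            in,
            out));
  } catch (const ::common::errors::InvalidArgument&) {
    return 0;  // the message carries both sizes for debugging
  }
  return 1;
}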
8 changes: 4 additions & 4 deletions paddle/cinn/adt/equation_solver.cc
@@ -186,7 +186,7 @@ std::unordered_map<Variable, Value> InferValuesImpl(
PADDLE_ENFORCE_EQ(
out_msg_in_indexes.value()->size() == in_msg_in_indexes.value()->size(),
true,
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"The size of out_msg_in_indexes should be equal to the size of "
"in_msg_in_indexes, but got out_msg_in_indexes size = %d, "
"in_msg_in_indexes size = %d.",
@@ -195,7 +195,7 @@
PADDLE_ENFORCE_EQ(
out_msg_out_indexes.value()->size() == in_msg_out_indexes.value()->size(),
true,
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"The size of out_msg_out_indexes should be equal to the size of "
"in_msg_out_indexes, but got out_msg_out_indexes size = %d, "
"in_msg_out_indexes size = %d.",
@@ -288,8 +288,8 @@ void CheckEquationsSolvable(
[&](const auto& opt_old_value, const auto& simplified_value) {
LOG(ERROR) << "old_value: " << ToTxtString(opt_old_value);
LOG(ERROR) << "simplified_value: " << ToTxtString(simplified_value);
-PADDLE_THROW(
-    phi::errors::InvalidArgument("CheckEquationsSolvable Failed"));
+PADDLE_THROW(::common::errors::InvalidArgument(
+    "CheckEquationsSolvable Failed"));
return tValueInferSuccess<bool>{false};
});
};
10 changes: 5 additions & 5 deletions paddle/cinn/adt/generate_map_expr.cc
@@ -105,7 +105,7 @@ bool HasDynamicShape(const ::pir::Value& tensor) {
PADDLE_ENFORCE_EQ(
dim,
-1UL,
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"The dynamic shape dim should be -1, but got %d.", dim));
return true;
}
@@ -249,7 +249,7 @@ std::shared_ptr<KGroup> GenerateKGroups(
PADDLE_ENFORCE_EQ(
igroups.size(),
1UL,
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"The size of igroups should be 1, but got %d.", igroups.size()));
return std::make_shared<KGroup>(group, igroups);
}
@@ -326,7 +326,7 @@ LoopDescriptor4IterVarT MakeGetterLoopDescriptor4IterVar(
PADDLE_ENFORCE_EQ(
loop_iters->size(),
sd->size(),
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"The size of loop iterators and loop descriptors should be equal, "
"but got loop iterators size = %d, loop descriptors size = %d.",
loop_iters->size(),
@@ -363,8 +363,8 @@ MapStmt<Stmt> MakeMapStmt(const MapIrList& map_irs) {
PADDLE_ENFORCE_EQ(
stmts->size(),
1UL,
phi::errors::InvalidArgument("The size of stmts should be 1, but got %d.",
stmts->size()));
::common::errors::InvalidArgument(
"The size of stmts should be 1, but got %d.", stmts->size()));
CHECK(stmts->at(0).Has<MapStmt<Stmt>>());
return stmts->at(0).Get<MapStmt<Stmt>>();
}
2 changes: 1 addition & 1 deletion paddle/cinn/adt/get_sub_reshape_dim_ranges.cc
@@ -82,7 +82,7 @@ GetSubReshapeDimRanges(const List<DimExpr>& lhs_dims,
} else if (LhsAcc() > RhsAcc()) {
rhs_end++;
} else {
PADDLE_THROW(phi::errors::Fatal("Dead code"));
PADDLE_THROW(::common::errors::Fatal("Dead code"));
}
}
CHECK(lhs_end == lhs_dims->size() && rhs_end == rhs_dims->size());
15 changes: 10 additions & 5 deletions paddle/cinn/adt/igroup.cc
@@ -27,9 +27,14 @@ std::shared_ptr<IndexExprInferContext> MakeIndexExprInferContext(
const auto& anchor_iterators = igroup.GetAnchorIterators();

for (std::size_t i = 0; i < anchor_iterators->size(); ++i) {
-    CHECK(anchor_iterator2value
-              .emplace(anchor_iterators->at(i), anchor_iterators->at(i))
-              .second);
+    PADDLE_ENFORCE_EQ(
+        anchor_iterator2value
+            .emplace(anchor_iterators->at(i), anchor_iterators->at(i))
+            .second,
+        true,
+        phi::errors::InvalidArgument(
+            "The element in anchor iterators failed to insert in anchor "
+            "iterator2value! Please check."));
}

return std::make_shared<IndexExprInferContext>(anchor_iterator2value);
@@ -102,10 +102,10 @@ List<Iterator> IGroup::GetIndexIterators(const Index& index) const {
} else if (arg_pos.Has<Undefined>()) {
// do nothing
} else {
PADDLE_THROW(phi::errors::Fatal("Dead code"));
PADDLE_THROW(::common::errors::Fatal("Dead code"));
}
}
PADDLE_THROW(phi::errors::Fatal("Can not find anchor iterators"));
PADDLE_THROW(::common::errors::Fatal("Can not find anchor iterators"));
}

} // namespace cinn::adt
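The first igroup.cc hunk goes beyond a namespace swap: a bare CHECK(...) on the emplace result becomes PADDLE_ENFORCE_EQ with a descriptive message (and, notably, this hunk still builds the error from phi::errors). A before/after sketch under the same stand-in assumptions as above, illustrative only, not Paddle's code:

#include <stdexcept>
#include <string>
#include <unordered_map>

// Stand-ins for illustration; Paddle's real macros add file/line context.
namespace common {
namespace errors {
struct InvalidArgument : public std::invalid_argument {
  explicit InvalidArgument(const std::string& msg)
      : std::invalid_argument(msg) {}
};
}  // namespace errors
}  // namespace common

#define PADDLE_ENFORCE_EQ(lhs, rhs, err) \
  do {                                   \
    if ((lhs) != (rhs)) throw (err);     \
  } while (0)

int main() {
  std::unordered_map<int, int> anchor_iterator2value;
  anchor_iterator2value.emplace(0, 0);  // make the next insert collide

  // Before: CHECK(map.emplace(k, v).second) aborts the whole process with
  // only the stringified expression as context.
  // After: the same predicate becomes a catchable error with a message.
  try {
    PADDLE_ENFORCE_EQ(
        anchor_iterator2value.emplace(0, 1).second,
        true,
        ::common::errors::InvalidArgument(
            "The element in anchor iterators failed to insert in anchor "
            "iterator2value! Please check."));
  } catch (const ::common::errors::InvalidArgument&) {
    return 0;  // duplicate key detected and reported, not a hard abort
  }
  return 1;
}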
6 changes: 3 additions & 3 deletions paddle/cinn/adt/m_ir.cc
@@ -38,12 +38,12 @@ void CollectTensorIndexIterators(const TensorIndexExpr& tensor_index_expr,

void CollectTensorIndexIteratorsImpl(const Undefined& tensor_index_expr,
std::unordered_set<Iterator>* ret) {
-PADDLE_THROW(phi::errors::Unimplemented("Not Implemented"));
+PADDLE_THROW(::common::errors::Unimplemented("Not Implemented"));
}

void CollectTensorIndexIteratorsImpl(const Ok& ok,
std::unordered_set<Iterator>* ret) {
-PADDLE_THROW(phi::errors::Unimplemented("Not Implemented"));
+PADDLE_THROW(::common::errors::Unimplemented("Not Implemented"));
}

void CollectTensorIndexIteratorsImpl(const Iterator& iterator,
@@ -134,7 +134,7 @@ LoopIterators GetAnchorTensorLoopIterators(
namespace {

Tensor GetTensorImpl(const OpStmt& op_stmt, const Undefined& undefined) {
PADDLE_THROW(phi::errors::Fatal("position not found"));
PADDLE_THROW(::common::errors::Fatal("position not found"));
}

Tensor GetTensorImpl(const OpStmt& op_stmt, const tIn<std::size_t>& pos) {
2 changes: 1 addition & 1 deletion paddle/cinn/adt/naive_bidirection_equation_generator.cc
@@ -71,7 +71,7 @@ void VisitEachInMsgOutMsgPair(const List<Index>& in_msg_indexes,
PADDLE_ENFORCE_EQ(
in_msg_indexes->size(),
out_msg_indexes->size(),
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"The size of in_msg_indexes and out_msg_indexes should be equal, but "
"got in_msg_indexes size = %d, out_msg_indexes size = %d.",
in_msg_indexes->size(),
2 changes: 1 addition & 1 deletion paddle/cinn/adt/naive_op_equation_context.cc
@@ -240,7 +240,7 @@ std::optional<std::int64_t> GetArgDimSizeImpl(
const Undefined&,
const GetArgStaticDimT& GetInDim,
const GetArgStaticDimT& GetOutDim) {
PADDLE_THROW(phi::errors::Fatal("position not found"));
PADDLE_THROW(::common::errors::Fatal("position not found"));
}

std::optional<std::int64_t> GetArgDimSize(const OpArgDimPos& arg_dim_pos,
2 changes: 1 addition & 1 deletion paddle/cinn/adt/partition_op_stmts.cc
@@ -368,7 +368,7 @@ std::unordered_map<AnchorIndex, AnchorGroup> PartitionOpStmtsIntoAnchorGroups(

PADDLE_ENFORCE_EQ(all_visited_op_stmts.size(),
op_stmts->size(),
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"Some fake_op_placeholders are not visited."));
return anchor_index2igroup_spec;
}
2 changes: 1 addition & 1 deletion paddle/cinn/adt/print_utils/print_map_expr.cc
@@ -71,7 +71,7 @@ std::string ToTxtStringImpl(const adapter::DynamicTensor& tensor) {
}

std::string ToTxtStringImpl(const TempStorage& tensor) {
PADDLE_THROW(phi::errors::Unimplemented("Not supported yet"));
PADDLE_THROW(::common::errors::Unimplemented("Not supported yet"));
}

} // namespace
2 changes: 1 addition & 1 deletion paddle/cinn/adt/schedule_descriptor.cc
@@ -27,7 +27,7 @@ LoopDescriptors CreateScheduleDescriptor(const ScheduleMesh& sched_mesh,
PADDLE_ENFORCE_EQ(
sched_dims->size(),
loop_types->size(),
-phi::errors::InvalidArgument(
+::common::errors::InvalidArgument(
"The size of sched_dims and loop_types should be equal, but got "
"sched_dims size = %d, loop_types size = %d.",
sched_dims->size(),
4 changes: 2 additions & 2 deletions paddle/cinn/adt/schedule_dim.cc
@@ -188,7 +188,7 @@ List<int> GetReduceAxis(const List<ScheduleDim>& loop_sizes) {
} else if (sched_dim.Has<tInjective<LoopSize>>()) {
// do nothing
} else {
PADDLE_THROW(phi::errors::Fatal("Dead code"));
PADDLE_THROW(::common::errors::Fatal("Dead code"));
}
}
return reduce_axis;
@@ -203,7 +203,7 @@ List<int> GetInjectiveAxis(const List<ScheduleDim>& loop_sizes) {
} else if (sched_dim.Has<tInjective<LoopSize>>()) {
injective_axis->emplace_back(i);
} else {
PADDLE_THROW(phi::errors::Fatal("Dead code"));
PADDLE_THROW(::common::errors::Fatal("Dead code"));
}
}
return injective_axis;