Commit

rename reduce infermeta
MingMingShangTian committed Mar 8, 2022
1 parent 4424722 commit 9a8f555
Showing 8 changed files with 36 additions and 61 deletions.
2 changes: 1 addition & 1 deletion paddle/fluid/operators/reduce_ops/reduce_max_op.cc
@@ -27,7 +27,7 @@ class ReduceMaxOpMaker : public ops::ReduceOpMaker {
 };
 
 DECLARE_INFER_SHAPE_FUNCTOR(reduce_max, ReduceMaxInferShapeFunctor,
-                            PD_INFER_META(phi::MaxRawInferMeta));
+                            PD_INFER_META(phi::ReduceInferMetaBase));
 
 REGISTER_OPERATOR(
     reduce_max, ops::ReduceOp, ReduceMaxOpMaker,
2 changes: 1 addition & 1 deletion paddle/fluid/operators/reduce_ops/reduce_mean_op.cc
@@ -97,7 +97,7 @@ class __reduce_meanMaker__ : public ops::ReduceOpMaker {
 };
 
 DECLARE_INFER_SHAPE_FUNCTOR(reduce_mean, ReduceMeanInferShapeFunctor,
-                            PD_INFER_META(phi::MeanRawInferMeta));
+                            PD_INFER_META(phi::ReduceInferMetaBase));
 
 REGISTER_OPERATOR(reduce_mean, ops::ReduceOp, __reduce_meanMaker__,
                   ops::ReduceMeanOpGradMaker<paddle::framework::OpDesc>,
2 changes: 1 addition & 1 deletion paddle/fluid/operators/reduce_ops/reduce_sum_op.cc
@@ -103,7 +103,7 @@ class ReduceSumOpMaker : public ops::ReduceOpMaker {
 };
 
 DECLARE_INFER_SHAPE_FUNCTOR(reduce_sum, ReduceSumInferShapeFunctor,
-                            PD_INFER_META(phi::ReduceInferMetaBase));
+                            PD_INFER_META(phi::SumRawInferMeta));
 
 REGISTER_OPERATOR(reduce_sum, ops::ReduceOp, ReduceSumOpMaker,
                   ops::ReduceSumVarTypeInference,
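Note: with this rename, reduce_max and reduce_mean now register phi::ReduceInferMetaBase (no dtype argument) as their InferShape source, while reduce_sum keeps the dtype-aware variant under its new name phi::SumRawInferMeta. As a rough illustration of what such a registration does — an InferMeta function only fills in output metadata, and the DECLARE_INFER_SHAPE_FUNCTOR / PD_INFER_META machinery wraps it into the functor handed to REGISTER_OPERATOR — here is a minimal standalone sketch. The Toy* names, the infer-meta function, and the functor below are simplified stand-ins, not Paddle's actual types or macro expansion.

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

// Simplified stand-in for phi::MetaTensor: shape metadata only, no storage.
struct ToyMetaTensor {
  std::vector<int64_t> dims;
};

// Stand-in for an InferMeta function such as ReduceInferMetaBase: it describes
// the output (here: reduce everything) without computing any data.
void ToyReduceInferMeta(const ToyMetaTensor& x, bool keep_dim,
                        ToyMetaTensor* out) {
  out->dims = keep_dim ? std::vector<int64_t>(x.dims.size(), 1)
                       : std::vector<int64_t>{};
}

// Stand-in for the functor DECLARE_INFER_SHAPE_FUNCTOR would generate: a thin
// adapter that forwards the operator's inputs/attributes to the InferMeta
// function at InferShape time.
struct ToyInferShapeFunctor {
  std::function<void(const ToyMetaTensor&, bool, ToyMetaTensor*)> infer_meta;
  void operator()(const ToyMetaTensor& x, bool keep_dim,
                  ToyMetaTensor* out) const {
    infer_meta(x, keep_dim, out);
  }
};

int main() {
  ToyInferShapeFunctor functor{ToyReduceInferMeta};
  ToyMetaTensor x{{2, 3, 4}};
  ToyMetaTensor out;
  functor(x, /*keep_dim=*/true, &out);
  std::cout << out.dims.size() << "\n";  // 3: every reduced dim kept as 1
  return 0;
}
```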
52 changes: 18 additions & 34 deletions paddle/phi/infermeta/unary.cc
@@ -382,7 +382,7 @@ void ReshapeWithXShapeInferMeta(const MetaTensor& x,
   ReshapeInferMeta(x, shape, out, config);
 }
 
-/* Why not use ReduceInferMetaBase directly?
+/* Why not use SumRawInferMeta directly?
    Because we need make InferMetaFunction's args follow the design of api.yaml
  */
 void SumInferMeta(const MetaTensor& x,
@@ -391,7 +391,7 @@ void SumInferMeta(const MetaTensor& x,
                   bool keep_dim,
                   MetaTensor* out) {
   bool reduce_all = false;
-  ReduceInferMetaBase(x, axis, keep_dim, reduce_all, dtype, out);
+  SumRawInferMeta(x, axis, keep_dim, reduce_all, dtype, out);
 }
 
 DDim ReduceInferDim(const MetaTensor& x,
@@ -463,12 +463,12 @@ DDim ReduceInferDim(const MetaTensor& x,
   return out_dim;
 }
 
-void ReduceInferMetaBase(const MetaTensor& x,
-                         const std::vector<int64_t>& axis,
-                         bool keep_dim,
-                         bool reduce_all,
-                         DataType dtype,
-                         MetaTensor* out) {
+void SumRawInferMeta(const MetaTensor& x,
+                     const std::vector<int64_t>& axis,
+                     bool keep_dim,
+                     bool reduce_all,
+                     DataType dtype,
+                     MetaTensor* out) {
   DDim out_dim = ReduceInferDim(x, axis, keep_dim, reduce_all);
 
   DataType out_dtype;
@@ -488,39 +488,23 @@ void ReduceInferMetaBase(const MetaTensor& x,
   out->set_layout(x.layout());
 }
 
-void MaxRawInferMeta(const MetaTensor& x,
-                     const std::vector<int64_t>& axis,
-                     bool keep_dim,
-                     bool reduce_all,
-                     MetaTensor* out) {
+void ReduceInferMetaBase(const MetaTensor& x,
+                         const std::vector<int64_t>& axis,
+                         bool keep_dim,
+                         bool reduce_all,
+                         MetaTensor* out) {
   DDim out_dim = ReduceInferDim(x, axis, keep_dim, reduce_all);
   out->set_dims(out_dim);
   out->set_dtype(x.dtype());
   out->set_layout(x.layout());
 }
 
-void MaxInferMeta(const MetaTensor& x,
-                  const std::vector<int64_t>& axis,
-                  bool keep_dim,
-                  MetaTensor* out) {
-  bool reduce_all = false;
-  MaxRawInferMeta(x, axis, keep_dim, reduce_all, out);
-}
-
-void MeanRawInferMeta(const MetaTensor& x,
-                      const std::vector<int64_t>& axis,
-                      bool keep_dim,
-                      bool reduce_all,
-                      MetaTensor* out) {
-  ReduceInferMetaBase(x, axis, keep_dim, reduce_all, DataType::UNDEFINED, out);
-}
-
-void MeanInferMeta(const MetaTensor& x,
-                   const std::vector<int64_t>& axis,
-                   bool keep_dim,
-                   MetaTensor* out) {
+void ReduceInferMeta(const MetaTensor& x,
+                     const std::vector<int64_t>& axis,
+                     bool keep_dim,
+                     MetaTensor* out) {
   bool reduce_all = false;
-  ReduceInferMetaBase(x, axis, keep_dim, reduce_all, DataType::UNDEFINED, out);
+  ReduceInferMetaBase(x, axis, keep_dim, reduce_all, out);
 }
 
 void TransferLayoutInferMeta(const MetaTensor& x,
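Note: all of the renamed functions above delegate their shape work to ReduceInferDim. For reference, the reduce shape rule they share boils down to the sketch below — a standalone simplification, not Paddle code; the real ReduceInferDim additionally validates the axis values and handles duplicates and other corner cases.

```cpp
#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

// Simplified reduce shape rule: normalize negative axes, reduce every dim when
// reduce_all is set or axis is empty, then either keep reduced dims as 1
// (keep_dim) or drop them.
std::vector<int64_t> ReduceDims(const std::vector<int64_t>& x_dims,
                                std::vector<int64_t> axis,
                                bool keep_dim,
                                bool reduce_all) {
  const int64_t rank = static_cast<int64_t>(x_dims.size());
  if (reduce_all || axis.empty()) {
    axis.resize(rank);
    for (int64_t i = 0; i < rank; ++i) axis[i] = i;
  }
  std::set<int64_t> reduced;
  for (int64_t a : axis) reduced.insert(a < 0 ? a + rank : a);

  std::vector<int64_t> out;
  for (int64_t i = 0; i < rank; ++i) {
    if (reduced.count(i)) {
      if (keep_dim) out.push_back(1);
    } else {
      out.push_back(x_dims[i]);
    }
  }
  return out;
}

int main() {
  auto d1 = ReduceDims({2, 3, 4}, {1}, /*keep_dim=*/false, /*reduce_all=*/false);
  auto d2 = ReduceDims({2, 3, 4}, {-1}, /*keep_dim=*/true, /*reduce_all=*/false);
  for (auto v : d1) std::cout << v << ' ';  // 2 4
  std::cout << '\n';
  for (auto v : d2) std::cout << v << ' ';  // 2 3 1
  std::cout << '\n';
  return 0;
}
```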
27 changes: 8 additions & 19 deletions paddle/phi/infermeta/unary.h
@@ -85,35 +85,24 @@ void ReshapeWithXShapeInferMeta(const MetaTensor& x,
                                 MetaTensor* out,
                                 MetaConfig config = MetaConfig());
 
+void SumRawInferMeta(const MetaTensor& x,
+                     const std::vector<int64_t>& axis,
+                     bool keep_dim,
+                     bool reduce_all,
+                     DataType dtype,
+                     MetaTensor* out);
+
 void ReduceInferMetaBase(const MetaTensor& x,
                          const std::vector<int64_t>& axis,
                          bool keep_dim,
                          bool reduce_all,
-                         DataType dtype,
                          MetaTensor* out);
 
-void MeanRawInferMeta(const MetaTensor& x,
-                      const std::vector<int64_t>& axis,
-                      bool keep_dim,
-                      bool reduce_all,
-                      MetaTensor* out);
-
-void MaxRawInferMeta(const MetaTensor& x,
+void ReduceInferMeta(const MetaTensor& x,
                      const std::vector<int64_t>& axis,
                      bool keep_dim,
-                     bool reduce_all,
                      MetaTensor* out);
 
-void MaxInferMeta(const MetaTensor& x,
-                  const std::vector<int64_t>& axis,
-                  bool keep_dim,
-                  MetaTensor* out);
-
-void MeanInferMeta(const MetaTensor& x,
-                   const std::vector<int64_t>& axis,
-                   bool keep_dim,
-                   MetaTensor* out);
-
 void SumInferMeta(const MetaTensor& x,
                   const std::vector<int64_t>& axis,
                   DataType dtype,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/math_kernel.h
@@ -156,7 +156,7 @@ DenseTensor Mean(const Context& dev_ctx,
                  bool keep_dim) {
   DenseTensor dense_out;
   MetaTensor meta_out(&dense_out);
-  ReduceInferMetaBase(x, axis, keep_dim, false, x.dtype(), &meta_out);
+  SumRawInferMeta(x, axis, keep_dim, false, x.dtype(), &meta_out);
   MeanKernel<T, Context>(dev_ctx, x, axis, keep_dim, &dense_out);
   return dense_out;
 }
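Note: the Mean helper above follows phi's usual ordering — the InferMeta function is run first on a MetaTensor that wraps the output, so its dims/dtype/layout are set before the kernel writes any data. A minimal stand-in sketch of that ordering (the Toy* names are illustrative only, not phi's API):

```cpp
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

struct ToyTensor {
  std::vector<int64_t> dims;
  std::vector<float> data;
};

// Stand-in for an InferMeta call: describe the output before computing it.
void ToyMeanInferMeta(const ToyTensor& x, ToyTensor* out) {
  out->dims = {1};  // this toy example reduces over all elements
  out->data.resize(1);
}

// Stand-in for the compute kernel: fills the already-shaped output.
void ToyMeanKernel(const ToyTensor& x, ToyTensor* out) {
  out->data[0] = std::accumulate(x.data.begin(), x.data.end(), 0.0f) /
                 static_cast<float>(x.data.size());
}

ToyTensor ToyMean(const ToyTensor& x) {
  ToyTensor out;
  ToyMeanInferMeta(x, &out);  // shape/dtype first (as SumRawInferMeta above)
  ToyMeanKernel(x, &out);     // then the computation
  return out;
}

int main() {
  ToyTensor x{{4}, {1.f, 2.f, 3.f, 4.f}};
  std::cout << ToyMean(x).data[0] << "\n";  // 2.5
  return 0;
}
```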
8 changes: 5 additions & 3 deletions paddle/phi/ops/compat/reduce_sig.cc
@@ -21,7 +21,7 @@ KernelSignature ReduceSumOpArgumentMapping(const ArgumentMappingContext& ctx) {
   bool reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
   // When ctx is InferShapeArgumentMappingContext, the reduce_all is used in
   // InferShape, so we must return the "sum_raw" KernelSignature.
-  // And the InferMeta function(i.e. ReduceInferMetaBase) is accordance with
+  // And the InferMeta function(i.e. SumRawInferMeta) is accordance with
   // the "sum_raw" KernelSignature
   if (ctx.IsForInferShape() || reduce_all) {
     return KernelSignature("sum_raw",
@@ -40,7 +40,8 @@ KernelSignature ReduceMeanOpArgumentMapping(const ArgumentMappingContext& ctx) {
   bool reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
   // When ctx is InferShapeArgumentMappingContext, the reduce_all is used in
   // InferShape, so we must return the "mean_raw" KernelSignature.
-  // And the InferMeta function(i.e. MeanRawInferMeta) is accordance with the
+  // And the InferMeta function(i.e. ReduceInferMetaBase) is accordance with
+  // the
   // "mean_raw" KernelSignature
   if (ctx.IsForInferShape() || reduce_all) {
     return KernelSignature(
@@ -61,7 +62,8 @@ KernelSignature ReduceMaxOpArgumentMapping(const ArgumentMappingContext& ctx) {
   bool reduce_all = paddle::any_cast<bool>(ctx.Attr("reduce_all"));
   // When ctx is InferShapeArgumentMappingContext, the reduce_all is used in
   // InferShape, so we must return the "max_raw" KernelSignature.
-  // And the InferMeta function(i.e. MaxRawInferMeta) is accordance with the
+  // And the InferMeta function(i.e. ReduceInferMetaBase) is accordance with
+  // the
   // "max_raw" KernelSignature
   if (ctx.IsForInferShape() || reduce_all) {
     return KernelSignature(
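Note: the three mappings above share one rule, which the updated comments describe — while shapes are being inferred (ctx.IsForInferShape()) or when reduce_all is set, the "*_raw" kernel signature must be selected so that it lines up with the raw InferMeta function (SumRawInferMeta / ReduceInferMetaBase); otherwise the compact signature without reduce_all is used. A minimal sketch of just that branch, with a stand-in context type rather than phi's ArgumentMappingContext:

```cpp
#include <iostream>
#include <string>

// Stand-in for the relevant bits of ArgumentMappingContext.
struct ToyMappingContext {
  bool is_for_infer_shape;
  bool reduce_all;
};

// Returns which kernel signature name would be chosen under the rule described
// in the comments above.
std::string SelectReduceSignature(const ToyMappingContext& ctx,
                                  const std::string& raw_name,
                                  const std::string& name) {
  if (ctx.is_for_infer_shape || ctx.reduce_all) {
    return raw_name;  // e.g. "sum_raw", "mean_raw", "max_raw"
  }
  return name;  // e.g. "sum", "mean", "max"
}

int main() {
  std::cout << SelectReduceSignature({true, false}, "sum_raw", "sum") << "\n";
  std::cout << SelectReduceSignature({false, true}, "mean_raw", "mean") << "\n";
  std::cout << SelectReduceSignature({false, false}, "max_raw", "max") << "\n";
  // prints: sum_raw, mean_raw, max
  return 0;
}
```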
2 changes: 1 addition & 1 deletion python/paddle/utils/code_gen/api.yaml
@@ -124,7 +124,7 @@
   args : (Tensor x, int64_t[] axis={}, bool keep_dim=false)
   output : Tensor
   infer_meta :
-    func : MeanInferMeta
+    func : ReduceInferMeta
   kernel :
     func : mean

1 comment on commit 9a8f555

@paddle-bot-old

🕵️ CI failures summary

🔍 PR: #40225 Commit ID: 9a8f555 contains failed CI.

🔹 Failed: PR-CI-Coverage

test_failed
2022-03-08 18:24:43 The following tests FAILED:
2022-03-08 18:24:43 1561 - test_collective_process_group (Failed)
2022-03-08 18:24:43 1561 - test_collective_process_group (Failed)
2022-03-08 18:24:43 1561 - test_collective_process_group (Timeout)
2022-03-08 18:24:43 + EXCODE=8
2022-03-08 18:24:43 + echo 8
2022-03-08 18:24:43 8
2022-03-08 18:24:43 + echo 'ipipe_log_param_EXCODE: 8'
2022-03-08 18:24:43 ipipe_log_param_EXCODE: 8
2022-03-08 18:24:43 + '[' 8 -ne 0 ']'
2022-03-08 18:24:43 + '[' 8 -ne 9 ']'
2022-03-08 18:24:43 + exit 8
2022-03-08 18:24:43 {build code state=8}
2022-03-08 18:24:53 kill agent BUILD_CODE_FAIL
