Commit

【PaddlePaddle Hackathon 2】24: Add the nn.ChannelShuffle network-building API to Paddle (#40743)

* Add infermeta for ChannelShuffle

* Create channel_shuffle_grad_kernel.h

* Create channel_shuffle_kernel.h

* Create channel_shuffle_sig.cc

* Create channel_shuffle_op.cc

Description of the ChannelShuffle operator

* Create channel_shuffle_kernel_impl.h

Implementation of the ChannelShuffle kernel

* Create channel_shuffle_grad_kernel_impl.h

Implementation of the ChannelShuffle backward (gradient) kernel

* Add kernel register of channel shuffle and grad

Register the ChannelShuffle kernel and its gradient kernel

* add nn.functional.channel_shuffle

* add nn.ChannelShuffle

* Create test_channel_shuffle.py

* Update example of ChannelShuffle in vision.py

* Update test_channel_shuffle.py

* Move the channel_shuffle kernel implementations to a new location

* Fix code formatting

* Remove extra whitespace

* Improve error checking in channel_shuffle

* Update unary.cc

* Update channel_shuffle_op.cc

* Update test_channel_shuffle.py

* Update unary.cc

* add channel_shuffle

* Update test_channel_shuffle.py

* Update vision.py

* Adjust code formatting

* Update channel_shuffle_sig.cc

* Update the ChannelShuffle documentation

* Update the channel_shuffle documentation

* remove ChannelShuffleOpArgumentMapping

* add ChannelShuffleGradInferMeta

* Update channel_shuffle_op.cc

* Relocate the channel_shuffle kernel and its gradient kernel
BrilliantYuKaimin authored Apr 25, 2022
1 parent c2a05a9 commit bbaaf21
Showing 21 changed files with 877 additions and 0 deletions.
100 changes: 100 additions & 0 deletions paddle/fluid/operators/channel_shuffle_op.cc
@@ -0,0 +1,100 @@
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"

namespace paddle {
namespace operators {

class ChannelShuffleOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};

class ChannelShuffleOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"(Tensor, default Tensor<float>), "
"the input feature data of ChannelShuffleOp, the layout is "
"[N, C, H, W] or [N, H, W, C].");
AddOutput("Out",
"(Tensor, default Tensor<float>), the output of "
"ChannelShuffleOp. The layout is also [N, C, "
"H, W] or [N, H, W, C].");
AddAttr<int>("groups", "the number of groups to divide the input channels into.");
AddAttr<std::string>(
"data_format",
"An optional string from: \"NHWC\", \"NCHW\". "
"Defaults to \"NHWC\", Specify the data format of the input data.")
.SetDefault("NCHW");

AddComment(R"DOC(
Channel Shuffle operator
This operator divides channels in a tensor of shape :math:`(*, C, H, W)`
into :math:`g` groups and rearranges them as :math:`(*, C/g, g, H, W)`
while keeping the original tensor shape.
Please refer to the paper:
`ShuffleNet: An Extremely Efficient Convolutional Neural Network for
Mobile Devices <https://arxiv.org/abs/1707.01083>`_
by Zhang et al. (2017) for more details.
)DOC");
}
};

class ChannelShuffleGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};

template <typename T>
class ChannelShuffleGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("channel_shuffle_grad");
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetAttrMap(this->Attrs());
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
}
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(channel_shuffle, ChannelShuffleInferShapeFunctor,
PD_INFER_META(phi::ChannelShuffleInferMeta));

REGISTER_OPERATOR(channel_shuffle, ops::ChannelShuffleOp,
ops::ChannelShuffleOpMaker,
ops::ChannelShuffleGradOpMaker<paddle::framework::OpDesc>,
ops::ChannelShuffleGradOpMaker<paddle::imperative::OpBase>,
ChannelShuffleInferShapeFunctor);

DECLARE_INFER_SHAPE_FUNCTOR(channel_shuffle_grad,
ChannelShuffleGradInferShapeFunctor,
PD_INFER_META(phi::ChannelShuffleGradInferMeta));

REGISTER_OPERATOR(channel_shuffle_grad, ops::ChannelShuffleGradOp,
ChannelShuffleGradInferShapeFunctor);
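
As a quick reference for what the operator registered above computes, the reshape -> transpose -> reshape described in the DOC block can be sketched in NumPy. This is only an illustrative reference, not the Paddle kernel; the helper name channel_shuffle_ref is introduced here purely for demonstration.

import numpy as np

def channel_shuffle_ref(x, groups, data_format="NCHW"):
    # Split the channels into `groups` groups, swap the group axis with the
    # per-group channel axis, then flatten the two axes back into one.
    if data_format == "NCHW":
        n, c, h, w = x.shape
        return (x.reshape(n, groups, c // groups, h, w)
                 .transpose(0, 2, 1, 3, 4)
                 .reshape(n, c, h, w))
    else:  # NHWC
        n, h, w, c = x.shape
        return (x.reshape(n, h, w, groups, c // groups)
                 .transpose(0, 1, 2, 4, 3)
                 .reshape(n, h, w, c))

x = np.arange(2 * 6 * 4 * 4, dtype=np.float32).reshape(2, 6, 4, 4)
y = channel_shuffle_ref(x, groups=3)
assert y.shape == x.shape  # channel shuffle never changes the tensor shape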
16 changes: 16 additions & 0 deletions paddle/phi/infermeta/backward.cc
@@ -67,6 +67,22 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
}
}

void ChannelShuffleGradInferMeta(const MetaTensor& out_grad,
int groups,
const std::string& data_format,
MetaTensor* x_grad) {
auto do_dims = out_grad.dims();
PADDLE_ENFORCE_EQ(do_dims.size(),
4,
phi::errors::InvalidArgument(
"Input should be a 4-D tensor of format [N, C, H, W] "
"or [N, H, W, C], but got %u.",
do_dims.size()));
auto dx_dims = do_dims;
x_grad->set_dims(dx_dims);
x_grad->set_dtype(out_grad.dtype());
}

void ConvTransposeGradInferMeta(const MetaTensor& x,
const MetaTensor& filter,
const MetaTensor& dout,
5 changes: 5 additions & 0 deletions paddle/phi/infermeta/backward.h
@@ -37,6 +37,11 @@ void BilinearTensorProductGradInferMeta(const MetaTensor& x,
MetaTensor* dweight,
MetaTensor* dbias);

void ChannelShuffleGradInferMeta(const MetaTensor& out_grad,
int groups,
const std::string& data_format,
MetaTensor* x_grad);

void ConvTransposeGradInferMeta(const MetaTensor& x,
const MetaTensor& filter,
const MetaTensor& dout,
46 changes: 46 additions & 0 deletions paddle/phi/infermeta/unary.cc
@@ -2999,6 +2999,52 @@ void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out) {
out->set_dtype(DataType::INT64);
}

void ChannelShuffleInferMeta(const MetaTensor& x,
int groups,
const std::string& data_format,
MetaTensor* out) {
auto input_dims = x.dims();
PADDLE_ENFORCE_EQ(input_dims.size(),
4,
phi::errors::InvalidArgument(
"Input should be a 4-D tensor of format [N, C, H, W] "
"or [N, H, W, C], but got %u.",
input_dims.size()));
PADDLE_ENFORCE_GE(
groups,
1,
phi::errors::InvalidArgument("groups should be larger than 0."));
PADDLE_ENFORCE_EQ(data_format == "NCHW" || data_format == "NHWC",
true,
phi::errors::InvalidArgument(
"data_format must be one of "
"NCHW and NHWC. But recevied data_format: %s",
data_format));

const bool channel_last = (data_format == "NHWC");

if (!channel_last) {
PADDLE_ENFORCE_EQ(input_dims[1] % groups,
0,
phi::errors::InvalidArgument(
"The number of groups to divide channels in [%u] "
"should divide the number of channel [%u]",
groups,
input_dims[1]));
} else {
PADDLE_ENFORCE_EQ(input_dims[3] % groups,
0,
phi::errors::InvalidArgument(
"The number of groups to divide channels in [%u] "
"should divide the number of channel [%u]",
groups,
input_dims[3]));
}
auto output_dims = input_dims;
out->set_dtype(x.dtype());
out->set_dims(output_dims);
}

} // namespace phi

PD_REGISTER_INFER_META_FN(flatten, phi::FlattenInferMeta);
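
The contract that ChannelShuffleInferMeta enforces can be summarized in a few lines of Python. This mirror is illustrative only; the C++ code above is the authoritative check.

def channel_shuffle_infer_shape(input_shape, groups, data_format="NCHW"):
    # Mirrors the checks in phi::ChannelShuffleInferMeta (illustrative only).
    if len(input_shape) != 4:
        raise ValueError("Input should be a 4-D tensor of format "
                         "[N, C, H, W] or [N, H, W, C].")
    if groups < 1:
        raise ValueError("groups should be larger than 0.")
    if data_format not in ("NCHW", "NHWC"):
        raise ValueError("data_format must be one of NCHW and NHWC.")
    channel_axis = 1 if data_format == "NCHW" else 3
    if input_shape[channel_axis] % groups != 0:
        raise ValueError("groups should evenly divide the number of channels.")
    # Channel shuffle is a pure permutation, so the output shape equals the input shape.
    return list(input_shape)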
5 changes: 5 additions & 0 deletions paddle/phi/infermeta/unary.h
@@ -435,4 +435,9 @@ void OneHotInferMeta(const MetaTensor& x, const Scalar& depth, MetaTensor* out);

void WhereIndexInferMeta(const MetaTensor& condition, MetaTensor* out);

void ChannelShuffleInferMeta(const MetaTensor& x,
int groups,
const std::string& data_format,
MetaTensor* out);

} // namespace phi
29 changes: 29 additions & 0 deletions paddle/phi/kernels/channel_shuffle_grad_kernel.h
@@ -0,0 +1,29 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <string>
#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void ChannelShuffleGradKernel(const Context& dev_ctx,
const DenseTensor& out_grad,
int groups,
const std::string& data_format,
DenseTensor* x_grad);

} // namespace phi
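
Because channel shuffle is a pure permutation of channels, its gradient simply routes each output gradient back to the channel it came from, and that inverse permutation is itself a channel shuffle with channels // groups groups. A quick NumPy check of this identity, reusing the channel_shuffle_ref helper sketched earlier (this illustrates the math, not necessarily how the Paddle grad kernel is written):

import numpy as np

x = np.random.rand(1, 6, 2, 2).astype(np.float32)
y = channel_shuffle_ref(x, groups=3)            # forward shuffle with 3 groups
x_back = channel_shuffle_ref(y, groups=6 // 3)  # inverse shuffle with C/groups = 2 groups
assert np.array_equal(x_back, x)                # the two shuffles are inverses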
29 changes: 29 additions & 0 deletions paddle/phi/kernels/channel_shuffle_kernel.h
@@ -0,0 +1,29 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <string>
#include "paddle/phi/core/dense_tensor.h"

namespace phi {

template <typename T, typename Context>
void ChannelShuffleKernel(const Context& dev_ctx,
const DenseTensor& x,
int groups,
const std::string& data_format,
DenseTensor* out);

} // namespace phi
26 changes: 26 additions & 0 deletions paddle/phi/kernels/cpu/channel_shuffle_grad_kernel.cc
@@ -0,0 +1,26 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/channel_shuffle_grad_kernel.h"
#include "paddle/phi/kernels/impl/channel_shuffle_grad_kernel_impl.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

PD_REGISTER_KERNEL(channel_shuffle_grad,
CPU,
ALL_LAYOUT,
phi::ChannelShuffleGradKernel,
float,
double) {}
26 changes: 26 additions & 0 deletions paddle/phi/kernels/cpu/channel_shuffle_kernel.cc
@@ -0,0 +1,26 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/channel_shuffle_kernel.h"
#include "paddle/phi/kernels/impl/channel_shuffle_kernel_impl.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

PD_REGISTER_KERNEL(channel_shuffle,
CPU,
ALL_LAYOUT,
phi::ChannelShuffleKernel,
float,
double) {}
26 changes: 26 additions & 0 deletions paddle/phi/kernels/gpu/channel_shuffle_grad_kernel.cu
@@ -0,0 +1,26 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/channel_shuffle_grad_kernel.h"
#include "paddle/phi/kernels/impl/channel_shuffle_grad_kernel_impl.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

PD_REGISTER_KERNEL(channel_shuffle_grad,
GPU,
ALL_LAYOUT,
phi::ChannelShuffleGradKernel,
float,
double) {}
26 changes: 26 additions & 0 deletions paddle/phi/kernels/gpu/channel_shuffle_kernel.cu
@@ -0,0 +1,26 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/phi/kernels/channel_shuffle_kernel.h"
#include "paddle/phi/kernels/impl/channel_shuffle_kernel_impl.h"

#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

PD_REGISTER_KERNEL(channel_shuffle,
GPU,
ALL_LAYOUT,
phi::ChannelShuffleKernel,
float,
double) {}
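
The Python entry points added by this commit (paddle.nn.ChannelShuffle and paddle.nn.functional.channel_shuffle, defined in files not shown in this excerpt) are expected to be used along the following lines; the exact signatures should be confirmed against vision.py.

import paddle
import paddle.nn.functional as F

x = paddle.arange(2 * 6 * 4 * 4, dtype="float32").reshape([2, 6, 4, 4])

# Functional form (assumed signature: channel_shuffle(x, groups, data_format="NCHW")).
y1 = F.channel_shuffle(x, 3)

# Layer form (assumed signature: ChannelShuffle(groups, data_format="NCHW")).
shuffle = paddle.nn.ChannelShuffle(3)
y2 = shuffle(x)

print(y1.shape)  # [2, 6, 4, 4]: the output shape matches the input shape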