move elementwise_max/min/mod into phi
FlyingQianMM committed Mar 23, 2022
1 parent b1a4668 commit a2ef2c4
Showing 26 changed files with 610 additions and 627 deletions.
@@ -54,7 +54,7 @@ USE_OP(sum);
 USE_OP_ITSELF(slice_grad);
 USE_OP_ITSELF(lookup_table_grad);
 USE_OP(sqrt);
-USE_OP(elementwise_max);
+USE_OP_ITSELF(elementwise_max);
 USE_OP_ITSELF(elementwise_div);
 USE_OP_ITSELF(sgd);
 USE_OP(squared_l2_norm);
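
The one-line switch above is the test-side half of the migration. In fluid, USE_OP force-links both the operator definition and its CPU kernel registration, while USE_OP_ITSELF force-links the operator definition alone; since this commit deletes the fluid kernel registrations for elementwise_max, the stronger macro would no longer link. A minimal sketch of the distinction (the real expansions live in paddle/fluid/framework/op_registry.h and are paraphrased here, not quoted):

    // Sketch only -- paraphrased macro semantics, not the actual expansions:
    USE_OP(sqrt);                    // pulls in the operator and its fluid
                                     // CPU kernel symbols at link time
    USE_OP_ITSELF(elementwise_max);  // pulls in the operator definition only;
                                     // its kernels are now resolved via phi
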
64 changes: 9 additions & 55 deletions paddle/fluid/operators/elementwise/elementwise_functor.h
@@ -70,75 +70,29 @@ struct InverseFloorDivFunctor {

 // Maximum
 template <typename T>
-struct MaxFunctor {
-  inline HOSTDEVICE T operator()(const T a, const T b) const {
-    return a > b ? a : b;
-  }
-};
+using MaxFunctor = phi::funcs::MaximumFunctor<T>;

 // Minmum
 template <typename T>
-struct MinFunctor {
-  inline HOSTDEVICE T operator()(const T a, const T b) const {
-    return a < b ? a : b;
-  }
-};
+using MinFunctor = phi::funcs::MinimumFunctor<T>;

 template <typename T>
 using Complex = paddle::platform::complex<T>;

-// Ternary compare
 template <typename T>
-struct MinGradXFunctor {
-  inline HOSTDEVICE T operator()(const T x, const T y, const T dout) const {
-    return dout * static_cast<T>(x < y);
-  }
-};
+using MaxGradXFunctor = phi::funcs::MaxGradXFunctor<T>;
 template <typename T>
-struct MinGradYFunctor {
-  inline HOSTDEVICE T operator()(const T x, const T y, const T dout) const {
-    return dout * static_cast<T>(x >= y);
-  }
-};
-
+using MaxGradYFunctor = phi::funcs::MaxGradYFunctor<T>;
 template <typename InT, typename OutT>
-struct MinGradXYFunctor {
-  inline HOSTDEVICE phi::Array<OutT, 2> operator()(const InT x, const InT y,
-                                                   const InT dout) {
-    phi::Array<OutT, 2> outs;
-    // dx = dout * (x < y)
-    outs[0] = static_cast<OutT>(dout * static_cast<InT>(x < y));
-    // dy = dout * (x >= y)
-    outs[1] = static_cast<OutT>(dout * static_cast<InT>(x >= y));
-    return outs;
-  }
-};
+using MaxGradXYFunctor = phi::funcs::MaxGradXYFunctor<InT, OutT>;
+

-// Ternary compare
 template <typename T>
-struct MaxGradXFunctor {
-  inline HOSTDEVICE T operator()(const T x, const T y, const T dout) const {
-    return dout * static_cast<T>(x > y);
-  }
-};
+using MinGradXFunctor = phi::funcs::MinGradXFunctor<T>;
 template <typename T>
-struct MaxGradYFunctor {
-  inline HOSTDEVICE T operator()(const T x, const T y, const T dout) const {
-    return dout * static_cast<T>(x <= y);
-  }
-};
+using MinGradYFunctor = phi::funcs::MinGradYFunctor<T>;
 template <typename InT, typename OutT>
-struct MaxGradXYFunctor {
-  inline HOSTDEVICE phi::Array<OutT, 2> operator()(const InT x, const InT y,
-                                                   const InT dout) {
-    phi::Array<OutT, 2> outs;
-    // dx = dout * (x > y)
-    outs[0] = static_cast<OutT>(dout * static_cast<InT>(x > y));
-    // dy = dout * (x <= y)
-    outs[1] = static_cast<OutT>(dout * static_cast<InT>(x <= y));
-    return outs;
-  }
-};
+using MinGradXYFunctor = phi::funcs::MinGradXYFunctor<InT, OutT>;

 }  // namespace operators
 }  // namespace paddle
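
The aliases above are expected to resolve to definitions that mirror the deleted fluid code. For readers without the phi tree at hand, a minimal sketch, assuming the phi header (e.g. paddle/phi/kernels/funcs/elementwise_functor.h) defines the functors with the same bodies as the code removed here:

    // Sketch only: names come from the aliases above, bodies from the deleted
    // fluid functors; the exact phi header path and formatting are assumptions.
    namespace phi {
    namespace funcs {

    template <typename T>
    struct MaximumFunctor {
      inline HOSTDEVICE T operator()(const T a, const T b) const {
        return a > b ? a : b;
      }
    };

    // Gradient of max w.r.t. x: dout flows to x only where x strictly won the
    // comparison; on ties (x == y) the whole gradient goes to y, since the
    // paired MaxGradYFunctor uses x <= y.
    template <typename T>
    struct MaxGradXFunctor {
      inline HOSTDEVICE T operator()(const T x, const T y, const T dout) const {
        return dout * static_cast<T>(x > y);
      }
    };

    }  // namespace funcs
    }  // namespace phi
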
19 changes: 0 additions & 19 deletions paddle/fluid/operators/elementwise/elementwise_max_op.cc
@@ -12,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/fluid/operators/elementwise/elementwise_max_op.h"
-
 #include <string>

 #include "paddle/fluid/operators/elementwise/elementwise_op.h"
@@ -119,23 +117,6 @@ REGISTER_OPERATOR(elementwise_max, ops::ElementwiseOp,

 REGISTER_OPERATOR(elementwise_max_grad, ops::ElementwiseOpGrad);

-REGISTER_OP_CPU_KERNEL(
-    elementwise_max,
-    ops::ElementwiseMaxKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::ElementwiseMaxKernel<paddle::platform::CPUDeviceContext, double>,
-    ops::ElementwiseMaxKernel<paddle::platform::CPUDeviceContext, int>,
-    ops::ElementwiseMaxKernel<paddle::platform::CPUDeviceContext, int64_t>,
-    ops::ElementwiseMaxKernel<paddle::platform::CPUDeviceContext,
-                              paddle::platform::bfloat16>);
-REGISTER_OP_CPU_KERNEL(
-    elementwise_max_grad,
-    ops::ElementwiseMaxGradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::ElementwiseMaxGradKernel<paddle::platform::CPUDeviceContext, double>,
-    ops::ElementwiseMaxGradKernel<paddle::platform::CPUDeviceContext, int>,
-    ops::ElementwiseMaxGradKernel<paddle::platform::CPUDeviceContext, int64_t>,
-    ops::ElementwiseMaxGradKernel<paddle::platform::CPUDeviceContext,
-                                  paddle::platform::bfloat16>);
-
 REGISTER_OP_VERSION(elementwise_max)
     .AddCheckpoint(
         R"ROC(Register elementwise_max for adding the attribute of Scale_y)ROC",
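
Nothing replaces the deleted REGISTER_OP_CPU_KERNEL blocks in this file: under the phi migration, kernel registration moves to the phi side of the tree. A hedged sketch of what that registration plausibly looks like; the kernel name, function, and source file here are assumptions, and only the dtype list is taken from the deleted fluid registration:

    // Hypothetical phi-side replacement (e.g. in
    // paddle/phi/kernels/cpu/elementwise_kernel.cc); names assumed:
    PD_REGISTER_KERNEL(maximum,
                       CPU,
                       ALL_LAYOUT,
                       phi::MaximumKernel,
                       float,
                       double,
                       int,
                       int64_t,
                       phi::dtype::bfloat16) {}
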
88 changes: 0 additions & 88 deletions paddle/fluid/operators/elementwise/elementwise_max_op.cu

This file was deleted.

93 changes: 0 additions & 93 deletions paddle/fluid/operators/elementwise/elementwise_max_op.h

This file was deleted.
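
The deleted elementwise_max_op.h carried the fluid kernel definitions that the phi kernels supersede. As context, a pattern-based reconstruction of the usual shape of such a kernel (following the sibling elementwise ops, not a verbatim quote of the removed file):

    // Reconstructed sketch of the deleted fluid compute kernel; assumes the
    // usual fluid includes (e.g. elementwise_op_function.h) and the MaxFunctor
    // alias shown earlier.
    template <typename DeviceContext, typename T>
    class ElementwiseMaxKernel : public framework::OpKernel<T> {
     public:
      void Compute(const framework::ExecutionContext& ctx) const override {
        auto* x = ctx.Input<framework::LoDTensor>("X");
        auto* y = ctx.Input<framework::LoDTensor>("Y");
        auto* z = ctx.Output<framework::LoDTensor>("Out");
        z->mutable_data<T>(ctx.GetPlace());
        int axis = ctx.Attr<int>("axis");
        // Broadcast-aware elementwise compute driven by MaxFunctor.
        ElementwiseComputeEx<MaxFunctor<T>, DeviceContext, T>(
            ctx, x, y, axis, MaxFunctor<T>(), z);
      }
    };
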

@@ -12,7 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/fluid/operators/elementwise/elementwise_max_op.h"
 #include "paddle/fluid/operators/elementwise/elementwise_npu.h"
 #include "paddle/fluid/platform/device/npu/npu_op_runner.h"

@@ -14,7 +14,6 @@ limitations under the License. */

 #ifdef PADDLE_WITH_XPU

-#include "paddle/fluid/operators/elementwise/elementwise_max_op.h"
 #include "paddle/fluid/operators/elementwise/elementwise_op.h"
 #include "paddle/fluid/operators/elementwise/elementwise_xpu.h"
 namespace paddle {
15 changes: 0 additions & 15 deletions paddle/fluid/operators/elementwise/elementwise_min_op.cc
@@ -12,8 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

-#include "paddle/fluid/operators/elementwise/elementwise_min_op.h"
-
 #include <string>

 #include "paddle/fluid/operators/elementwise/elementwise_op.h"
@@ -119,19 +117,6 @@ REGISTER_OPERATOR(elementwise_min, ops::ElementwiseOp,

 REGISTER_OPERATOR(elementwise_min_grad, ops::ElementwiseOpGrad);

-REGISTER_OP_CPU_KERNEL(
-    elementwise_min,
-    ops::ElementwiseMinKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::ElementwiseMinKernel<paddle::platform::CPUDeviceContext, double>,
-    ops::ElementwiseMinKernel<paddle::platform::CPUDeviceContext, int>,
-    ops::ElementwiseMinKernel<paddle::platform::CPUDeviceContext, int64_t>);
-REGISTER_OP_CPU_KERNEL(
-    elementwise_min_grad,
-    ops::ElementwiseMinGradKernel<paddle::platform::CPUDeviceContext, float>,
-    ops::ElementwiseMinGradKernel<paddle::platform::CPUDeviceContext, double>,
-    ops::ElementwiseMinGradKernel<paddle::platform::CPUDeviceContext, int>,
-    ops::ElementwiseMinGradKernel<paddle::platform::CPUDeviceContext, int64_t>);
-
 REGISTER_OP_VERSION(elementwise_min)
     .AddCheckpoint(
         R"ROC(Register elementwise_min for adding the attribute of Scale_y)ROC",
[Diffs for the remaining changed files of this commit are not shown.]

1 comment on commit a2ef2c4

@paddle-bot-old


Congratulations! Your pull request passed all required CI checks. You can ask the reviewer(s) to approve and merge. 🎉
