diff --git a/paddle/phi/kernels/funcs/elementwise_functor.h b/paddle/phi/kernels/funcs/elementwise_functor.h
index 2373d38925e474..7dbe375de171db 100644
--- a/paddle/phi/kernels/funcs/elementwise_functor.h
+++ b/paddle/phi/kernels/funcs/elementwise_functor.h
@@ -1045,6 +1045,10 @@ compute_pow(const T a, const T b) {
   // it will return a float number like 2.99... , which floor to 2
   // when cast to int by default and it is wrong.
   // Use llrint to cast it to the nearest integer, which is 3.
+  T zero = static_cast<T>(0);
+  if (a == zero && b < zero) {
+    return zero;
+  }
   return llrint(pow(static_cast<double>(a), static_cast<double>(b)));
 }
 template <typename T, typename MPType>
@@ -1057,6 +1061,11 @@ compute_pow(const T a, const T b) {
 #else
 template <typename T, typename MPType>
 inline HOSTDEVICE T compute_pow(const T a, const T b) {
+  if constexpr (std::is_integral<T>::value) {
+    if (a == static_cast<T>(0) && b < static_cast<T>(0)) {
+      return static_cast<T>(0);
+    }
+  }
   MPType a_val = static_cast<MPType>(a);
   MPType b_val = static_cast<MPType>(b);
 #ifdef PADDLE_WITH_XPU_KP
diff --git a/paddle/phi/kernels/gpu/activation_kernel.cu b/paddle/phi/kernels/gpu/activation_kernel.cu
index d54370e4aa4d24..4ef3b60cedeaef 100644
--- a/paddle/phi/kernels/gpu/activation_kernel.cu
+++ b/paddle/phi/kernels/gpu/activation_kernel.cu
@@ -208,6 +208,13 @@ void PowKernel(const Context& dev_ctx,
                const DenseTensor& x,
                const Scalar& factor,
                DenseTensor* out) {
+  if constexpr (std::is_integral<T>::value) {
+    PADDLE_ENFORCE_GE(
+        factor.to<float>(),
+        0,
+        common::errors::InvalidArgument(
+            "Integers to negative integer powers are not allowed."));
+  }
   if (factor.to<float>() == 0) {
     std::vector<int64_t> vec_dims = common::vectorize(out->dims());
     phi::Full<T, Context>(
diff --git a/test/legacy_test/test_elementwise_pow_op.py b/test/legacy_test/test_elementwise_pow_op.py
index b8edc8bea01ed6..75d22bdc1c5ec8 100644
--- a/test/legacy_test/test_elementwise_pow_op.py
+++ b/test/legacy_test/test_elementwise_pow_op.py
@@ -62,6 +62,57 @@ def test_check_grad_normal(self):
         )
 
 
+class TestElementwisePowOp_ZeroBaseNumber1(TestElementwisePowOp):
+    def setUp(self):
+        self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
+        self.public_python_api = paddle.pow
+        self.prim_op_type = "prim"
+
+        self.inputs = {
+            'X': np.random.randint(-100, -1, size=[20, 5]).astype("int32"),
+            'Y': np.random.randint(-200, 0, size=[20, 5], dtype="int32"),
+        }
+        self.outputs = {'Out': np.zeros([20, 5]).astype("int32")}
+
+    def test_check_grad_normal(self):
+        pass
+
+
+class TestElementwisePowOp_ZeroBaseNumber2(TestElementwisePowOp):
+    def setUp(self):
+        self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
+        self.public_python_api = paddle.pow
+        self.prim_op_type = "prim"
+
+        self.inputs = {
+            'X': np.random.randint(2, 100, size=[20, 5]).astype("int32"),
+            'Y': np.random.randint(-200, 0, size=[20, 5], dtype="int32"),
+        }
+        self.outputs = {'Out': np.zeros([20, 5]).astype("int32")}
+
+    def test_check_grad_normal(self):
+        pass
+
+
+class TestElementwisePowOp_ZeroBaseNumber3(TestElementwisePowOp):
+    def setUp(self):
+        self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
+        self.public_python_api = paddle.pow
+        self.prim_op_type = "prim"
+
+        self.inputs = {
+            'X': np.asarray([-1, 0, 1]),
+            'Y': np.asarray([-1, -1, -1]),
+        }
+        self.outputs = {'Out': np.asarray([-1, 0, 1])}
+
+    def test_check_grad_normal(self):
+        pass
+
+
 class TestElementwisePowOp_ZeroDim1(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"