Skip to content
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions paddle/phi/kernels/cpu/i0_grad_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,10 @@ void I0GradKernel(const Context& ctx,
const DenseTensor& x,
const DenseTensor& out_grad,
DenseTensor* x_grad) {
if (x_grad && x_grad->numel() == 0) {
ctx.template Alloc<T>(x_grad);
return;
}
auto size = x.numel();
auto* x_data = x.data<T>();
auto* out_grad_data = out_grad.data<T>();
Expand Down
4 changes: 4 additions & 0 deletions paddle/phi/kernels/cpu/i0_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,10 @@ namespace phi {

template <typename T, typename Context>
void I0Kernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
if (out && out->numel() == 0) {
ctx.template Alloc<T>(out);
return;
}
const int64_t size = x.numel();
const T* x_data = x.data<T>();
T* out_data = ctx.template Alloc<T>(out);
Expand Down
5 changes: 5 additions & 0 deletions paddle/phi/kernels/cpu/i0e_grad_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,11 @@ void I0eGradKernel(const Context& ctx,
const DenseTensor& out,
const DenseTensor& out_grad,
DenseTensor* x_grad) {
if (x_grad && x_grad->numel() == 0) {
ctx.template Alloc<T>(x_grad);
return;
}

auto size = x.numel();
auto* x_data = x.data<T>();
auto* out_data = out.data<T>();
Expand Down
4 changes: 4 additions & 0 deletions paddle/phi/kernels/cpu/i0e_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,10 @@ namespace phi {

template <typename T, typename Context>
void I0eKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
if (out && out->numel() == 0) {
ctx.template Alloc<T>(out);
return;
}
int64_t size = x.numel();
const T* x_data = x.data<T>();
T* out_data = ctx.template Alloc<T>(out);
Expand Down
4 changes: 4 additions & 0 deletions paddle/phi/kernels/cpu/i1_grad_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,10 @@ void I1GradKernel(const Context& ctx,
const DenseTensor& out,
const DenseTensor& out_grad,
DenseTensor* x_grad) {
if (x_grad && x_grad->numel() == 0) {
ctx.template Alloc<T>(x_grad);
return;
}
const int64_t size = x.numel();
const T* x_data = x.data<T>();
const T* out_data = out.data<T>();
Expand Down
4 changes: 4 additions & 0 deletions paddle/phi/kernels/cpu/i1_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,10 @@ namespace phi {

template <typename T, typename Context>
void I1Kernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
if (out && out->numel() == 0) {
ctx.template Alloc<T>(out);
return;
}
const int64_t size = x.numel();
const T* x_data = x.data<T>();
T* out_data = ctx.template Alloc<T>(out);
Expand Down
4 changes: 4 additions & 0 deletions paddle/phi/kernels/cpu/i1e_grad_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,10 @@ void I1eGradKernel(const Context& ctx,
const DenseTensor& out,
const DenseTensor& out_grad,
DenseTensor* x_grad) {
if (x_grad && x_grad->numel() == 0) {
ctx.template Alloc<T>(x_grad);
return;
}
const int64_t size = x.numel();
const T* x_data = x.data<T>();
const T* out_data = out.data<T>();
Expand Down
4 changes: 4 additions & 0 deletions paddle/phi/kernels/cpu/i1e_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,10 @@ namespace phi {

template <typename T, typename Context>
void I1eKernel(const Context& ctx, const DenseTensor& x, DenseTensor* out) {
if (out && out->numel() == 0) {
ctx.template Alloc<T>(out);
return;
}
const int64_t size = x.numel();
const T* x_data = x.data<T>();
T* out_data = ctx.template Alloc<T>(out);
Expand Down
59 changes: 59 additions & 0 deletions test/legacy_test/test_i0_op_0size.py
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

No need to create a new unit-test file — please merge these test classes into the existing unit-test file instead.

Original file line number Diff line number Diff line change
@@ -0,0 +1,59 @@
# Copyright (c) 2025 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

import numpy as np
import scipy.special

import paddle


def create_test_class(op_type, dtype, shape):
    """Build and register a TestCase checking ``paddle.<op_type>`` on a
    zero-size tensor against ``scipy.special.<op_type>``.

    Args:
        op_type (str): Bessel-function op name — "i0", "i0e", "i1" or "i1e".
        dtype (str): numpy dtype name used to build the input tensor.
        shape (list[int]): input shape; contains a 0 so numel() == 0.

    Returns:
        type: the generated ``unittest.TestCase`` subclass. It is also
        installed into this module's globals under the name
        ``"<op_type><dtype>_0SizeTest"`` so unittest discovery picks it up.
    """

    class Cls(unittest.TestCase):
        def test_zero_size(self):
            numpy_tensor_1 = np.random.rand(*shape).astype(dtype)
            paddle_x = paddle.to_tensor(numpy_tensor_1)
            paddle_x.stop_gradient = False

            # getattr instead of eval(): no string execution, and a
            # missing op fails with a clear AttributeError.
            paddle_api = getattr(paddle, op_type)
            paddle_out = paddle_api(paddle_x)
            numpy_api = getattr(scipy.special, op_type)
            numpy_out = numpy_api(numpy_tensor_1)

            # Values are vacuously equal (0 elements); this mainly checks
            # the kernel does not crash and produces a tensor at all.
            np.testing.assert_allclose(
                paddle_out.numpy(),
                numpy_out,
                1e-2,
                1e-2,
            )
            # The zero-size fast path must still propagate the shape.
            np.testing.assert_allclose(
                paddle_out.shape,
                numpy_out.shape,
            )

    cls_name = f"{op_type}{dtype}_0SizeTest"
    Cls.__name__ = cls_name
    globals()[cls_name] = Cls
    return Cls


op_list = ["i0", "i0e", "i1", "i1e"]

# (dtype, shape) combinations exercised for every op; each shape has a
# zero dimension so the resulting tensor has numel() == 0.
_dtype_shape_cases = [
    ("float32", [3, 4, 0]),
    ("float64", [3, 4, 0, 3, 4]),
    ("int32", [3, 4, 0]),
    ("int64", [3, 4, 0, 3, 4]),
]

for _op in op_list:
    for _dtype, _shape in _dtype_shape_cases:
        create_test_class(_op, _dtype, _shape)

if __name__ == '__main__':
    unittest.main()
Loading