Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions paddle/phi/kernels/cpu/fill_diagonal_grad_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ void FillDiagonalGradKernel(const Context& ctx,
DenseTensor* x_grad) {
if (x_grad) {
T* data = ctx.template Alloc<T>(x_grad);
if (x_grad->numel() == 0) return;
phi::Copy(ctx, out_grad, ctx.GetPlace(), false, x_grad);

auto dx_dims = x_grad->dims();
Expand Down
2 changes: 2 additions & 0 deletions paddle/phi/kernels/cpu/fill_diagonal_kernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,8 @@ void FillDiagonalKernel(const Context& ctx,
T temp_var = static_cast<T>(value);

T* out_data = ctx.template Alloc<T>(out);
if (out && out->numel() == 0) return;

phi::Copy(ctx, x, ctx.GetPlace(), false, out);

auto out_dims = out->dims();
Expand Down
1 change: 1 addition & 0 deletions paddle/phi/kernels/gpu/fill_diagonal_grad_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ void FillDiagonalGradKernel(const Context& ctx,
DenseTensor* x_grad) {
const int64_t kMaxBlockDim = 512;
auto* in_data = ctx.template Alloc<T>(x_grad);
if (x_grad && x_grad->numel() == 0) return;

phi::Copy(ctx, out_grad, ctx.GetPlace(), false, x_grad);

Expand Down
4 changes: 4 additions & 0 deletions paddle/phi/kernels/gpu/fill_diagonal_kernel.cu
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,10 @@ void FillDiagonalKernel(const Context& ctx,
int offset,
bool wrap,
DenseTensor* out) {
if (out && out->numel() == 0) {
ctx.template Alloc<T>(out);
return;
}
const int64_t kMaxBlockDim = 512;
phi::Copy(ctx, x, ctx.GetPlace(), false, out);

Expand Down
42 changes: 42 additions & 0 deletions test/legacy_test/test_tensor_fill_diagonal_.py
Original file line number Diff line number Diff line change
Expand Up @@ -296,5 +296,47 @@ def test_dim_larger2_normal(self):
)


class TensorFillDiagonal_ZeroSize(unittest.TestCase):
    """Regression tests: ``Tensor.fill_diagonal_`` on zero-size tensors.

    The forward and backward kernels must accept tensors containing a
    zero dimension without crashing; the result and its gradient are
    then empty tensors of the same shape.
    """

    def _test_normal(self, shape):
        # ``shape`` always contains a zero dim here, so the expected
        # arrays are empty.  np.zeros replaces the original
        # np.random.random: random "expected" values only ever passed
        # because comparing empty arrays is vacuously true, which read
        # as if concrete values were being verified.
        expected_np = np.zeros(shape)
        expected_grad = np.zeros(shape)

        places = []
        if (
            os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower()
            in ['1', 'true', 'on']
            or not base.core.is_compiled_with_cuda()
        ):
            places.append(base.CPUPlace())
        if base.core.is_compiled_with_cuda():
            places.append(base.CUDAPlace(0))

        for idx, p in enumerate(places):
            # First place (if present) is always CPU; any later one is GPU.
            if idx == 0:
                paddle.set_device('cpu')
            else:
                paddle.set_device('gpu')

            x = paddle.ones(shape)
            x.stop_gradient = False
            y = x * 2
            y.retain_grads()
            y.fill_diagonal_(1, offset=0, wrap=True)
            loss = y.sum()
            loss.backward()

            # The zero-size shape must survive both the in-place op and
            # its backward pass -- this is the actual regression check.
            self.assertEqual(tuple(y.shape), tuple(shape))
            self.assertEqual(tuple(y.grad.shape), tuple(shape))

            # Element-wise checks are vacuous for empty arrays but kept
            # so the test stays meaningful if extended to non-empty shapes.
            self.assertEqual(
                (y.numpy().astype('float32') == expected_np).all(), True
            )
            self.assertEqual(
                (y.grad.numpy().astype('float32') == expected_grad).all(),
                True,
            )

    def test_normal(self):
        # Zero rows with non-zero cols, and a fully empty tensor.
        self._test_normal([0, 3])
        self._test_normal([0, 0])


# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()