30 changes: 17 additions & 13 deletions paddle/phi/infermeta/binary.cc
@@ -888,19 +888,23 @@ void ConvTransposeInferMeta(const MetaTensor& x,
              common::make_ddim(output_size).to_str(),
              i,
              infer_shape));
-      PADDLE_ENFORCE_LT(
-          output_size[i],
-          infer_shape + strides[i],
-          errors::InvalidArgument(
-              "output_size of Op(ConvTransposeOp) should be less "
-              "than inferred size + stride. But received output_size = [%s], "
-              "whose dim %d is not less than the inferred output size (%d) + "
-              "stride (%d) = %d",
-              common::make_ddim(output_size).to_str(),
-              i,
-              infer_shape,
-              strides[i],
-              infer_shape + strides[i]));
+      if (common::product(x_dims) != 0) {
+        PADDLE_ENFORCE_LT(
+            output_size[i],
+            infer_shape + strides[i],
+            errors::InvalidArgument(
+                "output_size of Op(ConvTransposeOp) should be less "
+                "than inferred size + stride. But received output_size = "
+                "[%s], "
+                "whose dim %d is not less than the inferred output size (%d) "
+                "+ "
+                "stride (%d) = %d",
+                common::make_ddim(output_size).to_str(),
+                i,
+                infer_shape,
+                strides[i],
+                infer_shape + strides[i]));
+      }
}
output_shape.push_back(output_size[i]);
} else if (!output_padding.empty()) {
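For orientation, the guard above skips only the strict upper-bound check (`output_size[i] < inferred + stride`) when the input contains a zero dimension; shape inference itself is unchanged. A minimal dygraph sketch of the shapes involved, using the same data as the new test at the bottom of this PR and assuming a build that includes this change:

```python
import paddle
import paddle.nn.functional as F

# 0-size batch: common::product(x_dims) == 0, so the strict upper-bound
# ENFORCE above is skipped for each spatial dim.
x = paddle.randn([0, 4, 16, 4])
w = paddle.randn([4, 3, 3, 3])

# Inferred spatial size: (in - 1) * stride - 2 * padding + kernel
#   H: (16 - 1) * 1 - 0 + 3 = 18    W: (4 - 1) * 1 - 0 + 3 = 6
y = F.conv2d_transpose(x, w)
print(y.shape)  # [0, 3, 18, 6]
```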
21 changes: 20 additions & 1 deletion paddle/phi/kernels/gpu/conv_transpose_grad_kernel.cu
@@ -18,6 +18,7 @@
#include "paddle/common/layout.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/gpu/depthwise_conv.h"
#include "paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h"
@@ -77,7 +78,25 @@ void DepthwiseConv2dTransposeGradKernel(const Context& dev_ctx,
if (!dx && !dfilter) {
return;
}

// 0-size
if (x.numel() == 0) {
if (dx) dev_ctx.template Alloc<T>(dx);
if (dfilter) {
phi::Full<T, Context>(dev_ctx,
phi::IntArray(common::vectorize(dfilter->dims())),
0,
dfilter);
}
return;
}
if (filter.numel() == 0) {
if (dfilter) dev_ctx.template Alloc<T>(dfilter);
if (dx) {
phi::Full<T, Context>(
dev_ctx, phi::IntArray(common::vectorize(dx->dims())), 0, dx);
}
return;
}
std::vector<int> paddings_ = paddings;
std::vector<int> dilations_ = dilations;

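The early-return convention above recurs in the gpudnn, impl, and xpu grad kernels below: when `x` is 0-size, `dx` is only allocated (it is itself 0-size), while `dfilter` is generally a real, non-empty tensor and so is zero-filled with `phi::Full`; when `filter` is 0-size the roles swap. A sketch of the resulting dygraph behavior, using the shapes from the new test in this PR:

```python
import numpy as np
import paddle

x = paddle.to_tensor(np.random.randn(0, 4, 16, 4))
x.stop_gradient = False
w = paddle.to_tensor(np.random.randn(4, 3, 3, 3))
w.stop_gradient = False

y = paddle.nn.functional.conv2d_transpose(x, w)
y.sum().backward()

print(x.grad.shape)  # [0, 4, 16, 4] -- itself 0-size, nothing to fill
np.testing.assert_allclose(w.grad.numpy(), np.zeros(w.shape))  # zero-filled
```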
5 changes: 5 additions & 0 deletions paddle/phi/kernels/gpu/conv_transpose_kernel.cu
@@ -37,6 +37,11 @@ void DepthwiseConv2dTransposeKernel(const Context& dev_ctx,
const std::vector<int>& dilations,
const std::string& data_format,
DenseTensor* out) {
if (x.numel() == 0 || filter.numel() == 0) {
phi::Full<T, Context>(
dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out);
return;
}
const DataLayout data_layout = common::StringToDataLayout(data_format);
DenseTensor filter_ = filter;
dev_ctx.template Alloc<T>(out);
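Note that the forward 0-size path writes zeros with `phi::Full` rather than merely allocating `out`: a 0-size input does not imply a 0-size output. A hypothetical example (these shapes are not from the PR's tests, and assume infermeta accepts a zero-length spatial dim):

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 4, 0, 16])  # zero-length height => x.numel() == 0
w = paddle.randn([4, 3, 3, 3])

# H: (0 - 1) * 1 + 3 = 2, W: (16 - 1) * 1 + 3 = 18 -> out is non-empty,
# so a bare Alloc would leave uninitialized memory; Full writes zeros.
y = F.conv2d_transpose(x, w)
print(y.shape)  # [2, 3, 2, 18]
```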
21 changes: 21 additions & 0 deletions paddle/phi/kernels/gpudnn/conv_transpose_grad_kernel.cu
@@ -37,6 +37,7 @@ limitations under the License. */
#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
#include "paddle/phi/kernels/gpudnn/conv_cudnn_v7.h"
#endif
#include "paddle/phi/kernels/full_kernel.h"

namespace phi {

@@ -55,6 +56,26 @@ void ConvTransposeGradRawGPUDNNKernel(const Context& dev_ctx,
const std::string& data_format,
DenseTensor* dx,
DenseTensor* dfilter) {
// 0-size
if (x.numel() == 0) {
if (dx) dev_ctx.template Alloc<T>(dx);
if (dfilter) {
phi::Full<T, Context>(dev_ctx,
phi::IntArray(common::vectorize(dfilter->dims())),
0,
dfilter);
}
return;
}
if (filter.numel() == 0) {
if (dfilter) dev_ctx.template Alloc<T>(dfilter);
if (dx) {
phi::Full<T, Context>(
dev_ctx, phi::IntArray(common::vectorize(dx->dims())), 0, dx);
}
return;
}

const T* filter_data = filter.data<T>();
std::vector<int> paddings_ = paddings;
std::vector<int> dilations_ =
6 changes: 6 additions & 0 deletions paddle/phi/kernels/gpudnn/conv_transpose_kernel.cu
@@ -35,6 +35,7 @@ limitations under the License. */
#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
#include "paddle/phi/kernels/gpudnn/conv_cudnn_v7.h"
#endif
#include "paddle/phi/kernels/full_kernel.h"

namespace phi {

@@ -51,6 +52,11 @@ void ConvTransposeRawGPUDNNKernel(const Context& dev_ctx,
const std::vector<int>& dilations,
const std::string& data_format,
DenseTensor* out) {
if (x.numel() == 0 || filter.numel() == 0) {
phi::Full<T, Context>(
dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out);
return;
}
std::vector<int> paddings_ = paddings;
std::vector<int> dilations_ =
dilations; // cudnn v5 does not support dilations
21 changes: 21 additions & 0 deletions paddle/phi/kernels/impl/conv_transpose_grad_kernel_impl.h
@@ -18,6 +18,7 @@
#include "paddle/common/layout.h"
#include "paddle/phi/kernels/conv_transpose_grad_kernel.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/concat_and_split_functor.h"
#include "paddle/phi/kernels/funcs/im2col.h"
@@ -48,6 +49,26 @@ void ConvTransposeGradRawKernel(const Context& dev_ctx,
return;
}

// 0-size
if (x.numel() == 0) {
if (dx) dev_ctx.template Alloc<T>(dx);
if (dfilter) {
phi::Full<T, Context>(dev_ctx,
phi::IntArray(common::vectorize(dfilter->dims())),
0,
dfilter);
}
return;
}
if (filter.numel() == 0) {
if (dfilter) dev_ctx.template Alloc<T>(dfilter);
if (dx) {
phi::Full<T, Context>(
dev_ctx, phi::IntArray(common::vectorize(dx->dims())), 0, dx);
}
return;
}

std::vector<int> paddings_ = paddings;
std::vector<int> dilations_ = dilations;

6 changes: 6 additions & 0 deletions paddle/phi/kernels/impl/conv_transpose_kernel_impl.h
@@ -18,6 +18,7 @@
#include "paddle/common/layout.h"
#include "paddle/phi/kernels/conv_transpose_kernel.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/concat_and_split_functor.h"
#include "paddle/phi/kernels/funcs/im2col.h"
@@ -37,6 +38,11 @@ void ConvTransposeRawKernel(const Context& dev_ctx,
const std::vector<int>& dilations,
const std::string& data_format,
DenseTensor* out) {
if (x.numel() == 0 || filter.numel() == 0) {
phi::Full<T, Context>(
dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out);
return;
}
const DataLayout data_layout = common::StringToDataLayout(data_format);
// The filter will be reshaped, so it should not be constant
DenseTensor filter_ = filter;
21 changes: 20 additions & 1 deletion paddle/phi/kernels/xpu/conv_transpose_grad_kernel.cc
@@ -17,6 +17,7 @@
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/xpu/xpu_api_wrapper.h"

namespace phi {
@@ -40,7 +41,25 @@ void Conv2dTransposeGradKernel(const Context& dev_ctx,
// that avoids modifying the variable in the Scope.
DenseTensor filter_ = filter;
if (!dx && !dfilter) return;

// 0-size
if (x.numel() == 0) {
if (dx) dev_ctx.template Alloc<T>(dx);
if (dfilter) {
phi::Full<T, Context>(dev_ctx,
phi::IntArray(common::vectorize(dfilter->dims())),
0,
dfilter);
}
return;
}
if (filter.numel() == 0) {
if (dfilter) dev_ctx.template Alloc<T>(dfilter);
if (dx) {
phi::Full<T, Context>(
dev_ctx, phi::IntArray(common::vectorize(dx->dims())), 0, dx);
}
return;
}
std::vector<int64_t> strides_ =
std::vector<int64_t>(strides.begin(), strides.end());
std::vector<int64_t> paddings_ =
7 changes: 6 additions & 1 deletion paddle/phi/kernels/xpu/conv_transpose_kernel.cc
@@ -19,6 +19,7 @@
#include "paddle/phi/backends/xpu/enforce_xpu.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/full_kernel.h"
#include "paddle/phi/kernels/xpu/conv_utils_xpu.h"
#include "paddle/phi/kernels/xpu/xpu_api_wrapper.h"
#ifdef PADDLE_WITH_XPU_XRE5
@@ -41,7 +42,11 @@ void Conv2dTransposeKernel(const Context& dev_ctx,
const std::string& data_format,
DenseTensor* out) {
using XPUType = typename XPUTypeTrait<T>::Type;

if (x.numel() == 0 || filter.numel() == 0) {
phi::Full<T, Context>(
dev_ctx, phi::IntArray(common::vectorize(out->dims())), 0, out);
return;
}
dev_ctx.template Alloc<T>(out);

PADDLE_ENFORCE_EQ(
59 changes: 59 additions & 0 deletions test/legacy_test/test_conv2d_transpose_op.py
@@ -14,10 +14,12 @@

import os
import unittest
from unittest import TestCase

import numpy as np

import paddle
import paddle.base.dygraph as dg
import paddle.static
from paddle import nn

@@ -1519,5 +1521,62 @@ def call_func(self, x):
return out


class TestFunctionalConv2DTranspose_ZeroSize(TestCase):
def init_data(self):
self.input = np.random.randn(0, 4, 16, 4)
self.filter = np.random.randn(4, 3, 3, 3)
self.np_out = np.zeros([0, 3, 18, 6])

def setUp(self):
self.init_data()
self.bias = None
self.padding = 0
self.stride = 1
self.dilation = 1
self.groups = 1
self.data_format = "NCHW"
self.places = []
if (
os.environ.get('FLAGS_CI_both_cpu_and_gpu', 'False').lower()
in ['1', 'true', 'on']
or not base.core.is_compiled_with_cuda()
):
self.places.append(base.CPUPlace())
if base.core.is_compiled_with_cuda():
self.places.append(base.CUDAPlace(0))

def test_dygraph(self):
for place in self.places:
with dg.guard(place):
input = paddle.to_tensor(self.input)
input.stop_gradient = False
filter = paddle.to_tensor(self.filter)
filter.stop_gradient = False
y = paddle.nn.functional.conv2d_transpose(
input,
filter,
self.bias,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
data_format=self.data_format,
)
np.testing.assert_allclose(y.numpy(), self.np_out)
loss = y.sum()
loss.backward()
np.testing.assert_allclose(input.grad.shape, input.shape)
np.testing.assert_allclose(filter.grad, np.zeros(filter.shape))


class TestFunctionalConv2DTranspose_ZeroSize2(
TestFunctionalConv2DTranspose_ZeroSize
):
def init_data(self):
self.input = np.random.randn(4, 5, 3, 3)
self.filter = np.random.randn(5, 0, 4, 4)
self.np_out = np.zeros([4, 0, 6, 6])


if __name__ == '__main__':
unittest.main()