diff --git a/test/legacy_test/test_unfold_op.py b/test/legacy_test/test_unfold_op.py
index 111a153766df1e..7c8bdfbd904e22 100644
--- a/test/legacy_test/test_unfold_op.py
+++ b/test/legacy_test/test_unfold_op.py
@@ -161,6 +161,33 @@ def init_dtype(self):
         self.dtype = np.float16
 
 
+class TestUnfoldZeroSize(TestUnfoldOp):
+    """
+    This is for test on unfold Op with zero size input
+    """
+
+    def init_data(self):
+        self.batch_size = 3
+        self.input_channels = 0
+        self.input_height = 20
+        self.input_width = 20
+        self.kernel_sizes = [3, 3]
+        self.strides = [1, 1]
+        self.paddings = [1, 1, 1, 1]
+        self.dilations = [1, 1]
+        input_shape = [
+            self.batch_size,
+            self.input_channels,
+            self.input_height,
+            self.input_width,
+        ]
+        if self.dtype == np.uint16:
+            as_type = self.np_dtype
+        else:
+            as_type = self.dtype
+        self.x = np.random.rand(*input_shape).astype(as_type)
+
+
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
diff --git a/test/xpu/test_unfold_op_xpu.py b/test/xpu/test_unfold_op_xpu.py
index d409167ecb0ea9..1d16b5d4bfa2bb 100644
--- a/test/xpu/test_unfold_op_xpu.py
+++ b/test/xpu/test_unfold_op_xpu.py
@@ -170,6 +170,29 @@ def test_dygraph(self):
         def test_info(self):
             str(paddle.nn.Unfold(**self.attrs))
 
+    class TestUnfoldZeroSize(TestUnfoldOp):
+        """
+        This is for test on unfold Op with zero size input
+        """
+
+        def init_data(self):
+            self.batch_size = 3
+            self.input_channels = 0
+            self.input_height = 20
+            self.input_width = 20
+
+            self.kernel_sizes = [2, 2]
+            self.strides = [1, 1]
+            self.paddings = [1, 1, 1, 1]
+            self.dilations = [1, 1]
+            input_shape = [
+                self.batch_size,
+                self.input_channels,
+                self.input_height,
+                self.input_width,
+            ]
+            self.x = np.random.rand(*input_shape).astype(self.dtype)
+
 
 support_types = get_xpu_op_support_types('unfold')
 for stype in support_types: