From 2caf0a362f6b83a9bf26abc38ded316f0c97605e Mon Sep 17 00:00:00 2001 From: ccrrong <1039058843@qq.com> Date: Tue, 29 Nov 2022 08:54:39 +0000 Subject: [PATCH 1/5] remove pool2d --- .../slim/tests/test_quantization_pass.py | 32 +-- .../tests/test_image_classification_fp16.py | 4 +- .../tests/test_multi_precision_fp16_train.py | 4 +- python/paddle/fluid/layers/nn.py | 244 ------------------ python/paddle/fluid/nets.py | 45 ++-- .../tests/book/test_image_classification.py | 4 +- .../fluid/tests/unittests/dist_se_resnext.py | 41 ++- .../unittests/ipu/test_pool_avg_op_ipu.py | 34 +-- .../unittests/ipu/test_pool_max_op_ipu.py | 39 +-- .../ir/inference/test_trt_pool_op.py | 28 +- .../tests/unittests/mlu/test_pool2d_op_mlu.py | 126 ++++----- .../fluid/tests/unittests/seresnext_net.py | 4 +- .../fluid/tests/unittests/test_layers.py | 8 +- .../fluid/tests/unittests/test_nn_grad.py | 3 +- .../test_parallel_executor_fetch_feed.py | 4 +- .../fluid/tests/unittests/test_pool2d_op.py | 128 ++++----- 16 files changed, 216 insertions(+), 532 deletions(-) diff --git a/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py b/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py index be42ab5cf2e20..9bba237815de3 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py +++ b/python/paddle/fluid/contrib/slim/tests/test_quantization_pass.py @@ -79,12 +79,12 @@ def conv_bn_layer( hidden = fluid.layers.matmul(hidden, matmul_weight, True, True) if quant_skip_pattern: with fluid.name_scope(quant_skip_pattern): - pool = fluid.layers.pool2d( - input=hidden, pool_size=2, pool_type='avg', pool_stride=2 + pool = paddle.nn.functional.avg_pool2d( + x=hidden, kernel_size=2, stride=2 ) else: - pool = fluid.layers.pool2d( - input=hidden, pool_size=2, pool_type='avg', pool_stride=2 + pool = paddle.nn.functional.avg_pool2d( + x=hidden, kernel_size=2, stride=2 ) fc = fluid.layers.fc(input=pool, size=10) loss = fluid.layers.cross_entropy(input=fc, label=label) @@ 
-727,11 +727,11 @@ def conv_bn_layer( hidden = fluid.layers.matmul(hidden, data2, True, True) if isinstance(quant_skip_pattern, str): with fluid.name_scope(quant_skip_pattern): - pool1 = fluid.layers.pool2d( - input=hidden, pool_size=2, pool_type='avg', pool_stride=2 + pool1 = paddle.nn.functional.avg_pool2d( + x=hidden, kernel_size=2, stride=2 ) - pool2 = fluid.layers.pool2d( - input=hidden, pool_size=2, pool_type='max', pool_stride=2 + pool2 = paddle.nn.functional.max_pool2d( + x=hidden, kernel_size=2, stride=2 ) pool_add = fluid.layers.elementwise_add( x=pool1, y=pool2, act='relu' @@ -741,22 +741,22 @@ def conv_bn_layer( len(quant_skip_pattern) > 1 ), 'test config error: the len of quant_skip_pattern list should be greater than 1.' with fluid.name_scope(quant_skip_pattern[0]): - pool1 = fluid.layers.pool2d( - input=hidden, pool_size=2, pool_type='avg', pool_stride=2 + pool1 = paddle.nn.functional.avg_pool2d( + x=hidden, kernel_size=2, stride=2 ) - pool2 = fluid.layers.pool2d( - input=hidden, pool_size=2, pool_type='max', pool_stride=2 + pool2 = paddle.nn.functional.max_pool2d( + x=hidden, kernel_size=2, stride=2 ) with fluid.name_scope(quant_skip_pattern[1]): pool_add = fluid.layers.elementwise_add( x=pool1, y=pool2, act='relu' ) else: - pool1 = fluid.layers.pool2d( - input=hidden, pool_size=2, pool_type='avg', pool_stride=2 + pool1 = paddle.nn.functional.avg_pool2d( + x=hidden, kernel_size=2, stride=2 ) - pool2 = fluid.layers.pool2d( - input=hidden, pool_size=2, pool_type='max', pool_stride=2 + pool2 = paddle.nn.functional.max_pool2d( + x=hidden, kernel_size=2, stride=2 ) pool_add = fluid.layers.elementwise_add(x=pool1, y=pool2, act='relu') fc = fluid.layers.fc(input=pool_add, size=10) diff --git a/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py b/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py index 7edaeb2760bed..8bf98b9adb9db 100644 --- a/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py +++ 
b/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py @@ -69,9 +69,7 @@ def layer_warp(block_func, input, ch_in, ch_out, count, stride): res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) res2 = layer_warp(basicblock, res1, 16, 32, n, 2) res3 = layer_warp(basicblock, res2, 32, 64, n, 2) - pool = fluid.layers.pool2d( - input=res3, pool_size=8, pool_type='avg', pool_stride=1 - ) + pool = paddle.nn.functional.avg_pool2d(x=res3, kernel_size=8, stride=1) return pool diff --git a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py index ba0f6534adfa5..473746672cd0b 100644 --- a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py +++ b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py @@ -88,9 +88,7 @@ def layer_warp(block_func, input, ch_in, ch_out, count, stride): res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) res2 = layer_warp(basicblock, res1, 16, 32, n, 2) res3 = layer_warp(basicblock, res2, 32, 64, n, 2) - pool = fluid.layers.pool2d( - input=res3, pool_size=8, pool_type='avg', pool_stride=1 - ) + pool = paddle.nn.functional.avg_pool2d(x=res3, kernel_size=8, stride=1) return pool diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 4743e4b49f216..51a281c6aea34 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -70,7 +70,6 @@ 'cos_sim', 'conv2d', 'softmax', - 'pool2d', 'batch_norm', 'reduce_mean', 'reduce_all', @@ -1651,249 +1650,6 @@ def _get_default_param_initializer(): return helper.append_activation(pre_act) -@templatedoc() -def pool2d( - input, - pool_size=-1, - pool_type="max", - pool_stride=1, - pool_padding=0, - global_pooling=False, - use_cudnn=True, - ceil_mode=False, - name=None, - exclusive=True, - data_format="NCHW", -): - """ - - ${comment} - - Args: - input (Variable): The input tensor of pooling operator which is a 4-D tensor with - shape 
[N, C, H, W]. The format of input tensor is `"NCHW"` or - `"NHWC"`, where `N` is batch size, `C` is the number of channels, - `H` is the height of the feature, and `W` is the width of the - feature. The data type if float32 or float64. - pool_size (int|list|tuple): The pool kernel size. If pool kernel size is a tuple or list, - it must contain two integers, (pool_size_Height, pool_size_Width). - Otherwise, the pool kernel size will be a square of an int. - pool_type: ${pooling_type_comment} - pool_stride (int|list|tuple): The pool stride size. If pool stride size is a tuple or list, - it must contain two integers, (pool_stride_Height, pool_stride_Width). - Otherwise, the pool stride size will be a square of an int. - pool_padding (string|int|list|tuple): The pool padding. If `pool_padding` is a string, either 'VALID' or - 'SAME' which is the padding algorithm. If pool padding size is a tuple or list, - it could be in three forms: `[pad_height, pad_width]` or - `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`, and when `data_format` is `"NCHW"`, - `pool_padding` can be in the form `[[0,0], [0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`. - when `data_format` is `"NHWC"`, `pool_padding` can be in the form - `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`. - Otherwise, the pool padding size will be a square of an int. - global_pooling (bool): ${global_pooling_comment} - use_cudnn (bool): ${use_cudnn_comment} - ceil_mode (bool): ${ceil_mode_comment} - name(str, optional): For detailed information, please refer - to :ref:`api_guide_Name`. Usually name is no need to set and - None by default. - exclusive (bool): Whether to exclude padding points in average pooling - mode, default is `true`. - data_format (string): The data format of the input and output data. An optional string from: `"NCHW"`, `"NHWC"`. - The default is `"NCHW"`. 
When it is `"NCHW"`, the data is stored in the order of: - `[batch_size, input_channels, input_height, input_width]`. - - Returns: - Variable: The output tensor of pooling result. The data type is same as input tensor. - - Raises: - ValueError: If `pool_type` is not "max" nor "avg". - ValueError: If `global_pooling` is False and `pool_size` is -1. - TypeError: If `use_cudnn` is not a bool value. - ValueError: If `data_format` is not "NCHW" or "NHWC". - ValueError: If `pool_padding` is a string, but not "SAME" or "VALID". - ValueError: If `pool_padding` is "VALID", but `ceil_mode` is True. - ValueError: If `pool_padding` is a list or tuple, but the elements in the batch or channel dimensions are non-zero. - ShapeError: If the input is not a 4-D or 5-D Tensor. - ShapeError: If the dimension of input minus the size of `pool_stride` is not 2. - ShapeError: If the size of `pool_size` and `pool_stride` is not equal. - ShapeError: If the output's shape calculated is not greater than 0. - - - Examples: - - .. code-block:: python - - import paddle.fluid as fluid - import paddle - - paddle.enable_static() - - data = fluid.data(name='data', shape=[None, 3, 32, 32], dtype='float32') - - # max pool2d - pool2d = fluid.layers.pool2d( - input = data, - pool_size = 2, - pool_type = "max", - pool_stride = 1, - global_pooling=False) - - # average pool2d - pool2d = fluid.layers.pool2d( - input = data, - pool_size = 2, - pool_type = "avg", - pool_stride = 1, - global_pooling=False) - - # global average pool2d - pool2d = fluid.layers.pool2d( - input = data, - pool_size = 2, - pool_type = "avg", - pool_stride = 1, - global_pooling=True) - - # Attr(pool_padding) is a list with 4 elements, Attr(data_format) is "NCHW". - out_1 = fluid.layers.pool2d( - input = data, - pool_size = 3, - pool_type = "avg", - pool_stride = 1, - pool_padding = [1, 2, 1, 0], - data_format = "NCHW") - - # Attr(pool_padding) is a string, Attr(data_format) is "NCHW". 
- out_2 = fluid.layers.pool2d( - input = data, - pool_size = 3, - pool_type = "avg", - pool_stride = 1, - pool_padding = "VALID", - data_format = "NCHW") - """ - if pool_type not in ["max", "avg"]: - raise ValueError( - "Unknown Attr(pool_type): '%s'. It can only be 'max' or 'avg'.", - str(pool_type), - ) - - if global_pooling is False and pool_size == -1: - raise ValueError( - "When Attr(global_pooling) is False, Attr(pool_size) must be passed " - "and be a valid value. Received pool_size: %s." % str(pool_size) - ) - - if not isinstance(use_cudnn, bool): - raise TypeError( - "Attr(use_cudnn) should be True or False. Received " - "Attr(use_cudnn): %s." % str(use_cudnn) - ) - - if data_format not in ["NCHW", "NHWC"]: - raise ValueError( - "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " - "Attr(data_format): %s." % str(data_format) - ) - - pool_size = utils.convert_to_list(pool_size, 2, 'pool_size') - pool_stride = utils.convert_to_list(pool_stride, 2, 'pool_stride') - - def update_padding(padding, data_format): - def is_list_or_tuple(ele): - if isinstance(ele, list) or isinstance(ele, tuple): - return True - return False - - if is_list_or_tuple(padding) and len(padding) == 4: - if is_list_or_tuple(padding[0]) and (data_format == "NCHW"): - if not (padding[0] == [0, 0] and padding[1] == [0, 0]): - raise ValueError( - "Non-zero pool_padding(%s) in the batch or channel dimensions " - "is not supported." % str(padding) - ) - padding = padding[2:4] - padding = [ele for a_list in padding for ele in a_list] - elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"): - if not (padding[0] == [0, 0] and padding[3] == [0, 0]): - raise ValueError( - "Non-zero pool_padding(%s) in the batch or channel dimensions " - "is not supported." 
% str(padding) - ) - padding = padding[1:3] - padding = [ele for a_list in padding for ele in a_list] - padding = utils.convert_to_list(padding, 4, 'padding') - - if utils._is_symmetric_padding(padding, 2): - padding = [padding[0], padding[2]] - else: - padding = utils.convert_to_list(padding, 2, 'padding') - - return padding - - padding_algorithm = "EXPLICIT" - if isinstance(pool_padding, str): - pool_padding = pool_padding.upper() - if pool_padding not in ["SAME", "VALID"]: - raise ValueError( - "Unknown Attr(pool_padding): '%s'. It can only be 'SAME' or 'VALID'." - % str(pool_padding) - ) - if pool_padding == "VALID": - padding_algorithm = "VALID" - pool_padding = [0, 0] - if ceil_mode != False: - raise ValueError( - "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode) must be False. " - "Received ceil_mode: True." - ) - elif pool_padding == "SAME": - padding_algorithm = "SAME" - pool_padding = [0, 0] - - pool_padding = update_padding(pool_padding, data_format) - if in_dygraph_mode(): - input = input._use_cudnn(use_cudnn) - return _C_ops.pool2d( - input, - pool_size, - pool_stride, - pool_padding, - ceil_mode, - exclusive, - data_format, - pool_type, - global_pooling, - False, - padding_algorithm, - ) - op_type = 'pool2d' - helper = LayerHelper(op_type, **locals()) - dtype = helper.input_dtype() - pool_out = helper.create_variable_for_type_inference(dtype) - - helper.append_op( - type=op_type, - inputs={"X": input}, - outputs={"Out": pool_out}, - attrs={ - "pooling_type": pool_type, - "ksize": pool_size, - "global_pooling": global_pooling, - "strides": pool_stride, - "paddings": pool_padding, - "padding_algorithm": padding_algorithm, - "use_cudnn": use_cudnn, - "ceil_mode": ceil_mode, - "use_mkldnn": False, - "exclusive": exclusive, - "data_format": data_format, - }, - ) - - return pool_out - - def batch_norm( input, act=None, diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py index 5cd8380eba586..f091ff8ad1ebb 100644 --- 
a/python/paddle/fluid/nets.py +++ b/python/paddle/fluid/nets.py @@ -132,16 +132,20 @@ def simple_img_conv_pool( act=act, use_cudnn=use_cudnn, ) - - pool_out = layers.pool2d( - input=conv_out, - pool_size=pool_size, - pool_type=pool_type, - pool_stride=pool_stride, - pool_padding=pool_padding, - global_pooling=global_pooling, - use_cudnn=use_cudnn, - ) + if pool_type == 'max': + pool_out = paddle.nn.functional.max_pool2d( + x=conv_out, + kernel_size=pool_size, + stride=pool_stride, + padding=pool_padding, + ) + else: + pool_out = paddle.nn.functional.avg_pool2d( + x=conv_out, + kernel_size=pool_size, + stride=pool_stride, + padding=pool_padding, + ) return pool_out @@ -258,13 +262,20 @@ def __extend_list__(obj): if abs(drop_rate) > 1e-5: tmp = layers.dropout(x=tmp, dropout_prob=drop_rate) - pool_out = layers.pool2d( - input=tmp, - pool_size=pool_size, - pool_type=pool_type, - pool_stride=pool_stride, - use_cudnn=use_cudnn, - ) + if pool_type == 'max': + pool_out = paddle.nn.functional.max_pool2d( + x=conv_out, + kernel_size=pool_size, + stride=pool_stride, + padding=pool_padding, + ) + else: + pool_out = paddle.nn.functional.avg_pool2d( + x=conv_out, + kernel_size=pool_size, + stride=pool_stride, + padding=pool_padding, + ) return pool_out diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index 0ac7adb994bf0..27c7499efbbb8 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -68,9 +68,7 @@ def layer_warp(block_func, input, ch_in, ch_out, count, stride): res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) res2 = layer_warp(basicblock, res1, 16, 32, n, 2) res3 = layer_warp(basicblock, res2, 32, 64, n, 2) - pool = fluid.layers.pool2d( - input=res3, pool_size=8, pool_type='avg', pool_stride=1 - ) + pool = paddle.nn.functional.avg_pool2d(x=res3, kernel_size=8, stride=1) return pool diff --git 
a/python/paddle/fluid/tests/unittests/dist_se_resnext.py b/python/paddle/fluid/tests/unittests/dist_se_resnext.py index 83befa76062d1..5c9e2d2041f61 100644 --- a/python/paddle/fluid/tests/unittests/dist_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/dist_se_resnext.py @@ -59,12 +59,11 @@ def net(self, input, class_dim=1000): conv = self.conv_bn_layer( input=input, num_filters=64, filter_size=7, stride=2, act='relu' ) - conv = fluid.layers.pool2d( - input=conv, - pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max', + conv = paddle.nn.functional.max_pool2d( + x=conv, + kernel_size=3, + stride=2, + padding=1, ) elif layers == 101: cardinality = 32 @@ -75,12 +74,11 @@ def net(self, input, class_dim=1000): conv = self.conv_bn_layer( input=input, num_filters=64, filter_size=7, stride=2, act='relu' ) - conv = fluid.layers.pool2d( - input=conv, - pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max', + conv = paddle.nn.functional.max_pool2d( + x=conv, + kernel_size=3, + stride=2, + padding=1, ) elif layers == 152: cardinality = 64 @@ -97,12 +95,11 @@ def net(self, input, class_dim=1000): conv = self.conv_bn_layer( input=conv, num_filters=128, filter_size=3, stride=1, act='relu' ) - conv = fluid.layers.pool2d( - input=conv, - pool_size=3, - pool_stride=2, - pool_padding=1, - pool_type='max', + conv = paddle.nn.functional.max_pool2d( + x=conv, + kernel_size=3, + stride=2, + padding=1, ) for block in range(len(depth)): @@ -115,9 +112,7 @@ def net(self, input, class_dim=1000): reduction_ratio=reduction_ratio, ) - pool = fluid.layers.pool2d( - input=conv, pool_size=7, pool_type='avg', global_pooling=True - ) + pool = paddle.nn.functional.avg_pool2d(x=conv, kernel_size=7) drop = fluid.layers.dropout(x=pool, dropout_prob=0.2) stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0) out = fluid.layers.fc( @@ -185,9 +180,7 @@ def conv_bn_layer( return fluid.layers.batch_norm(input=conv, act=act) def squeeze_excitation(self, input, num_channels, 
reduction_ratio): - pool = fluid.layers.pool2d( - input=input, pool_size=0, pool_type='avg', global_pooling=True - ) + pool = paddle.nn.functional.adaptive_avg_pool2d(x=input, output_size=1) stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) squeeze = fluid.layers.fc( input=pool, diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py index f177b46b02275..5fb2787ae43ad 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_pool_avg_op_ipu.py @@ -41,11 +41,9 @@ def set_feed_attr(self): def set_op_attrs(self): self.attrs = { - "pool_size": 3, - "pool_type": 'avg', - "pool_stride": 1, - "pool_padding": 0, - "global_pooling": False, + "kernel_size": 3, + "stride": 1, + "padding": 0, "ceil_mode": False, "exclusive": True, "data_format": 'NCHW', } @@ -56,7 +54,7 @@ def build_model(self): x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' ) - out = paddle.fluid.layers.pool2d(x, **self.attrs) + out = paddle.nn.functional.avg_pool2d(x, **self.attrs) self.fetch_list = [out.name] def run_model(self, exec_mode): @@ -73,58 +71,52 @@ def test(self): class TestCase1(TestBase): def set_attrs(self): super().set_attrs() - self.attrs['pool_size'] = 3 + self.attrs['kernel_size'] = 3 class TestCase1_2(TestBase): def set_attrs(self): super().set_attrs() - self.attrs['pool_size'] = [3, 1] + self.attrs['kernel_size'] = [3, 1] class TestCase2(TestBase): def set_attrs(self): super().set_attrs() - self.attrs['pool_stride'] = 2 + self.attrs['stride'] = 2 class TestCase2_2(TestBase): def set_attrs(self): super().set_attrs() - self.attrs['pool_stride'] = [2, 1] + self.attrs['stride'] = [2, 1] class TestCase3(TestBase): def set_attrs(self): super().set_attrs() - self.attrs['pool_padding'] = [1, 1] + self.attrs['padding'] = [1, 1] class TestCase3_2(TestBase): def set_attrs(self): super().set_attrs() - 
self.attrs['pool_padding'] = [1, 1, 2, 2] + self.attrs['padding'] = [1, 1, 2, 2] @unittest.skip('the results has a positional offset') class TestCase3_3(TestBase): def set_attrs(self): super().set_attrs() - self.attrs['pool_padding'] = [1, 2, 1, 1] + self.attrs['padding'] = [1, 2, 1, 1] @unittest.skip('paddle output has nan') class TestCase3_4(TestBase): def set_attrs(self): super().set_attrs() - self.attrs['pool_size'] = 1 - self.attrs['pool_padding'] = 1 - - -class TestCase4(TestBase): - def set_attrs(self): - super().set_attrs() - self.attrs['global_pooling'] = True + self.attrs['size'] = 1 + self.attrs['padding'] = 1 class TestCase5(TestBase): diff --git a/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py b/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py index fbbf2780ae20c..b7abcb917b773 100644 --- a/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py +++ b/python/paddle/fluid/tests/unittests/ipu/test_pool_max_op_ipu.py @@ -41,13 +41,10 @@ def set_feed_attr(self): def set_op_attrs(self): self.attrs = { - "pool_size": 3, - "pool_type": 'max', - "pool_stride": 1, - "pool_padding": 0, - "global_pooling": False, + "kernel_size": 3, + "stride": 1, + "padding": 0, "ceil_mode": False, - "exclusive": True, "data_format": 'NCHW', } @@ -56,7 +53,7 @@ def build_model(self): x = paddle.static.data( name=self.feed_list[0], shape=self.feed_shape[0], dtype='float32' ) - out = paddle.fluid.layers.pool2d(x, **self.attrs) + out = paddle.nn.functional.max_pool2d(x, **self.attrs) self.fetch_list = [out.name] def run_model(self, exec_mode): @@ -73,57 +70,51 @@ def test(self): class TestCase1(TestBase): def set_op_attrs(self): super().set_op_attrs() - self.attrs['pool_size'] = 3 + self.attrs['kernel_size'] = 3 class TestCase1_2(TestBase): def set_op_attrs(self): super().set_op_attrs() - self.attrs['pool_size'] = [3, 1] + self.attrs['kernel_size'] = [3, 1] class TestCase2(TestBase): def set_op_attrs(self): super().set_op_attrs() - 
self.attrs['pool_stride'] = 2 + self.attrs['stride'] = 2 class TestCase2_2(TestBase): def set_op_attrs(self): super().set_op_attrs() - self.attrs['pool_stride'] = [2, 1] + self.attrs['stride'] = [2, 1] class TestCase3(TestBase): def set_op_attrs(self): super().set_op_attrs() - self.attrs['pool_padding'] = [1, 1] + self.attrs['padding'] = [1, 1] class TestCase3_2(TestBase): def set_op_attrs(self): super().set_op_attrs() - self.attrs['pool_padding'] = [1, 1, 2, 2] + self.attrs['padding'] = [1, 1, 2, 2] @unittest.skip('auto_pad is not currently supported') class TestCase3_3(TestBase): def set_op_attrs(self): super().set_op_attrs() - self.attrs['pool_padding'] = 'VALID' + self.attrs['padding'] = 'VALID' @unittest.skip('auto_pad is not currently supported') class TestCase3_4(TestBase): def set_op_attrs(self): super().set_op_attrs() - self.attrs['pool_padding'] = 'SAME' - - -class TestCase4(TestBase): - def set_op_attrs(self): - super().set_op_attrs() - self.attrs['global_pooling'] = True + self.attrs['padding'] = 'SAME' class TestCase5(TestBase): @@ -132,11 +123,5 @@ def set_op_attrs(self): self.attrs['ceil_mode'] = True -class TestCase6(TestBase): - def set_op_attrs(self): - super().set_op_attrs() - self.attrs['exclusive'] = False - - if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py index b8f3ced692134..d656015a19bd1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_pool_op.py @@ -20,6 +20,7 @@ import numpy as np from inference_pass_test import InferencePassTest +import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid.core import AnalysisConfig, PassVersionChecker @@ -62,16 +63,23 @@ def build_network(self): shape=[-1, self.channel, self.height, self.width], dtype='float32', ) - pool_out = 
fluid.layers.pool2d( - input=data, - pool_size=self.pool_size, - pool_type=self.pool_type, - pool_stride=self.pool_stride, - pool_padding=self.pool_padding, - global_pooling=self.global_pooling, - ceil_mode=self.ceil_mode, - exclusive=self.exclusive, - ) + if self.pool_type == 'max': + pool_out = paddle.nn.functional.max_pool2d( + x=data, + kernel_size=self.pool_size, + stride=self.pool_stride, + padding=self.pool_padding, + ceil_mode=self.ceil_mode, + ) + else: + pool_out = paddle.nn.functional.avg_pool2d( + x=data, + kernel_size=self.pool_size, + stride=self.pool_stride, + padding=self.pool_padding, + ceil_mode=self.ceil_mode, + exclusive=self.exclusive, + ) out = fluid.layers.batch_norm(pool_out, is_test=True) self.fetch_list = [out] diff --git a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py index 1a7a2f2255145..88bcd0f2b6a52 100644 --- a/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py +++ b/python/paddle/fluid/tests/unittests/mlu/test_pool2d_op_mlu.py @@ -734,86 +734,76 @@ def test_api(self): ) ksize = [3, 3] - out_1 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1], + out_1 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, + padding=[1, 1], data_format="NHWC", ) - out_2 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="avg", - pool_padding=[[0, 0], [1, 1], [1, 1], [0, 0]], + out_2 = paddle.nn.functional.avg_pool2d( + x=input_NHWC, + kernel_size=ksize, + padding=[[0, 0], [1, 1], [1, 1], [0, 0]], data_format="NHWC", ) - out_3 = fluid.layers.pool2d( - input=input_NCHW, - pool_size=ksize, - pool_type="avg", - pool_padding=[[0, 0], [0, 0], [1, 1], [1, 1]], + out_3 = paddle.nn.functional.avg_pool2d( + x=input_NCHW, + kernel_size=ksize, + padding=[[0, 0], [0, 0], [1, 1], [1, 1]], data_format="NCHW", ) - out_4 = fluid.layers.pool2d( - input=input_NCHW, - pool_size=ksize, - 
pool_type="avg", - pool_padding=[1, 2, 1, 0], + out_4 = paddle.nn.functional.avg_pool2d( + x=input_NCHW, + kernel_size=ksize, + padding=[1, 2, 1, 0], data_format="NCHW", ) # test VALID - out_5 = fluid.layers.pool2d( - input=input_NCHW, - pool_size=ksize, - pool_type="avg", - pool_padding="VALID", + out_5 = paddle.nn.functional.avg_pool2d( + x=input_NCHW, + kernel_size=ksize, + padding="VALID", data_format="NCHW", ) - out_6 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding="VALID", + out_6 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, + padding="VALID", data_format="NHWC", ) # test SAME - out_7 = fluid.layers.pool2d( - input=input_NCHW, - pool_size=[4, 4], - pool_type="avg", - pool_padding="SAME", + out_7 = paddle.nn.functional.avg_pool2d( + x=input_NCHW, + kernel_size=[4, 4], + padding="SAME", data_format="NCHW", ) - out_8 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=[4, 4], - pool_type="max", - pool_padding="SAME", + out_8 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=[4, 4], + padding="SAME", data_format="NHWC", ) # test negetive - out_9 = fluid.layers.pool2d( - input=input_NHWC_negetive, - pool_size=ksize, - pool_type="avg", - pool_padding=[0, 0], + out_9 = paddle.nn.functional.avg_pool2d( + x=input_NHWC_negetive, + kernel_size=ksize, + padding=[0, 0], data_format="NHWC", ) assert out_9.shape == (2, -1, 3, 3) - out_10 = fluid.layers.pool2d( - input=input_NCHW_negetive, - pool_size=ksize, - pool_type="avg", - pool_padding=[0, 0], + out_10 = paddle.nn.functional.avg_pool2d( + x=input_NCHW_negetive, + kernel_size=ksize, + padding=[0, 0], data_format="NCHW", ) assert out_10.shape == (2, 3, -1, -1) @@ -950,11 +940,10 @@ def test_api(self): # data_format value error def run_2(): - out_2 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1], + out_2 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, + 
padding=[1, 1], data_format="NHWCC", ) @@ -962,11 +951,10 @@ def run_2(): # padding str value error def run_3(): - out_3 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding="VALIDSAME", + out_3 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, + padding="VALIDSAME", data_format="NHWC", ) @@ -974,11 +962,10 @@ def run_3(): # padding str valid and ceil_mode value error def run_4(): - out_4 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding="VALID", + out_4 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, + padding="VALID", ceil_mode=True, data_format="NHWC", ) @@ -987,11 +974,10 @@ def run_4(): # padding with 8 ele. value error def run_5(): - out_5 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[[1, 1], [0, 0], [0, 0], [1, 1]], + out_5 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, + padding=[[1, 1], [0, 0], [0, 0], [1, 1]], data_format="NHWC", ) diff --git a/python/paddle/fluid/tests/unittests/seresnext_net.py b/python/paddle/fluid/tests/unittests/seresnext_net.py index 86cf960a282db..8589326b794b0 100644 --- a/python/paddle/fluid/tests/unittests/seresnext_net.py +++ b/python/paddle/fluid/tests/unittests/seresnext_net.py @@ -141,8 +141,8 @@ def SE_ResNeXt50Small(use_feed): conv = conv_bn_layer( input=conv, num_filters=16, filter_size=3, stride=1, act='relu' ) - conv = fluid.layers.pool2d( - input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max' + conv = paddle.nn.functional.max_pool2d( + x=conv, kernel_size=3, stride=2, padding=1 ) cardinality = 32 diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 37f83e47e791d..e9541a1bb8b64 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -3162,8 +3162,8 @@ def 
make_pool2d(self): fluid.default_main_program(), fluid.default_startup_program() ): x = self._get_data(name='x', shape=[3, 224, 224], dtype='float32') - return layers.pool2d( - x, pool_size=[5, 3], pool_stride=[1, 2], pool_padding=(2, 1) + return paddle.nn.functional.max_pool2d( + x, kernel_size=[5, 3], stride=[1, 2], padding=(2, 1) ) def make_pool2d_infershape(self): @@ -3174,8 +3174,8 @@ def make_pool2d_infershape(self): x = paddle.nn.functional.affine_grid( theta, out_shape=[2, 3, 244, 244] ) - return layers.pool2d( - x, pool_size=[5, 3], pool_stride=[1, 2], pool_padding=(2, 1) + return paddle.nn.functional.max_pool2d( + x, kernel_size=[5, 3], stride=[1, 2], padding=(2, 1) ) def make_lstm_unit(self): diff --git a/python/paddle/fluid/tests/unittests/test_nn_grad.py b/python/paddle/fluid/tests/unittests/test_nn_grad.py index c774351db998f..ffdf8ba067359 100644 --- a/python/paddle/fluid/tests/unittests/test_nn_grad.py +++ b/python/paddle/fluid/tests/unittests/test_nn_grad.py @@ -430,7 +430,7 @@ def func(self, place): ) input_NCHW.persistable = True - y = layers.pool2d(input_NCHW, pool_size=2, pool_type="avg") + y = paddle.nn.functional.avg_pool2d(input_NCHW, kernel_size=2) x_arr = np.random.uniform(-1, 1, [2, 3, 5, 5]).astype(np.float32) gradient_checker.double_grad_check( @@ -532,7 +532,6 @@ def func(self, place): ) input_NCHW.persistable = True - y = layers.pool2d(input_NCHW, pool_size=[4, 4], pool_type="avg") y = paddle.nn.functional.avg_pool2d(input_NCHW, kernel_size=[4, 4]) x_arr = np.random.uniform(-1, 1, [2, 3, 5, 5]).astype(np.float32) diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py index 950ff45e86f39..22d2660765447 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py @@ -25,10 +25,10 @@ def Lenet(data, class_dim): conv1 = 
fluid.layers.conv2d(data, 4, 5, 1, act=None) bn1 = fluid.layers.batch_norm(conv1, act='relu') - pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2) + pool1 = paddle.nn.functional.max_pool2d(bn1, 2, 2) conv2 = fluid.layers.conv2d(pool1, 16, 5, 1, act=None) bn2 = fluid.layers.batch_norm(conv2, act='relu') - pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2) + pool2 = paddle.nn.functional.max_pool2d(bn2, 2, 2) fc1 = fluid.layers.fc(pool2, size=50, act='relu') fc2 = fluid.layers.fc(fc1, size=class_dim, act='softmax') diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index 3692ef86279ef..10efd79720596 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -15,6 +15,7 @@ import unittest import numpy as np +import paddle import paddle.fluid.core as core from paddle.fluid.tests.unittests.op_test import OpTest import paddle.fluid as fluid @@ -1168,96 +1169,76 @@ def test_api(self): ) ksize = [3, 3] - out_1 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1], - use_cudnn=False, + out_1 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, + padding=[1, 1], data_format="NHWC", ) - out_2 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="avg", + out_2 = paddle.nn.functional.avg_pool2d( + x=input_NHWC, + kernel_size=ksize, pool_padding=[[0, 0], [1, 1], [1, 1], [0, 0]], - use_cudnn=False, data_format="NHWC", ) - out_3 = fluid.layers.pool2d( - input=input_NCHW, - pool_size=ksize, - pool_type="avg", + out_3 = paddle.nn.functional.avg_pool2d( + x=input_NCHW, + kernel_size=ksize, pool_padding=[[0, 0], [0, 0], [1, 1], [1, 1]], - use_cudnn=False, data_format="NCHW", ) - out_4 = fluid.layers.pool2d( - input=input_NCHW, - pool_size=ksize, - pool_type="avg", + out_4 = paddle.nn.functional.avg_pool2d( + x=input_NCHW, + kernel_size=ksize, pool_padding=[1, 2, 1, 
0], - use_cudnn=False, data_format="NCHW", ) # test VALID - out_5 = fluid.layers.pool2d( - input=input_NCHW, - pool_size=ksize, - pool_type="avg", + out_5 = paddle.nn.functional.avg_pool2d( + x=input_NCHW, + kernel_size=ksize, pool_padding="VALID", - use_cudnn=False, data_format="NCHW", ) - out_6 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", + out_6 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, pool_padding="VALID", - use_cudnn=False, data_format="NHWC", ) # test SAME - out_7 = fluid.layers.pool2d( - input=input_NCHW, - pool_size=[4, 4], - pool_type="avg", + out_7 = paddle.nn.functional.avg_pool2d( + x=input_NCHW, + kernel_size=[4, 4], pool_padding="SAME", - use_cudnn=False, data_format="NCHW", ) - out_8 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=[4, 4], - pool_type="max", + out_8 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=[4, 4], pool_padding="SAME", - use_cudnn=False, data_format="NHWC", ) # test negetive - out_9 = fluid.layers.pool2d( - input=input_NHWC_negetive, - pool_size=ksize, - pool_type="avg", + out_9 = paddle.nn.functional.avg_pool2d( + x=input_NHWC_negetive, + kernel_size=ksize, pool_padding=[0, 0], - use_cudnn=False, data_format="NHWC", ) assert out_9.shape == (2, -1, 3, 3) - out_10 = fluid.layers.pool2d( - input=input_NCHW_negetive, - pool_size=ksize, - pool_type="avg", + out_10 = paddle.nn.functional.avg_pool2d( + x=input_NCHW_negetive, + kernel_size=ksize, pool_padding=[0, 0], - use_cudnn=False, data_format="NCHW", ) assert out_10.shape == (2, 3, -1, -1) @@ -1392,27 +1373,12 @@ def test_api(self): ) ksize = [3, 3] - # cudnn type error - def run_1(): - out_1 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", - pool_padding=[1, 1], - use_cudnn=[0], - data_format="NHWC", - ) - - self.assertRaises(TypeError, run_1) - # data_format value error def run_2(): - out_2 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - 
pool_type="max", + out_2 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, pool_padding=[1, 1], - use_cudnn=False, data_format="NHWCC", ) @@ -1420,12 +1386,10 @@ def run_2(): # padding str value error def run_3(): - out_3 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", + out_3 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, pool_padding="VALIDSAME", - use_cudnn=False, data_format="NHWC", ) @@ -1433,12 +1397,10 @@ def run_3(): # padding str valid and ceil_mode value error def run_4(): - out_4 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", + out_4 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, pool_padding="VALID", - use_cudnn=False, ceil_mode=True, data_format="NHWC", ) @@ -1447,12 +1409,10 @@ def run_4(): # padding with 8 ele. value error def run_5(): - out_5 = fluid.layers.pool2d( - input=input_NHWC, - pool_size=ksize, - pool_type="max", + out_5 = paddle.nn.functional.max_pool2d( + x=input_NHWC, + kernel_size=ksize, pool_padding=[[1, 1], [0, 0], [0, 0], [1, 1]], - use_cudnn=False, data_format="NHWC", ) From d5db9f8eb374bad7c1bc44a9ac6c628e93d0fb2b Mon Sep 17 00:00:00 2001 From: ccrrong <1039058843@qq.com> Date: Wed, 30 Nov 2022 02:53:34 +0000 Subject: [PATCH 2/5] fix unittest --- python/paddle/fluid/nets.py | 4 +-- .../fluid/tests/unittests/test_pool2d_op.py | 26 +++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py index c9390b8c9bcd0..4e87bcf3e10d0 100644 --- a/python/paddle/fluid/nets.py +++ b/python/paddle/fluid/nets.py @@ -264,14 +264,14 @@ def __extend_list__(obj): if pool_type == 'max': pool_out = paddle.nn.functional.max_pool2d( - x=conv_out, + x=tmp, kernel_size=pool_size, stride=pool_stride, padding=pool_padding, ) else: pool_out = paddle.nn.functional.avg_pool2d( - x=conv_out, + x=tmp, kernel_size=pool_size, stride=pool_stride, 
padding=pool_padding, diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index 981b311dd3ce7..9ca60c57599aa 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -1179,35 +1179,35 @@ def test_api(self): out_2 = paddle.nn.functional.avg_pool2d( x=input_NHWC, kernel_size=ksize, - pool_padding=[[0, 0], [1, 1], [1, 1], [0, 0]], + padding=[[0, 0], [1, 1], [1, 1], [0, 0]], data_format="NHWC", ) out_3 = paddle.nn.functional.avg_pool2d( x=input_NCHW, kernel_size=ksize, - pool_padding=[[0, 0], [0, 0], [1, 1], [1, 1]], + padding=[[0, 0], [0, 0], [1, 1], [1, 1]], data_format="NCHW", ) out_4 = paddle.nn.functional.avg_pool2d( x=input_NCHW, kernel_size=ksize, - pool_padding=[1, 2, 1, 0], + padding=[1, 2, 1, 0], data_format="NCHW", ) # test VALID out_5 = paddle.nn.functional.avg_pool2d( x=input_NCHW, kernel_size=ksize, - pool_padding="VALID", + padding="VALID", data_format="NCHW", ) out_6 = paddle.nn.functional.max_pool2d( x=input_NHWC, kernel_size=ksize, - pool_padding="VALID", + padding="VALID", data_format="NHWC", ) @@ -1215,14 +1215,14 @@ def test_api(self): out_7 = paddle.nn.functional.avg_pool2d( x=input_NCHW, kernel_size=[4, 4], - pool_padding="SAME", + padding="SAME", data_format="NCHW", ) out_8 = paddle.nn.functional.max_pool2d( x=input_NHWC, kernel_size=[4, 4], - pool_padding="SAME", + padding="SAME", data_format="NHWC", ) @@ -1230,7 +1230,7 @@ def test_api(self): out_9 = paddle.nn.functional.avg_pool2d( x=input_NHWC_negetive, kernel_size=ksize, - pool_padding=[0, 0], + padding=[0, 0], data_format="NHWC", ) assert out_9.shape == (2, -1, 3, 3) @@ -1238,7 +1238,7 @@ def test_api(self): out_10 = paddle.nn.functional.avg_pool2d( x=input_NCHW_negetive, kernel_size=ksize, - pool_padding=[0, 0], + padding=[0, 0], data_format="NCHW", ) assert out_10.shape == (2, 3, -1, -1) @@ -1378,7 +1378,7 @@ def run_2(): out_2 = 
paddle.nn.functional.max_pool2d( x=input_NHWC, kernel_size=ksize, - pool_padding=[1, 1], + padding=[1, 1], data_format="NHWCC", ) @@ -1389,7 +1389,7 @@ def run_3(): out_3 = paddle.nn.functional.max_pool2d( x=input_NHWC, kernel_size=ksize, - pool_padding="VALIDSAME", + padding="VALIDSAME", data_format="NHWC", ) @@ -1400,7 +1400,7 @@ def run_4(): out_4 = paddle.nn.functional.max_pool2d( x=input_NHWC, kernel_size=ksize, - pool_padding="VALID", + padding="VALID", ceil_mode=True, data_format="NHWC", ) @@ -1412,7 +1412,7 @@ def run_5(): out_5 = paddle.nn.functional.max_pool2d( x=input_NHWC, kernel_size=ksize, - pool_padding=[[1, 1], [0, 0], [0, 0], [1, 1]], + padding=[[1, 1], [0, 0], [0, 0], [1, 1]], data_format="NHWC", ) From a15d0f74abae6de0aa6cd09ac00faf3b97607c90 Mon Sep 17 00:00:00 2001 From: ccrrong <1039058843@qq.com> Date: Wed, 30 Nov 2022 06:58:46 +0000 Subject: [PATCH 3/5] code format --- python/paddle/fluid/nets.py | 2 - .../fluid/tests/unittests/test_pool2d_op.py | 286 ------------------ 2 files changed, 288 deletions(-) diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py index 4e87bcf3e10d0..0f1fb58f8e128 100644 --- a/python/paddle/fluid/nets.py +++ b/python/paddle/fluid/nets.py @@ -267,14 +267,12 @@ def __extend_list__(obj): x=tmp, kernel_size=pool_size, stride=pool_stride, - padding=pool_padding, ) else: pool_out = paddle.nn.functional.avg_pool2d( x=tmp, kernel_size=pool_size, stride=pool_stride, - padding=pool_padding, ) return pool_out diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index 9ca60c57599aa..39bfd9e0ad012 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -15,7 +15,6 @@ import unittest import numpy as np -import paddle import paddle.fluid as fluid import paddle.fluid.core as core from paddle.fluid import Program, program_guard @@ -1134,291 +1133,6 @@ def init_shape(self): 
create_test_cudnn_padding_SAME_class(TestCase1_strides) -# ----- test API -class TestPool2DAPI(unittest.TestCase): - def test_api(self): - x_NHWC = np.random.random([2, 5, 5, 3]).astype("float32") - x_NCHW = np.random.random([2, 3, 5, 5]).astype("float32") - - input_NHWC = fluid.layers.data( - name="input_NHWC", - shape=[2, 5, 5, 3], - append_batch_size=False, - dtype="float32", - ) - - input_NCHW = fluid.layers.data( - name="input_NCHW", - shape=[2, 3, 5, 5], - append_batch_size=False, - dtype="float32", - ) - - input_NHWC_negetive = fluid.layers.data( - name="input_NHWC_negetive", - shape=[2, -1, 5, 3], - append_batch_size=False, - dtype="float32", - ) - - input_NCHW_negetive = fluid.layers.data( - name="input_NCHW_negetive", - shape=[2, 3, -1, -1], - append_batch_size=False, - dtype="float32", - ) - - ksize = [3, 3] - out_1 = paddle.nn.functional.max_pool2d( - x=input_NHWC, - kernel_size=ksize, - padding=[1, 1], - data_format="NHWC", - ) - - out_2 = paddle.nn.functional.avg_pool2d( - x=input_NHWC, - kernel_size=ksize, - padding=[[0, 0], [1, 1], [1, 1], [0, 0]], - data_format="NHWC", - ) - - out_3 = paddle.nn.functional.avg_pool2d( - x=input_NCHW, - kernel_size=ksize, - padding=[[0, 0], [0, 0], [1, 1], [1, 1]], - data_format="NCHW", - ) - - out_4 = paddle.nn.functional.avg_pool2d( - x=input_NCHW, - kernel_size=ksize, - padding=[1, 2, 1, 0], - data_format="NCHW", - ) - # test VALID - out_5 = paddle.nn.functional.avg_pool2d( - x=input_NCHW, - kernel_size=ksize, - padding="VALID", - data_format="NCHW", - ) - - out_6 = paddle.nn.functional.max_pool2d( - x=input_NHWC, - kernel_size=ksize, - padding="VALID", - data_format="NHWC", - ) - - # test SAME - out_7 = paddle.nn.functional.avg_pool2d( - x=input_NCHW, - kernel_size=[4, 4], - padding="SAME", - data_format="NCHW", - ) - - out_8 = paddle.nn.functional.max_pool2d( - x=input_NHWC, - kernel_size=[4, 4], - padding="SAME", - data_format="NHWC", - ) - - # test negetive - out_9 = paddle.nn.functional.avg_pool2d( - 
x=input_NHWC_negetive, - kernel_size=ksize, - padding=[0, 0], - data_format="NHWC", - ) - assert out_9.shape == (2, -1, 3, 3) - - out_10 = paddle.nn.functional.avg_pool2d( - x=input_NCHW_negetive, - kernel_size=ksize, - padding=[0, 0], - data_format="NCHW", - ) - assert out_10.shape == (2, 3, -1, -1) - - exe = fluid.Executor(place=fluid.CPUPlace()) - [res_1, res_2, res_3, res_4, res_5, res_6, res_7, res_8] = exe.run( - fluid.default_main_program(), - feed={ - "input_NHWC": x_NHWC, - "input_NCHW": x_NCHW, - "input_NHWC_negetive": x_NHWC, - "input_NCHW_negetive": x_NCHW, - }, - fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8], - ) - - assert np.allclose( - res_1, - pool2D_forward_naive( - x=x_NHWC, - ksize=ksize, - pool_type="max", - strides=[1, 1], - paddings=[1, 1], - data_format="NHWC", - ), - ) - - assert np.allclose( - res_2, - pool2D_forward_naive( - x=x_NHWC, - ksize=ksize, - pool_type="avg", - strides=[1, 1], - paddings=[1, 1, 1, 1], - data_format="NHWC", - ), - ) - assert np.allclose( - res_3, - pool2D_forward_naive( - x=x_NCHW, - ksize=ksize, - pool_type="avg", - strides=[1, 1], - paddings=[1, 1, 1, 1], - data_format="NCHW", - ), - rtol=0.07, - atol=1e-05, - ) - - assert np.allclose( - res_4, - pool2D_forward_naive( - x=x_NCHW, - ksize=ksize, - pool_type="avg", - strides=[1, 1], - paddings=[1, 2, 1, 0], - data_format="NCHW", - ), - rtol=0.07, - atol=1e-05, - ) - - # VALID - assert np.allclose( - res_5, - pool2D_forward_naive( - x=x_NCHW, - ksize=ksize, - pool_type="avg", - strides=[1, 1], - paddings=[10, 20], # any ele is ok - padding_algorithm="VALID", - data_format="NCHW", - ), - rtol=0.07, - atol=1e-05, - ) - assert np.allclose( - res_6, - pool2D_forward_naive( - x=x_NHWC, - ksize=ksize, - pool_type="max", - strides=[1, 1], - paddings=[10, 20], - padding_algorithm="VALID", - data_format="NHWC", - ), - ) - # SAME - assert np.allclose( - res_7, - pool2D_forward_naive( - x=x_NCHW, - ksize=[4, 4], - pool_type="avg", - strides=[1, 1], - 
paddings=[10, 20], - padding_algorithm="SAME", - data_format="NCHW", - ), - rtol=0.07, - atol=1e-05, - ) - - assert np.allclose( - res_8, - pool2D_forward_naive( - x=x_NHWC, - ksize=[4, 4], - pool_type="max", - strides=[1, 1], - paddings=[10, 20], - padding_algorithm="SAME", - data_format="NHWC", - ), - ) - - -class TestPool2DAPI_Error(unittest.TestCase): - def test_api(self): - input_NHWC = fluid.layers.data( - name="input_NHWC", - shape=[2, 5, 5, 3], - append_batch_size=False, - dtype="float32", - ) - ksize = [3, 3] - - # data_format value error - def run_2(): - out_2 = paddle.nn.functional.max_pool2d( - x=input_NHWC, - kernel_size=ksize, - padding=[1, 1], - data_format="NHWCC", - ) - - self.assertRaises(ValueError, run_2) - - # padding str value error - def run_3(): - out_3 = paddle.nn.functional.max_pool2d( - x=input_NHWC, - kernel_size=ksize, - padding="VALIDSAME", - data_format="NHWC", - ) - - self.assertRaises(ValueError, run_3) - - # padding str valid and ceil_mode value error - def run_4(): - out_4 = paddle.nn.functional.max_pool2d( - x=input_NHWC, - kernel_size=ksize, - padding="VALID", - ceil_mode=True, - data_format="NHWC", - ) - - self.assertRaises(ValueError, run_4) - - # padding with 8 ele. 
value error - def run_5(): - out_5 = paddle.nn.functional.max_pool2d( - x=input_NHWC, - kernel_size=ksize, - padding=[[1, 1], [0, 0], [0, 0], [1, 1]], - data_format="NHWC", - ) - - self.assertRaises(ValueError, run_5) - - class TestDygraphPool2DAPIError(unittest.TestCase): def test_errors(self): with program_guard(Program(), Program()): From ef4ed45d0472593c6a5e02ec7b0f241372b31610 Mon Sep 17 00:00:00 2001 From: ccrrong <1039058843@qq.com> Date: Wed, 7 Dec 2022 06:25:51 +0000 Subject: [PATCH 4/5] fix test_multi_precision_fp16_train --- .../fluid/contrib/tests/test_multi_precision_fp16_train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py index 011859c81ca2b..3755ece1cd42f 100644 --- a/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py +++ b/python/paddle/fluid/contrib/tests/test_multi_precision_fp16_train.py @@ -88,7 +88,7 @@ def layer_warp(block_func, input, ch_in, ch_out, count, stride): res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) res2 = layer_warp(basicblock, res1, 16, 32, n, 2) res3 = layer_warp(basicblock, res2, 32, 64, n, 2) - pool = paddle.nn.functional.max_pool2d(x=res3, kernel_size=8, stride=1) + pool = paddle.nn.functional.avg_pool2d(x=res3, kernel_size=8, stride=1) return pool From 778d73cd76230e2c484caa7122704693f50d7b8e Mon Sep 17 00:00:00 2001 From: ccrrong <1039058843@qq.com> Date: Thu, 8 Dec 2022 02:56:36 +0000 Subject: [PATCH 5/5] use adaptive_avg_pool2d when global_pooling=true --- python/paddle/fluid/tests/unittests/dist_se_resnext.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/paddle/fluid/tests/unittests/dist_se_resnext.py b/python/paddle/fluid/tests/unittests/dist_se_resnext.py index 0f32ca09ac9f9..c4dd1de92d2dd 100644 --- a/python/paddle/fluid/tests/unittests/dist_se_resnext.py +++ 
b/python/paddle/fluid/tests/unittests/dist_se_resnext.py @@ -112,7 +112,7 @@ def net(self, input, class_dim=1000): reduction_ratio=reduction_ratio, ) - pool = paddle.nn.functional.avg_pool2d(x=conv, kernel_size=7) + pool = paddle.nn.functional.adaptive_avg_pool2d(x=conv, output_size=1) drop = fluid.layers.dropout(x=pool, dropout_prob=0.2) stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0) out = fluid.layers.fc( @@ -180,7 +180,7 @@ def conv_bn_layer( return paddle.static.nn.batch_norm(input=conv, act=act) def squeeze_excitation(self, input, num_channels, reduction_ratio): - pool = paddle.nn.functional.avg_pool2d(x=input, kernel_size=0) + pool = paddle.nn.functional.adaptive_avg_pool2d(x=input, output_size=1) stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0) squeeze = fluid.layers.fc( input=pool,