From 79742c0aaf713bbde1e647d9dea926d9a9cb8270 Mon Sep 17 00:00:00 2001 From: Hanyonggong <1229369094@qq.com> Date: Tue, 5 Nov 2024 12:08:49 +0000 Subject: [PATCH 01/12] mod int64 --- test/tensorrt/test_converter_common.py | 2 +- test/tensorrt/test_converter_creation.py | 8 +- test/tensorrt/test_converter_logic.py | 12 +- test/tensorrt/test_converter_manipulation.py | 53 +++++---- test/tensorrt/test_converter_math.py | 10 +- test/tensorrt/test_trt_marker_arrange.py | 52 --------- test/tensorrt/test_trt_marker_bmm.py | 72 ------------ test/tensorrt/test_trt_marker_cast.py | 57 --------- .../test_trt_marker_conv2d_transpose.py | 64 ----------- .../test_trt_marker_deformable_conv.py | 75 ------------ .../test_trt_marker_depthwise_conv2d.py | 62 ---------- ...t_trt_marker_depthwise_conv2d_transpose.py | 64 ----------- test/tensorrt/test_trt_marker_divide.py | 58 ---------- test/tensorrt/test_trt_marker_dropout.py | 59 ---------- .../test_trt_marker_elementwise_pow.py | 58 ---------- test/tensorrt/test_trt_marker_expand.py | 56 --------- test/tensorrt/test_trt_marker_floor_divide.py | 58 ---------- test/tensorrt/test_trt_marker_gather.py | 60 ---------- test/tensorrt/test_trt_marker_gather_nd.py | 63 ---------- test/tensorrt/test_trt_marker_gelu.py | 57 --------- .../tensorrt/test_trt_marker_greater_equal.py | 58 ---------- test/tensorrt/test_trt_marker_group_norm.py | 108 ------------------ test/tensorrt/test_trt_marker_index_select.py | 61 ---------- test/tensorrt/test_trt_marker_layer_norm.py | 58 ---------- test/tensorrt/test_trt_marker_logical_not.py | 56 --------- test/tensorrt/test_trt_marker_matmul.py | 62 ---------- test/tensorrt/test_trt_marker_maximum.py | 58 ---------- test/tensorrt/test_trt_marker_minimum.py | 58 ---------- test/tensorrt/test_trt_marker_multiply.py | 58 ---------- test/tensorrt/test_trt_marker_non_zero.py | 56 --------- test/tensorrt/test_trt_marker_pool2d.py | 63 ---------- test/tensorrt/test_trt_marker_relu.py | 56 --------- test/tensorrt/test_trt_marker_remainder.py | 58 ---------- test/tensorrt/test_trt_marker_reshape.py | 59 ---------- test/tensorrt/test_trt_marker_scale.py | 56 --------- test/tensorrt/test_trt_marker_shape.py | 56 --------- test/tensorrt/test_trt_marker_sign.py | 56 --------- test/tensorrt/test_trt_marker_silu.py | 56 --------- test/tensorrt/test_trt_marker_softmax.py | 71 ------------ test/tensorrt/test_trt_marker_split.py | 62 ---------- .../test_trt_marker_split_with_num.py | 63 ---------- test/tensorrt/test_trt_marker_squeeze.py | 59 ---------- test/tensorrt/test_trt_marker_subtract.py | 58 ---------- test/tensorrt/test_trt_marker_transpose.py | 68 ----------- test/tensorrt/test_trt_marker_unsqueeze.py | 60 ---------- 45 files changed, 47 insertions(+), 2487 deletions(-) delete mode 100644 test/tensorrt/test_trt_marker_arrange.py delete mode 100644 test/tensorrt/test_trt_marker_bmm.py delete mode 100644 test/tensorrt/test_trt_marker_cast.py delete mode 100644 test/tensorrt/test_trt_marker_conv2d_transpose.py delete mode 100644 test/tensorrt/test_trt_marker_deformable_conv.py delete mode 100644 test/tensorrt/test_trt_marker_depthwise_conv2d.py delete mode 100644 test/tensorrt/test_trt_marker_depthwise_conv2d_transpose.py delete mode 100644 test/tensorrt/test_trt_marker_divide.py delete mode 100644 test/tensorrt/test_trt_marker_dropout.py delete mode 100644 test/tensorrt/test_trt_marker_elementwise_pow.py delete mode 100644 test/tensorrt/test_trt_marker_expand.py delete mode 100644 test/tensorrt/test_trt_marker_floor_divide.py delete mode 
100644 test/tensorrt/test_trt_marker_gather.py delete mode 100644 test/tensorrt/test_trt_marker_gather_nd.py delete mode 100644 test/tensorrt/test_trt_marker_gelu.py delete mode 100644 test/tensorrt/test_trt_marker_greater_equal.py delete mode 100644 test/tensorrt/test_trt_marker_group_norm.py delete mode 100644 test/tensorrt/test_trt_marker_index_select.py delete mode 100644 test/tensorrt/test_trt_marker_layer_norm.py delete mode 100644 test/tensorrt/test_trt_marker_logical_not.py delete mode 100644 test/tensorrt/test_trt_marker_matmul.py delete mode 100644 test/tensorrt/test_trt_marker_maximum.py delete mode 100644 test/tensorrt/test_trt_marker_minimum.py delete mode 100644 test/tensorrt/test_trt_marker_multiply.py delete mode 100644 test/tensorrt/test_trt_marker_non_zero.py delete mode 100644 test/tensorrt/test_trt_marker_pool2d.py delete mode 100644 test/tensorrt/test_trt_marker_relu.py delete mode 100644 test/tensorrt/test_trt_marker_remainder.py delete mode 100644 test/tensorrt/test_trt_marker_reshape.py delete mode 100644 test/tensorrt/test_trt_marker_scale.py delete mode 100644 test/tensorrt/test_trt_marker_shape.py delete mode 100644 test/tensorrt/test_trt_marker_sign.py delete mode 100644 test/tensorrt/test_trt_marker_silu.py delete mode 100644 test/tensorrt/test_trt_marker_softmax.py delete mode 100644 test/tensorrt/test_trt_marker_split.py delete mode 100644 test/tensorrt/test_trt_marker_split_with_num.py delete mode 100644 test/tensorrt/test_trt_marker_squeeze.py delete mode 100644 test/tensorrt/test_trt_marker_subtract.py delete mode 100644 test/tensorrt/test_trt_marker_transpose.py delete mode 100644 test/tensorrt/test_trt_marker_unsqueeze.py diff --git a/test/tensorrt/test_converter_common.py b/test/tensorrt/test_converter_common.py index 1c320374b3f526..f6f738324fcd9a 100644 --- a/test/tensorrt/test_converter_common.py +++ b/test/tensorrt/test_converter_common.py @@ -166,7 +166,7 @@ def setUp(self): self.python_api = bilinear_python_api self.api_args = { "x": np.random.random([2, 3, 6, 10]).astype("float32"), - "OutSize": np.array([12, 12], dtype="int32"), + "OutSize": np.array([12, 12], dtype="int64"), "SizeTensor": None, "Scale": None, "attrs": { diff --git a/test/tensorrt/test_converter_creation.py b/test/tensorrt/test_converter_creation.py index 56f612a20959ea..e12b36253b7c3d 100644 --- a/test/tensorrt/test_converter_creation.py +++ b/test/tensorrt/test_converter_creation.py @@ -64,9 +64,9 @@ class TestArangeTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.arange self.api_args = { - "start": np.array([0]).astype("int32"), - "end": np.array([6]).astype("int32"), - "step": np.array([1]).astype("int32"), + "start": np.array([0]).astype("int64"), + "end": np.array([6]).astype("int64"), + "step": np.array([1]).astype("int64"), } self.program_config = {"feed_list": []} self.min_shape = {} @@ -95,7 +95,7 @@ class TestFullWithTensorTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.tensor.fill_constant self.api_args = { - "shape": np.array([1]).astype("int32"), + "shape": np.array([1]).astype("int64"), "dtype": "float32", "value": np.array([0.0]).astype("float32"), } diff --git a/test/tensorrt/test_converter_logic.py b/test/tensorrt/test_converter_logic.py index 00ace97ab27af4..6300dd25bfb103 100644 --- a/test/tensorrt/test_converter_logic.py +++ b/test/tensorrt/test_converter_logic.py @@ -35,12 +35,12 @@ def test_trt_result(self): self.check_trt_result() -class TestGreaterThanInt32TRTPattern(TensorRTBaseTest): +class 
TestGreaterThanInt64TRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.greater_than self.api_args = { - "x": np.random.randn(3).astype(np.int32), - "y": np.random.randn(3).astype(np.int32), + "x": np.random.randn(3).astype(np.int64), + "y": np.random.randn(3).astype(np.int64), } self.program_config = {"feed_list": ["x", "y"]} self.min_shape = {"x": [1], "y": [1]} @@ -65,12 +65,12 @@ def test_trt_result(self): self.check_trt_result() -class TestLessThanInt32TRTPattern(TensorRTBaseTest): +class TestLessThanInt64TRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.less_than self.api_args = { - "x": np.random.randn(3).astype(np.int32), - "y": np.random.randn(3).astype(np.int32), + "x": np.random.randn(3).astype(np.int64), + "y": np.random.randn(3).astype(np.int64), } self.program_config = {"feed_list": ["x", "y"]} self.min_shape = {"x": [1], "y": [1]} diff --git a/test/tensorrt/test_converter_manipulation.py b/test/tensorrt/test_converter_manipulation.py index a325f2a7e4d197..0a1c892156bf77 100644 --- a/test/tensorrt/test_converter_manipulation.py +++ b/test/tensorrt/test_converter_manipulation.py @@ -32,18 +32,24 @@ def setUp(self): self.min_shape = {"x": [3, 3]} self.max_shape = {"x": [10, 3]} + def test_trt_result(self): + self.check_trt_result() + class TestCast1TRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.cast self.api_args = { - "x": np.random.randn(7, 3).astype(np.float16), - "out_dtype": np.int32, + "x": np.random.randn(7, 3).astype(np.int32), + "out_dtype": np.int64, } self.program_config = {"feed_list": ["x"]} self.min_shape = {"x": [3, 3]} self.max_shape = {"x": [10, 3]} + def test_trt_result(self): + self.check_trt_result() + class TestCast2TRTPattern(TensorRTBaseTest): def setUp(self): @@ -56,6 +62,9 @@ def setUp(self): self.min_shape = {"x": [3, 3]} self.max_shape = {"x": [10, 3]} + def test_trt_result(self): + self.check_trt_result() + class TestConcatTRTPattern(TensorRTBaseTest): def setUp(self): @@ -112,7 +121,7 @@ def setUp(self): self.python_api = paddle.expand self.api_args = { "x": np.random.randn(1, 3).astype("float32"), - "shape": np.array([6, 3]).astype("int32"), + "shape": np.array([6, 3]).astype("int64"), } self.program_config = {"feed_list": ["x", "shape"]} self.min_shape = {"x": [1, 3]} @@ -183,7 +192,7 @@ def setUp(self): self.api_args = { "x": np.array([[1, 2, 3]]).astype("float32"), "y": np.array([[1, 2, 3], [4, 5, 6], [1, 2, 3], [4, 5, 6]]).astype( - "int32" + "int64" ), } self.program_config = {"feed_list": ["x", "y"]} @@ -200,8 +209,8 @@ def setUp(self): self.api_args = { "x": np.random.random([5, 4, 5, 6]).astype("float32"), "axes": [0, 1, 2], - "starts": np.array([1, 0, 2]).astype("int32"), - "ends": np.array([3, 3, 4]).astype("int32"), + "starts": np.array([1, 0, 2]).astype("int64"), + "ends": np.array([3, 3, 4]).astype("int64"), } self.program_config = {"feed_list": ["x", "starts", "ends"]} self.min_shape = {"x": [3, 4, 5, 6]} @@ -233,7 +242,7 @@ def setUp(self): self.api_args = { "x": np.random.randn(3, 9, 5).astype(np.float32), "num_or_sections": 3, - "axis": np.array([1]).astype("int32"), + "axis": np.array([1]).astype("int64"), } self.program_config = {"feed_list": ["x", "axis"]} self.min_shape = {"x": [1, 9, 5]} @@ -249,7 +258,7 @@ def setUp(self): self.api_args = { "x": np.random.randn(1, 2).astype(np.float32), "num_or_sections": 2, - "axis": np.array([1]).astype("int32"), + "axis": np.array([1]).astype("int64"), } self.program_config = {"feed_list": ["x", "axis"]} self.min_shape = 
{"x": [1, 2]} @@ -297,7 +306,7 @@ def setUp(self): self.api_args = { "x": np.random.randn(3, 9, 5).astype("float32"), "num_or_sections": [2, 4, 3], - "axis": np.array([1]).astype("int32"), + "axis": np.array([1]).astype("int64"), } self.program_config = {"feed_list": ["x", "axis"]} self.min_shape = {"x": [1, 9, 5]} @@ -316,7 +325,7 @@ def setUp(self): self.python_api = split_api self.api_args = { "x": np.random.randn(3, 9, 5).astype("float32"), - "num_or_sections": np.array([2, 4, 3]).astype("int32"), + "num_or_sections": np.array([2, 4, 3]).astype("int64"), "axis": 1, } self.program_config = {"feed_list": ["x", "num_or_sections"]} @@ -332,8 +341,8 @@ def setUp(self): self.python_api = split_api self.api_args = { "x": np.random.randn(3, 9, 5).astype("float32"), - "num_or_sections": np.array([2, 4, 3]).astype("int32"), - "axis": np.array([1]).astype("int32"), + "num_or_sections": np.array([2, 4, 3]).astype("int64"), + "axis": np.array([1]).astype("int64"), } self.program_config = {"feed_list": ["x", "num_or_sections", "axis"]} self.min_shape = {"x": [1, 9, 5]} @@ -367,9 +376,9 @@ def setUp(self): self.python_api = paddle.stack self.api_args = { "x": [ - np.array([[1, 2]]).astype("int32"), - np.array([[3, 4]]).astype("int32"), - np.array([[5, 6]]).astype("int32"), + np.array([[1, 2]]).astype("int64"), + np.array([[3, 4]]).astype("int64"), + np.array([[5, 6]]).astype("int64"), ], "axis": -1, } @@ -403,7 +412,7 @@ class TestStrideSliceCase2TRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.strided_slice self.api_args = { - "x": np.random.random([3, 4, 10]).astype("int32"), + "x": np.random.random([3, 4, 10]).astype("int64"), "axes": [0, 1, 2], "starts": [1, 0, 2], "ends": [2, 3, 4], @@ -441,9 +450,9 @@ def setUp(self): self.api_args = { "x": np.random.random([5, 5, 5]).astype("float32"), "axes": [0, 1, 2], - "starts": np.array([1, 0, 0]).astype("int32"), - "ends": np.array([2, 1, 3]).astype("int32"), - "strides": np.array([1, 1, 1]).astype("int32"), + "starts": np.array([1, 0, 0]).astype("int64"), + "ends": np.array([2, 1, 3]).astype("int64"), + "strides": np.array([1, 1, 1]).astype("int64"), } self.program_config = {"feed_list": ["x", "starts", "ends", "strides"]} self.min_shape = {"x": [1, 5, 5]} @@ -459,9 +468,9 @@ def setUp(self): self.api_args = { "x": np.random.random([3, 4, 10]).astype("float32"), "axes": [0, 1, 2], - "starts": np.array([0, -1, 0]).astype("int32"), - "ends": np.array([2, -3, 5]).astype("int32"), - "strides": np.array([1, -1, 1]).astype("int32"), + "starts": np.array([0, -1, 0]).astype("int64"), + "ends": np.array([2, -3, 5]).astype("int64"), + "strides": np.array([1, -1, 1]).astype("int64"), } self.program_config = {"feed_list": ["x", "starts", "ends", "strides"]} self.min_shape = {"x": [1, 4, 10]} diff --git a/test/tensorrt/test_converter_math.py b/test/tensorrt/test_converter_math.py index dab54c20f353de..c5b474ccad4e0a 100644 --- a/test/tensorrt/test_converter_math.py +++ b/test/tensorrt/test_converter_math.py @@ -122,17 +122,17 @@ class TestRemainderIntTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.remainder self.api_args = { - "x": np.random.randint(1, 10, size=(2, 3)).astype(np.int32), + "x": np.random.randint(1, 10, size=(2, 3)).astype(np.int64), "y": np.random.randint(1, 10, size=(2, 3)).astype( - np.int32 + np.int64 ), # Ensure y is non-zero } self.dynamic_shape_data = { "x": lambda shape: np.random.randint(1, 10, size=shape).astype( - np.int32 + np.int64 ), "y": lambda shape: np.random.randint(1, 10, 
size=shape).astype( - np.int32 + np.int64 ), } self.program_config = {"feed_list": ["x", "y"]} @@ -162,7 +162,7 @@ class TestSumTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.sum self.api_args = { - "x": np.random.randn(2, 4, 6).astype(np.int32), + "x": np.random.randn(2, 4, 6).astype(np.int64), "axis": [1, 1], } self.program_config = {"feed_list": ["x"]} diff --git a/test/tensorrt/test_trt_marker_arrange.py b/test/tensorrt/test_trt_marker_arrange.py deleted file mode 100644 index 9a0ea47f8122f3..00000000000000 --- a/test/tensorrt/test_trt_marker_arrange.py +++ /dev/null @@ -1,52 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestArangeTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - arange_out = paddle.arange(5) - out = paddle.assign(arange_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = {} - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.arange"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_bmm.py b/test/tensorrt/test_trt_marker_bmm.py deleted file mode 100644 index 5f5f33aa29639b..00000000000000 --- a/test/tensorrt/test_trt_marker_bmm.py +++ /dev/null @@ -1,72 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestBmmTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[2, 2, 3], dtype='float32' - ) - y = paddle.static.data( - name='y', shape=[2, 3, 2], dtype='float32' - ) - bmm_out = paddle.bmm(x, y) - out = paddle.assign(bmm_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array( - [ - [[1.0, 1.0, 1.0], [2.0, 2.0, 2.0]], - [[3.0, 3.0, 3.0], [4.0, 4.0, 4.0]], - ] - ).astype("float32"), - "y": np.array( - [ - [[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]], - [[4.0, 4.0], [5.0, 5.0], [6.0, 6.0]], - ] - ).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.bmm"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_cast.py b/test/tensorrt/test_trt_marker_cast.py deleted file mode 100644 index 89dade967786af..00000000000000 --- a/test/tensorrt/test_trt_marker_cast.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestCastTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[3, 4], dtype='float32') - cast_out = paddle.cast(x, 'bool') - out = paddle.assign(cast_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([3, 4]).astype("float32"), - } - - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.cast"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_conv2d_transpose.py b/test/tensorrt/test_trt_marker_conv2d_transpose.py deleted file mode 100644 index e3c09419697681..00000000000000 --- a/test/tensorrt/test_trt_marker_conv2d_transpose.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestConv2dTransposeTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[2, 3, 8, 8], dtype='float32' - ) - w = paddle.static.data( - name='w', shape=[3, 6, 3, 3], dtype='float32' - ) - Conv2dTranspose_out = paddle.nn.functional.conv2d_transpose( - x, w - ) - out = paddle.assign(Conv2dTranspose_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([2, 3, 8, 8]).astype("float32"), - "w": np.random.random([3, 6, 3, 3]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.conv2d_transpose"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_deformable_conv.py b/test/tensorrt/test_trt_marker_deformable_conv.py deleted file mode 100644 index 709ba36a83b01c..00000000000000 --- a/test/tensorrt/test_trt_marker_deformable_conv.py +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestDeformableConvTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[8, 1, 28, 28], dtype='float32' - ) - kh, kw = 3, 3 - weight = paddle.static.data( - name='weight', shape=[16, 1, kh, kw], dtype='float32' - ) - offset = paddle.static.data( - name='offset', - shape=[8, 2 * kh * kw, 26, 26], - dtype='float32', - ) - deformable_conv_out = paddle.vision.ops.deform_conv2d( - x, offset, weight - ) - out = paddle.assign(deformable_conv_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([8, 1, 28, 28]).astype("float32"), - "weight": np.random.random([16, 1, kh, kw]).astype( - "float32" - ), - "offset": np.random.random([8, 2 * kh * kw, 26, 26]).astype( - "float32" - ), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.deformable_conv"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_depthwise_conv2d.py b/test/tensorrt/test_trt_marker_depthwise_conv2d.py deleted file mode 100644 index 253df4a40641ac..00000000000000 --- a/test/tensorrt/test_trt_marker_depthwise_conv2d.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestDepthwiseConv2dTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[2, 3, 8, 8], dtype='float32' - ) - w = paddle.static.data( - name='w', shape=[6, 1, 3, 3], dtype='float32' - ) - Conv2d_out = paddle.nn.functional.conv2d(x, w, groups=3) - out = paddle.assign(Conv2d_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([2, 3, 8, 8]).astype("float32"), - "w": np.random.random([6, 1, 3, 3]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.depthwise_conv2d"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_depthwise_conv2d_transpose.py b/test/tensorrt/test_trt_marker_depthwise_conv2d_transpose.py deleted file mode 100644 index 2194176e3bdc72..00000000000000 --- a/test/tensorrt/test_trt_marker_depthwise_conv2d_transpose.py +++ /dev/null @@ -1,64 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestDepthwiseConv2dTransposeTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[2, 3, 8, 8], dtype='float32' - ) - w = paddle.static.data( - name='w', shape=[3, 1, 3, 3], dtype='float32' - ) - Conv2dTranspose_out = paddle.nn.functional.conv2d_transpose( - x, w, groups=3 - ) - out = paddle.assign(Conv2dTranspose_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([2, 3, 8, 8]).astype("float32"), - "w": np.random.random([3, 1, 3, 3]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.depthwise_conv2d_transpose"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_divide.py b/test/tensorrt/test_trt_marker_divide.py deleted file mode 100644 index 387dcfa79acda5..00000000000000 --- a/test/tensorrt/test_trt_marker_divide.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestDivideTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[3], dtype='float32') - y = paddle.static.data(name='y', shape=[3], dtype='float32') - divide_out = paddle.divide(x, y) - out = paddle.assign(divide_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([2, 3, 4]).astype("float32"), - "y": np.array([1, 5, 2]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.divide"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_dropout.py b/test/tensorrt/test_trt_marker_dropout.py deleted file mode 100644 index f22e5e470a2f21..00000000000000 --- a/test/tensorrt/test_trt_marker_dropout.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestDropoutTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - paddle.seed(2023) - x = paddle.static.data(name='x', shape=[2, 3], dtype='float32') - dropout_out = paddle.nn.functional.dropout( - x, 0.5, training=False - ) - out = paddle.assign(dropout_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([[1, 2, 3], [4, 5, 6]]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.dropout"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_elementwise_pow.py b/test/tensorrt/test_trt_marker_elementwise_pow.py deleted file mode 100644 index 8bb3ca1c82cf43..00000000000000 --- a/test/tensorrt/test_trt_marker_elementwise_pow.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestElementWisePowTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[3], dtype='float32') - y = paddle.static.data(name='y', shape=[1], dtype='float32') - pow_out = paddle.pow(x, y) - out = paddle.assign(pow_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([1, 2, 3]).astype("float32"), - "y": np.array([2]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.elementwise_pow"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_expand.py b/test/tensorrt/test_trt_marker_expand.py deleted file mode 100644 index 5a0c3d51303bfe..00000000000000 --- a/test/tensorrt/test_trt_marker_expand.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestExpandTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name="x", shape=[3], dtype="float32") - expand_out = paddle.expand(x, shape=[2, 3]) - out = paddle.assign(expand_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([[1, 2, 3]]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.expand"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_floor_divide.py b/test/tensorrt/test_trt_marker_floor_divide.py deleted file mode 100644 index 872fbbbfbd3937..00000000000000 --- a/test/tensorrt/test_trt_marker_floor_divide.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestFloorDivideTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[4], dtype='float32') - y = paddle.static.data(name='y', shape=[4], dtype='float32') - floor_divide_out = paddle.floor_divide(x, y) - out = paddle.assign(floor_divide_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([2, 3, 8, 7]).astype("float32"), - "y": np.array([1, 5, 3, 3]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.floor_divide"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_gather.py b/test/tensorrt/test_trt_marker_gather.py deleted file mode 100644 index 3d25f920ef146e..00000000000000 --- a/test/tensorrt/test_trt_marker_gather.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestGatherTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[3, 2], dtype='float32') - index = paddle.static.data( - name='index', shape=[2], dtype='int32' - ) - gather_out = paddle.gather(x, index, axis=0) - out = paddle.assign(gather_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([[1, 2], [3, 4], [5, 6]]).astype("float32"), - "index": np.array([0, 1]).astype("int32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.gather"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_gather_nd.py b/test/tensorrt/test_trt_marker_gather_nd.py deleted file mode 100644 index 2f7f7f214ee4bd..00000000000000 --- a/test/tensorrt/test_trt_marker_gather_nd.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestGatherNdTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[1, 3, 4], dtype='float32' - ) - index = paddle.static.data( - name='index', shape=[1, 2, 2], dtype='int32' - ) - gather_nd_out = paddle.gather_nd(x, index) - out = paddle.assign(gather_nd_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([1, 3, 4]).astype("float32"), - "index": np.random.random([1, 2, 2]).astype("int32"), - } - - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.gather_nd"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_gelu.py b/test/tensorrt/test_trt_marker_gelu.py deleted file mode 100644 index 54a8498a4bd33a..00000000000000 --- a/test/tensorrt/test_trt_marker_gelu.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestGeluTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[2, 2], dtype='float32') - m = paddle.nn.GELU() - out = m(x) - out = paddle.assign(out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([[-1, 0.5], [1, 1.5]]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.gelu"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_greater_equal.py b/test/tensorrt/test_trt_marker_greater_equal.py deleted file mode 100644 index 509f083d30021a..00000000000000 --- a/test/tensorrt/test_trt_marker_greater_equal.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestGreaterEqualTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[3], dtype='float32') - y = paddle.static.data(name='y', shape=[3], dtype='float32') - greater_equal_out = paddle.greater_equal(x, y) - out = paddle.assign(greater_equal_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([1, 2, 3]).astype("float32"), - "y": np.array([1, 3, 2]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.greater_equal"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_group_norm.py b/test/tensorrt/test_trt_marker_group_norm.py deleted file mode 100644 index 811ea4b940fd9c..00000000000000 --- a/test/tensorrt/test_trt_marker_group_norm.py +++ /dev/null @@ -1,108 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core -from paddle.pir.core import create_parameter - - -class TestGroupNormSiluTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - for x_shape in [[2, 6]]: - dtype = None - if core.is_compiled_with_xpu(): - dtype = 'float32' - elif core.is_compiled_with_cuda(): - dtype = 'float16' - for epilson in [1e-5]: - for groups in [2]: - rand_value = ( - 0.001 - * paddle.rand(shape=[x_shape[1]], dtype=dtype).numpy() - ) - with paddle.pir_utils.IrGuard(): - start_prog = paddle.static.Program() - main_prog = paddle.static.Program() - with paddle.pir.core.program_guard( - main_prog, start_prog - ): - x = paddle.static.data( - name='x', shape=x_shape, dtype=dtype - ) - w = create_parameter( - shape=[x_shape[1]], - dtype=dtype, - initializer=paddle.nn.initializer.Assign( - rand_value - ), - ) - b = create_parameter( - shape=[x_shape[1]], - dtype=dtype, - initializer=paddle.nn.initializer.Assign( - rand_value - ), - ) - group_norm_out = paddle.nn.functional.group_norm( - x, - num_groups=groups, - epsilon=epilson, - weight=w, - bias=b, - data_format="NCHW", - ) - out = paddle.nn.functional.silu(group_norm_out) - out = paddle.assign(out) - if core.is_compiled_with_xpu(): - self.pass_attr_list = [ - {'trt_op_marker_pass': {}} - ] - elif core.is_compiled_with_cuda(): - self.pass_attr_list = [ - {'trt_op_marker_pass': {}} - ] - self.feeds = { - "x": np.random.random(x_shape).astype(dtype), - } - self.fetch_list = [out] - if core.is_compiled_with_xpu(): - self.valid_op_map = { - "pd_op.group_norm_silu_xpu": 0, - } - elif core.is_compiled_with_cuda(): - self.valid_op_map = { - "pd_op.add_group_norm_silu": 0, - } - - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.group_norm"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_index_select.py b/test/tensorrt/test_trt_marker_index_select.py deleted file mode 100644 index 44fc428e9daef6..00000000000000 --- a/test/tensorrt/test_trt_marker_index_select.py +++ /dev/null @@ -1,61 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestIndexSelectTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[3, 4], dtype='int32') - index = paddle.static.data( - name='index', shape=[3], dtype='int32' - ) - index_select_out = paddle.index_select(x, index) - out = paddle.assign(index_select_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([3, 4]).astype("int32"), - "index": np.random.random([3]).astype("int32"), - } - - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.index_select"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_layer_norm.py b/test/tensorrt/test_trt_marker_layer_norm.py deleted file mode 100644 index 7687543f2049be..00000000000000 --- a/test/tensorrt/test_trt_marker_layer_norm.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestLayer_normTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[2, 2, 2, 3], dtype='float32' - ) - layer_norm_out = paddle.nn.functional.layer_norm(x, x.shape[1:]) - out = paddle.assign(layer_norm_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([2, 2, 2, 3]).astype("float32") - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.layer_norm"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_logical_not.py b/test/tensorrt/test_trt_marker_logical_not.py deleted file mode 100644 index a5cf6ae018877f..00000000000000 --- a/test/tensorrt/test_trt_marker_logical_not.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestLogicalNotTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[4], dtype='bool') - logical_not_out = paddle.logical_not(x) - out = paddle.assign(logical_not_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([True, False, True, False]).astype("bool"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.logical_not"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_matmul.py b/test/tensorrt/test_trt_marker_matmul.py deleted file mode 100644 index b40106e2db6236..00000000000000 --- a/test/tensorrt/test_trt_marker_matmul.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestMatmulTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[10, 5, 2], dtype='float32' - ) - y = paddle.static.data( - name='y', shape=[10, 2, 5], dtype='float32' - ) - matmul_out = paddle.matmul(x, y) - out = paddle.assign(matmul_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([10, 5, 2]).astype("float32"), - "y": np.random.random([10, 2, 5]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.matmul"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_maximum.py b/test/tensorrt/test_trt_marker_maximum.py deleted file mode 100644 index 0d52018fe26214..00000000000000 --- a/test/tensorrt/test_trt_marker_maximum.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestMaximumTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[2, 2], dtype='int32') - y = paddle.static.data(name='y', shape=[2, 2], dtype='int32') - maximum_out = paddle.maximum(x, y) - out = paddle.assign(maximum_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([[1, 2], [7, 8]]).astype("int32"), - "y": np.array([[3, 4], [5, 6]]).astype("int32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.maximum"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_minimum.py b/test/tensorrt/test_trt_marker_minimum.py deleted file mode 100644 index ccef074182fb76..00000000000000 --- a/test/tensorrt/test_trt_marker_minimum.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. 
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestMinimumTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[2, 2], dtype='int32') - y = paddle.static.data(name='y', shape=[2, 2], dtype='int32') - minimum_out = paddle.minimum(x, y) - out = paddle.assign(minimum_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([[1, 2], [7, 8]]).astype("int32"), - "y": np.array([[3, 4], [5, 6]]).astype("int32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.minimum"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_multiply.py b/test/tensorrt/test_trt_marker_multiply.py deleted file mode 100644 index b48ea64a0ce89d..00000000000000 --- a/test/tensorrt/test_trt_marker_multiply.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestMultiplyTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[2, 2], dtype='float32') - y = paddle.static.data(name='y', shape=[2, 2], dtype='float32') - multiply_out = paddle.multiply(x, y) - out = paddle.assign(multiply_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([[1, 2], [3, 4]]).astype("float32"), - "y": np.array([[5, 6], [7, 8]]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.multiply"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_non_zero.py b/test/tensorrt/test_trt_marker_non_zero.py deleted file mode 100644 index d719dcf4b0fb15..00000000000000 --- a/test/tensorrt/test_trt_marker_non_zero.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestNonZeroTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[4], dtype='float32') - out_z1_tuple = paddle.nonzero(x) - out = paddle.assign(out_z1_tuple) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([0.0, 1.0, 0.0, 3.0]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.nonzero"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_pool2d.py b/test/tensorrt/test_trt_marker_pool2d.py deleted file mode 100644 index 8c7cbfa1800075..00000000000000 --- a/test/tensorrt/test_trt_marker_pool2d.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestPool2dTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[1, 3, 32, 32], dtype='float32' - ) - AvgPool2D = paddle.nn.AvgPool2D( - kernel_size=2, stride=2, padding=0 - ) - AvgPool2D_output = AvgPool2D(x) - out = paddle.assign(AvgPool2D_output) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.uniform(-1, 1, [1, 3, 32, 32]).astype( - "float32" - ), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.pool2d"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_relu.py b/test/tensorrt/test_trt_marker_relu.py deleted file mode 100644 index d008b8d74aa222..00000000000000 --- a/test/tensorrt/test_trt_marker_relu.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestReluTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[3], dtype='float32') - relu_out = paddle.nn.functional.relu(x) - out = paddle.assign(relu_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([-2, 0, 1]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.relu"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_remainder.py b/test/tensorrt/test_trt_marker_remainder.py deleted file mode 100644 index 205dad8a1d8e0e..00000000000000 --- a/test/tensorrt/test_trt_marker_remainder.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestRemainderTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[4], dtype='float32') - y = paddle.static.data(name='y', shape=[4], dtype='float32') - remainder_out = paddle.remainder(x, y) - out = paddle.assign(remainder_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([2, 3, 8, 7]).astype("float32"), - "y": np.array([1, 5, 3, 3]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.remainder"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_reshape.py b/test/tensorrt/test_trt_marker_reshape.py deleted file mode 100644 index 2d5ccb5482a20e..00000000000000 --- a/test/tensorrt/test_trt_marker_reshape.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestReshapeTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[2, 4, 6], dtype='float32' - ) - shape_tensor = [-1, 0, 3, 2] - relu_out = paddle.reshape(x, shape_tensor) - out = paddle.assign(relu_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([2, 4, 6]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.reshape"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_scale.py b/test/tensorrt/test_trt_marker_scale.py deleted file mode 100644 index ce698ba686f808..00000000000000 --- a/test/tensorrt/test_trt_marker_scale.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestScaleTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.arange(6).astype("float32").reshape([2, 3]) - scale_out = paddle.scale(x, scale=2.0, bias=1.0) - out = paddle.assign(scale_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.arange(6).astype("float32").reshape([2, 3]), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.scale"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_shape.py b/test/tensorrt/test_trt_marker_shape.py deleted file mode 100644 index eba3206c8a9c40..00000000000000 --- a/test/tensorrt/test_trt_marker_shape.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestShapeTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name="x", shape=[2, 2], dtype="float32") - shape_out = paddle.shape(x) - out = paddle.assign(shape_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([[1, 2], [7, 8]]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.shape"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_sign.py b/test/tensorrt/test_trt_marker_sign.py deleted file mode 100644 index f3cb7ee65ec6dd..00000000000000 --- a/test/tensorrt/test_trt_marker_sign.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestSignTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[4], dtype='float32') - sign_out = paddle.sign(x=x) - out = paddle.assign(sign_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([3.0, 0.0, -2.0, 1.7]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.sign"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_silu.py b/test/tensorrt/test_trt_marker_silu.py deleted file mode 100644 index 883dfd4b6b61b2..00000000000000 --- a/test/tensorrt/test_trt_marker_silu.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestSiluTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[4], dtype='float32') - Silu_out = paddle.nn.functional.silu(x) - out = paddle.assign(Silu_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([[1, 2, 3, 4]]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.silu"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_softmax.py b/test/tensorrt/test_trt_marker_softmax.py deleted file mode 100644 index 16ac4af998fde2..00000000000000 --- a/test/tensorrt/test_trt_marker_softmax.py +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestSoftmaxTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[2, 3, 4], dtype='float32' - ) - softmax_out = paddle.nn.functional.softmax(x) - out = paddle.assign(softmax_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array( - [ - [ - [2.0, 3.0, 4.0, 5.0], - [3.0, 4.0, 5.0, 6.0], - [7.0, 8.0, 8.0, 9.0], - ], - [ - [1.0, 2.0, 3.0, 4.0], - [5.0, 6.0, 7.0, 8.0], - [6.0, 7.0, 8.0, 9.0], - ], - ] - ).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.softmax"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_split.py b/test/tensorrt/test_trt_marker_split.py deleted file mode 100644 index cfc49b589be625..00000000000000 --- a/test/tensorrt/test_trt_marker_split.py +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestSplitTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[3, 9, 5], dtype='float32' - ) - num_or_sections = [2, 3, 4] - axis = 1 - output0, output1, output2 = paddle.split( - x, num_or_sections, axis - ) - out = paddle.assign(output0) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([3, 9, 5]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.split"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_split_with_num.py b/test/tensorrt/test_trt_marker_split_with_num.py deleted file mode 100644 index f27f965503c957..00000000000000 --- a/test/tensorrt/test_trt_marker_split_with_num.py +++ /dev/null @@ -1,63 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestSplitWithNumTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[3, 9, 5], dtype='int64') - num_or_sections = 3 - axis = 1 - split_out = paddle.split( - x, num_or_sections=num_or_sections, axis=axis - ) - out = paddle.assign(split_out[0]) - out1 = paddle.assign(split_out[1]) - out2 = paddle.assign(split_out[2]) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([3, 9, 5]).astype("int64"), - } - - self.fetch_list = [out, out1, out2] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.split_with_num"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_squeeze.py b/test/tensorrt/test_trt_marker_squeeze.py deleted file mode 100644 index 0d2b84749718f1..00000000000000 --- a/test/tensorrt/test_trt_marker_squeeze.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestSqueezeTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[5, 1, 10], dtype='float32' - ) - squeeze_out = paddle.squeeze(x, axis=1) - out = paddle.assign(squeeze_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([5, 1, 10]).astype("float32"), - } - - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.squeeze"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == "__main__": - unittest.main() diff --git a/test/tensorrt/test_trt_marker_subtract.py b/test/tensorrt/test_trt_marker_subtract.py deleted file mode 100644 index 2ee722b191b46c..00000000000000 --- a/test/tensorrt/test_trt_marker_subtract.py +++ /dev/null @@ -1,58 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestSubtractTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data(name='x', shape=[2, 2], dtype='float32') - y = paddle.static.data(name='y', shape=[2, 2], dtype='float32') - subtract_out = paddle.subtract(x, y) - out = paddle.assign(subtract_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array([[1, 2], [7, 8]]).astype("float32"), - "y": np.array([[5, 6], [3, 4]]).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.subtract"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_transpose.py b/test/tensorrt/test_trt_marker_transpose.py deleted file mode 100644 index 206846c1f26bde..00000000000000 --- a/test/tensorrt/test_trt_marker_transpose.py +++ /dev/null @@ -1,68 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestTransposeTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[2, 3, 4], dtype='float32' - ) - perm0 = [1, 0, 2] - transpose_out = paddle.transpose(x, perm0) - out = paddle.assign(transpose_out) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.array( - [ - [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]], - [ - [13, 14, 15, 16], - [17, 18, 19, 20], - [21, 22, 23, 24], - ], - ] - ).astype("float32"), - } - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.transpose"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() diff --git a/test/tensorrt/test_trt_marker_unsqueeze.py b/test/tensorrt/test_trt_marker_unsqueeze.py deleted file mode 100644 index d142af04adc73e..00000000000000 --- a/test/tensorrt/test_trt_marker_unsqueeze.py +++ /dev/null @@ -1,60 +0,0 @@ -# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -import unittest - -import numpy as np -from pass_test import PassTest - -import paddle -from paddle.base import core - - -class TestUnSqueezeTRTPattern(PassTest): - def is_program_valid(self, program=None): - return True - - def sample_program(self): - with paddle.pir_utils.IrGuard(): - main_prog = paddle.static.Program() - start_prog = paddle.static.Program() - with paddle.pir.core.program_guard(main_prog, start_prog): - x = paddle.static.data( - name='x', shape=[-1, 10], dtype='float32' - ) - unsqueeze_out = paddle.unsqueeze(x, axis=[0, 2]) - unsqueeze_out_ = paddle.unsqueeze_(unsqueeze_out, axis=0) - out = paddle.assign(unsqueeze_out_) - self.pass_attr_list = [{'trt_op_marker_pass': {}}] - self.feeds = { - "x": np.random.random([5, 10]).astype("float32"), - } - - self.fetch_list = [out] - self.valid_op_map = { - "pd_op.fusion_transpose_flatten_concat": 0, - } - yield [main_prog, start_prog], False - - def setUp(self): - if core.is_compiled_with_cuda(): - self.places.append(paddle.CUDAPlace(0)) - self.trt_expected_ops = {"pd_op.unsqueeze"} - - def test_check_output(self): - self.check_pass_correct() - - -if __name__ == '__main__': - unittest.main() From c1587fe1ca24cc0c7005707dc88f1716c65e566e Mon Sep 17 00:00:00 2001 From: Hanyonggong <1229369094@qq.com> Date: Tue, 5 Nov 2024 12:20:52 +0000 Subject: [PATCH 02/12] rm pass_test --- test/tensorrt/pass_test.py | 134 ------------------------------------- 1 file changed, 134 deletions(-) delete mode 100644 test/tensorrt/pass_test.py diff --git a/test/tensorrt/pass_test.py b/test/tensorrt/pass_test.py deleted file mode 100644 index b917cde58b0944..00000000000000 --- a/test/tensorrt/pass_test.py +++ /dev/null @@ -1,134 +0,0 @@ -# Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import abc -import unittest - -import numpy as np - -import paddle -from paddle import pir - - -class PassTest(unittest.TestCase): - @classmethod - def setUpClass(self): - self.feeds = None - self.fetch_list = None - self.valid_op_map = {} - self.pir_program = None - self.places = [] - self.skip_accuracy_verification = False - self.pass_attr_list = [] # pass_name:pass_attr(defalut:None) - self.trt_expected_ops = {} - self.check_trt_attr = True # Open and check by default - - def run_pir_pass(self, program): - pm = pir.PassManager(opt_level=4) - pm.enable_print_statistics() - pm.enable_ir_printing() - for pass_item in self.pass_attr_list: - for pass_name, pass_attr in pass_item.items(): - pm.add_pass(pass_name, pass_attr) - pm.run(program) - - if self.check_trt_attr: - self.check_trt_attributes(program) - return program - - def check_trt_attributes(self, program): - trt_attr_found = False - for op in program.global_block().ops: - if op.name() in self.trt_expected_ops: - if op.has_attr("__l_trt__"): - trt_attr_found = True - break - if not trt_attr_found: - self.fail( - "No operation in the program has the '__l_trt__' attribute." 
- ) - - def check_fused_ops(self, program): - self.assertTrue( - len(self.valid_op_map) != 0, - "self.fuse_op_map cannot be empty!", - ) - op_names = [op.name() for op in program.global_block().ops] - for valid_op_name, valid_op_count in self.valid_op_map.items(): - actual_valid_op_count = op_names.count(valid_op_name) - self.assertTrue( - valid_op_count == actual_valid_op_count, - f"Checking of the number of fused operator < {valid_op_name} > failed. " - f"Expected: {valid_op_count}, Received: {actual_valid_op_count}", - ) - - @abc.abstractmethod - def sample_program(self): - """ - Generate all pir grogram - """ - raise NotImplementedError - - def run_program(self, executor, startup_program, main_program): - with paddle.pir_utils.IrGuard(): - with paddle.static.program_guard(startup_program, main_program): - fetches = executor.run( - main_program, - feed=self.feeds, - fetch_list=main_program.list_vars()[-1], - ) - return fetches - - def compare_accuracy( - self, baseline_data, actual_data, atol=1e-5, rtol=1e-5 - ): - self.assertTrue( - len(baseline_data) == len(actual_data), - f"The output baseline_data are not equal, the baseline output_data is {len(baseline_data)}, but got {len(actual_data)}", - ) - for i in range(len(baseline_data)): - self.assertEqual( - baseline_data[i].shape, - actual_data[i].shape, - f"The output shapes are not equal, the baseline shape is {baseline_data[i].shape}, but got {actual_data[i].shape}", - ) - np.testing.assert_allclose( - baseline_data[i], actual_data[i], atol=atol, rtol=rtol - ) - - def check_pass_correct(self, atol=1e-5, rtol=1e-5): - for place in self.places: - for program, need_translate_to_pir in self.sample_program(): - main_program = program[0] - startup_program = program[1] - if need_translate_to_pir: - main_program = pir.translate_to_pir(main_program.desc) - with paddle.pir_utils.IrGuard(): - with paddle.static.program_guard( - main_program, startup_program - ): - executor = paddle.static.Executor(place) - executor.run(startup_program) - baseline_fetch = self.run_program( - executor, startup_program, main_program - ) - main_program = self.run_pir_pass(main_program) - self.check_fused_ops(main_program) - actual_fetch = self.run_program( - executor, startup_program, main_program - ) - if self.skip_accuracy_verification is False: - self.compare_accuracy( - baseline_fetch, actual_fetch, atol, rtol - ) From d4d9a0906a0af8be2eabdbea36b638a76a8765a3 Mon Sep 17 00:00:00 2001 From: Hanyonggong <1229369094@qq.com> Date: Tue, 5 Nov 2024 12:53:42 +0000 Subject: [PATCH 03/12] change np --- test/tensorrt/test_converter_activation.py | 8 ++-- test/tensorrt/test_converter_attribute.py | 2 +- test/tensorrt/test_converter_logic.py | 16 +++---- test/tensorrt/test_converter_manipulation.py | 20 ++++---- test/tensorrt/test_converter_math.py | 48 ++++++++++---------- test/tensorrt/test_converter_ops.py | 2 +- test/tensorrt/test_converter_others.py | 8 ++-- test/tensorrt/test_converter_search.py | 2 +- test/tensorrt/test_converter_stat.py | 4 +- 9 files changed, 55 insertions(+), 55 deletions(-) diff --git a/test/tensorrt/test_converter_activation.py b/test/tensorrt/test_converter_activation.py index b2a9a9ef628737..58d620ef72e1b2 100644 --- a/test/tensorrt/test_converter_activation.py +++ b/test/tensorrt/test_converter_activation.py @@ -38,7 +38,7 @@ class TestHardSwishTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.nn.functional.hardswish self.api_args = { - "x": np.random.randn(2, 3).astype(np.float32), + "x": np.random.randn(2, 
3).astype("float32"), } self.program_config = {"feed_list": ["x"]} self.min_shape = {"x": [1, 3], "y": [1, 3]} @@ -51,7 +51,7 @@ def test_trt_result(self): class TestRELUTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.nn.functional.relu - self.api_args = {"x": np.random.randn(3).astype(np.float32)} + self.api_args = {"x": np.random.randn(3).astype("float32")} self.program_config = {"feed_list": ["x"]} self.min_shape = {"x": [1]} self.max_shape = {"x": [5]} @@ -63,7 +63,7 @@ def test_trt_result(self): class TestTANHTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.tanh - self.api_args = {"x": np.random.randn(3).astype(np.float32)} + self.api_args = {"x": np.random.randn(3).astype("float32")} self.program_config = {"feed_list": ["x"]} self.min_shape = {"x": [1]} self.max_shape = {"x": [5]} @@ -76,7 +76,7 @@ class TestSigmoidTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.nn.functional.sigmoid self.api_args = { - "x": np.random.randn(2, 3).astype(np.float32), + "x": np.random.randn(2, 3).astype("float32"), } self.program_config = {"feed_list": ["x"]} self.min_shape = {"x": [1, 3], "y": [1, 3]} diff --git a/test/tensorrt/test_converter_attribute.py b/test/tensorrt/test_converter_attribute.py index e2514c3f700745..dd8f8e70f67d8f 100644 --- a/test/tensorrt/test_converter_attribute.py +++ b/test/tensorrt/test_converter_attribute.py @@ -24,7 +24,7 @@ class TestShapeTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.shape self.api_args = { - "x": np.random.randn(2, 3).astype(np.float32), + "x": np.random.randn(2, 3).astype("float32"), } self.program_config = {"feed_list": ["x"]} self.min_shape = {"x": [1, 3]} diff --git a/test/tensorrt/test_converter_logic.py b/test/tensorrt/test_converter_logic.py index 6300dd25bfb103..a46a2f005cd6a0 100644 --- a/test/tensorrt/test_converter_logic.py +++ b/test/tensorrt/test_converter_logic.py @@ -24,8 +24,8 @@ class TestGreaterThanFloat32TRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.greater_than self.api_args = { - "x": np.random.randn(2, 3).astype(np.float32), - "y": np.random.randn(3).astype(np.float32), + "x": np.random.randn(2, 3).astype("float32"), + "y": np.random.randn(3).astype("float32"), } self.program_config = {"feed_list": ["x", "y"]} self.min_shape = {"x": [1, 3], "y": [3]} @@ -39,8 +39,8 @@ class TestGreaterThanInt64TRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.greater_than self.api_args = { - "x": np.random.randn(3).astype(np.int64), - "y": np.random.randn(3).astype(np.int64), + "x": np.random.randn(3).astype("float32"), + "y": np.random.randn(3).astype("float32"), } self.program_config = {"feed_list": ["x", "y"]} self.min_shape = {"x": [1], "y": [1]} @@ -54,8 +54,8 @@ class TestLessThanFloat32TRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.less_than self.api_args = { - "x": np.random.randn(2, 3).astype(np.float32), - "y": np.random.randn(3).astype(np.float32), + "x": np.random.randn(2, 3).astype("float32"), + "y": np.random.randn(3).astype("float32"), } self.program_config = {"feed_list": ["x", "y"]} self.min_shape = {"x": [1, 3], "y": [3]} @@ -69,8 +69,8 @@ class TestLessThanInt64TRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.less_than self.api_args = { - "x": np.random.randn(3).astype(np.int64), - "y": np.random.randn(3).astype(np.int64), + "x": np.random.randn(3).astype("int64"), + "y": np.random.randn(3).astype("int64"), } self.program_config = 
{"feed_list": ["x", "y"]} self.min_shape = {"x": [1], "y": [1]} diff --git a/test/tensorrt/test_converter_manipulation.py b/test/tensorrt/test_converter_manipulation.py index 0a1c892156bf77..914f2b6a7071b8 100644 --- a/test/tensorrt/test_converter_manipulation.py +++ b/test/tensorrt/test_converter_manipulation.py @@ -25,8 +25,8 @@ class TestCast0TRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.cast self.api_args = { - "x": np.random.randn(7, 3).astype(np.float32), - "out_dtype": np.bool_, + "x": np.random.randn(7, 3).astype("float32"), + "out_dtype": "bool", } self.program_config = {"feed_list": ["x"]} self.min_shape = {"x": [3, 3]} @@ -40,8 +40,8 @@ class TestCast1TRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.cast self.api_args = { - "x": np.random.randn(7, 3).astype(np.int32), - "out_dtype": np.int64, + "x": np.random.randn(7, 3).astype("float16"), + "out_dtype": "int32", } self.program_config = {"feed_list": ["x"]} self.min_shape = {"x": [3, 3]} @@ -55,8 +55,8 @@ class TestCast2TRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.cast self.api_args = { - "x": np.random.randn(7, 3).astype(np.float32), - "out_dtype": np.int64, + "x": np.random.randn(7, 3).astype("float32"), + "out_dtype": "int64", } self.program_config = {"feed_list": ["x"]} self.min_shape = {"x": [3, 3]} @@ -224,7 +224,7 @@ class TestSplitWithNumTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.split self.api_args = { - "x": np.random.randn(3, 9, 5).astype(np.float32), + "x": np.random.randn(3, 9, 5).astype("float32"), "num_or_sections": 3, "axis": 1, } @@ -240,7 +240,7 @@ class TestSplitWithNumAxisTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.split self.api_args = { - "x": np.random.randn(3, 9, 5).astype(np.float32), + "x": np.random.randn(3, 9, 5).astype("float32"), "num_or_sections": 3, "axis": np.array([1]).astype("int64"), } @@ -256,7 +256,7 @@ class TestSplitWithNumAllTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.split self.api_args = { - "x": np.random.randn(1, 2).astype(np.float32), + "x": np.random.randn(1, 2).astype("float32"), "num_or_sections": 2, "axis": np.array([1]).astype("int64"), } @@ -272,7 +272,7 @@ class TestSplitWithNumNegativeAxisTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.split self.api_args = { - "x": np.random.randn(3, 9, 5).astype(np.float32), + "x": np.random.randn(3, 9, 5).astype("float32"), "num_or_sections": 3, "axis": -2, } diff --git a/test/tensorrt/test_converter_math.py b/test/tensorrt/test_converter_math.py index c5b474ccad4e0a..920ba0b03d7656 100644 --- a/test/tensorrt/test_converter_math.py +++ b/test/tensorrt/test_converter_math.py @@ -24,7 +24,7 @@ class TestMaxTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.max self.api_args = { - "x": np.random.randn(2, 4).astype(np.float32), + "x": np.random.randn(2, 4).astype("float32"), "axis": [0, 1], } self.program_config = {"feed_list": ["x"]} @@ -39,8 +39,8 @@ class TestDivideTRTPattern(TensorRTBaseTest): def setUp(self): self.python_api = paddle.divide self.api_args = { - "x": np.random.randn(2, 3).astype(np.float32), - "y": np.random.randn(2, 3).astype(np.float32), + "x": np.random.randn(2, 3).astype("float32"), + "y": np.random.randn(2, 3).astype("float32"), } self.program_config = {"feed_list": ["x", "y"]} self.min_shape = {"x": [1, 3], "y": [1, 3]} @@ -54,8 +54,8 @@ class TestMultiplyTRTPattern(TensorRTBaseTest): def setUp(self): 
         self.python_api = paddle.multiply
         self.api_args = {
-            "x": np.random.randn(2, 3).astype(np.float32),
-            "y": np.random.randn(2, 3).astype(np.float32),
+            "x": np.random.randn(2, 3).astype("float32"),
+            "y": np.random.randn(2, 3).astype("float32"),
         }
         self.program_config = {"feed_list": ["x", "y"]}
         self.min_shape = {"x": [1, 3], "y": [1, 3]}
@@ -69,8 +69,8 @@ class TestSubstractTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.subtract
         self.api_args = {
-            "x": np.random.randn(2, 3).astype(np.float32),
-            "y": np.random.randn(2, 3).astype(np.float32),
+            "x": np.random.randn(2, 3).astype("float32"),
+            "y": np.random.randn(2, 3).astype("float32"),
         }
         self.program_config = {"feed_list": ["x", "y"]}
         self.min_shape = {"x": [1, 3], "y": [1, 3]}
@@ -84,8 +84,8 @@ class TestAddTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.add
         self.api_args = {
-            "x": np.random.randn(2, 3).astype(np.float32),
-            "y": np.random.randn(2, 3).astype(np.float32),
+            "x": np.random.randn(2, 3).astype("float32"),
+            "y": np.random.randn(2, 3).astype("float32"),
         }
         self.program_config = {"feed_list": ["x", "y"]}
         self.min_shape = {"x": [1, 3], "y": [1, 3]}
@@ -99,16 +99,16 @@ class TestRemainderFloatTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.remainder
         self.api_args = {
-            "x": np.random.randn(2, 3).astype(np.float32),
+            "x": np.random.randn(2, 3).astype("float32"),
             "y": np.random.uniform(low=0.1, high=1, size=(2, 3)).astype(
-                np.float32
+                "float32"
             ),  # Ensure y is non-zero
         }
         self.dynamic_shape_data = {
-            "x": lambda shape: np.random.randn(*shape).astype(np.float32),
+            "x": lambda shape: np.random.randn(*shape).astype("float32"),
             "y": lambda shape: np.random.uniform(
                 low=0.1, high=1, size=shape
-            ).astype(np.float32),
+            ).astype("float32"),
         }
         self.program_config = {"feed_list": ["x", "y"]}
         self.min_shape = {"x": [1, 3], "y": [1, 3]}
@@ -122,17 +122,17 @@ class TestRemainderIntTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.remainder
         self.api_args = {
-            "x": np.random.randint(1, 10, size=(2, 3)).astype(np.int64),
+            "x": np.random.randint(1, 10, size=(2, 3)).astype("int64"),
             "y": np.random.randint(1, 10, size=(2, 3)).astype(
-                np.int64
+                "int64"
             ),  # Ensure y is non-zero
         }
         self.dynamic_shape_data = {
             "x": lambda shape: np.random.randint(1, 10, size=shape).astype(
-                np.int64
+                "int64"
             ),
             "y": lambda shape: np.random.randint(1, 10, size=shape).astype(
-                np.int64
+                "int64"
             ),
         }
         self.program_config = {"feed_list": ["x", "y"]}
@@ -147,7 +147,7 @@ class TestMinTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.min
         self.api_args = {
-            "x": np.random.randn(2, 4).astype(np.float32),
+            "x": np.random.randn(2, 4).astype("float32"),
             "axis": [0, 1],
         }
         self.program_config = {"feed_list": ["x"]}
@@ -162,7 +162,7 @@ class TestSumTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.sum
         self.api_args = {
-            "x": np.random.randn(2, 4, 6).astype(np.int64),
+            "x": np.random.randn(2, 4, 6).astype("int64"),
             "axis": [1, 1],
         }
         self.program_config = {"feed_list": ["x"]}
@@ -177,7 +177,7 @@ class TestSum1TRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.sum
         self.api_args = {
-            "x": np.random.randn(2, 4, 6).astype(np.float32),
+            "x": np.random.randn(2, 4, 6).astype("float32"),
             "axis": [1, 1],
         }
         self.program_config = {"feed_list": ["x"]}
@@ -192,7 +192,7 @@ class TestAnyTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.any
         self.api_args = {
-            "x": np.random.randn(2, 3, 2).astype(np.bool_),
+            "x": np.random.randn(2, 3, 2).astype("bool"),
             "axis": [1, 1],
             "keepdim": True,
         }
@@ -208,7 +208,7 @@ class TestAny1TRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.any
         self.api_args = {
-            "x": np.random.randn(2, 3, 2).astype(np.bool_),
+            "x": np.random.randn(2, 3, 2).astype("bool"),
             "axis": [1, 1],
             "keepdim": False,
         }
@@ -224,7 +224,7 @@ class TestAllTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.all
         self.api_args = {
-            "x": np.random.randn(2, 3, 2).astype(np.bool_),
+            "x": np.random.randn(2, 3, 2).astype("bool"),
             "axis": [1, 1],
             "keepdim": True,
         }
@@ -240,7 +240,7 @@ class TestAll1TRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.all
         self.api_args = {
-            "x": np.random.randn(2, 3, 2).astype(np.bool_),
+            "x": np.random.randn(2, 3, 2).astype("bool"),
             "axis": [1, 1],
             "keepdim": False,
         }
diff --git a/test/tensorrt/test_converter_ops.py b/test/tensorrt/test_converter_ops.py
index 2499d0221a9763..f90363748cdfd7 100644
--- a/test/tensorrt/test_converter_ops.py
+++ b/test/tensorrt/test_converter_ops.py
@@ -24,7 +24,7 @@ class TestSqrtTRTPattern(TensorRTBaseTest):
     def setUp(self):
        self.python_api = paddle.sqrt
        self.api_args = {
-            "x": np.random.randn(7, 3).astype(np.float32),
+            "x": np.random.randn(7, 3).astype("float32"),
        }
        self.program_config = {"feed_list": ["x"]}
        self.min_shape = {"x": [3, 3]}
diff --git a/test/tensorrt/test_converter_others.py b/test/tensorrt/test_converter_others.py
index e8d7147d882bba..8f45b7b8c8100b 100644
--- a/test/tensorrt/test_converter_others.py
+++ b/test/tensorrt/test_converter_others.py
@@ -56,8 +56,8 @@ class TestMulticlassNMS3TRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = multiclass_nms3
         self.api_args = {
-            "bboxes": np.random.randn(2, 5, 4).astype(np.float32),
-            "scores": np.random.randn(2, 4, 5).astype(np.float32),
+            "bboxes": np.random.randn(2, 5, 4).astype("float32"),
+            "scores": np.random.randn(2, 4, 5).astype("float32"),
         }
         self.program_config = {"feed_list": ["bboxes", "scores"]}
         self.min_shape = {"bboxes": [1, 5, 4], "scores": [1, 4, 5]}
@@ -71,8 +71,8 @@ class TestMulticlassNMS3Marker(TensorRTBaseTest):
     def setUp(self):
         self.python_api = multiclass_nms3
         self.api_args = {
-            "bboxes": np.random.randn(2, 5, 4, 1).astype(np.float32),
-            "scores": np.random.randn(2, 4, 5, 1).astype(np.float32),
+            "bboxes": np.random.randn(2, 5, 4, 1).astype("float32"),
+            "scores": np.random.randn(2, 4, 5, 1).astype("float32"),
         }
         self.program_config = {"feed_list": ["bboxes", "scores"]}
         self.target_marker_op = "pd_op.multiclass_nms3"
diff --git a/test/tensorrt/test_converter_search.py b/test/tensorrt/test_converter_search.py
index 5e5f34ab0abe71..6ca535bd58300f 100644
--- a/test/tensorrt/test_converter_search.py
+++ b/test/tensorrt/test_converter_search.py
@@ -24,7 +24,7 @@ class TestArgmaxTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.argmax
         self.api_args = {
-            "x": np.random.randn(2, 3).astype(np.float32),
+            "x": np.random.randn(2, 3).astype("float32"),
             "axis": -1,
         }
         self.program_config = {"feed_list": ["x"]}
diff --git a/test/tensorrt/test_converter_stat.py b/test/tensorrt/test_converter_stat.py
index 8ff0a02422a22e..4ea43f9bbb2f6c 100644
--- a/test/tensorrt/test_converter_stat.py
+++ b/test/tensorrt/test_converter_stat.py
@@ -24,7 +24,7 @@ class TestMean0TRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.mean
         self.api_args = {
-            "x": np.random.randn(2, 3).astype(np.float32),
+            "x": np.random.randn(2, 3).astype("float32"),
             "axis": [1],
             "keepdim": False,
         }
@@ -40,7 +40,7 @@ class TestMean1TRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.mean
         self.api_args = {
-            "x": np.random.randn(2, 3, 2).astype(np.float32),
+            "x": np.random.randn(2, 3, 2).astype("float32"),
             "axis": [1, 1],
             "keepdim": True,
         }

From 9cc1137dd2a9f71671ec479e991a70e473c1afba Mon Sep 17 00:00:00 2001
From: Hanyonggong <1229369094@qq.com>
Date: Tue, 5 Nov 2024 12:59:24 +0000
Subject: [PATCH 04/12] change file

---
 test/tensorrt/test_converter_logic.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/tensorrt/test_converter_logic.py b/test/tensorrt/test_converter_logic.py
index a46a2f005cd6a0..af5ec9e643687d 100644
--- a/test/tensorrt/test_converter_logic.py
+++ b/test/tensorrt/test_converter_logic.py
@@ -39,8 +39,8 @@ class TestGreaterThanInt64TRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.greater_than
         self.api_args = {
-            "x": np.random.randn(3).astype("float32"),
-            "y": np.random.randn(3).astype("float32"),
+            "x": np.random.randn(3).astype("int64"),
+            "y": np.random.randn(3).astype("int64"),
         }
         self.program_config = {"feed_list": ["x", "y"]}
         self.min_shape = {"x": [1], "y": [1]}

From 790b729aca48e63a94f478b5d83a2b502014009a Mon Sep 17 00:00:00 2001
From: Hanyonggong <1229369094@qq.com>
Date: Tue, 12 Nov 2024 12:56:07 +0000
Subject: [PATCH 05/12] fix bug

---
 test/tensorrt/test_converter_common.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/tensorrt/test_converter_common.py b/test/tensorrt/test_converter_common.py
index f6f738324fcd9a..1c320374b3f526 100644
--- a/test/tensorrt/test_converter_common.py
+++ b/test/tensorrt/test_converter_common.py
@@ -166,7 +166,7 @@ def setUp(self):
         self.python_api = bilinear_python_api
         self.api_args = {
             "x": np.random.random([2, 3, 6, 10]).astype("float32"),
-            "OutSize": np.array([12, 12], dtype="int64"),
+            "OutSize": np.array([12, 12], dtype="int32"),
             "SizeTensor": None,
             "Scale": None,
             "attrs": {

From 8d718125d0d872eeb7ff7f110757907771b7c614 Mon Sep 17 00:00:00 2001
From: Hanyonggong <1229369094@qq.com>
Date: Thu, 5 Dec 2024 06:58:08 +0000
Subject: [PATCH 06/12] fix conflict

---
 test/tensorrt/test_converter_manipulation.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/tensorrt/test_converter_manipulation.py b/test/tensorrt/test_converter_manipulation.py
index c6f6e7e16a7f03..a319b324708eac 100644
--- a/test/tensorrt/test_converter_manipulation.py
+++ b/test/tensorrt/test_converter_manipulation.py
@@ -409,8 +409,8 @@ class TestTileTRTPatternCase1(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.tile
         self.api_args = {
-            "x": np.random.randn(1, 2, 3).astype("int32"),
-            "repeat_times": np.array([1, 2, 3, 4]).astype("int32"),
+            "x": np.random.randn(1, 2, 3).astype("int64"),
+            "repeat_times": np.array([1, 2, 3, 4]).astype("int64"),
         }
         self.program_config = {"feed_list": ["x", "repeat_times"]}
         self.min_shape = {"x": [1, 2, 3]}

From 69793e09c1b2a567ddbf1450f42c84a8ea586760 Mon Sep 17 00:00:00 2001
From: Hanyonggong <1229369094@qq.com>
Date: Thu, 5 Dec 2024 12:06:41 +0000
Subject: [PATCH 07/12] add test mod

---
 test/tensorrt/test_converter_common.py |  6 +--
 test/tensorrt/test_converter_math.py   |  2 +-
 test/tensorrt/test_converter_others.py | 28 ++++++-------
 test/tensorrt/test_trt_marker_shape.py | 56 --------------------------
 4 files changed, 18 insertions(+), 74 deletions(-)
 delete mode 100644 test/tensorrt/test_trt_marker_shape.py

diff --git a/test/tensorrt/test_converter_common.py b/test/tensorrt/test_converter_common.py
index 1c320374b3f526..017a20f3e7040c 100644
--- a/test/tensorrt/test_converter_common.py
+++ b/test/tensorrt/test_converter_common.py
@@ -166,7 +166,7 @@ def setUp(self):
         self.python_api = bilinear_python_api
         self.api_args = {
             "x": np.random.random([2, 3, 6, 10]).astype("float32"),
-            "OutSize": np.array([12, 12], dtype="int32"),
+            "OutSize": np.array([12, 12], dtype="int64"),
             "SizeTensor": None,
             "Scale": None,
             "attrs": {
@@ -225,8 +225,8 @@ def setUp(self):
             "x": x_nchw,
             "OutSize": None,
             "SizeTensor": [
-                np.array([12], dtype="int32"),
-                np.array([12], dtype="int32"),
+                np.array([12], dtype="int64"),
+                np.array([12], dtype="int64"),
             ],
             "Scale": None,
             "attrs": {
diff --git a/test/tensorrt/test_converter_math.py b/test/tensorrt/test_converter_math.py
index 4e3b44dbecb036..2cb107363f8a34 100644
--- a/test/tensorrt/test_converter_math.py
+++ b/test/tensorrt/test_converter_math.py
@@ -308,7 +308,7 @@ class TestLogIntTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.log
         self.api_args = {
-            "x": np.random.randn(2, 3).astype("int32"),
+            "x": np.random.randn(2, 3).astype("int64"),
         }
         self.program_config = {"feed_list": ["x"]}
         self.min_shape = {"x": [1, 3]}
diff --git a/test/tensorrt/test_converter_others.py b/test/tensorrt/test_converter_others.py
index 5317eb1bf9925a..a26b5546c9a719 100644
--- a/test/tensorrt/test_converter_others.py
+++ b/test/tensorrt/test_converter_others.py
@@ -158,7 +158,7 @@ class TestSetValueTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = set_value
         self.api_args = {
-            "x": np.ones([10, 2]).astype(np.float32),
+            "x": np.ones([10, 2]).astype("float32"),
             "starts": [0],
             "ends": [1],
             "steps": [1],
@@ -181,7 +181,7 @@ class TestSetValueMarkerCase1(TensorRTBaseTest):
     def setUp(self):
         self.python_api = set_value
         self.api_args = {
-            "x": np.ones([10, 2]).astype(np.float32),
+            "x": np.ones([10, 2]).astype("float32"),
             "starts": [0, 0],
             "ends": [1, 1],
             "steps": [1, 1],
@@ -204,7 +204,7 @@ class TestSetValueMarkerCase2(TensorRTBaseTest):
     def setUp(self):
         self.python_api = set_value
         self.api_args = {
-            "x": np.ones([10, 2]).astype(np.float32),
+            "x": np.ones([10, 2]).astype("float32"),
             "starts": [0],
             "ends": [1],
             "steps": [1],
@@ -227,7 +227,7 @@ class TestSetValueMarkerCase3(TensorRTBaseTest):
     def setUp(self):
         self.python_api = set_value
         self.api_args = {
-            "x": np.ones([10, 2]).astype(np.float32),
+            "x": np.ones([10, 2]).astype("float32"),
             "starts": [0],
             "ends": [1],
             "steps": [1],
@@ -250,7 +250,7 @@ class TestSetValueMarkerCase4(TensorRTBaseTest):
     def setUp(self):
         self.python_api = set_value
         self.api_args = {
-            "x": np.ones([10, 2]).astype(np.float32),
+            "x": np.ones([10, 2]).astype("float32"),
             "starts": [0],
             "ends": [1],
             "steps": [1],
@@ -273,8 +273,8 @@ class TestSetValueMarkerCase5(TensorRTBaseTest):
     def setUp(self):
         self.python_api = set_value
         self.api_args = {
-            "x": np.ones([10, 2]).astype(np.float32),
-            "starts": np.zeros([1]).astype(np.int32),
+            "x": np.ones([10, 2]).astype("float32"),
+            "starts": np.zeros([1]).astype("int64"),
             "ends": [1],
             "steps": [1],
             "axes": [1],
@@ -295,7 +295,7 @@ class TestSetValue_TRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = set_value_
         self.api_args = {
-            "x": np.ones([10, 2]).astype(np.float32),
+            "x": np.ones([10, 2]).astype("float32"),
             "starts": [0],
             "ends": [1],
             "steps": [1],
@@ -317,8 +317,8 @@ class TestSetValueWithTensorTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = set_value_with_tensor
         self.api_args = {
-            "x": np.ones([2, 3, 3]).astype(np.float32),
-            "values": np.random.randn(2, 2, 3).astype(np.float32),
+            "x": np.ones([2, 3, 3]).astype("float32"),
+            "values": np.random.randn(2, 2, 3).astype("float32"),
             "starts": [0],
             "ends": [2],
             "steps": [1],
@@ -340,8 +340,8 @@ class TestSetValueWithTensorMarkerCase1(TensorRTBaseTest):
     def setUp(self):
         self.python_api = set_value_with_tensor
         self.api_args = {
-            "x": np.ones([2, 3, 3]).astype(np.float32),
-            "values": np.random.randn(2, 2, 3).astype(np.int32),
+            "x": np.ones([2, 3, 3]).astype("float32"),
+            "values": np.random.randn(2, 2, 3).astype("int32"),
             "starts": [0],
             "ends": [2],
             "steps": [1],
@@ -362,8 +362,8 @@ class TestSetValueWithTensor_TRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = set_value_with_tensor_
         self.api_args = {
-            "x": np.ones([2, 3, 3]).astype(np.float32),
-            "values": np.random.randn(2, 2, 3).astype(np.float32),
+            "x": np.ones([2, 3, 3]).astype("float32"),
+            "values": np.random.randn(2, 2, 3).astype("float32"),
             "starts": [0],
             "ends": [2],
             "steps": [1],
diff --git a/test/tensorrt/test_trt_marker_shape.py b/test/tensorrt/test_trt_marker_shape.py
deleted file mode 100644
index 2f314feeb2c7a7..00000000000000
--- a/test/tensorrt/test_trt_marker_shape.py
+++ /dev/null
@@ -1,56 +0,0 @@
-# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import unittest
-
-import numpy as np
-from pass_test import PassTest
-
-import paddle
-from paddle.base import core
-
-
-class TestShapeTRTPattern(PassTest):
-    def is_program_valid(self, program=None):
-        return True
-
-    def sample_program(self):
-        with paddle.pir_utils.IrGuard():
-            main_prog = paddle.static.Program()
-            start_prog = paddle.static.Program()
-            with paddle.pir.core.program_guard(main_prog, start_prog):
-                x = paddle.static.data(name="x", shape=[2, 2], dtype="float32")
-                shape_out = paddle.shape(x)
-                out = paddle.assign(shape_out)
-                self.pass_attr_list = [{'trt_op_marker_pass': {}}]
-                self.feeds = {
-                    "x": np.array([[1, 2], [7, 8]]).astype("float32"),
-                }
-                self.fetch_list = [out]
-                self.valid_op_map = {
-                    "pd_op.fusion_transpose_flatten_concat": 0,
-                }
-                yield [main_prog, start_prog], False
-
-    def setUp(self):
-        if core.is_compiled_with_cuda():
-            self.places.append(paddle.CUDAPlace(0))
-        self.trt_expected_ops = {"pd_op.shape64"}
-
-    def test_check_output(self):
-        self.check_pass_correct()
-
-
-if __name__ == '__main__':
-    unittest.main()

From 9b5243dce540179432ed830f961cff91aef29d5f Mon Sep 17 00:00:00 2001
From: Hanyonggong <1229369094@qq.com>
Date: Fri, 6 Dec 2024 09:31:40 +0000
Subject: [PATCH 08/12] Update

---
 third_party/pybind | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/third_party/pybind b/third_party/pybind
index 3e9dfa28669416..e7c9b907394ef2 160000
--- a/third_party/pybind
+++ b/third_party/pybind
@@ -1 +1 @@
-Subproject commit 3e9dfa2866941655c56877882565e7577de6fc7b
+Subproject commit e7c9b907394ef241da7f5a8f93d3faec6adb2df5

From 0db85a0b6d28471c00bc91ad30242f693b1d7e7a Mon Sep 17 00:00:00 2001
From: Hanyonggong <1229369094@qq.com>
Date: Fri, 6 Dec 2024 10:04:17 +0000
Subject: [PATCH 09/12] fix conflict

---
 third_party/flashattn | 2 +-
 third_party/pybind    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/third_party/flashattn b/third_party/flashattn
index d8915628a941d9..6c165641f31504 160000
--- a/third_party/flashattn
+++ b/third_party/flashattn
@@ -1 +1 @@
-Subproject commit d8915628a941d946c0f962e628e28de5469ae690
+Subproject commit 6c165641f3150420b7351735ba82455ffe27d79c
diff --git a/third_party/pybind b/third_party/pybind
index e7c9b907394ef2..a2e59f0e706540 160000
--- a/third_party/pybind
+++ b/third_party/pybind
@@ -1 +1 @@
-Subproject commit e7c9b907394ef241da7f5a8f93d3faec6adb2df5
+Subproject commit a2e59f0e7065404b44dfe92a28aca47ba1378dc4

From 4890750b02d1d6523e0bda203bcb6c7c8d8d92b6 Mon Sep 17 00:00:00 2001
From: Hanyonggong <1229369094@qq.com>
Date: Fri, 6 Dec 2024 10:06:38 +0000
Subject: [PATCH 10/12] update third_party

---
 third_party/flashattn | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/third_party/flashattn b/third_party/flashattn
index 6c165641f31504..6f8ae73cd96415 160000
--- a/third_party/flashattn
+++ b/third_party/flashattn
@@ -1 +1 @@
-Subproject commit 6c165641f3150420b7351735ba82455ffe27d79c
+Subproject commit 6f8ae73cd96415c50ccc301de2696aaf5481c639

From 187f8d46af56fe39f412fcbf0d34b70b2b45b095 Mon Sep 17 00:00:00 2001
From: Hanyonggong <1229369094@qq.com>
Date: Fri, 6 Dec 2024 10:17:28 +0000
Subject: [PATCH 11/12] update flashattn

---
 third_party/flashattn | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/third_party/flashattn b/third_party/flashattn
index 6f8ae73cd96415..6ea759b3ea9563 160000
--- a/third_party/flashattn
+++ b/third_party/flashattn
@@ -1 +1 @@
-Subproject commit 6f8ae73cd96415c50ccc301de2696aaf5481c639
+Subproject commit 6ea759b3ea9563b49d92f1ae0c4cb0fb26a7b365

From dbd332e0e56a5cb9d84a6db375c7d09649ab9252 Mon Sep 17 00:00:00 2001
From: Hanyonggong <1229369094@qq.com>
Date: Fri, 13 Dec 2024 08:28:23 +0000
Subject: [PATCH 12/12] change type

---
 test/tensorrt/test_converter_common.py | 2 +-
 test/tensorrt/test_converter_math.py   | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/test/tensorrt/test_converter_common.py b/test/tensorrt/test_converter_common.py
index 017a20f3e7040c..0146c719ce1502 100644
--- a/test/tensorrt/test_converter_common.py
+++ b/test/tensorrt/test_converter_common.py
@@ -166,7 +166,7 @@ def setUp(self):
         self.python_api = bilinear_python_api
         self.api_args = {
             "x": np.random.random([2, 3, 6, 10]).astype("float32"),
-            "OutSize": np.array([12, 12], dtype="int64"),
+            "OutSize": np.array([12, 12], dtype="int32"),
             "SizeTensor": None,
             "Scale": None,
             "attrs": {
diff --git a/test/tensorrt/test_converter_math.py b/test/tensorrt/test_converter_math.py
index 2cb107363f8a34..4e3b44dbecb036 100644
--- a/test/tensorrt/test_converter_math.py
+++ b/test/tensorrt/test_converter_math.py
@@ -308,7 +308,7 @@ class TestLogIntTRTPattern(TensorRTBaseTest):
     def setUp(self):
         self.python_api = paddle.log
         self.api_args = {
-            "x": np.random.randn(2, 3).astype("int64"),
+            "x": np.random.randn(2, 3).astype("int32"),
         }
         self.program_config = {"feed_list": ["x"]}
         self.min_shape = {"x": [1, 3]}