16 changes: 1 addition & 15 deletions test/legacy_test/test_block_multihead_attention.py
@@ -11,12 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
-import re
 import unittest

 import numpy as np
-from op_test import get_device_place, is_custom_device
+from op_test import get_cuda_version, get_device_place, is_custom_device

 import paddle
 from paddle import base
@@ -49,18 +47,6 @@
 is_sm_supported = is_sm8x or is_sm9x or is_sm7x


-def get_cuda_version():
-    result = os.popen("nvcc --version").read()
-    regex = r'release (\S+),'
-    match = re.search(regex, result)
-    if match:
-        num = str(match.group(1))
-        integer, decimal = num.split('.')
-        return int(integer) * 1000 + int(float(decimal) * 10)
-    else:
-        return -1
-
-
 def create_attn_mask(
     mask_type,
     batch_size,
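Note: the deleted helper encodes the nvcc release as major * 1000 + minor * 10, so CUDA 11.8 becomes 11080 and CUDA 12.0 becomes 12000. A minimal standalone sketch of that encoding, reusing the regex and arithmetic from the removed copies (the sample nvcc output string below is illustrative, not taken from this PR):

import re


def encode_cuda_release(nvcc_output: str) -> int:
    # Same parsing as the removed helpers: "release 11.8," captures "11.8",
    # which splits into ("11", "8") -> 11 * 1000 + int(8.0 * 10) = 11080.
    match = re.search(r'release (\S+),', nvcc_output)
    if not match:
        return -1
    integer, decimal = match.group(1).split('.')
    return int(integer) * 1000 + int(float(decimal) * 10)


# Illustrative nvcc output; prints 11080.
print(encode_cuda_release("Cuda compilation tools, release 11.8, V11.8.89"))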
16 changes: 1 addition & 15 deletions test/legacy_test/test_flash_attention_deterministic.py
@@ -11,12 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
-import re
 import unittest

 import numpy as np
-from op_test import get_device_place, is_custom_device
+from op_test import get_cuda_version, get_device_place, is_custom_device

 import paddle
 import paddle.nn.functional as F
@@ -27,18 +25,6 @@
 )


-def get_cuda_version():
-    result = os.popen("nvcc --version").read()
-    regex = r'release (\S+),'
-    match = re.search(regex, result)
-    if match:
-        num = str(match.group(1))
-        integer, decimal = num.split('.')
-        return int(integer) * 1000 + int(float(decimal) * 10)
-    else:
-        return -1
-
-
 def attention_naive(q, k, v, causal=False):
     qt = paddle.transpose(q, [0, 2, 1, 3])
     kt = paddle.transpose(k, [0, 2, 1, 3])
17 changes: 1 addition & 16 deletions test/legacy_test/test_flashmask.py
@@ -11,12 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
-import re
 import unittest

 import numpy as np
-from op_test import get_device_place, is_custom_device
+from op_test import get_cuda_version, get_device_place, is_custom_device

 import paddle
 import paddle.nn.functional as F
@@ -25,19 +23,6 @@
     flashmask_attention,
 )
-
-
-def get_cuda_version():
-    result = os.popen("nvcc --version").read()
-    regex = r'release (\S+),'
-    match = re.search(regex, result)
-    if match:
-        num = str(match.group(1))
-        integer, decimal = num.split('.')
-        return int(integer) * 1000 + int(float(decimal) * 10)
-    else:
-        return -1
-

 is_sm8x = (
     (core.is_compiled_with_cuda() or is_custom_device())
     and paddle.device.cuda.get_device_capability()[0] == 8
16 changes: 1 addition & 15 deletions test/legacy_test/test_float8.py
@@ -11,12 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
-import re
 import unittest

 import numpy as np
-from op_test import get_device, is_custom_device
+from op_test import get_cuda_version, get_device, is_custom_device

 import paddle
 from paddle.base import core
@@ -26,18 +24,6 @@
 E5M2_MAX_POS = 57344.0


-def get_cuda_version():
-    result = os.popen("nvcc --version").read()
-    regex = r'release (\S+),'
-    match = re.search(regex, result)
-    if match:
-        num = str(match.group(1))
-        integer, decimal = num.split('.')
-        return int(integer) * 1000 + int(float(decimal) * 10)
-    else:
-        return -1
-
-
 def check_fp8_support() -> bool:
     """Return if fp8 support is available"""
     gpu_arch = (
21 changes: 1 addition & 20 deletions test/legacy_test/test_memory_efficient_attention.py
@@ -14,14 +14,12 @@
 from __future__ import annotations

 import logging
-import os
 import random
-import re
 import unittest
 from typing import TYPE_CHECKING

 import numpy as np
-from op_test import get_device_place, is_custom_device
+from op_test import get_cuda_version, get_device_place, is_custom_device

 import paddle
 import paddle.incubate.nn.attn_bias as ab
@@ -37,23 +35,6 @@
 paddle.seed(2023)


-def get_cuda_version():
-    if paddle.is_compiled_with_cuda():
-        result = os.popen("nvcc --version").read()
-        regex = r'release (\S+),'
-        match = re.search(regex, result)
-        if match:
-            num = str(match.group(1))
-            integer, decimal = num.split('.')
-            return int(integer) * 1000 + int(float(decimal) * 10)
-        else:
-            return -1
-    elif is_custom_device():
-        return 13000
-    else:
-        return -1
-
-
 def create_attn_bias(
     bias_type,
     batch_size: int,
17 changes: 1 addition & 16 deletions test/legacy_test/test_sdpa_kernel.py
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import is_custom_device
+from op_test import get_cuda_version, is_custom_device

 import paddle
 import paddle.nn.functional as F
@@ -27,21 +27,6 @@
 from paddle.nn.functional import scaled_dot_product_attention


-def get_cuda_version():
-    import os
-    import re
-
-    result = os.popen("nvcc --version").read()
-    regex = r'release (\S+),'
-    match = re.search(regex, result)
-    if match:
-        num = str(match.group(1))
-        integer, decimal = num.split('.')
-        return int(integer) * 1000 + int(float(decimal) * 10)
-    else:
-        return -1
-
-
 def is_flashattn_supported():
     if (
         not paddle.base.core.is_compiled_with_cuda()
21 changes: 1 addition & 20 deletions test/legacy_test/test_sparse_addmm_op.py
@@ -11,36 +11,17 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
-import re
 import unittest

 import numpy as np
-from op_test import is_custom_device
+from op_test import get_cuda_version, is_custom_device

 import paddle
 from paddle.base.framework import in_pir_mode

 paddle.set_default_dtype('float64')


-def get_cuda_version():
-    if paddle.is_compiled_with_cuda():
-        result = os.popen("nvcc --version").read()
-        regex = r'release (\S+),'
-        match = re.search(regex, result)
-        if match:
-            num = str(match.group(1))
-            integer, decimal = num.split('.')
-            return int(integer) * 1000 + int(float(decimal) * 10)
-        else:
-            return -1
-    elif is_custom_device():
-        return 13000
-    else:
-        return -1
-
-
 class TestAddmm(unittest.TestCase):
     # input: dense, x: sparse, y: dense, out: dense
     def check_result(self, input_shape, x_shape, y_shape, format):
21 changes: 1 addition & 20 deletions test/legacy_test/test_sparse_matmul_op.py
@@ -11,37 +11,18 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
-import re
 import unittest

 import numpy as np
 import scipy.sparse as sp
-from op_test import is_custom_device
+from op_test import get_cuda_version, is_custom_device

 import paddle
 from paddle.base.framework import in_pir_mode

 paddle.set_default_dtype('float64')


-def get_cuda_version():
-    if paddle.is_compiled_with_cuda():
-        result = os.popen("nvcc --version").read()
-        regex = r'release (\S+),'
-        match = re.search(regex, result)
-        if match:
-            num = str(match.group(1))
-            integer, decimal = num.split('.')
-            return int(integer) * 1000 + int(float(decimal) * 10)
-        else:
-            return -1
-    elif is_custom_device():
-        return 13000
-    else:
-        return -1
-
-
 class TestMatmulSparseDense(unittest.TestCase):
     # x: sparse, y: dense, out: dense
     def check_result(self, x_shape, y_shape, format):
(file name not shown)
@@ -11,8 +11,6 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import os
-import re
 import unittest

 import numpy as np
@@ -28,23 +26,6 @@
 paddle.seed(2023)


-def get_cuda_version():
-    if paddle.is_compiled_with_cuda():
-        result = os.popen("nvcc --version").read()
-        regex = r'release (\S+),'
-        match = re.search(regex, result)
-        if match:
-            num = str(match.group(1))
-            integer, decimal = num.split('.')
-            return int(integer) * 1000 + int(float(decimal) * 10)
-        else:
-            return -1
-    elif is_custom_device():
-        return 13000
-    else:
-        return -1
-
-
 def get_cuda_arch():
     if paddle.is_compiled_with_cuda():
         return paddle.device.cuda.get_device_capability()[0]
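With the duplicates gone, every file above imports get_cuda_version from op_test. The shared definition itself is not part of this diff; judging from the superset variant deleted above (the one that also handles custom devices), it presumably matches the sketch below, where is_custom_device() is op_test's own local helper rather than an import:

import os
import re

import paddle


def get_cuda_version():
    # Presumed shared helper in test/legacy_test/op_test.py; mirrors the
    # copies deleted in this PR (actual definition not shown in the diff).
    if paddle.is_compiled_with_cuda():
        result = os.popen("nvcc --version").read()
        match = re.search(r'release (\S+),', result)
        if match:
            integer, decimal = match.group(1).split('.')
            return int(integer) * 1000 + int(float(decimal) * 10)
        return -1
    elif is_custom_device():  # defined locally in op_test
        return 13000  # sentinel high version so custom devices pass CUDA gates
    return -1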