Skip to content

Commit 11f2253

Browse files
apeskov (Alexander Peskov) and Alexander Peskov authored
Restore "pytest.mark.gpu" for RELAX tests (#16741)
* [TEST] Mark RELAX GPU tests with pytest.mark.gpu

  A missing pytest.mark.gpu prevented these tests from launching in CI.

  Signed-off-by: Alexander Peskov <[email protected]>

* fix

  Signed-off-by: Alexander Peskov <[email protected]>

* Check fp8 compute capability

  Signed-off-by: Alexander Peskov <[email protected]>

* fix func signature

  Signed-off-by: Alexander Peskov <[email protected]>

* lint

  Signed-off-by: Alexander Peskov <[email protected]>

---------

Signed-off-by: Alexander Peskov <[email protected]>
Co-authored-by: Alexander Peskov <[email protected]>
1 parent 342f472 commit 11f2253

File tree

6 files changed

+22
-31
lines changed

6 files changed

+22
-31
lines changed

tests/python/relax/test_codegen_cublas.py

Lines changed: 2 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -36,14 +36,7 @@ def reset_seed():
3636
np.random.seed(0)
3737

3838

39-
has_cublas = tvm.get_global_func("relax.ext.cublas", True)
40-
41-
cublas_enabled = pytest.mark.skipif(
42-
not has_cublas,
43-
reason="CUBLAS not enabled.",
44-
)
45-
46-
pytestmark = [cublas_enabled]
39+
pytestmark = tvm.testing.requires_cublas.marks()
4740

4841

4942
def build_and_run(mod, inputs_np, target, legalize=False, cuda_graph=False):
@@ -231,6 +224,7 @@ def test_matmul_igemm_offload(
231224
tvm.testing.assert_allclose(out, ref, rtol=1e-2, atol=1e-2)
232225

233226

227+
@tvm.testing.requires_cuda_compute_version(9)
234228
@pytest.mark.skipif(ml_dtypes is None, reason="requires ml_dtypes to be installed")
235229
@pytest.mark.parametrize(
236230
"x_shape, y_shape, transpose_y, out_dtype",

tests/python/relax/test_codegen_cudnn.py

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -34,14 +34,7 @@ def reset_seed():
3434
np.random.seed(0)
3535

3636

37-
has_cudnn = tvm.get_global_func("relax.ext.cudnn", True)
38-
39-
cudnn_enabled = pytest.mark.skipif(
40-
not has_cudnn,
41-
reason="cuDNN not enabled.",
42-
)
43-
44-
pytestmark = [cudnn_enabled]
37+
pytestmark = tvm.testing.requires_cudnn.marks()
4538

4639

4740
_activation_table = {

tests/python/relax/test_codegen_cutlass.py

Lines changed: 1 addition & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -75,14 +75,7 @@ def main(
7575
return conv2
7676

7777

78-
has_cutlass = tvm.get_global_func("relax.ext.cutlass", True)
79-
80-
cutlass_enabled = pytest.mark.skipif(
81-
not has_cutlass,
82-
reason="CUTLASS not enabled.",
83-
)
84-
85-
pytestmark = [cutlass_enabled]
78+
pytestmark = tvm.testing.requires_cutlass.marks()
8679

8780

8881
def build_and_run(mod, inputs_np, target, legalize=True, cuda_graph=False):

tests/python/relax/test_codegen_tensorrt.py

Lines changed: 11 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -43,13 +43,22 @@ def main(
4343

4444

4545
has_tensorrt = tvm.get_global_func("relax.ext.tensorrt", True)
46+
env_checker_runtime = tvm.get_global_func("relax.is_tensorrt_runtime_enabled", True)
4647

47-
tensorrt_enabled = pytest.mark.skipif(
48+
requires_tensorrt_codegen = pytest.mark.skipif(
4849
not has_tensorrt,
4950
reason="TENSORRT not enabled.",
5051
)
5152

52-
pytestmark = [tensorrt_enabled]
53+
requires_tensorrt_runtime = pytest.mark.skipif(
54+
not env_checker_runtime or not env_checker_runtime(),
55+
reason="TensorRT runtime not available",
56+
)
57+
58+
pytestmark = [
59+
requires_tensorrt_codegen,
60+
requires_tensorrt_runtime,
61+
] + tvm.testing.requires_cuda.marks()
5362

5463

5564
def build_and_run(mod, inputs_np, target, legalize=False):

tests/python/relax/test_contrib_vllm.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@
3232
reason="VLLM not enabled.",
3333
)
3434

35-
pytestmark = [vllm_enabled]
35+
pytestmark = [vllm_enabled] + tvm.testing.requires_cuda.marks()
3636

3737

3838
def build_and_run(mod, inputs_np, target, legalize=True):

tests/python/relax/test_transform_codegen_pass.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -30,17 +30,17 @@
3030
env_checker_codegen = tvm.get_global_func("relax.ext.tensorrt", True)
3131
env_checker_runtime = tvm.get_global_func("relax.is_tensorrt_runtime_enabled", True)
3232

33-
has_tensorrt_codegen = pytest.mark.skipif(
33+
requires_tensorrt_codegen = pytest.mark.skipif(
3434
not env_checker_codegen,
3535
reason="TensorRT codegen not available",
3636
)
37-
has_tensorrt_runtime = pytest.mark.skipif(
37+
requires_tensorrt_runtime = pytest.mark.skipif(
3838
not env_checker_runtime or not env_checker_runtime(),
3939
reason="TensorRT runtime not available",
4040
)
4141

4242
# Global variable in pytest that applies markers to all tests.
43-
pytestmark = [has_tensorrt_codegen, has_tensorrt_runtime]
43+
pytestmark = [requires_tensorrt_codegen] + tvm.testing.requires_cuda.marks()
4444

4545
# Target gpu
4646
target_str = "nvidia/nvidia-t4"
@@ -117,6 +117,7 @@ def setup_test():
117117

118118

119119
@tvm.testing.requires_gpu
120+
@requires_tensorrt_runtime
120121
def test_tensorrt_only(entry_func_name):
121122
mod, inputs, expected = setup_test()
122123

@@ -146,6 +147,7 @@ def test_tensorrt_only(entry_func_name):
146147

147148

148149
@tvm.testing.requires_gpu
150+
@requires_tensorrt_runtime
149151
def test_mix_use_tensorrt_and_tvm():
150152
mod, inputs, expected = setup_test()
151153

@@ -367,7 +369,7 @@ def test_no_op_for_call_to_tir():
367369
@tvm.script.ir_module
368370
class Before:
369371
@R.function
370-
def main(x: R.Tensor):
372+
def main(x: R.Tensor([4], "int64")):
371373
R.func_attr({"relax.force_pure": True})
372374
_ = Before.shape_func(x)
373375
return x

0 commit comments

Comments (0)