30 | 30 | env_checker_codegen = tvm.get_global_func("relax.ext.tensorrt", True) |
31 | 31 | env_checker_runtime = tvm.get_global_func("relax.is_tensorrt_runtime_enabled", True) |
32 | 32 |
33 | | -has_tensorrt_codegen = pytest.mark.skipif( |
| 33 | +requires_tensorrt_codegen = pytest.mark.skipif( |
34 | 34 | not env_checker_codegen, |
35 | 35 | reason="TensorRT codegen not available", |
36 | 36 | ) |
37 | | -has_tensorrt_runtime = pytest.mark.skipif( |
| 37 | +requires_tensorrt_runtime = pytest.mark.skipif( |
38 | 38 | not env_checker_runtime or not env_checker_runtime(), |
39 | 39 | reason="TensorRT runtime not available", |
40 | 40 | ) |
41 | 41 |
42 | 42 | # Global variable in pytest that applies markers to all tests. |
43 | | -pytestmark = [has_tensorrt_codegen, has_tensorrt_runtime] |
| 43 | +pytestmark = [requires_tensorrt_codegen] + tvm.testing.requires_cuda.marks() |
44 | 44 |
45 | 45 | # Target gpu |
46 | 46 | target_str = "nvidia/nvidia-t4" |
@@ -117,6 +117,7 @@ def setup_test(): |
117 | 117 |
118 | 118 |
119 | 119 | @tvm.testing.requires_gpu |
| 120 | +@requires_tensorrt_runtime |
120 | 121 | def test_tensorrt_only(entry_func_name): |
121 | 122 | mod, inputs, expected = setup_test() |
122 | 123 |
@@ -146,6 +147,7 @@ def test_tensorrt_only(entry_func_name): |
146 | 147 |
147 | 148 |
148 | 149 | @tvm.testing.requires_gpu |
| 150 | +@requires_tensorrt_runtime |
149 | 151 | def test_mix_use_tensorrt_and_tvm(): |
150 | 152 | mod, inputs, expected = setup_test() |
151 | 153 |
@@ -367,7 +369,7 @@ def test_no_op_for_call_to_tir(): |
367 | 369 | @tvm.script.ir_module |
368 | 370 | class Before: |
369 | 371 | @R.function |
370 | | - def main(x: R.Tensor): |
| 372 | + def main(x: R.Tensor([4], "int64")): |
371 | 373 | R.func_attr({"relax.force_pure": True}) |
372 | 374 | _ = Before.shape_func(x) |
373 | 375 | return x |