diff --git a/ci/jenkins/docker-images.ini b/ci/jenkins/docker-images.ini
index 211ea029704b..6e55160521b3 100644
--- a/ci/jenkins/docker-images.ini
+++ b/ci/jenkins/docker-images.ini
@@ -17,13 +17,13 @@
 # This data file is read during when Jenkins runs job to determine docker images.
 [jenkins]
-ci_arm: tlcpack/ci-arm:20240126-070121-8ade9c30e
-ci_cortexm: tlcpack/ci-cortexm:20240126-070121-8ade9c30e
-ci_cpu: tlcpack/ci_cpu:20240322-060059-89cd74c07
-ci_gpu: tlcpack/ci-gpu:20240126-070121-8ade9c30e
-ci_hexagon: tlcpack/ci-hexagon:20240126-070121-8ade9c30e
-ci_i386: tlcpack/ci-i386:20240126-070121-8ade9c30e
-ci_lint: tlcpack/ci-lint:20240126-070121-8ade9c30e
-ci_minimal: tlcpack/ci-minimal:20240126-070121-8ade9c30e
-ci_riscv: tlcpack/ci-riscv:20240126-070121-8ade9c30e
-ci_wasm: tlcpack/ci-wasm:20240126-070121-8ade9c30e
+ci_arm: tlcpack/ci-arm:20240428-060115-0b09ed018
+ci_cortexm: tlcpack/ci-cortexm:20240428-060115-0b09ed018
+ci_cpu: tlcpack/ci_cpu:20240428-060115-0b09ed018
+ci_gpu: tlcpack/ci-gpu:20240428-060115-0b09ed018
+ci_hexagon: tlcpack/ci-hexagon:20240428-060115-0b09ed018
+ci_i386: tlcpack/ci-i386:20240428-060115-0b09ed018
+ci_lint: tlcpack/ci-lint:20240428-060115-0b09ed018
+ci_minimal: tlcpack/ci-minimal:20240428-060115-0b09ed018
+ci_riscv: tlcpack/ci-riscv:20240428-060115-0b09ed018
+ci_wasm: tlcpack/ci-wasm:20240428-060115-0b09ed018
diff --git a/tests/micro/zephyr/test_zephyr.py b/tests/micro/zephyr/test_zephyr.py
index 72a0a85cf96f..d247e2187bff 100644
--- a/tests/micro/zephyr/test_zephyr.py
+++ b/tests/micro/zephyr/test_zephyr.py
@@ -650,7 +650,7 @@ def test_debugging_enabled(workspace_dir):
 def test_qemu_make_fail(workspace_dir, board, microtvm_debug, serial_number):
     """Testing QEMU make fail."""
     if not utils.ZEPHYR_BOARDS[board]["is_qemu"]:
-        pytest.skip(msg="Only for QEMU targets.")
+        pytest.skip("Only for QEMU targets.")
 
     build_config = {"debug": microtvm_debug}
     shape = (10,)
diff --git a/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_fp16.py b/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_fp16.py
index 117e9d4b6f19..52892c60ad22 100644
--- a/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_fp16.py
+++ b/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_fp16.py
@@ -47,7 +47,7 @@ def test_resnet50(hexagon_launcher):
     model_params = "resnet50_fp16.params"
 
     if not os.path.exists(model_json):
-        pytest.skip(msg="Run python export_models.py first.")
+        pytest.skip("Run python export_models.py first.")
 
     with open(model_json, "r") as file:
         mod = tvm.ir.load_json(file.read())
diff --git a/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_int8.py b/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_int8.py
index 111448ea5791..84c796bee5dc 100644
--- a/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_int8.py
+++ b/tests/python/contrib/test_hexagon/metaschedule_e2e/test_resnet50_int8.py
@@ -54,7 +54,7 @@
 def load_model():
     """Load renset50 model."""
     if not os.path.exists(MODEL_JSON):
-        pytest.skip(msg="Run python export_models.py first.")
+        pytest.skip("Run python export_models.py first.")
 
     with open(MODEL_JSON, "r") as file:
         mod = tvm.ir.load_json(file.read())
@@ -172,7 +172,7 @@ def test_resnet50(hexagon_launcher):
         pytest.skip("Skipping test since it takes too long in CI.")
 
     if not os.path.exists(MODEL_JSON):
-        pytest.skip(msg="Run python export_models.py first.")
+        pytest.skip("Run python export_models.py first.")
 
     mod, params = load_model()
 
diff --git a/tests/python/contrib/test_hexagon/test_meta_schedule.py b/tests/python/contrib/test_hexagon/test_meta_schedule.py
index a64f0fc28653..26acedb88e21 100644
--- a/tests/python/contrib/test_hexagon/test_meta_schedule.py
+++ b/tests/python/contrib/test_hexagon/test_meta_schedule.py
@@ -69,7 +69,7 @@ def main(a: T.handle, b: T.handle, c: T.handle) -> None:  # type: ignore
 def test_builder_runner(hexagon_launcher):
     """Test builder and runner."""
     if hexagon_launcher.is_simulator():
-        pytest.skip(msg="Tuning on simulator not supported.")
+        pytest.skip("Tuning on simulator not supported.")
 
     mod = MatmulModule
 
@@ -191,7 +191,7 @@ def verify_dense(sch, target, m_size, n_size, k_size, hexagon_session):
 def test_vrmpy_dense(hexagon_launcher):
     """Test vector reduce muliply dense."""
     if hexagon_launcher.is_simulator():
-        pytest.skip(msg="Tuning on simulator not supported.")
+        pytest.skip("Tuning on simulator not supported.")
 
     do_tune = True
 
@@ -302,7 +302,7 @@ def main(  # type: ignore
 def test_vrmpy_dense_auto_tensorize(hexagon_launcher):
     """Test VRMPY dense operator."""
     if hexagon_launcher.is_simulator():
-        pytest.skip(msg="Tuning on simulator not supported.")
+        pytest.skip("Tuning on simulator not supported.")
 
     m_size, n_size, k_size = 128, 768, 768
     workload = te.create_prim_func(dense_compute(m_size, n_size, k_size))
@@ -367,7 +367,7 @@ def test_vrmpy_dense_auto_tensorize(hexagon_launcher):
 def test_conv2d_relay_auto_schedule(hexagon_launcher):
     """Test conv2d using auto schedule."""
     if hexagon_launcher.is_simulator():
-        pytest.skip(msg="Tuning on simulator not supported.")
+        pytest.skip("Tuning on simulator not supported.")
 
     i_size, o_size, h_size, w_size = 64, 64, 56, 56
     k_height_size = k_width_size = 3
@@ -447,7 +447,7 @@ def test_dense_relay_auto_schedule(hexagon_launcher):
     dense on Hexagon is extremely slow.
     """
     if hexagon_launcher.is_simulator():
-        pytest.skip(msg="Tuning on simulator not supported.")
+        pytest.skip("Tuning on simulator not supported.")
 
     target_hexagon = tvm.target.hexagon("v69")
     target = tvm.target.Target(target_hexagon, host=target_hexagon)
diff --git a/tests/python/contrib/test_hexagon/topi/slice_op/test_cast_slice.py b/tests/python/contrib/test_hexagon/topi/slice_op/test_cast_slice.py
index 77776bc8da0b..aa1a53c224d5 100644
--- a/tests/python/contrib/test_hexagon/topi/slice_op/test_cast_slice.py
+++ b/tests/python/contrib/test_hexagon/topi/slice_op/test_cast_slice.py
@@ -77,7 +77,7 @@ def test_cast_fp16_fp32_slice(
     Top level testing function for cast fp16 to fp32
     """
     if hexagon_session.is_simulator():
-        pytest.skip(msg="Due to https://github.com/apache/tvm/issues/11957")
+        pytest.skip("Due to https://github.com/apache/tvm/issues/11957")
     cast_input = te.placeholder(input_shape, name="A", dtype=dtype)
     cast_output = sl.cast_f16_f32_compute(cast_input)
 
@@ -163,7 +163,7 @@ def test_cast_fp32_fp16_slice(
     Top level testing function for cast fp32 to fp16
     """
     if hexagon_session.is_simulator():
-        pytest.skip(msg="Due to https://github.com/apache/tvm/issues/11957")
+        pytest.skip("Due to https://github.com/apache/tvm/issues/11957")
     cast_input = te.placeholder(input_shape, name="A", dtype=dtype)
     cast_output = sl.cast_f32_f16_compute(cast_input)
 
diff --git a/tests/python/relax/test_codegen_cudnn.py b/tests/python/relax/test_codegen_cudnn.py
index f34270587812..0f911905f820 100644
--- a/tests/python/relax/test_codegen_cudnn.py
+++ b/tests/python/relax/test_codegen_cudnn.py
@@ -198,6 +198,7 @@ def test_conv2d_offload(data_shape, weight_shape, dtype, with_bias, activation):
     tvm.testing.assert_allclose(out, ref, rtol=1e-2, atol=1e-2)
 
 
+@pytest.mark.skip(reason="flaky test")
 @pytest.mark.parametrize(
     "data_shape, weight_shape, dtype, with_bias, activation",
     [