diff --git a/backends/arm/test/models/test_resnet18.py b/backends/arm/test/models/test_resnet18.py index 3cb21abd772..44abc1d34e1 100644 --- a/backends/arm/test/models/test_resnet18.py +++ b/backends/arm/test/models/test_resnet18.py @@ -79,9 +79,6 @@ def test_resnet_u55_INT(per_channel_quantization): @pytest.mark.slow -@pytest.mark.xfail( - reason="For resnet18 for Ethos-U85, the SRAM memory footprint is very high. The compiler team is investigating." -) @common.XfailIfNoCorstone320 @common.parametrize("per_channel_quantization", quant_test_data) def test_resnet_u85_INT(per_channel_quantization): diff --git a/backends/arm/test/models/test_torch_functions.py b/backends/arm/test/models/test_torch_functions.py index de45dbe0356..7f9bbdba177 100644 --- a/backends/arm/test/models/test_torch_functions.py +++ b/backends/arm/test/models/test_torch_functions.py @@ -128,7 +128,6 @@ def test_torch_fns_FP(test_data): "Requires dynamic output shape.", "topk": "NotImplementedError: No registered serialization name for found", "sort": "NotImplementedError: No registered serialization name for found", - "t": "MLETORCH-855: Issue with Quantization folding.", }, strict=False, ) diff --git a/backends/arm/test/ops/test_cosh.py b/backends/arm/test/ops/test_cosh.py index 60920d03f94..46e17f78b67 100644 --- a/backends/arm/test/ops/test_cosh.py +++ b/backends/arm/test/ops/test_cosh.py @@ -76,9 +76,6 @@ def test_cosh_u55_INT(test_data: Tuple): @common.parametrize( "test_data", test_data_suite, - xfails={ - "ones_4D": "MLBEDSW-11046 - Incorrect output for TABLE followed by RESHAPE" - }, strict=False, ) def test_cosh_u85_INT(test_data: Tuple): diff --git a/backends/arm/test/ops/test_mean_dim.py b/backends/arm/test/ops/test_mean_dim.py index 970340c352b..31797e72e78 100644 --- a/backends/arm/test/ops/test_mean_dim.py +++ b/backends/arm/test/ops/test_mean_dim.py @@ -286,15 +286,7 @@ def test_mean_dim_tosa_INT(test_data): pipeline.run() -xfails = { - "rank5_01234": "Rank 5 graph input currently 
not supported in EthosUBackend (passes since CHW are all averaged over so data order does not matter in this case)", - "rank5_234": "Rank 5 graph input currently not supported in EthosUBackend (passes since CHW are all averaged over so data order does not matter in this case)", - "rank5_12": "Rank 5 graph input currently not supported in EthosUBackend", - "rank5_2": "Rank 5 graph input currently not supported in EthosUBackend", -} - - -@common.parametrize("test_data", MeanDim.test_data_suite, xfails=xfails, strict=False) +@common.parametrize("test_data", MeanDim.test_data_suite) @common.XfailIfNoCorstone300 def test_mean_dim_u55_INT(test_data): test_data, dim, keep_dim = test_data() @@ -313,7 +305,7 @@ def test_mean_dim_u55_INT(test_data): pipeline.run() -@common.parametrize("test_data", MeanDim.test_data_suite, xfails=xfails, strict=False) +@common.parametrize("test_data", MeanDim.test_data_suite) @common.XfailIfNoCorstone320 def test_mean_dim_u85_INT(test_data): test_data, dim, keep_dim = test_data() diff --git a/backends/arm/test/ops/test_ne.py b/backends/arm/test/ops/test_ne.py index e20953b64dc..69f9440d549 100644 --- a/backends/arm/test/ops/test_ne.py +++ b/backends/arm/test/ops/test_ne.py @@ -159,9 +159,6 @@ def test_ne_scalar_u55_INT(test_module): @common.parametrize( "test_module", test_data_tensor, - xfails={ - "ne_tensor_rank4_randn": "MLETORCH-517: Batch size > 1 not fully supported", - }, strict=False, ) @common.XfailIfNoCorstone320 @@ -179,7 +176,6 @@ def test_ne_tensor_u85_INT(test_module): "test_module", test_data_scalar, xfails={ - "ne_scalar_rank4_randn": "MLETORCH-517: Batch size > 1 not fully supported", "ne_scalar_rank4_randn_1batch": "MLETORCH-847: Boolean ne result unstable on U85", }, strict=False, diff --git a/backends/arm/test/ops/test_pixel_shuffling.py b/backends/arm/test/ops/test_pixel_shuffling.py index 5aeb8b2d1bb..dd9dfbe62cc 100644 --- a/backends/arm/test/ops/test_pixel_shuffling.py +++ 
b/backends/arm/test/ops/test_pixel_shuffling.py @@ -6,8 +6,6 @@ from typing import Tuple -import pytest - import torch from executorch.backends.arm.constants import MAX_RANK @@ -192,9 +190,12 @@ def test_pixel_unshuffle_u55_INT(test_data: input_t1): pipeline.run() -@common.parametrize("test_data", PixelUnShuffle.test_data_generators) +@common.parametrize( + "test_data", + PixelUnShuffle.test_data_generators, + xfails={"rand_4d": "MLETORCH-1424: rand test fails"}, +) @common.XfailIfNoCorstone320 -@pytest.mark.xfail(reason="MLETORCH-1424: rand test fails") def test_pixel_unshuffle_u85_INT(test_data: input_t1): pipeline = EthosU85PipelineINT[input_t1]( PixelUnShuffle(), @@ -219,9 +220,12 @@ def test_pixel_shuffle_u55_INT(test_data: input_t1): pipeline.run() -@common.parametrize("test_data", PixelShuffle.test_data_generators) +@common.parametrize( + "test_data", + PixelShuffle.test_data_generators, + xfails={"rand_4d": "MLETORCH-1424: rand test fails"}, +) @common.XfailIfNoCorstone320 -@pytest.mark.xfail(reason="MLETORCH-1424: rand test fails") def test_pixel_shuffle_u85_INT(test_data: input_t1): pipeline = EthosU85PipelineINT[input_t1]( PixelShuffle(), diff --git a/backends/arm/test/ops/test_pow.py b/backends/arm/test/ops/test_pow.py index 377d1355992..14fb05109cc 100644 --- a/backends/arm/test/ops/test_pow.py +++ b/backends/arm/test/ops/test_pow.py @@ -62,10 +62,10 @@ class Pow_TensorScalar(torch.nn.Module): test_data = { # Test whole number exponents - "exp_minus_three": lambda: (torch.randn((10, 5)), -3.0), - "exp_minus_one": lambda: (torch.randn((42,)), -1.0), - "exp_zero": lambda: (torch.randn((1, 2, 3, 7)), 0.0), - "exp_one": lambda: (torch.randn((1, 4, 6, 2)), 1.0), + "exp_minus_three": lambda: (torch.randn((10, 5)).relu() + 0.1, -3.0), + "exp_minus_one": lambda: (torch.randn((42,)).relu() + 0.1, -1.0), + "exp_zero": lambda: (torch.randn((1, 2, 3, 7)).relu(), 0.0), + "exp_one": lambda: (torch.randn((1, 4, 6, 2)).relu(), 1.0), "exp_two": lambda: (torch.randn((1, 
2, 3, 6)), 2.0), # Test decimal exponent (base must be non-negative) "non_neg_base_exp_pos_decimal": lambda: ( @@ -117,11 +117,7 @@ def test_pow_tensor_tensor_vgf_FP(test_data: Pow_TensorTensor.input_t): x_fail = { - "exp_minus_three": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", - "exp_minus_one": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", - "exp_zero": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", - "exp_one": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", - "exp_two": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", + "exp_two": "TOSA constraints: If x < 0, the result is undefined.", "non_neg_base_exp_pos_decimal": "TOSA constraints: If x == 0 and y ⇐ 0, the result is undefined.", } @@ -138,7 +134,7 @@ def test_pow_tensor_scalar_tosa_FP(test_data: Pow_TensorScalar.input_t): pipeline.run() -@common.parametrize("test_data", Pow_TensorScalar.test_data, x_fail, strict=False) +@common.parametrize("test_data", Pow_TensorScalar.test_data, strict=False) def test_pow_tensor_scalar_tosa_INT(test_data: Pow_TensorScalar.input_t): base, exp = test_data() pipeline = TosaPipelineINT[Pow_TensorScalar.input_t]( diff --git a/backends/arm/test/ops/test_unflatten.py b/backends/arm/test/ops/test_unflatten.py index 7f98ababd65..35b264a4bb0 100644 --- a/backends/arm/test/ops/test_unflatten.py +++ b/backends/arm/test/ops/test_unflatten.py @@ -57,13 +57,7 @@ def test_unflatten_int_tosa_INT(test_data: test_data_t): pipeline.run() -xfails = { - "rand_3d_batch3": "Batch size > 1 currently not supported for FVP tests", - "randn_4d_dim1": "Batch size > 1 currently not supported for FVP tests", -} - - -@common.parametrize("test_data", Unflatten.test_data, xfails=xfails, strict=False) +@common.parametrize("test_data", Unflatten.test_data, strict=False) @common.XfailIfNoCorstone300 def test_unflatten_int_u55_INT(test_data: test_data_t): module, inputs = test_data() @@ -75,7 +69,7 @@ def
test_unflatten_int_u55_INT(test_data: test_data_t): pipeline.run() -@common.parametrize("test_data", Unflatten.test_data, xfails=xfails, strict=False) +@common.parametrize("test_data", Unflatten.test_data, strict=False) @common.XfailIfNoCorstone320 def test_unflatten_int_u85_INT(test_data: test_data_t): module, inputs = test_data()