3 changes: 0 additions & 3 deletions backends/arm/test/models/test_resnet18.py
@@ -79,9 +79,6 @@ def test_resnet_u55_INT(per_channel_quantization):
 
 
 @pytest.mark.slow
-@pytest.mark.xfail(
-    reason="For resnet18 for Ethos-U85, the SRAM memory footprint is very high. The compiler team is investigating."
-)
 @common.XfailIfNoCorstone320
 @common.parametrize("per_channel_quantization", quant_test_data)
 def test_resnet_u85_INT(per_channel_quantization):
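Dropping the `@pytest.mark.xfail` marker means `test_resnet_u85_INT` must now pass outright; the old reason suggests the high SRAM footprint on Ethos-U85 has since been addressed. For reference, a minimal sketch of pytest's xfail semantics (standard pytest behavior, nothing ExecuTorch-specific; the two tests are illustrative only):

```python
import pytest

@pytest.mark.xfail(reason="known bug", strict=False)
def test_non_strict():
    # A failure is reported as XFAIL; an unexpected pass as XPASS (not an error).
    assert 1 + 1 == 2

@pytest.mark.xfail(reason="known bug", strict=True)
def test_strict():
    # An unexpected pass is reported as FAILED, which forces stale markers
    # to be removed once the underlying issue is fixed.
    assert 1 + 1 == 2
```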
1 change: 0 additions & 1 deletion backends/arm/test/models/test_torch_functions.py
@@ -128,7 +128,6 @@ def test_torch_fns_FP(test_data):
         "Requires dynamic output shape.",
         "topk": "NotImplementedError: No registered serialization name for <class 'torch.return_types.topk'> found",
         "sort": "NotImplementedError: No registered serialization name for <class 'torch.return_types.sort'> found",
-        "t": "MLETORCH-855: Issue with Quantization folding.",
     },
     strict=False,
 )
3 changes: 0 additions & 3 deletions backends/arm/test/ops/test_cosh.py
@@ -76,9 +76,6 @@ def test_cosh_u55_INT(test_data: Tuple):
 @common.parametrize(
     "test_data",
     test_data_suite,
-    xfails={
-        "ones_4D": "MLBEDSW-11046 - Incorrect output for TABLE followed by RESHAPE"
-    },
     strict=False,
 )
 def test_cosh_u85_INT(test_data: Tuple):
12 changes: 2 additions & 10 deletions backends/arm/test/ops/test_mean_dim.py
@@ -286,15 +286,7 @@ def test_mean_dim_tosa_INT(test_data):
     pipeline.run()
 
 
-xfails = {
-    "rank5_01234": "Rank 5 graph input currently not supported in EthosUBackend (passes since CHW are all averaged over so data order does not matter in this case)",
-    "rank5_234": "Rank 5 graph input currently not supported in EthosUBackend (passes since CHW are all averaged over so data order does not matter in this case)",
-    "rank5_12": "Rank 5 graph input currently not supported in EthosUBackend",
-    "rank5_2": "Rank 5 graph input currently not supported in EthosUBackend",
-}
-
-
-@common.parametrize("test_data", MeanDim.test_data_suite, xfails=xfails, strict=False)
+@common.parametrize("test_data", MeanDim.test_data_suite)
 @common.XfailIfNoCorstone300
 def test_mean_dim_u55_INT(test_data):
     test_data, dim, keep_dim = test_data()
@@ -313,7 +305,7 @@ def test_mean_dim_u55_INT(test_data):
     pipeline.run()
 
 
-@common.parametrize("test_data", MeanDim.test_data_suite, xfails=xfails, strict=False)
+@common.parametrize("test_data", MeanDim.test_data_suite)
 @common.XfailIfNoCorstone320
 def test_mean_dim_u85_INT(test_data):
     test_data, dim, keep_dim = test_data()
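Most of these removals touch the `xfails` argument of `common.parametrize`. A minimal sketch of how such a helper can be built on top of pytest, assuming it simply attaches per-case xfail marks (the real `common.parametrize` in the Arm test suite may differ):

```python
import pytest

def parametrize(arg_name, test_data, xfails=None, strict=True):
    """Wrap pytest.mark.parametrize, expecting the named cases to fail."""
    xfails = xfails or {}
    params = []
    for case_id, value in test_data.items():
        marks = ()
        if case_id in xfails:
            # The dict value is the tracking reason, e.g. an MLETORCH ticket.
            marks = (pytest.mark.xfail(reason=xfails[case_id], strict=strict),)
        params.append(pytest.param(value, id=case_id, marks=marks))
    return pytest.mark.parametrize(arg_name, params)
```

Deleting an entry from `xfails` (or the whole dict, as above) flips the named case back to a hard pass/fail requirement.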
4 changes: 0 additions & 4 deletions backends/arm/test/ops/test_ne.py
@@ -159,9 +159,6 @@ def test_ne_scalar_u55_INT(test_module):
 @common.parametrize(
     "test_module",
     test_data_tensor,
-    xfails={
-        "ne_tensor_rank4_randn": "MLETORCH-517: Batch size > 1 not fully supported",
-    },
     strict=False,
 )
 @common.XfailIfNoCorstone320
@@ -179,7 +176,6 @@ def test_ne_tensor_u85_INT(test_module):
     "test_module",
     test_data_scalar,
     xfails={
-        "ne_scalar_rank4_randn": "MLETORCH-517: Batch size > 1 not fully supported",
         "ne_scalar_rank4_randn_1batch": "MLETORCH-847: Boolean ne result unstable on U85",
     },
     strict=False,
16 changes: 10 additions & 6 deletions backends/arm/test/ops/test_pixel_shuffling.py
@@ -6,8 +6,6 @@
 
 from typing import Tuple
 
-import pytest
-
 import torch
 
 from executorch.backends.arm.constants import MAX_RANK
@@ -192,9 +190,12 @@ def test_pixel_unshuffle_u55_INT(test_data: input_t1):
     pipeline.run()
 
 
-@common.parametrize("test_data", PixelUnShuffle.test_data_generators)
+@common.parametrize(
+    "test_data",
+    PixelUnShuffle.test_data_generators,
+    xfails={"rand_4d": "MLETORCH-1424: rand test fails"},
+)
 @common.XfailIfNoCorstone320
-@pytest.mark.xfail(reason="MLETORCH-1424: rand test fails")
 def test_pixel_unshuffle_u85_INT(test_data: input_t1):
     pipeline = EthosU85PipelineINT[input_t1](
         PixelUnShuffle(),
@@ -219,9 +220,12 @@ def test_pixel_shuffle_u55_INT(test_data: input_t1):
     pipeline.run()
 
 
-@common.parametrize("test_data", PixelShuffle.test_data_generators)
+@common.parametrize(
+    "test_data",
+    PixelShuffle.test_data_generators,
+    xfails={"rand_4d": "MLETORCH-1424: rand test fails"},
+)
 @common.XfailIfNoCorstone320
-@pytest.mark.xfail(reason="MLETORCH-1424: rand test fails")
 def test_pixel_shuffle_u85_INT(test_data: input_t1):
     pipeline = EthosU85PipelineINT[input_t1](
         PixelShuffle(),
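Note the shift in this file: the function-level `@pytest.mark.xfail` marked every generated case as an expected failure, so the inputs that actually passed were reported as XPASS noise. Moving the expectation into `xfails={"rand_4d": ...}` scopes it to the single failing input, the remaining generators must pass, and the module can drop its `import pytest` entirely (see the parametrize sketch above).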
16 changes: 6 additions & 10 deletions backends/arm/test/ops/test_pow.py
@@ -62,10 +62,10 @@ class Pow_TensorScalar(torch.nn.Module):
 
     test_data = {
         # Test whole number exponents
-        "exp_minus_three": lambda: (torch.randn((10, 5)), -3.0),
-        "exp_minus_one": lambda: (torch.randn((42,)), -1.0),
-        "exp_zero": lambda: (torch.randn((1, 2, 3, 7)), 0.0),
-        "exp_one": lambda: (torch.randn((1, 4, 6, 2)), 1.0),
+        "exp_minus_three": lambda: (torch.randn((10, 5)).relu() + 0.1, -3.0),
+        "exp_minus_one": lambda: (torch.randn((42,)).relu() + 0.1, -1.0),
+        "exp_zero": lambda: (torch.randn((1, 2, 3, 7)).relu(), 0.0),
+        "exp_one": lambda: (torch.randn((1, 4, 6, 2)).relu(), 1.0),
         "exp_two": lambda: (torch.randn((1, 2, 3, 6)), 2.0),
         # Test decimal exponent (base must be non-negative)
         "non_neg_base_exp_pos_decimal": lambda: (
@@ -117,11 +117,7 @@ def test_pow_tensor_tensor_vgf_FP(test_data: Pow_TensorTensor.input_t):
 
 
 x_fail = {
-    "exp_minus_three": "TOSA constraints: If x == 0 and y <= 0, the result is undefined.",
-    "exp_minus_one": "TOSA constraints: If x == 0 and y <= 0, the result is undefined.",
-    "exp_zero": "TOSA constraints: If x == 0 and y <= 0, the result is undefined.",
-    "exp_one": "TOSA constraints: If x == 0 and y <= 0, the result is undefined.",
-    "exp_two": "TOSA constraints: If x == 0 and y <= 0, the result is undefined.",
+    "exp_two": "TOSA constraints: If x < 0, the result is undefined.",
     "non_neg_base_exp_pos_decimal": "TOSA constraints: If x == 0 and y <= 0, the result is undefined.",
 }
 

Expand All @@ -138,7 +134,7 @@ def test_pow_tensor_scalar_tosa_FP(test_data: Pow_TensorScalar.input_t):
pipeline.run()


@common.parametrize("test_data", Pow_TensorScalar.test_data, x_fail, strict=False)
@common.parametrize("test_data", Pow_TensorScalar.test_data, strict=False)
def test_pow_tensor_scalar_tosa_INT(test_data: Pow_TensorScalar.input_t):
base, exp = test_data()
pipeline = TosaPipelineINT[Pow_TensorScalar.input_t](
Expand Down
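The `.relu() + 0.1` rewrite keeps the negative-exponent cases inside TOSA's defined domain: `x ** y` is undefined when `x == 0` and `y <= 0`, and the updated `x_fail` note flags negative bases as well. A small self-contained illustration of the trick (assumption: plain `torch.Tensor.pow` outside any test pipeline):

```python
import torch

t = torch.randn((10, 5))
strictly_positive = t.relu() + 0.1  # every element >= 0.1: safe for y <= 0
non_negative = t.relu()             # every element >= 0: safe for y > 0

# 0 ** -3 would divide by zero; a strictly positive base never does.
assert torch.isfinite(strictly_positive.pow(-3.0)).all()
assert torch.isfinite(non_negative.pow(1.0)).all()
```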
10 changes: 2 additions & 8 deletions backends/arm/test/ops/test_unflatten.py
@@ -57,13 +57,7 @@ def test_unflatten_int_tosa_INT(test_data: test_data_t):
     pipeline.run()
 
 
-xfails = {
-    "rand_3d_batch3": "Batch size > 1 currently not supported for FVP tests",
-    "randn_4d_dim1": "Batch size > 1 currently not supported for FVP tests",
-}
-
-
-@common.parametrize("test_data", Unflatten.test_data, xfails=xfails, strict=False)
+@common.parametrize("test_data", Unflatten.test_data, strict=False)
 @common.XfailIfNoCorstone300
 def test_unflatten_int_u55_INT(test_data: test_data_t):
     module, inputs = test_data()
@@ -75,7 +69,7 @@ def test_unflatten_int_u55_INT(test_data: test_data_t):
     pipeline.run()
 
 
-@common.parametrize("test_data", Unflatten.test_data, xfails=xfails, strict=False)
+@common.parametrize("test_data", Unflatten.test_data, strict=False)
 @common.XfailIfNoCorstone320
 def test_unflatten_int_u85_INT(test_data: test_data_t):
     module, inputs = test_data()