Skip to content

Commit 34c4d3c

Browse files
Ninja91 authored and facebook-github-bot committed
Updating tests for 16A8W ops which are supported (#14945)
Summary: Updating the TOSA, U55 & U85 tests to remove xfails. These ops are supported now and updating tests to not expect failure. Differential Revision: D84262200
1 parent bdc526b commit 34c4d3c

File tree

5 files changed

+0
-38
lines changed

5 files changed

+0
-38
lines changed

backends/arm/test/ops/test_add.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77

88
from typing import cast, Tuple
99

10-
import pytest
1110
import torch
1211
from executorch.backends.arm.quantizer import arm_quantizer
1312
from executorch.backends.arm.quantizer.arm_quantizer import (
@@ -260,9 +259,6 @@ def get_symmetric_a16w8_add_quantizer(per_channel_quantization=False):
260259

261260

262261
@common.parametrize("test_data", Add.test_data)
263-
@pytest.mark.xfail(
264-
reason="missing int16 add ops support; fails at TOSA reference model with Unsupported operation type or rank. See: https://github.com/pytorch/executorch/issues/13730"
265-
)
266262
def test_add_tensor_16a8w_tosa_INT(test_data: input_t1):
267263
"""Test add operation with 16A8W quantization (16-bit activations, 8-bit weights)"""
268264
per_channel_quantization = False

backends/arm/test/ops/test_cat.py

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88

99
from typing import Tuple
1010

11-
import pytest
1211
import torch
1312
from executorch.backends.arm.quantizer.arm_quantizer import (
1413
get_symmetric_a16w8_quantization_config,
@@ -178,9 +177,6 @@ def get_symmetric_a16w8_cat_quantizer(per_channel_quantization=False):
178177

179178

180179
@common.parametrize("test_data", Cat.test_parameters)
181-
@pytest.mark.xfail(
182-
reason="missing int16 cat ops support; fails at TOSA reference model with Unsupported operation type or rank. See: https://github.com/pytorch/executorch/issues/13978"
183-
)
184180
def test_cat_16a8w_tosa_INT(test_data: Tuple):
185181
"""Test cat operation with 16A8W quantization (16-bit activations, 8-bit weights)"""
186182
per_channel_quantization = False
@@ -206,9 +202,6 @@ def test_cat_16a8w_tosa_INT(test_data: Tuple):
206202

207203
@common.parametrize("test_data", Cat.test_parameters)
208204
@common.XfailIfNoCorstone300
209-
@pytest.mark.xfail(
210-
reason="Vela compilation fails with 'Invalid arguments' for int16 cat operations"
211-
)
212205
def test_cat_16a8w_u55_INT16(test_data: Tuple):
213206
"""Test cat operation with 16A8W quantization on U55 (16-bit activations, 8-bit weights)"""
214207
per_channel_quantization = False
@@ -233,9 +226,6 @@ def test_cat_16a8w_u55_INT16(test_data: Tuple):
233226

234227
@common.parametrize("test_data", Cat.test_parameters)
235228
@common.XfailIfNoCorstone320
236-
@pytest.mark.xfail(
237-
reason="Vela compilation fails with 'Invalid arguments' for int16 cat operations"
238-
)
239229
def test_cat_16a8w_u85_INT16(test_data: Tuple):
240230
"""Test cat operation with 16A8W quantization on U85 (16-bit activations, 8-bit weights)"""
241231
per_channel_quantization = False

backends/arm/test/ops/test_mul.py

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@
88

99
from typing import Tuple
1010

11-
import pytest
1211
import torch
1312
from executorch.backends.arm.quantizer.arm_quantizer import (
1413
get_symmetric_a16w8_quantization_config,
@@ -310,9 +309,6 @@ def get_symmetric_a16w8_mul_quantizer(per_channel_quantization=False):
310309

311310

312311
@common.parametrize("test_data", test_data_suite)
313-
@pytest.mark.xfail(
314-
reason="missing int16 mul ops support; fails at TOSA reference model with Unsupported operation type or rank. See: https://github.com/pytorch/executorch/issues/13947"
315-
)
316312
def test_mul_tensor_16a8w_tosa_INT(test_data: input_t1):
317313
"""Test mul operation with 16A8W quantization (16-bit activations, 8-bit weights)"""
318314
per_channel_quantization = False

backends/arm/test/ops/test_slice.py

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@
77

88
from typing import Tuple
99

10-
import pytest
1110
import torch
1211
from executorch.backends.arm.quantizer.arm_quantizer import (
1312
get_symmetric_a16w8_quantization_config,
@@ -154,9 +153,6 @@ def get_symmetric_a16w8_slice_quantizer(per_channel_quantization=False):
154153

155154

156155
@common.parametrize("test_data", test_data_suite)
157-
@pytest.mark.xfail(
158-
reason="missing int16 slice ops support; fails at TOSA reference model with Unsupported operation type or rank. See: https://github.com/pytorch/executorch/issues/13976"
159-
)
160156
def test_slice_tensor_16a8w_tosa_INT(test_data: torch.Tensor):
161157
"""Test slice operation with 16A8W quantization (16-bit activations, 8-bit weights)"""
162158
per_channel_quantization = False
@@ -182,9 +178,6 @@ def test_slice_tensor_16a8w_tosa_INT(test_data: torch.Tensor):
182178

183179
@common.parametrize("test_data", test_data_suite)
184180
@common.XfailIfNoCorstone300
185-
@pytest.mark.xfail(
186-
reason="Vela compilation fails with 'Invalid arguments' for int16 slice operations"
187-
)
188181
def test_slice_tensor_16a8w_u55_INT16(test_data: torch.Tensor):
189182
"""Test slice operation with 16A8W quantization on U55 (16-bit activations, 8-bit weights)"""
190183
per_channel_quantization = False
@@ -209,9 +202,6 @@ def test_slice_tensor_16a8w_u55_INT16(test_data: torch.Tensor):
209202

210203
@common.parametrize("test_data", test_data_suite)
211204
@common.XfailIfNoCorstone320
212-
@pytest.mark.xfail(
213-
reason="Vela compilation fails with 'Invalid arguments' for int16 slice operations"
214-
)
215205
def test_slice_tensor_16a8w_u85_INT16(test_data: torch.Tensor):
216206
"""Test slice operation with 16A8W quantization on U85 (16-bit activations, 8-bit weights)"""
217207
per_channel_quantization = False

backends/arm/test/ops/test_view.py

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@
99

1010
from typing import Tuple
1111

12-
import pytest
1312
import torch
1413
from executorch.backends.arm.quantizer.arm_quantizer import (
1514
get_symmetric_a16w8_quantization_config,
@@ -180,9 +179,6 @@ def get_symmetric_a16w8_view_quantizer(per_channel_quantization=False):
180179

181180

182181
@common.parametrize("test_data", View.needs_transpose_tests)
183-
@pytest.mark.xfail(
184-
reason="missing int16 view ops support; fails at TOSA reference model with Unsupported operation type or rank. See: https://github.com/pytorch/executorch/issues/13977"
185-
)
186182
def test_view_16a8w_tosa_INT(test_data: Tuple):
187183
"""Test view operation with 16A8W quantization (16-bit activations, 8-bit weights)"""
188184
per_channel_quantization = False
@@ -209,9 +205,6 @@ def test_view_16a8w_tosa_INT(test_data: Tuple):
209205

210206
@common.parametrize("test_data", View.needs_transpose_tests)
211207
@common.XfailIfNoCorstone300
212-
@pytest.mark.xfail(
213-
reason="Vela compilation fails with 'Invalid arguments' for int16 view operations"
214-
)
215208
def test_view_16a8w_u55_INT16(test_data: Tuple):
216209
"""Test view operation with 16A8W quantization on U55 (16-bit activations, 8-bit weights)"""
217210
per_channel_quantization = False
@@ -237,9 +230,6 @@ def test_view_16a8w_u55_INT16(test_data: Tuple):
237230

238231
@common.parametrize("test_data", View.needs_transpose_tests)
239232
@common.XfailIfNoCorstone320
240-
@pytest.mark.xfail(
241-
reason="Vela compilation fails with 'Invalid arguments' for int16 view operations"
242-
)
243233
def test_view_16a8w_u85_INT16(test_data: Tuple):
244234
"""Test view operation with 16A8W quantization on U85 (16-bit activations, 8-bit weights)"""
245235
per_channel_quantization = False

0 commit comments

Comments
 (0)