
Commit 3dd50a2

Arm backend: Enable linear 16a8w tests
Enable tests of int16 activation and int8 weight quantization. The large_rand test is disabled while the cause of its flakiness is investigated.

Signed-off-by: Per Åstrand <[email protected]>
Change-Id: I9de5d472f8862edebcf82c140399985db930c069
1 parent 3c9c4e2 commit 3dd50a2
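The "16a8w" suffix denotes 16-bit activations with 8-bit weights. As an aside, here is a minimal, hypothetical sketch of what that split means numerically; it uses plain torch fake quantization, not the ExecuTorch quantizer API, and the helper name `fake_quant` is made up for illustration.

```python
# Hypothetical sketch (not the ExecuTorch API): what "16a8w" means numerically.
import torch

def fake_quant(x: torch.Tensor, num_bits: int) -> torch.Tensor:
    # Symmetric per-tensor fake quantization: pick a scale from the largest
    # magnitude, round to the integer grid, clamp to the signed range,
    # then map back to float.
    qmax = 2 ** (num_bits - 1) - 1
    scale = x.abs().max() / qmax
    return (x / scale).round().clamp(-qmax - 1, qmax) * scale

activations = torch.randn(4, 8)
weights = torch.randn(16, 8)
# 16a8w: activations quantized to 16 bits, weights to 8 bits.
out = torch.nn.functional.linear(fake_quant(activations, 16), fake_quant(weights, 8))
```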

File tree

2 files changed: +11 -5 lines

backends/arm/scripts/parse_test_names.py

Lines changed: 3 additions & 0 deletions
@@ -95,6 +95,9 @@ def parse_test_name(
     op = op.removesuffix("_1d")
     op = op.removesuffix("_2d")
 
+    # Remove suffix for 16 bit activation and 8 bit weight test cases
+    op = op.removesuffix("_16a8w")
+
     assert target != "None", f"{test_name} does not contain one of {TARGETS}"
     assert (
         op in op_name_map.keys()
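The added lines rely on `str.removesuffix`, which strips the marker only when it is present and leaves other names untouched. A small illustration (the test names below are hypothetical):

```python
# str.removesuffix only strips the suffix when it is actually there.
for name in ("linear_16a8w", "linear_2d", "linear"):
    print(name, "->", name.removesuffix("_16a8w"))
# linear_16a8w -> linear
# linear_2d -> linear_2d
# linear -> linear
```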

backends/arm/test/ops/test_linear.py

Lines changed: 8 additions & 5 deletions
@@ -8,7 +8,6 @@
 
 from typing import Tuple
 
-import pytest
 import torch
 from executorch.backends.arm.quantizer.arm_quantizer import (
     get_symmetric_a16w8_quantization_config,
@@ -276,10 +275,14 @@ def get_symmetric_a16w8_linear_quantizer(
 )
 
 
-@common.parametrize("test_data", test_data_rank1_INT | test_data_rank4_INT)
-@pytest.mark.xfail(
-    reason="missing int16 linear ops support; fails at TOSA reference model run with Invalid TOSA graph"
-)
+test_data_all_16a8w = test_data_rank1_INT | test_data_rank4_INT
+# TODO: Remove large rand test as they are flaky until sorted out why: MLETORCH-1377
+for k in list(test_data_all_16a8w.keys()):
+    if "large_rand" in k:
+        test_data_all_16a8w.pop(k)
+
+
+@common.parametrize("test_data", test_data_all_16a8w)
 def test_linear_16a8w_tosa_INT(test_data: torch.Tensor):
     """Test linear operation with 16A8W quantization (16-bit activations, 8-bit weights)"""
     test_data, out_features, has_bias, per_channel_quantization = test_data()
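The replacement code merges the two test-data dictionaries with the `|` operator and pops the flaky `large_rand` cases before parametrizing. A standalone sketch of the same idiom, with placeholder dictionaries standing in for the real test data:

```python
# Placeholder dictionaries standing in for test_data_rank1_INT / test_data_rank4_INT.
test_data_rank1_INT = {"rand_rank1": 1, "large_rand_rank1": 2}
test_data_rank4_INT = {"rand_rank4": 3, "large_rand_rank4": 4}

# Merge, then drop the flaky "large_rand" cases; list() snapshots the keys
# so the dict can be modified while iterating.
test_data_all_16a8w = test_data_rank1_INT | test_data_rank4_INT
for k in list(test_data_all_16a8w.keys()):
    if "large_rand" in k:
        test_data_all_16a8w.pop(k)

print(sorted(test_data_all_16a8w))  # ['rand_rank1', 'rand_rank4']
```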
