|
8 | 8 |
|
9 | 9 | from typing import Tuple |
10 | 10 |
|
| 11 | +import pytest |
11 | 12 | import torch |
| 13 | +from executorch.backends.arm.quantizer.arm_quantizer import ( |
| 14 | + get_symmetric_a16w8_quantization_config, |
| 15 | + TOSAQuantizer, |
| 16 | +) |
12 | 17 |
|
13 | | -from executorch.backends.arm.test import common |
| 18 | +from executorch.backends.arm.test import common, conftest |
14 | 19 | from executorch.backends.arm.test.tester.test_pipeline import ( |
15 | 20 | EthosU55PipelineINT, |
16 | 21 | EthosU85PipelineINT, |
17 | 22 | TosaPipelineFP, |
18 | 23 | TosaPipelineINT, |
19 | 24 | VgfPipeline, |
20 | 25 | ) |
| 26 | +from executorch.backends.arm.tosa_specification import TosaSpecification |
| 27 | +from executorch.backends.xnnpack.test.tester import Quantize |
21 | 28 |
|
22 | 29 | input_t1 = Tuple[torch.Tensor, torch.Tensor] # Input x |
23 | 30 | aten_op = "torch.ops.aten.mul.Tensor" |
@@ -284,3 +291,102 @@ def test_mul_tensor_vgf_INT_int32(test_data: torch.Tensor): |
284 | 291 | ) |
285 | 292 | pipeline.pop_stage("check.quant_nodes") |
286 | 293 | pipeline.run() |
| 294 | + |
| 295 | + |
def get_symmetric_a16w8_mul_quantizer(per_channel_quantization=False):
    """Build a Quantize stage configured for 16-bit activations / 8-bit weights.

    Args:
        per_channel_quantization: Forwarded as ``is_per_channel`` to
            ``get_symmetric_a16w8_quantization_config``.

    Returns:
        A ``Quantize`` stage wrapping a ``TOSAQuantizer`` whose global
        quantization config is the symmetric a16w8 config.

    Raises:
        KeyError: If the ``tosa_version`` test option is not a supported
            profile (only "1.0" is mapped here).
    """
    tosa_version = conftest.get_option("tosa_version")
    tosa_profiles = {
        "1.0": TosaSpecification.create_from_string("TOSA-1.0+INT+int16"),
    }

    # Build the config once and reuse it for both the quantizer's global
    # setting and the Quantize stage — the original constructed two
    # identical configs, which was wasteful and risked silent divergence.
    quant_config = get_symmetric_a16w8_quantization_config(
        is_per_channel=per_channel_quantization
    )

    quantizer = TOSAQuantizer(tosa_profiles[tosa_version])
    quantizer.set_global(quant_config)

    return Quantize(quantizer, quant_config)
| 313 | + |
| 314 | + |
@common.parametrize("test_data", test_data_suite)
def test_mul_tensor_16a8w_tosa_INT(test_data: input_t1):
    """Run the TOSA INT pipeline for mul with 16-bit activations and 8-bit weights."""
    pipeline = TosaPipelineINT[input_t1](
        Mul(),
        test_data(),
        aten_op,
        exir_op=[],
        per_channel_quantization=False,
        use_to_edge_transform_and_lower=True,
        tosa_extensions=["int16"],
    )

    # Replace the default quantize stage with the a16w8 variant so the
    # pipeline actually exercises int16 activations.
    a16w8_stage = get_symmetric_a16w8_mul_quantizer(per_channel_quantization=False)
    pipeline.change_args("quantize", a16w8_stage)
    pipeline.run()
| 337 | + |
| 338 | + |
@common.parametrize("test_data", test_data_suite)
@common.XfailIfNoCorstone300
@pytest.mark.xfail(
    reason="Vela compilation fails with 'Invalid arguments' for int16 mul operations. See: https://github.com/pytorch/executorch/issues/13947"
)
def test_mul_tensor_16a8w_u55_INT16(test_data: input_t1):
    """Run the Ethos-U55 INT pipeline for mul with 16-bit activations and 8-bit weights."""
    pipeline = EthosU55PipelineINT[input_t1](
        Mul(),
        test_data(),
        aten_op,
        exir_ops=[],
        per_channel_quantization=False,
        use_to_edge_transform_and_lower=True,
        run_on_fvp=True,
    )

    # Swap in the a16w8 quantize stage before executing on the FVP.
    a16w8_stage = get_symmetric_a16w8_mul_quantizer(per_channel_quantization=False)
    pipeline.change_args("quantize", a16w8_stage)
    pipeline.run()
| 365 | + |
| 366 | + |
@common.parametrize("test_data", test_data_suite)
@common.XfailIfNoCorstone320
@pytest.mark.xfail(
    reason="Vela compilation fails with 'Invalid arguments' for int16 mul operations. See: https://github.com/pytorch/executorch/issues/13947"
)
def test_mul_tensor_16a8w_u85_INT16(test_data: input_t1):
    """Run the Ethos-U85 INT pipeline for mul with 16-bit activations and 8-bit weights."""
    pipeline = EthosU85PipelineINT[input_t1](
        Mul(),
        test_data(),
        aten_op,
        exir_ops=[],
        per_channel_quantization=False,
        use_to_edge_transform_and_lower=True,
        run_on_fvp=True,
    )

    # Swap in the a16w8 quantize stage before executing on the FVP.
    a16w8_stage = get_symmetric_a16w8_mul_quantizer(per_channel_quantization=False)
    pipeline.change_args("quantize", a16w8_stage)
    pipeline.run()
0 commit comments