
Commit a484fb2

Pull request pytorch#69: Fix path to skipped test in test_integration.py
Merge in AITEC/executorch from hotfix/nxf93343/fix-skipped-integration-test to main-nxp

* commit 'c798a8661641fd61adfd8621a1924e7b0c886d06':
  Fix batch norm tests overwriting static context
  Fix path to skipped test in test_integration.py

2 parents (0603bab + c798a86), commit a484fb2

3 files changed: 32 additions & 10 deletions

backends/nxp/tests/executors.py: 15 additions & 0 deletions

@@ -206,3 +206,18 @@ def convert_run_compare(edge_program: ExportedProgram, input_data, rtol=1.e-5, a
                          " number of outputs. Testing is not implemented for this case.")
 
     return tflite_executor, edge_program_executor
+
+
+class OverrideSupportedTargets:
+
+    def __init__(self, converter_class, *, new_targets):
+        self._converter_class = converter_class
+        self._new_targets = new_targets
+
+        self._old_targets = self._converter_class.supported_targets
+
+    def __enter__(self):
+        self._converter_class.supported_targets = self._new_targets
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self._converter_class.supported_targets = self._old_targets
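The new OverrideSupportedTargets context manager is what fixes the "overwriting static context" issue named in the commit message: it snapshots the converter class's supported_targets class attribute on entry and restores it on exit, so a test can no longer leak an override into tests that run later. A minimal usage sketch, with a hypothetical FooConverter standing in for a real converter class (any class exposing a supported_targets class attribute behaves the same way):

    from executorch.backends.nxp.tests.executors import OverrideSupportedTargets

    class FooConverter:                      # hypothetical converter, for illustration only
        supported_targets = ["some-target"]

    with OverrideSupportedTargets(FooConverter, new_targets=[]):
        assert FooConverter.supported_targets == []              # overridden inside the block
    assert FooConverter.supported_targets == ["some-target"]     # restored on exit

Because the restore happens in __exit__, it runs even if the body raises, which the old tests' bare attribute assignments could not guarantee.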

backends/nxp/tests/test_batch_norm_fusion.py: 5 additions & 5 deletions

@@ -11,6 +11,7 @@
 from executorch.backends.nxp.pytorch_passes.fuse_batch_norm_with_linear_pass import FuseBatchNormWithLinearPass
 from executorch.backends.nxp.pytorch_passes.nxp_pytorch_pass_manager import NXPPyTorchPassManager
 from executorch.backends.nxp.tests.executorch_pipeline import to_quantized_edge_program
+from executorch.backends.nxp.tests.executors import OverrideSupportedTargets
 
 
 @pytest.fixture(autouse=True)

@@ -150,11 +151,10 @@ def test_batch_norm_linear_fusing__full_pipeline(bias: bool):
 
     # Don't delegate the Linear node, because there seems to be a bug with the NeutronConverter/NeutronPartitioner.
     # But that doesn't affect the validity of this test.
-    AddMMConverter.supported_targets = []
-    MMConverter.supported_targets = []
-
-    edge_program = to_quantized_edge_program(module, tuple(input_shape)).exported_program()
-    nodes = list(edge_program.graph.nodes)
+    with OverrideSupportedTargets(AddMMConverter, new_targets=[]):
+        with OverrideSupportedTargets(MMConverter, new_targets=[]):
+            edge_program = to_quantized_edge_program(module, tuple(input_shape)).exported_program()
+            nodes = list(edge_program.graph.nodes)
 
     assert len(nodes) == 14
     assert not any(node.op == 'call_function' and 'batch_norm' in node.target.__name__ for node in nodes)
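Since Python allows several context managers in a single with statement, the two nested blocks above could equivalently be written as one. A purely stylistic sketch, behaviourally identical to the committed version:

    with OverrideSupportedTargets(AddMMConverter, new_targets=[]), \
            OverrideSupportedTargets(MMConverter, new_targets=[]):
        edge_program = to_quantized_edge_program(module, tuple(input_shape)).exported_program()
        nodes = list(edge_program.graph.nodes)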

backends/nxp/tests/test_integration.py: 12 additions & 5 deletions

@@ -1,7 +1,6 @@
-import os.path
+import glob
 import pathlib
 
-import pytest
 import torch
 
 from executorch.backends.nxp.tests.executorch_pipeline import to_quantized_executorch_program

@@ -11,12 +10,20 @@
 _CURRENT_DIR = pathlib.Path(__file__).parent
 _PROJECT_DIR = _CURRENT_DIR.parent.parent.parent
 
-QUANTIZED_OPS_AOT_LIB_PATH = _PROJECT_DIR / "cmake-build-debug" / "kernels" / "quantized" / "libquantized_ops_aot_lib.so"
+
+def _get_quantized_ops_aot_lib_path(
+        default_path_pattern="pip-out/temp*/cmake-out/kernels/quantized/libquantized_ops_aot_lib.so"
+):
+    lib_path = glob.glob(f"{_PROJECT_DIR}/{default_path_pattern}")
+
+    if len(lib_path) < 1:
+        raise RuntimeError("Unable to find 'libquantized_ops_aot_lib'. Make sure you've built the project "
+                           "with './install_requirements.sh' or provided the correct path.")
+    return lib_path[0]
 
 
-@pytest.mark.skipif(not os.path.exists(QUANTIZED_OPS_AOT_LIB_PATH), reason="Quant OPS AoT library file not found.")
 def test_conv_fc_softmax__to_executorch_program():
-    torch.ops.load_library(str(QUANTIZED_OPS_AOT_LIB_PATH))
+    torch.ops.load_library(str(_get_quantized_ops_aot_lib_path()))
 
     model = ConvFCSoftmaxModule()
     input_shape = (1, 4, 5, 5)
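With the pytest.mark.skipif guard removed, a missing library now fails the test loudly via the RuntimeError above instead of being silently skipped. The default_path_pattern parameter also lets the lookup target a non-standard build directory; a hedged sketch of that usage, reusing the old cmake-build-debug location purely as an example pattern:

    # Default lookup: first glob match under pip-out/temp*/cmake-out/...
    torch.ops.load_library(_get_quantized_ops_aot_lib_path())

    # Custom pattern, e.g. for a local CMake build tree (example path only):
    torch.ops.load_library(_get_quantized_ops_aot_lib_path(
        default_path_pattern="cmake-build-debug/kernels/quantized/libquantized_ops_aot_lib.so"
    ))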
