diff --git a/backends/example/TARGETS b/backends/example/TARGETS
index 4bcc77e2a10..59df492e027 100644
--- a/backends/example/TARGETS
+++ b/backends/example/TARGETS
@@ -53,7 +53,6 @@ python_unittest(
         "//caffe2:torch",
         "//executorch/exir:delegate",
         "//executorch/exir:lib",
-        "//executorch/exir/backend:backend_api",
         "//executorch/exir/backend/canonical_partitioners:canonical_partitioner_lib",
         "//pytorch/vision:torchvision",
     ],
diff --git a/backends/example/test_example_delegate.py b/backends/example/test_example_delegate.py
index 27354e02ad8..d85e8e87229 100644
--- a/backends/example/test_example_delegate.py
+++ b/backends/example/test_example_delegate.py
@@ -11,7 +11,7 @@
 from executorch import exir
 from executorch.backends.example.example_partitioner import ExamplePartitioner
 from executorch.backends.example.example_quantizer import ExampleQuantizer
-from executorch.exir.backend.backend_api import to_backend
+from executorch.exir import to_edge
 from executorch.exir.backend.canonical_partitioners.duplicate_dequant_node_pass import (
     DuplicateDequantNodePass,
 )
@@ -19,8 +19,8 @@
 from executorch.exir.delegate import executorch_call_delegate
 
 from torch.ao.quantization.quantize_pt2e import convert_pt2e, prepare_pt2e
+from torch.export import export
 
-# @manual=//pytorch/vision:torchvision
 from torchvision.models.quantization import mobilenet_v2
 
 
@@ -40,7 +40,6 @@ def get_example_inputs():
         model = Conv2dModule()
         example_inputs = Conv2dModule.get_example_inputs()
 
-        CAPTURE_CONFIG = exir.CaptureConfig(enable_aot=True)
         EDGE_COMPILE_CONFIG = exir.EdgeCompileConfig(
             _check_ir_validity=False,
         )
@@ -59,24 +58,23 @@ def get_example_inputs():
         m = convert_pt2e(m)
         quantized_gm = m
 
-        exported_program = exir.capture(
-            quantized_gm, copy.deepcopy(example_inputs), CAPTURE_CONFIG
-        ).to_edge(EDGE_COMPILE_CONFIG)
+        exported_program = to_edge(
+            export(quantized_gm, copy.deepcopy(example_inputs)),
+            compile_config=EDGE_COMPILE_CONFIG,
+        )
 
-        lowered_export_program = to_backend(
-            exported_program.exported_program,
+        lowered_export_program = exported_program.to_backend(
             ExamplePartitioner(),
         )
 
         print("After lowering to qnn backend: ")
-        lowered_export_program.graph.print_tabular()
+        lowered_export_program.exported_program().graph.print_tabular()
 
     def test_delegate_mobilenet_v2(self):
         model = mobilenet_v2(num_classes=3)
         model.eval()
         example_inputs = (torch.rand(1, 3, 320, 240),)
 
-        CAPTURE_CONFIG = exir.CaptureConfig(enable_aot=True)
         EDGE_COMPILE_CONFIG = exir.EdgeCompileConfig(
             _check_ir_validity=False,
         )
@@ -91,20 +89,22 @@ def test_delegate_mobilenet_v2(self):
         m = convert_pt2e(m)
         quantized_gm = m
 
-        exported_program = exir.capture(
-            quantized_gm, copy.deepcopy(example_inputs), CAPTURE_CONFIG
-        ).to_edge(EDGE_COMPILE_CONFIG)
+        exported_program = to_edge(
+            export(quantized_gm, copy.deepcopy(example_inputs)),
+            compile_config=EDGE_COMPILE_CONFIG,
+        )
 
-        lowered_export_program = to_backend(
-            exported_program.transform(DuplicateDequantNodePass()).exported_program,
+        lowered_export_program = exported_program.transform(
+            [DuplicateDequantNodePass()]
+        ).to_backend(
             ExamplePartitioner(),
         )
 
-        lowered_export_program.graph.print_tabular()
+        lowered_export_program.exported_program().graph.print_tabular()
 
         call_deleage_node = [
             node
-            for node in lowered_export_program.graph.nodes
+            for node in lowered_export_program.exported_program().graph.nodes
             if node.target == executorch_call_delegate
         ]
         self.assertEqual(len(call_deleage_node), 1)
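
For reference, a minimal sketch of the lowering flow these tests migrate to: `to_edge(export(...))` followed by `EdgeProgramManager.to_backend(...)`, replacing `exir.capture(...).to_edge(...)` and the free-function `to_backend`. The `SmallModule` stand-in and its inputs are hypothetical, and the `prepare_pt2e`/`convert_pt2e` quantization steps from the tests are omitted, so `ExamplePartitioner` may find nothing to delegate here; the snippet only illustrates the call sequence, not backend coverage.

```python
import torch
from executorch import exir
from executorch.backends.example.example_partitioner import ExamplePartitioner
from executorch.exir import to_edge
from torch.export import export


class SmallModule(torch.nn.Module):  # hypothetical stand-in model
    def forward(self, x):
        return torch.nn.functional.relu(x)


example_inputs = (torch.rand(1, 3, 8, 8),)

# torch.export.export replaces exir.capture(..., CaptureConfig(enable_aot=True))
exported = export(SmallModule(), example_inputs)

# to_edge returns an EdgeProgramManager; EdgeCompileConfig is passed via compile_config
edge = to_edge(
    exported,
    compile_config=exir.EdgeCompileConfig(_check_ir_validity=False),
)

# to_backend is now a method on the manager instead of
# exir.backend.backend_api.to_backend(exported_program, partitioner)
lowered = edge.to_backend(ExamplePartitioner())

# The underlying graph is reached through exported_program() on the manager
lowered.exported_program().graph.print_tabular()
```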