From ace3baa5296e08d06d8df54f7a5ad5dd611d385c Mon Sep 17 00:00:00 2001 From: Mustafa Cavus Date: Mon, 27 Oct 2025 15:44:28 -0700 Subject: [PATCH 01/18] build updates for yolo example --- examples/models/yolo12/CMakeLists.txt | 14 +++----------- examples/models/yolo12/requirements.txt | 2 +- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/examples/models/yolo12/CMakeLists.txt b/examples/models/yolo12/CMakeLists.txt index 60b11685bdf..9bcfd8e1ef4 100644 --- a/examples/models/yolo12/CMakeLists.txt +++ b/examples/models/yolo12/CMakeLists.txt @@ -2,7 +2,7 @@ cmake_minimum_required(VERSION 3.5) project(Yolo12DetectionDemo VERSION 0.1) -option(USE_OPENVINO_BACKEND "Build the tutorial with the OPENVINO backend" ON) +option(USE_OPENVINO_BACKEND "Build the tutorial with the OPENVINO backend" OFF) option(USE_XNNPACK_BACKEND "Build the tutorial with the XNNPACK backend" OFF) set(CMAKE_INCLUDE_CURRENT_DIR ON) @@ -38,21 +38,13 @@ list(APPEND link_libraries portable_ops_lib portable_kernels) executorch_target_link_options_shared_lib(portable_ops_lib) if(USE_XNNPACK_BACKEND) - set(xnnpack_backend_libs xnnpack_backend XNNPACK microkernels-prod) + set(xnnpack_backend_libs xnnpack_backend XNNPACK xnnpack-microkernels-prod) list(APPEND link_libraries ${xnnpack_backend_libs}) executorch_target_link_options_shared_lib(xnnpack_backend) endif() if(USE_OPENVINO_BACKEND) - add_subdirectory(${EXECUTORCH_ROOT}/backends/openvino openvino_backend) - - target_include_directories( - openvino_backend - INTERFACE - ${CMAKE_CURRENT_BINARY_DIR}/../../include - ${CMAKE_CURRENT_BINARY_DIR}/../../include/executorch/runtime/core/portable_type/c10 - ${CMAKE_CURRENT_BINARY_DIR}/../../lib - ) + find_package(OpenVINO REQUIRED) list(APPEND link_libraries openvino_backend) executorch_target_link_options_shared_lib(openvino_backend) endif() diff --git a/examples/models/yolo12/requirements.txt b/examples/models/yolo12/requirements.txt index de537f46170..383cf53aba9 100644 --- 
a/examples/models/yolo12/requirements.txt +++ b/examples/models/yolo12/requirements.txt @@ -1 +1 @@ -ultralytics==8.3.97 \ No newline at end of file +ultralytics==8.3.196 From f0d4fbf80ca1376380d46567ef4eaf3338f93f27 Mon Sep 17 00:00:00 2001 From: Mustafa Cavus Date: Mon, 27 Oct 2025 16:13:04 -0700 Subject: [PATCH 02/18] Remove llama runner build from openvino build script --- backends/openvino/scripts/openvino_build.sh | 57 ++++++++------------- 1 file changed, 22 insertions(+), 35 deletions(-) diff --git a/backends/openvino/scripts/openvino_build.sh b/backends/openvino/scripts/openvino_build.sh index 6d7853b96e5..c9bfb6440f7 100755 --- a/backends/openvino/scripts/openvino_build.sh +++ b/backends/openvino/scripts/openvino_build.sh @@ -16,6 +16,8 @@ install_requirements() { build_cpp_runtime() { echo "Building C++ Runtime Libraries" + local llm_enabled=${1:-0} + # Set build directory local build_dir="cmake-out" @@ -23,47 +25,33 @@ build_cpp_runtime() { cd "$EXECUTORCH_ROOT" rm -rf "${build_dir}" + CMAKE_ARGS=( + "-DCMAKE_BUILD_TYPE=Release" + "-DEXECUTORCH_BUILD_OPENVINO=ON" + "-DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON" + "-DEXECUTORCH_BUILD_EXTENSION_MODULE=ON" + "-DEXECUTORCH_BUILD_EXTENSION_NAMED_DATA_MAP=ON" + "-DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON" + "-DEXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR=ON" + "-DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON" + "-DEXECUTORCH_BUILD_EXECUTOR_RUNNER=ON" + "-DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" + ) + + if [[ "$llm_enabled" -eq 1 ]]; then + CMAKE_ARGS+=("-DEXECUTORCH_BUILD_EXTENSION_LLM=ON -DEXECUTORCH_BUILD_EXTENSION_LLM_RUNNER=ON") + fi + # Configure the project with CMake # Note: Add any additional configuration options you need here cmake -DCMAKE_INSTALL_PREFIX="${build_dir}" \ - -DCMAKE_BUILD_TYPE=Release \ - -DEXECUTORCH_BUILD_OPENVINO=ON \ - -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \ - -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \ - -DEXECUTORCH_BUILD_EXTENSION_NAMED_DATA_MAP=ON \ - 
-DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \ - -DEXECUTORCH_BUILD_EXTENSION_FLAT_TENSOR=ON \ - -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \ - -DEXECUTORCH_BUILD_EXECUTOR_RUNNER=ON \ - -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \ - -DEXECUTORCH_BUILD_EXTENSION_LLM=ON \ - -DEXECUTORCH_BUILD_EXTENSION_LLM_RUNNER=ON \ + ${CMAKE_ARGS[@]} \ -B"${build_dir}" - # Build the project cmake --build ${build_dir} --target install --config Release -j$(nproc) } -build_llama_runner() { - echo "Building Export Llama Runner" - - # Set build directory - local build_dir="cmake-out" - - # Enter the Executorch root directory - cd "$EXECUTORCH_ROOT" - - # Configure the project with CMake - # Note: Add any additional configuration options you need here - cmake -DCMAKE_INSTALL_PREFIX="${build_dir}" \ - -DCMAKE_BUILD_TYPE=Release \ - -B"${build_dir}"/examples/models/llama \ - examples/models/llama - # Build the export llama runner - cmake --build cmake-out/examples/models/llama -j$(nproc) --config Release -} - build_python_enabled() { echo "Building Python Package with Pybinding" @@ -92,7 +80,6 @@ main() { install_requirements build_python_enabled build_cpp_runtime - build_llama_runner # If the first arguments is --cpp_runtime, build libraries for C++ runtime elif [[ "$build_type" == "--cpp_runtime" ]]; then @@ -100,8 +87,8 @@ main() { # If the first arguments is --llama_runner, build export llama runner binary # Note: c++ runtime with openvino backend should be built before building export llama runner - elif [[ "$build_type" == "--llama_runner" ]]; then - build_llama_runner + elif [[ "$build_type" == "--cpp_runtime_llm" ]]; then + build_cpp_runtime 1 # If the first arguments is --enable_python, build python package with python bindings elif [[ "$build_type" == "--enable_python" ]]; then From df4082a0e7ffa5b57aa2facce9ec184bd1ed2cb8 Mon Sep 17 00:00:00 2001 From: Mustafa Cavus Date: Mon, 27 Oct 2025 16:34:42 -0700 Subject: [PATCH 03/18] Updated openvino llama readme --- 
examples/openvino/llama/README.md | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/examples/openvino/llama/README.md b/examples/openvino/llama/README.md index a98645b3918..1473491e7da 100644 --- a/examples/openvino/llama/README.md +++ b/examples/openvino/llama/README.md @@ -28,13 +28,19 @@ python -m executorch.extension.llm.export.export_llm \ OpenVINO backend also offers Quantization support for llama models when exporting the model. The different quantization modes that are offered are INT4 groupwise & per-channel weights compression and INT8 per-channel weights compression. It can be achieved by setting `pt2e_quantize` option in `llama3_2_ov_4wo.yaml` file under `quantization`. Set this parameter to `openvino_4wo` for INT4 or `openvino_8wo` for INT8 weight compression. It is set to `openvino_4wo` in `llama3_2_ov_4wo.yaml` file by default. For modifying the group size, set `group_size` option in `llama3_2_ov_4wo.yaml` file under `quantization`. By default group size 128 is used to achieve optimal performance with the NPU. 
## Build OpenVINO C++ Runtime with Llama Runner: -First, build the backend libraries by executing the script below in `/backends/openvino/scripts` folder: +First, build the backend libraries with llm extension by executing the script below in `/backends/openvino/scripts` folder: ```bash -./openvino_build.sh --cpp_runtime +./openvino_build.sh --cpp_runtime_llm ``` -Then, build the llama runner by executing the script below (with `--llama_runner` argument) also in `/backends/openvino/scripts` folder: +Then, build the llama runner by executing commands below in `` folder: ```bash -./openvino_build.sh --llama_runner +# Configure the project with CMake +cmake -DCMAKE_INSTALL_PREFIX=cmake-out \ + -DCMAKE_BUILD_TYPE=Release \ + -Bcmake-out/examples/models/llama \ + examples/models/llama +# Build the llama runner +cmake --build cmake-out/examples/models/llama -j$(nproc) --config Release ``` The executable is saved in `/cmake-out/examples/models/llama/llama_main` From 369615397fb4157eb86ef2d667c6af262db83332 Mon Sep 17 00:00:00 2001 From: Mustafa Cavus Date: Mon, 27 Oct 2025 16:45:58 -0700 Subject: [PATCH 04/18] Update OpenVINO backend readme file --- backends/openvino/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/backends/openvino/README.md b/backends/openvino/README.md index 5ce38ade56f..fcc8c32267c 100644 --- a/backends/openvino/README.md +++ b/backends/openvino/README.md @@ -109,9 +109,9 @@ Follow the steps below to setup your build environment: ```bash ./openvino_build.sh --cpp_runtime ``` - **Build C++ Llama Runner**: First, ensure the C++ runtime libraries are built by following the earlier instructions. Then, run the `openvino_build.sh` script with the `--llama_runner flag` to compile the LlaMA runner as shown the below command, which enables executing inference with models exported using export_llama. 
The resulting binary is located at: `/cmake-out/examples/models/llama/llama_main` + **Build C++ Runtime Libraries with LLM Extension**: Run the `openvino_build.sh` script with the `--cpp_runtime_llm` flag to build the C++ runtime libraries with LLM extension as shown in the below command. Use this option instead of `--cpp_runtime` for LLM extension support which is required by LLM examples. ```bash - ./openvino_build.sh --llama_runner + ./openvino_build.sh --cpp_runtime_llm ``` For more information about ExecuTorch environment setup, refer to the [Environment Setup](https://pytorch.org/executorch/main/getting-started-setup#environment-setup) guide. From 5175e36675e6a5b7cbb56713bfd0ea500639796c Mon Sep 17 00:00:00 2001 From: suryasidd Date: Thu, 30 Oct 2025 23:47:35 -0700 Subject: [PATCH 05/18] Separated requirements for building backend and examples --- backends/openvino/requirements.txt | 1 - backends/openvino/scripts/openvino_build.sh | 19 ++++++++++--------- .../{stable_diffusion => }/requirements.txt | 1 + examples/openvino/stable_diffusion/README.md | 2 +- 4 files changed, 12 insertions(+), 11 deletions(-) rename examples/openvino/{stable_diffusion => }/requirements.txt (56%) diff --git a/backends/openvino/requirements.txt b/backends/openvino/requirements.txt index 519818d0aac..88ae5f9546b 100644 --- a/backends/openvino/requirements.txt +++ b/backends/openvino/requirements.txt @@ -1,2 +1 @@ -transformers git+https://github.com/openvinotoolkit/nncf@3d753ac#egg=nncf diff --git a/backends/openvino/scripts/openvino_build.sh b/backends/openvino/scripts/openvino_build.sh index c9bfb6440f7..bc5d0e9cfbf 100755 --- a/backends/openvino/scripts/openvino_build.sh +++ b/backends/openvino/scripts/openvino_build.sh @@ -7,6 +7,9 @@ set -e EXECUTORCH_ROOT=$(realpath "$(dirname "$0")/../../..") echo EXECUTORCH_ROOT=${EXECUTORCH_ROOT} +# Enter the Executorch root directory +cd "$EXECUTORCH_ROOT" + install_requirements() { echo "Installing Requirements For OpenVINO Backend" cd 
"$EXECUTORCH_ROOT" @@ -21,8 +24,6 @@ build_cpp_runtime() { # Set build directory local build_dir="cmake-out" - # Enter the Executorch root directory - cd "$EXECUTORCH_ROOT" rm -rf "${build_dir}" CMAKE_ARGS=( @@ -56,8 +57,7 @@ build_python_enabled() { echo "Building Python Package with Pybinding" # Enter the Executorch root directory - cd "$EXECUTORCH_ROOT" - ./install_executorch.sh --clean + # cd "$EXECUTORCH_ROOT" # Set parameters to configure the project with CMake # Note: Add any additional configuration options you need here @@ -75,22 +75,23 @@ build_python_enabled() { main() { build_type=${1:-"--build_all"} - # If the first arguments is --build_all (default), build python package, C++ runtime, and llama runner binary + # If the first argument is --build_all (default), build python package, C++ runtime if [[ -z "$build_type" || "$build_type" == "--build_all" ]]; then + ./install_executorch.sh --clean install_requirements build_python_enabled build_cpp_runtime - # If the first arguments is --cpp_runtime, build libraries for C++ runtime + # If the first argument is --cpp_runtime, build libraries for C++ runtime elif [[ "$build_type" == "--cpp_runtime" ]]; then build_cpp_runtime - # If the first arguments is --llama_runner, build export llama runner binary - # Note: c++ runtime with openvino backend should be built before building export llama runner + # If the first argument is --cpp_runtime_llm, build C++ runtime with llm extension + # Note: c++ runtime with openvino backend should be built before building llama runner elif [[ "$build_type" == "--cpp_runtime_llm" ]]; then build_cpp_runtime 1 - # If the first arguments is --enable_python, build python package with python bindings + # If the first argument is --enable_python, build python package with python bindings elif [[ "$build_type" == "--enable_python" ]]; then install_requirements build_python_enabled diff --git a/examples/openvino/stable_diffusion/requirements.txt b/examples/openvino/requirements.txt 
similarity index 56% rename from examples/openvino/stable_diffusion/requirements.txt rename to examples/openvino/requirements.txt index 7a55f6c19b8..d97b6a4365a 100644 --- a/examples/openvino/stable_diffusion/requirements.txt +++ b/examples/openvino/requirements.txt @@ -1 +1,2 @@ +transformers diffusers>=0.29.0 \ No newline at end of file diff --git a/examples/openvino/stable_diffusion/README.md b/examples/openvino/stable_diffusion/README.md index fef1e3f50f9..c24ce10cce0 100644 --- a/examples/openvino/stable_diffusion/README.md +++ b/examples/openvino/stable_diffusion/README.md @@ -11,7 +11,7 @@ Follow the [instructions](../../../backends/openvino/README.md) of **Prerequisit ### Install dependencies ```bash -pip install -r requirements.txt +pip install -r ../requirements.txt ``` ## Export the Model From 10f3cfa98ea8082eb4c6a8e4d6d0631a3c08e62c Mon Sep 17 00:00:00 2001 From: suryasidd Date: Thu, 30 Oct 2025 23:58:40 -0700 Subject: [PATCH 06/18] Clean up commented code --- backends/openvino/scripts/openvino_build.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/backends/openvino/scripts/openvino_build.sh b/backends/openvino/scripts/openvino_build.sh index bc5d0e9cfbf..a444ac31fe0 100755 --- a/backends/openvino/scripts/openvino_build.sh +++ b/backends/openvino/scripts/openvino_build.sh @@ -12,7 +12,6 @@ cd "$EXECUTORCH_ROOT" install_requirements() { echo "Installing Requirements For OpenVINO Backend" - cd "$EXECUTORCH_ROOT" pip install -r backends/openvino/requirements.txt } @@ -57,7 +56,6 @@ build_python_enabled() { echo "Building Python Package with Pybinding" # Enter the Executorch root directory - # cd "$EXECUTORCH_ROOT" # Set parameters to configure the project with CMake # Note: Add any additional configuration options you need here From 86e5c52a299131c8c38e5fec2381b0071349fb0b Mon Sep 17 00:00:00 2001 From: cavusmustafa Date: Fri, 31 Oct 2025 14:47:46 -0700 Subject: [PATCH 07/18] Updated openvino build script and example requirements --- 
backends/openvino/scripts/openvino_build.sh | 3 --- examples/models/yolo12/requirements.txt | 2 ++ 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/backends/openvino/scripts/openvino_build.sh b/backends/openvino/scripts/openvino_build.sh index a444ac31fe0..0febd2fe208 100755 --- a/backends/openvino/scripts/openvino_build.sh +++ b/backends/openvino/scripts/openvino_build.sh @@ -65,9 +65,6 @@ build_python_enabled() { # Build the package ./install_executorch.sh --minimal - - # Install torchao - pip install third-party/ao } main() { diff --git a/examples/models/yolo12/requirements.txt b/examples/models/yolo12/requirements.txt index 383cf53aba9..76aa232839a 100644 --- a/examples/models/yolo12/requirements.txt +++ b/examples/models/yolo12/requirements.txt @@ -1 +1,3 @@ ultralytics==8.3.196 +opencv-python +torchvision From 1942f7b882b4413a0c8ca5a44dce3f8cf24db66c Mon Sep 17 00:00:00 2001 From: cavusmustafa Date: Fri, 31 Oct 2025 16:03:13 -0700 Subject: [PATCH 08/18] Openvino examples requirement updates --- examples/openvino/requirements.txt | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/examples/openvino/requirements.txt b/examples/openvino/requirements.txt index d97b6a4365a..3af03516719 100644 --- a/examples/openvino/requirements.txt +++ b/examples/openvino/requirements.txt @@ -1,2 +1,6 @@ +--extra-index-url https://download.pytorch.org/whl/nightly/cpu + +diffusers>=0.29.0 +torchvision transformers -diffusers>=0.29.0 \ No newline at end of file +timm From 3fb6f71f6cc5bf5e8fd562c1c0540495b138fd7b Mon Sep 17 00:00:00 2001 From: cavusmustafa Date: Fri, 31 Oct 2025 16:45:50 -0700 Subject: [PATCH 09/18] Remove openvino example requirements which is already installed in install_requirement.py --- examples/models/yolo12/requirements.txt | 1 - examples/openvino/requirements.txt | 4 ---- 2 files changed, 5 deletions(-) diff --git a/examples/models/yolo12/requirements.txt b/examples/models/yolo12/requirements.txt index 76aa232839a..1cf9ba6d7e6 
100644 --- a/examples/models/yolo12/requirements.txt +++ b/examples/models/yolo12/requirements.txt @@ -1,3 +1,2 @@ ultralytics==8.3.196 opencv-python -torchvision diff --git a/examples/openvino/requirements.txt b/examples/openvino/requirements.txt index 3af03516719..8c358d6b44c 100644 --- a/examples/openvino/requirements.txt +++ b/examples/openvino/requirements.txt @@ -1,6 +1,2 @@ ---extra-index-url https://download.pytorch.org/whl/nightly/cpu - diffusers>=0.29.0 -torchvision transformers -timm From fc5cbd80abf42e72969e934ccc39ad7674d2bf8a Mon Sep 17 00:00:00 2001 From: cavusmustafa Date: Fri, 31 Oct 2025 17:58:35 -0700 Subject: [PATCH 10/18] move requirements file to stable diffusion example only --- examples/openvino/{ => stable_diffusion}/requirements.txt | 1 - 1 file changed, 1 deletion(-) rename examples/openvino/{ => stable_diffusion}/requirements.txt (58%) diff --git a/examples/openvino/requirements.txt b/examples/openvino/stable_diffusion/requirements.txt similarity index 58% rename from examples/openvino/requirements.txt rename to examples/openvino/stable_diffusion/requirements.txt index 8c358d6b44c..4057c5ace9f 100644 --- a/examples/openvino/requirements.txt +++ b/examples/openvino/stable_diffusion/requirements.txt @@ -1,2 +1 @@ diffusers>=0.29.0 -transformers From 8b84dcddbd53108214770f2740706756dac8de4b Mon Sep 17 00:00:00 2001 From: Mustafa Cavus Date: Sun, 2 Nov 2025 20:04:10 -0800 Subject: [PATCH 11/18] Update README.md --- examples/openvino/stable_diffusion/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/openvino/stable_diffusion/README.md b/examples/openvino/stable_diffusion/README.md index c24ce10cce0..1f5882c53a7 100644 --- a/examples/openvino/stable_diffusion/README.md +++ b/examples/openvino/stable_diffusion/README.md @@ -11,7 +11,7 @@ Follow the [instructions](../../../backends/openvino/README.md) of **Prerequisit ### Install dependencies ```bash -pip install -r ../requirements.txt +pip install -r 
requirements.txt ``` ## Export the Model @@ -45,4 +45,4 @@ python openvino_lcm.py \ This implementation supports LCM-based Stable Diffusion models: - **SimianLuo/LCM_Dreamshaper_v7** -- **latent-consistency/lcm-sdxl** \ No newline at end of file +- **latent-consistency/lcm-sdxl** From 6e844857740437c05024414e8cf99691de796148 Mon Sep 17 00:00:00 2001 From: Mustafa Cavus Date: Mon, 3 Nov 2025 15:56:18 -0800 Subject: [PATCH 12/18] Update README.md --- examples/models/yolo12/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/models/yolo12/README.md b/examples/models/yolo12/README.md index 1a54f1a4a16..224f5a3554c 100644 --- a/examples/models/yolo12/README.md +++ b/examples/models/yolo12/README.md @@ -51,7 +51,7 @@ python export_and_validate.py --model_name yolo12s --input_dims=[1920,1080] --b OpenVINO quantized model: ```bash -python export_and_validate.py --model_name yolo12s --input_dims=[1920,1080] --backend openvino --quantize --video_input /path/to/calibration/video --device CPU +python export_and_validate.py --model_name yolo12s --input_dims=[1920,1080] --backend openvino --quantize --video_path /path/to/calibration/video --device CPU ``` XNNPACK: From 61a09c5ad091d6d3e68f7754e9e18a28aec79e77 Mon Sep 17 00:00:00 2001 From: Mustafa Cavus Date: Tue, 4 Nov 2025 11:00:51 -0800 Subject: [PATCH 13/18] Update README.md --- examples/openvino/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/examples/openvino/README.md b/examples/openvino/README.md index 83e3daf6849..03bb351d6ae 100644 --- a/examples/openvino/README.md +++ b/examples/openvino/README.md @@ -13,6 +13,11 @@ examples/openvino └── llama ├── README.md # Documentation for Llama example └── llama3_2_ov_4wo.yaml # Configuration file for exporting Llama3.2 with OpenVINO backend +└── stable_diffusion + ├── README.md # Documentation for Stable Diffusion example + ├── export_lcm.py # Script for exporting models + ├── openvino_lcm.py # Script for inference execution + 
└── requirements.txt # Requirements file for Stable Diffusion example ``` # Build Instructions for Examples @@ -20,6 +25,9 @@ examples/openvino ## Environment Setup Follow the [instructions](../../backends/openvino/README.md) of **Prerequisites** and **Setup** in `backends/openvino/README.md` to set up the OpenVINO backend. +## Example Requirements +OpenVINO backend examples have dependencies that can be installed by running the install_requirements.sh script in the Executorch root directory. + ## AOT step: The python script called `aot_optimize_and_infer.py` allows users to export deep learning models from various model suites (TIMM, Torchvision, Hugging Face) to a openvino backend using **Executorch**. Users can dynamically specify the model, input shape, and target device. From 554062d8a1367a93270876fa109b9aed20fdcf5f Mon Sep 17 00:00:00 2001 From: Mustafa Cavus Date: Tue, 4 Nov 2025 11:23:16 -0800 Subject: [PATCH 14/18] Update README.md --- examples/openvino/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/openvino/README.md b/examples/openvino/README.md index 03bb351d6ae..54e632c6881 100644 --- a/examples/openvino/README.md +++ b/examples/openvino/README.md @@ -26,7 +26,7 @@ examples/openvino Follow the [instructions](../../backends/openvino/README.md) of **Prerequisites** and **Setup** in `backends/openvino/README.md` to set up the OpenVINO backend. ## Example Requirements -OpenVINO backend examples have dependencies that can be installed by running the install_requirements.sh script in the Executorch root directory. +OpenVINO backend examples have dependencies that can be installed by running the `install_requirements.sh` script in the Executorch root directory. 
## AOT step: From d24652c4fef51c417c1e364bc371c97de9a2c36d Mon Sep 17 00:00:00 2001 From: Mustafa Cavus Date: Wed, 5 Nov 2025 10:38:56 -0800 Subject: [PATCH 15/18] Update README.md --- examples/openvino/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/openvino/README.md b/examples/openvino/README.md index 54e632c6881..310fd6186c9 100644 --- a/examples/openvino/README.md +++ b/examples/openvino/README.md @@ -26,7 +26,7 @@ examples/openvino Follow the [instructions](../../backends/openvino/README.md) of **Prerequisites** and **Setup** in `backends/openvino/README.md` to set up the OpenVINO backend. ## Example Requirements -OpenVINO backend examples have dependencies that can be installed by running the `install_requirements.sh` script in the Executorch root directory. +OpenVINO backend examples have dependencies that can be installed by running the `install_requirements.sh --example` script in the Executorch root directory. ## AOT step: From f9459f76ee62feb6d5beae872b114f109a37fc08 Mon Sep 17 00:00:00 2001 From: Mustafa Cavus Date: Wed, 5 Nov 2025 10:40:38 -0800 Subject: [PATCH 16/18] Update README.md --- examples/openvino/README.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/examples/openvino/README.md b/examples/openvino/README.md index 310fd6186c9..1ce2161c344 100644 --- a/examples/openvino/README.md +++ b/examples/openvino/README.md @@ -26,7 +26,11 @@ examples/openvino Follow the [instructions](../../backends/openvino/README.md) of **Prerequisites** and **Setup** in `backends/openvino/README.md` to set up the OpenVINO backend. ## Example Requirements -OpenVINO backend examples have dependencies that can be installed by running the `install_requirements.sh --example` script in the Executorch root directory. +OpenVINO backend examples have dependencies that can be installed by running the `install_requirements.sh` script with `--example` argument in the Executorch root directory. 
+ +``` +./install_requirements.sh --example +``` ## AOT step: From 7b11ea1cdc36e30ba24faf960a211f2ebe33c02d Mon Sep 17 00:00:00 2001 From: suryasidd Date: Wed, 5 Nov 2025 13:54:39 -0800 Subject: [PATCH 17/18] Updated requirements and Readmes --- backends/openvino/scripts/openvino_build.sh | 2 +- examples/models/yolo12/README.md | 7 ++++--- examples/openvino/README.md | 7 ------- 3 files changed, 5 insertions(+), 11 deletions(-) diff --git a/backends/openvino/scripts/openvino_build.sh b/backends/openvino/scripts/openvino_build.sh index 0febd2fe208..b871bff043d 100755 --- a/backends/openvino/scripts/openvino_build.sh +++ b/backends/openvino/scripts/openvino_build.sh @@ -64,7 +64,7 @@ build_python_enabled() { export CMAKE_BUILD_ARGS="--target openvino_backend" # Build the package - ./install_executorch.sh --minimal + ./install_executorch.sh } main() { diff --git a/examples/models/yolo12/README.md b/examples/models/yolo12/README.md index 224f5a3554c..3705916a589 100644 --- a/examples/models/yolo12/README.md +++ b/examples/models/yolo12/README.md @@ -31,13 +31,14 @@ To install ExecuTorch, follow this [guide](https://pytorch.org/executorch/stable ### Step 3: Install the demo requirements -Python demo requirements: +#### Python Demo Requirements +The demo requires the `ultralytics` package, which depends on `torch` and `torchvision`. 
Since these packages are already installed as dev dependencies, use `--upgrade-strategy only-if-needed` to avoid version conflicts: ```bash -python -m pip install -r examples/models/yolo12/requirements.txt +python -m pip install --upgrade-strategy only-if-needed -r requirements.txt ``` -Demo infenrece dependency - OpenCV library: +#### Demo Inference Dependency - OpenCV Library ### Step 4: Export the YOLO12 model to the ExecuTorch diff --git a/examples/openvino/README.md b/examples/openvino/README.md index 1ce2161c344..ca4d6b415d4 100644 --- a/examples/openvino/README.md +++ b/examples/openvino/README.md @@ -25,13 +25,6 @@ examples/openvino ## Environment Setup Follow the [instructions](../../backends/openvino/README.md) of **Prerequisites** and **Setup** in `backends/openvino/README.md` to set up the OpenVINO backend. -## Example Requirements -OpenVINO backend examples have dependencies that can be installed by running the `install_requirements.sh` script with `--example` argument in the Executorch root directory. - -``` -./install_requirements.sh --example -``` - ## AOT step: The python script called `aot_optimize_and_infer.py` allows users to export deep learning models from various model suites (TIMM, Torchvision, Hugging Face) to a openvino backend using **Executorch**. Users can dynamically specify the model, input shape, and target device. 
From 885d630decfab184b92a08344b30ae2c8505f02a Mon Sep 17 00:00:00 2001 From: suryasidd Date: Wed, 5 Nov 2025 15:39:56 -0800 Subject: [PATCH 18/18] Updated README --- examples/models/yolo12/README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/models/yolo12/README.md b/examples/models/yolo12/README.md index 3705916a589..19f96b9450f 100644 --- a/examples/models/yolo12/README.md +++ b/examples/models/yolo12/README.md @@ -38,9 +38,6 @@ The demo requires the `ultralytics` package, which depends on `torch` and `torch python -m pip install --upgrade-strategy only-if-needed -r requirements.txt ``` -#### Demo Inference Dependency - OpenCV Library - - ### Step 4: Export the YOLO12 model to the ExecuTorch OpenVINO: