-
Couldn't load subscription status.
- Fork 700
Exynos Backend for Executorch to bring up on Exynos SoC #13677
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 13 commits
9ab662c
24e6dd9
6f5720c
57579e5
15f6812
d547835
99313fd
aa31232
66bd9ea
5fa3dd3
ac7119d
385711b
0bc88fd
28f9dab
c46b081
96ea729
407b23a
1e41b20
05f1a44
83b7c6c
ed1112e
613b1d6
07d9c61
6870437
b4443e8
b36a000
2ee20e3
bff032f
b892967
b4ce9f3
9eb7a50
4f28bad
2b3bc79
40a27e3
527440c
95d8f3f
7f5a06b
9adac0c
bad80e0
ac315c2
602f714
02e347e
35fd1f3
f6b1025
4994e6b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| @@ -0,0 +1,141 @@ | ||||||||||||||||
| # Copyright (c) 2025 Samsung Electronics Co. LTD | ||||||||||||||||
| # All rights reserved | ||||||||||||||||
| # | ||||||||||||||||
| # This source code is licensed under the BSD-style license found in the | ||||||||||||||||
| # LICENSE file in the root directory of this source tree. | ||||||||||||||||
|
|
||||||||||||||||
|
|
||||||||||||||||
| cmake_minimum_required(VERSION 3.15) | ||||||||||||||||
| set(CMAKE_CXX_STANDARD 17) | ||||||||||||||||
| set(CMAKE_CXX_STANDARD_REQUIRED ON) | ||||||||||||||||
|
|
||||||||||||||||
| get_filename_component(EXECUTORCH_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../.." ABSOLUTE) | ||||||||||||||||
|
|
||||||||||||||||
if(NOT DEFINED EXYNOS_AI_LITECORE_PATH)
  # Fixed typo in the error message: it previously said EXYNOS_AI_LIRECORE_PATH,
  # which would send users hunting for the wrong cmake variable name.
  message(FATAL_ERROR "Please define EXYNOS_AI_LITECORE_PATH by adding cmake parameter -DEXYNOS_AI_LITECORE_PATH=<...>")
endif()
| if(CMAKE_TOOLCHAIN_FILE MATCHES ".*(iOS|ios\.toolchain)\.cmake$") | ||||||||||||||||
| message(FATAL_ERROR "IOS is not supported on Exynos.") | ||||||||||||||||
| endif() | ||||||||||||||||
|
|
||||||||||||||||
| if(NOT FLATC_EXECUTABLE) | ||||||||||||||||
| set(FLATC_EXECUTABLE flatc) | ||||||||||||||||
| endif() | ||||||||||||||||
|
|
||||||||||||||||
| add_compile_options(-Wall -Werror -fPIC) | ||||||||||||||||
| if(CMAKE_BUILD_TYPE STREQUAL "Release") | ||||||||||||||||
| # strip symbols | ||||||||||||||||
| add_link_options("-s") | ||||||||||||||||
| # hide dynamic symbols | ||||||||||||||||
| set(CMAKE_C_VISIBILITY_PRESET hidden) | ||||||||||||||||
| set(CMAKE_CXX_VISIBILITY_PRESET hidden) | ||||||||||||||||
| add_definitions(-DNDEBUG) | ||||||||||||||||
| endif() | ||||||||||||||||
|
|
||||||||||||||||
| include_directories(${EXECUTORCH_SOURCE_DIR}/..) | ||||||||||||||||
| include_directories(${EXYNOS_AI_LITECORE_PATH}) | ||||||||||||||||
|
|
||||||||||||||||
| if(${ANDROID}) | ||||||||||||||||
| find_library(android_log log) | ||||||||||||||||
| endif() | ||||||||||||||||
|
|
||||||||||||||||
| # add logging library | ||||||||||||||||
| add_library(enn_logging STATIC) | ||||||||||||||||
|
|
||||||||||||||||
|
|
||||||||||||||||
| if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64") | ||||||||||||||||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. We'll likely want to build and install the python extensions as part of the install logic in setup.py. It will ensure that the extensions are placed in the proper python path and work out of box when enabled. That could be done as a follow-up. I'm looking at refactoring the QNN backend to do something similar and we can follow that pattern once merged. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Thanks for sugestion. Once the behavior for the models for Exynos backend is complete, we will consider reflecting that Refactoring feature. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I created an issue to track here as a follow-up PR after this one lands: #14000 |
||||||||||||||||
| add_subdirectory( | ||||||||||||||||
| ${EXECUTORCH_SOURCE_DIR}/third-party/pybind11 | ||||||||||||||||
| ${CMAKE_CURRENT_BINARY_DIR}/pybind11 | ||||||||||||||||
| ) | ||||||||||||||||
| add_library(PyEnnWrapperAdaptor MODULE) | ||||||||||||||||
|
|
||||||||||||||||
| find_library(GG_API_LIB NAMES graphgen_api HINTS ${EXYNOS_AI_LITECORE_PATH}/lib64/) | ||||||||||||||||
| add_library(graphgen_api SHARED IMPORTED GLOBAL) | ||||||||||||||||
| set_target_properties(graphgen_api PROPERTIES | ||||||||||||||||
| INTERFACE_INCLUDE_DIRECTORIES "${EXYNOS_AI_LITECORE_PATH}/include" | ||||||||||||||||
| IMPORTED_LOCATION "${GG_API_LIB}" | ||||||||||||||||
| ) | ||||||||||||||||
|
|
||||||||||||||||
|
|
||||||||||||||||
| set(_enn_compile_options_schema ${CMAKE_CURRENT_SOURCE_DIR}/serialization/compile_options_def.fbs) | ||||||||||||||||
|
|
||||||||||||||||
set(_enn_schema_generate_dir "${CMAKE_BINARY_DIR}/schema/include/executorch/backends/samsung")
# Path to the header generated from the .fbs schema.
# NOTE(review): the original computed this via REGEX REPLACE on an undefined
# variable `fbs_file`, which left `generated_header` empty and produced a bogus
# output path. Derive the header name from the schema declared above instead.
get_filename_component(_enn_schema_name "${_enn_compile_options_schema}" NAME_WE)
set(_enn_schema_output "${_enn_schema_generate_dir}/${_enn_schema_name}_generated.h")
|
|
||||||||||||||||
| # Generate the headers from the .fbs files. | ||||||||||||||||
| add_custom_command( | ||||||||||||||||
| OUTPUT ${_enn_schema_output} | ||||||||||||||||
| COMMAND | ||||||||||||||||
| ${FLATC_EXECUTABLE} --cpp --cpp-std c++11 --scoped-enums -o | ||||||||||||||||
| "${_enn_schema_generate_dir}" | ||||||||||||||||
| ${_enn_compile_options_schema} | ||||||||||||||||
| DEPENDS ${_enn_compile_options_schema} | ||||||||||||||||
| WORKING_DIRECTORY ${EXECUTORCH_SOURCE_DIR} | ||||||||||||||||
| COMMENT "Generating enn compile options headers" | ||||||||||||||||
| VERBATIM | ||||||||||||||||
| ) | ||||||||||||||||
| add_custom_target(enn_compile_options_output ALL DEPENDS ${_enn_schema_output}) | ||||||||||||||||
|
|
||||||||||||||||
| set_target_properties(PyEnnWrapperAdaptor PROPERTIES CXX_VISIBILITY_PRESET hidden) | ||||||||||||||||
| target_link_libraries(PyEnnWrapperAdaptor PRIVATE | ||||||||||||||||
| pybind11::module | ||||||||||||||||
| pybind11::lto | ||||||||||||||||
| graphgen_api | ||||||||||||||||
| enn_logging | ||||||||||||||||
| ) | ||||||||||||||||
| target_include_directories(PyEnnWrapperAdaptor BEFORE PRIVATE | ||||||||||||||||
| ${CMAKE_BINARY_DIR}/schema/include | ||||||||||||||||
| ${EXECUTORCH_SOURCE_DIR}/third-party/flatbuffers/include | ||||||||||||||||
| ) | ||||||||||||||||
| add_dependencies(PyEnnWrapperAdaptor enn_compile_options_output) | ||||||||||||||||
| pybind11_extension(PyEnnWrapperAdaptor) | ||||||||||||||||
|
|
||||||||||||||||
| # PyGraphWrapperAdaptor | ||||||||||||||||
| add_library(PyGraphWrapperAdaptor MODULE) | ||||||||||||||||
| # | ||||||||||||||||
| find_library(GRAPH_WRAPPER_LIB NAMES graph_wrapper HINTS ${EXYNOS_AI_LITECORE_PATH}/lib64/) | ||||||||||||||||
| add_library(graph_wrapper SHARED IMPORTED GLOBAL) | ||||||||||||||||
| set_target_properties(graph_wrapper PROPERTIES | ||||||||||||||||
| INTERFACE_INCLUDE_DIRECTORIES "${EXYNOS_AI_LITECORE_PATH}/include" | ||||||||||||||||
| IMPORTED_LOCATION "${GRAPH_WRAPPER_LIB}" | ||||||||||||||||
| ) | ||||||||||||||||
| set_target_properties(PyGraphWrapperAdaptor PROPERTIES CXX_VISIBILITY_PRESET hidden) | ||||||||||||||||
| target_link_libraries(PyGraphWrapperAdaptor PRIVATE | ||||||||||||||||
| pybind11::module | ||||||||||||||||
| pybind11::lto | ||||||||||||||||
| graph_wrapper | ||||||||||||||||
| enn_logging | ||||||||||||||||
| ) | ||||||||||||||||
| pybind11_extension(PyGraphWrapperAdaptor) | ||||||||||||||||
|
|
||||||||||||||||
| add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/aot) | ||||||||||||||||
| endif() | ||||||||||||||||
|
|
||||||||||||||||
|
|
||||||||||||||||
| if(${ANDROID}) | ||||||||||||||||
| target_link_libraries(enn_logging PRIVATE ${android_log}) | ||||||||||||||||
| add_library(enn_backend STATIC) | ||||||||||||||||
| target_link_libraries(enn_backend PRIVATE enn_logging) | ||||||||||||||||
| target_link_options_shared_lib(enn_backend) | ||||||||||||||||
|
|
||||||||||||||||
| set(__enn_executor_runner_srcs ${EXECUTORCH_SOURCE_DIR}/examples/samsung/executor_runner/enn_executor_runner.cpp) | ||||||||||||||||
| add_executable(enn_executor_runner ${__enn_executor_runner_srcs}) | ||||||||||||||||
| target_link_libraries(enn_executor_runner PRIVATE | ||||||||||||||||
| enn_logging | ||||||||||||||||
| enn_backend | ||||||||||||||||
| gflags | ||||||||||||||||
| executorch | ||||||||||||||||
| extension_data_loader | ||||||||||||||||
| portable_ops_lib | ||||||||||||||||
| ) | ||||||||||||||||
| set_target_properties(enn_executor_runner PROPERTIES CXX_VISIBILITY_PRESET hidden) | ||||||||||||||||
| install( | ||||||||||||||||
|
||||||||||||||||
| install( | |
| TARGETS xnnpack_backend xnnpack_schema | |
| EXPORT ExecuTorchTargets | |
| DESTINATION ${CMAKE_INSTALL_LIBDIR} | |
| INCLUDES | |
| DESTINATION ${_common_include_directories} | |
| ) |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks for suggestion. I added EXPORT directive according to your suggestion.
| Original file line number | Diff line number | Diff line change | ||||
|---|---|---|---|---|---|---|
| @@ -0,0 +1,71 @@ | ||||||
| # ExecuTorch Samsung Exynos Delegate | ||||||
mergennachin marked this conversation as resolved.
Show resolved
Hide resolved
|
||||||
|
|
||||||
| The subtree contains Exynos delegation implementation for ExecuTorch. The target of delegation | ||||||
|
||||||
| The subtree contains Exynos delegation implementation for ExecuTorch. The target of delegation | |
| The subtree contains Exynos delegate implementation for ExecuTorch. The target of delegation |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks, it's fixed.
Outdated
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
| is deploying torch model run with exynos NPU/DSP. | |
| is deploying torch model to run with exynos NPU/DSP. |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks, it's fixed.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
How do we do that? Is there a documentation? Or clear readme?
Ideal path is for us to have exynos_litecore as pip package so that you can just do pip install exynos_litecore
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
we'll consider installing the LiteCore library using pip later.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Created an issue to track: #14004
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
what about int4? is it under dev?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
yes, right. it is under dev now. if it's prepared, we will add int4.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Created an issue to track: #14003
Outdated
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
| Generated python artifacts allow user call `Compile` interface to lower a model to ENN backend in python script. | |
| Generates python artifacts that allow user call `compile` interface to lower a model to ENN backend in python script. |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks, it's fixed. But I think the previous expression was right for our goal.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Do you wanna add a basic end-to-end integration test to our CI? Can we run everything in x86 environment by any chance?
See example test cases in https://github.com/pytorch/executorch/blob/main/.github/workflows/pull.yml (runs on every PR before merging with main) and https://github.com/pytorch/executorch/blob/main/.github/workflows/trunk.yml (jobs that runs after the PR merges with main)
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Could we add the e2e test to your CI after all the code currently under development is merged? I also think you would need a Samsung device for testing in CI.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
i think you also need to get samsung device for testing CI.
yeah, we can look into it. also, is there an emulator on linux (either x86 or aarch64) or mac for exynos?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I still think we should add basic unit test, at least for ahead-of-time stuff such as partitioner and serialization. i suppose they won't require samsung phones, right?
for e2e, we can do it after the PR lands as a fast-follow-on.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
i think you also need to get samsung device for testing CI.
yeah, we can look into it. also, is there an emulator on linux (either x86 or aarch64) or mac for exynos?
Unfortunately, we don't have the emulator. Could we discuss on slack for this?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I still think we should add basic unit test, at least for ahead-of-time stuff such as partitioner and serialization. i suppose they won't require samsung phones, right?
for e2e, we can do it after the PR lands as a fast-follow-on.
You mean we are going to test only up to the point where the PTE is generated, right? If yes, that doesn't require a Samsung device.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
For e2e integration test, created an issue to track: #14002
Outdated
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
| ANDROID_ABI=arm64-v8a is default, necessary runtime executable generate in `build_exynos_android` directory. | |
| ANDROID_ABI=arm64-v8a is default, necessary runtime executable generated in `build_exynos_android` directory. |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks, it's fixed.
Outdated
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
what does it generate
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
executor runner for exynos will be generated.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I don't see EXECUTORCH_BUILD_ENN in the cmake options
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
cc @Jiseong-oh please take a look at Kimish's question
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Fixed it.
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,15 @@ | ||
| # Copyright (c) 2025 Samsung Electronics Co. LTD | ||
| # All rights reserved | ||
| # | ||
| # This source code is licensed under the BSD-style license found in the | ||
| # LICENSE file in the root directory of this source tree. | ||
|
|
||
# Sources for the python adaptor modules. MODULE libraries cannot be linked
# against, so PUBLIC source propagation (INTERFACE_SOURCES) is meaningless
# here; list compilation units and private headers as PRIVATE.
target_sources(
  PyEnnWrapperAdaptor PRIVATE PyEnnWrapperAdaptor.cpp
                              PyEnnWrapperAdaptor.h
)
target_sources(
  PyGraphWrapperAdaptor PRIVATE PyGraphWrapperAdaptor.cpp
                                PyGraphWrapperAdaptor.h wrappers/op_param_wrapper.h
                                wrappers/op_wrapper.h wrappers/tensor_wrapper.h
)
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,29 @@ | ||
| /* | ||
| * Copyright (c) 2025 Samsung Electronics Co. LTD | ||
| * All rights reserved | ||
| * | ||
| * This source code is licensed under the BSD-style license found in the | ||
| * LICENSE file in the root directory of this source tree. | ||
| * | ||
| */ | ||
|
|
||
| #include <executorch/backends/samsung/aot/PyEnnWrapperAdaptor.h> | ||
| #include <pybind11/pybind11.h> | ||
|
|
||
| namespace torch { | ||
| namespace executor { | ||
| namespace enn { | ||
| PYBIND11_MODULE(PyEnnWrapperAdaptor, m) { | ||
| pybind11::class_<PyEnnWrapper, std::shared_ptr<PyEnnWrapper>>(m, "EnnWrapper") | ||
| .def(pybind11::init()) | ||
| .def("Init", &PyEnnWrapper::Init) | ||
| .def("IsNodeSupportedByBackend", &PyEnnWrapper::IsNodeSupportedByBackend) | ||
| .def( | ||
| "Compile", | ||
| &PyEnnWrapper::Compile, | ||
| "Ahead of time compilation for serialized graph.") | ||
| .def("Destroy", &PyEnnWrapper::Destroy, "Release resources."); | ||
| } | ||
| } // namespace enn | ||
| } // namespace executor | ||
| } // namespace torch |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,79 @@ | ||
| /* | ||
| * Copyright (c) 2025 Samsung Electronics Co. LTD | ||
| * All rights reserved | ||
| * | ||
| * This source code is licensed under the BSD-style license found in the | ||
| * LICENSE file in the root directory of this source tree. | ||
| * | ||
| */ | ||
| #pragma once | ||
|
|
||
#include <include/graphgen_c.h>
#include <include/graphgen_common.h>
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>

#include <cstring>
#include <iostream>
#include <memory>
#include <vector>
|
|
||
| namespace py = pybind11; | ||
|
|
||
| namespace torch { | ||
| namespace executor { | ||
| namespace enn { | ||
|
|
||
| class PyEnnWrapper { | ||
| public: | ||
| PyEnnWrapper() {} | ||
|
|
||
| void Init(const py::bytes& compile_opts) { | ||
| graphgen_instance_ = graphgen_create(); | ||
| } | ||
|
|
||
| bool IsNodeSupportedByBackend() { | ||
| return False; | ||
| } | ||
|
|
||
| py::array_t<char> Compile(const py::array_t<char>& model_buffer) { | ||
| if (graphgen_instance_ == nullptr) { | ||
| ENN_LOG_ERROR("Please call `Init()` first before compile."); | ||
| return py::array_t<char>(); | ||
| } | ||
|
|
||
|
|
||
| auto m_buf_info = model_buffer.request(); | ||
| auto* model_buf_ptr = reinterpret_cast<uint8_t*>(m_buf_info.ptr); | ||
| NNCBuffer* nnc_buffer = nullptr; | ||
| if (graphgen_generate( | ||
| graphgen_instance_, model_buf_ptr, m_buf_info.size, &nnc_buffer) != | ||
| GraphGenResult::SUCCESS) { | ||
| ENN_LOG_ERROR("Compile model failed."); | ||
| return py::array_t<char>(); | ||
| } | ||
|
|
||
| auto result = py::array_t<char>({nnc_buffer->size}, {sizeof(char)}); | ||
| auto result_buf = result.request(); | ||
| memcpy(result_buf.ptr, nnc_buffer->addr, nnc_buffer->size); | ||
|
|
||
| graphgen_release_buffer(graphgen_instance_, nnc_buffer); | ||
|
|
||
| return result; | ||
| } | ||
|
|
||
| void Destroy() { | ||
| graphgen_release(graphgen_instance_); | ||
| graphgen_instance_ = nullptr; | ||
| } | ||
|
|
||
| ~PyEnnWrapper() { | ||
| Destroy(); | ||
| } | ||
|
|
||
| private: | ||
| // pointer to enn software entry | ||
| void* graphgen_instance_ = nullptr; | ||
| }; | ||
| } // namespace enn | ||
| } // namespace executor | ||
| } // namespace torch |
Uh oh!
There was an error while loading. Please reload this page.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Can you also add the backend target to _executorch_backends (similar to line 585)? This will allow it to be included in the executorch_kernels target.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks. I add enn_backend to _executorch_backends.