Merged (45 commits)
9ab662c
Initial skeleton codes for Exynos Backend
Jiseong-oh Aug 4, 2025
24e6dd9
Provide a python interface for compilation
Jiseong-oh Aug 5, 2025
6f5720c
Add wrappers for samsung backend
Jiseong-oh Aug 5, 2025
57579e5
Serialization from EXIR
Jiseong-oh Aug 5, 2025
15f6812
Support converting EXIR to Exynos's own IR
Jiseong-oh Aug 5, 2025
d547835
Implement Exynos backend
Jiseong-oh Aug 8, 2025
99313fd
update the license
Jiseong-oh Aug 8, 2025
aa31232
Make enn executor for enn backend
Jiseong-oh Aug 20, 2025
66bd9ea
add README.md file
Jiseong-oh Aug 22, 2025
5fa3dd3
fix: fix backend Name
Jiseong-oh Aug 26, 2025
ac7119d
Add CMakefile for build
Jiseong-oh Aug 22, 2025
385711b
add build script for exynos backend
Jiseong-oh Aug 22, 2025
0bc88fd
Add exynos backend for executorch's make/README.md
Jiseong-oh Aug 26, 2025
28f9dab
Complete the runtime pipeline
chong-chen01 Aug 27, 2025
c46b081
change class Name Enn backend
Jiseong-oh Aug 27, 2025
96ea729
Make partitioner and preprocess work
chong-chen01 Aug 27, 2025
407b23a
Initialize common ops and support ic3/resnet models
chong-chen01 Aug 27, 2025
1e41b20
update README.md file
Jiseong-oh Aug 27, 2025
05f1a44
add exynos backend to _executorch_backends
Jiseong-oh Aug 27, 2025
83b7c6c
fix argument data type of main
Jiseong-oh Aug 27, 2025
ed1112e
apply updating makefiles
Jiseong-oh Aug 27, 2025
613b1d6
fixed lint issues
Jiseong-oh Aug 28, 2025
07d9c61
update copyright for node_visitor
Jiseong-oh Aug 28, 2025
6870437
Add SamsungTester and build basic test pipeline
chong-chen01 Aug 29, 2025
b4443e8
Add test cases for samsung enn backend in ci
chong-chen01 Sep 1, 2025
b36a000
fix ci issue
Jiseong-oh Sep 3, 2025
2ee20e3
add --insecure option
Jiseong-oh Sep 3, 2025
bff032f
Merge branch 'main' into exynos-backend
Jiseong-oh Sep 3, 2025
b892967
fix LD_LIBRARY_PATH bound error
Jiseong-oh Sep 4, 2025
b4ce9f3
Merge branch 'exynos-backend' of https://github.com/Jiseong-oh/execut…
Jiseong-oh Sep 4, 2025
9eb7a50
fix issue where library path is reset due to set -u
Jiseong-oh Sep 4, 2025
4f28bad
ci: change docker image for ci
Jiseong-oh Sep 4, 2025
2b3bc79
Merge branch 'pytorch:main' into exynos-backend
Jiseong-oh Sep 4, 2025
40a27e3
fixed ci machine of runner
Jiseong-oh Sep 4, 2025
527440c
Merge branch 'exynos-backend' of https://github.com/Jiseong-oh/execut…
Jiseong-oh Sep 4, 2025
95d8f3f
fix ci issues to setup environment
Jiseong-oh Sep 5, 2025
7f5a06b
Fixed CI issue: download NDK before compiling Exynos backend
Jiseong-oh Sep 5, 2025
9adac0c
fix ci issue due to permission
Jiseong-oh Sep 5, 2025
bad80e0
fix wrong param issue
Jiseong-oh Sep 5, 2025
ac315c2
add testcase for ops
Jiseong-oh Sep 5, 2025
602f714
ci: remove insecure option for downloading LiteCore
Jiseong-oh Sep 8, 2025
02e347e
fixed target_path for ci
Jiseong-oh Sep 8, 2025
35fd1f3
Merge branch 'main' into exynos-backend
Jiseong-oh Sep 8, 2025
f6b1025
fixed CMake option name
Jiseong-oh Sep 8, 2025
4994e6b
Merge branch 'exynos-backend' of https://github.com/Jiseong-oh/execut…
Jiseong-oh Sep 8, 2025
4 changes: 4 additions & 0 deletions CMakeLists.txt
@@ -576,6 +576,10 @@ if(EXECUTORCH_BUILD_QNN)
list(APPEND _executorch_backends qnn_executorch_backend)
endif()

if(EXECUTORCH_BUILD_SAMSUNG)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/samsung)
GregoryComer (Member), Aug 26, 2025:
Can you also add the backend target to _executorch_backends (similar to line 585)? This will allow it to be included in the executorch_kernels target.

Jiseong-oh (Collaborator, Author):
Thanks. I added enn_backend to _executorch_backends.
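For reference, a minimal sketch of the resolved change, mirroring the QNN and XNNPACK entries in this file; the exact form in the merged version may differ:

```cmake
if(EXECUTORCH_BUILD_SAMSUNG)
  add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/samsung)
  # Register the backend so it is picked up by the aggregate
  # executorch_backends / executorch_kernels targets.
  list(APPEND _executorch_backends enn_backend)
endif()
```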

endif()

if(EXECUTORCH_BUILD_XNNPACK)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/backends/xnnpack)
list(APPEND _executorch_backends xnnpack_backend)
1 change: 1 addition & 0 deletions LICENSE
@@ -8,6 +8,7 @@ Copyright (c) Qualcomm Innovation Center, Inc.
Copyright (c) 2023 Apple Inc.
Copyright (c) 2024 MediaTek Inc.
Copyright 2023 NXP
Copyright (c) 2025 Samsung Electronics Co. LTD

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
141 changes: 141 additions & 0 deletions backends/samsung/CMakeLists.txt
@@ -0,0 +1,141 @@
# Copyright (c) 2025 Samsung Electronics Co. LTD
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.


cmake_minimum_required(VERSION 3.15)
set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

get_filename_component(EXECUTORCH_SOURCE_DIR "${CMAKE_CURRENT_LIST_DIR}/../.." ABSOLUTE)

if(NOT DEFINED EXYNOS_AI_LITECORE_PATH)
message(FATAL_ERROR "Please define EXYNOS_AI_LITECORE_PATH by adding cmake parameter -DEXYNOS_AI_LITECORE_PATH=<...>")
endif()
if(CMAKE_TOOLCHAIN_FILE MATCHES ".*(iOS|ios\.toolchain)\.cmake$")
message(FATAL_ERROR "iOS is not supported on Exynos.")
endif()

if(NOT FLATC_EXECUTABLE)
set(FLATC_EXECUTABLE flatc)
endif()

add_compile_options(-Wall -Werror -fPIC)
if(CMAKE_BUILD_TYPE STREQUAL "Release")
# strip symbols
add_link_options("-s")
# hide dynamic symbols
set(CMAKE_C_VISIBILITY_PRESET hidden)
set(CMAKE_CXX_VISIBILITY_PRESET hidden)
add_definitions(-DNDEBUG)
endif()

include_directories(${EXECUTORCH_SOURCE_DIR}/..)
include_directories(${EXYNOS_AI_LITECORE_PATH})

if(${ANDROID})
find_library(android_log log)
endif()

# add logging library
add_library(enn_logging STATIC)


if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "x86_64")
GregoryComer (Member), Aug 26, 2025:
We'll likely want to build and install the python extensions as part of the install logic in setup.py. It will ensure that the extensions are placed in the proper python path and work out of box when enabled.

That could be done as a follow-up. I'm looking at refactoring the QNN backend to do something similar and we can follow that pattern once merged.

Jiseong-oh (Collaborator, Author):
Thanks for the suggestion. Once model support in the Exynos backend is complete, we will consider adopting that refactoring.

Contributor:
I created an issue to track this as a follow-up after this PR lands: #14000

add_subdirectory(
${EXECUTORCH_SOURCE_DIR}/third-party/pybind11
${CMAKE_CURRENT_BINARY_DIR}/pybind11
)
add_library(PyEnnWrapperAdaptor MODULE)

find_library(GG_API_LIB NAMES graphgen_api HINTS ${EXYNOS_AI_LITECORE_PATH}/lib64/)
add_library(graphgen_api SHARED IMPORTED GLOBAL)
set_target_properties(graphgen_api PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${EXYNOS_AI_LITECORE_PATH}/include"
IMPORTED_LOCATION "${GG_API_LIB}"
)


set(_enn_compile_options_schema ${CMAKE_CURRENT_SOURCE_DIR}/serialization/compile_options_def.fbs)

set(_enn_schema_generate_dir "${CMAKE_BINARY_DIR}/schema/include/executorch/backends/samsung")
# Paths to headers generated from the .fbs files.
string(REGEX REPLACE ".*/([^/]+)[.]fbs$" "\\1_generated.h" generated_header "${_enn_compile_options_schema}")
set(_enn_schema_output "${_enn_schema_generate_dir}/${generated_header}")

# Generate the headers from the .fbs files.
add_custom_command(
OUTPUT ${_enn_schema_output}
COMMAND
${FLATC_EXECUTABLE} --cpp --cpp-std c++11 --scoped-enums -o
"${_enn_schema_generate_dir}"
${_enn_compile_options_schema}
DEPENDS ${_enn_compile_options_schema}
WORKING_DIRECTORY ${EXECUTORCH_SOURCE_DIR}
COMMENT "Generating enn compile options headers"
VERBATIM
)
add_custom_target(enn_compile_options_output ALL DEPENDS ${_enn_schema_output})

set_target_properties(PyEnnWrapperAdaptor PROPERTIES CXX_VISIBILITY_PRESET hidden)
target_link_libraries(PyEnnWrapperAdaptor PRIVATE
pybind11::module
pybind11::lto
graphgen_api
enn_logging
)
target_include_directories(PyEnnWrapperAdaptor BEFORE PRIVATE
${CMAKE_BINARY_DIR}/schema/include
${EXECUTORCH_SOURCE_DIR}/third-party/flatbuffers/include
)
add_dependencies(PyEnnWrapperAdaptor enn_compile_options_output)
pybind11_extension(PyEnnWrapperAdaptor)

# PyGraphWrapperAdaptor
add_library(PyGraphWrapperAdaptor MODULE)
#
find_library(GRAPH_WRAPPER_LIB NAMES graph_wrapper HINTS ${EXYNOS_AI_LITECORE_PATH}/lib64/)
add_library(graph_wrapper SHARED IMPORTED GLOBAL)
set_target_properties(graph_wrapper PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${EXYNOS_AI_LITECORE_PATH}/include"
IMPORTED_LOCATION "${GRAPH_WRAPPER_LIB}"
)
set_target_properties(PyGraphWrapperAdaptor PROPERTIES CXX_VISIBILITY_PRESET hidden)
target_link_libraries(PyGraphWrapperAdaptor PRIVATE
pybind11::module
pybind11::lto
graph_wrapper
enn_logging
)
pybind11_extension(PyGraphWrapperAdaptor)

add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/aot)
endif()


if(${ANDROID})
target_link_libraries(enn_logging PRIVATE ${android_log})
add_library(enn_backend STATIC)
target_link_libraries(enn_backend PRIVATE enn_logging)
target_link_options_shared_lib(enn_backend)

set(__enn_executor_runner_srcs ${EXECUTORCH_SOURCE_DIR}/examples/samsung/executor_runner/enn_executor_runner.cpp)
add_executable(enn_executor_runner ${__enn_executor_runner_srcs})
target_link_libraries(enn_executor_runner PRIVATE
enn_logging
enn_backend
gflags
executorch
extension_data_loader
portable_ops_lib
)
set_target_properties(enn_executor_runner PROPERTIES CXX_VISIBILITY_PRESET hidden)
install(
GregoryComer (Member), Aug 26, 2025:
To make this work with CMake's find_package functionality, you can add an EXPORT directive here. Here's an example from the XNNPACK backend:

install(
  TARGETS xnnpack_backend xnnpack_schema
  EXPORT ExecuTorchTargets
  DESTINATION ${CMAKE_INSTALL_LIBDIR}
  INCLUDES
  DESTINATION ${_common_include_directories}
)

This will also be necessary to add this backend target to the executorch_backends target.

Jiseong-oh (Collaborator, Author):
Thanks for the suggestion. I added the EXPORT directive as suggested.

TARGETS enn_backend enn_logging
DESTINATION lib
)
endif()
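Following the EXPORT discussion above, the install rule presumably ends up looking roughly like this. A sketch only: the ExecuTorchTargets export set and the destination are taken from the reviewer's XNNPACK example and are assumptions about the final code:

```cmake
install(
  TARGETS enn_backend enn_logging
  EXPORT ExecuTorchTargets
  DESTINATION ${CMAKE_INSTALL_LIBDIR}
)
```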

add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/runtime)
71 changes: 71 additions & 0 deletions backends/samsung/README.md
@@ -0,0 +1,71 @@
# ExecuTorch Samsung Exynos Delegate

The subtree contains Exynos delegation implementation for ExecuTorch. The target of delegation
Contributor:
Suggested change:
- The subtree contains Exynos delegation implementation for ExecuTorch. The target of delegation
+ The subtree contains Exynos delegate implementation for ExecuTorch. The target of delegation

Jiseong-oh (Collaborator, Author):
Thanks, it's fixed.

is deploying torch model run with exynos NPU/DSP.
Contributor:
Suggested change:
- is deploying torch model run with exynos NPU/DSP.
+ is deploying torch model to run with exynos NPU/DSP.

Jiseong-oh (Collaborator, Author):
Thanks, it's fixed.


This backend is implemented on top of [EXYNOS_LITECORE](https://soc-developer.semiconductor.samsung.com/global/development/light-core).
Please prepare the SDK before you start; it is required for both code compilation and runtime.
Contributor:
How do we do that? Is there documentation, or a clear README?

The ideal path is for us to have exynos_litecore as a pip package, so that you can just do pip install exynos_litecore.

Jiseong-oh (Collaborator, Author):
We'll consider installing the LiteCore library using pip later.

Contributor:
Created an issue to track: #14004


## Delegate Options

### Supported Chipset
- Exynos 2500 (E9955)

### Supported Inference Type
- Quantized (i8/u8/i16/u16)
Contributor:
What about int4? Is it under development?

Jiseong-oh (Collaborator, Author), Aug 27, 2025:
Yes, it is under development now. Once it is ready, we will add int4.

Contributor:
Created an issue to track: #14003

- FP16

## Directory Structure

```
backends/samsung
├── aot # Code for generating the binary buffer for the ENN runtime.
├── builders # Code for lowering each operator.
├── partition # ENN Partitioner.
├── passes # Various passes helping lower models to the ENN backend.
├── python # Place for pybind artifacts used to access Samsung libraries.
├── runtime # ENN runtime for executing lowered models.
├── scripts # Misc supporting scripts, not related to core functionality.
└── serialization # Code for building the Exynos graph IR and serializing it.
examples
└── samsung # Examples to run ENN backends.
```

## How to build
Please download Exynos AI LiteCore and set the root path of the SDK directory to `EXYNOS_AI_LITECORE_PATH`.<br/>
Please navigate to [Android NDK](https://developer.android.com/ndk) and download a version of the NDK.
`ANDROID_NDK` refers to the root path of the NDK directory.<br/>

### Set up environment variables
```bash
export LD_LIBRARY_PATH=${EXYNOS_AI_LITECORE_PATH}/lib64
```

### Build AOT Targets
Generated python artifacts allow user call `Compile` interface to lower a model to ENN backend in python script.
Contributor:
Suggested change:
- Generated python artifacts allow user call `Compile` interface to lower a model to ENN backend in python script.
+ Generates python artifacts that allow user call `compile` interface to lower a model to ENN backend in python script.

Jiseong-oh (Collaborator, Author), Aug 27, 2025:
Thanks, it's fixed. But I think the previous wording was right for our goal.

```bash
./backends/samsung/build.sh -b x86_64
Contributor:
Do you want to add a basic end-to-end integration test to our CI? Can we run everything in an x86 environment, by any chance?

See example test cases in https://github.com/pytorch/executorch/blob/main/.github/workflows/pull.yml (runs on every PR before merging with main) and https://github.com/pytorch/executorch/blob/main/.github/workflows/trunk.yml (jobs that run after the PR merges with main).

Jiseong-oh (Collaborator, Author):
Could we add the e2e test to your CI after all the code under development is merged? I think you also need to get a Samsung device for CI testing.

Contributor:
> i think you also need to get samsung device for testing CI.

Yeah, we can look into it. Also, is there an emulator on Linux (either x86 or aarch64) or Mac for Exynos?

Contributor:
I still think we should add basic unit tests, at least for ahead-of-time stuff such as the partitioner and serialization. I suppose they won't require Samsung phones, right?

For e2e, we can do it after the PR lands as a fast follow-on.

Jiseong-oh (Collaborator, Author):
> yeah, we can look into it. also, is there an emulator on linux (either x86 or aarch64) or mac for exynos?

Unfortunately, we don't have an emulator. Could we discuss this on Slack?

Jiseong-oh (Collaborator, Author), Aug 28, 2025:
> I still think we should add basic unit test, at least for ahead-of-time stuff such as partitioner and serialization. i suppose they won't require samsung phones, right?
> for e2e, we can do it after the PR lands as a fast-follow-on.

You mean we are going to test only up to the point where the PTE is generated, right? If so, that does not require a Samsung device.

Contributor:
For e2e integration test, created an issue to track: #14002

```
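For orientation, a hedged sketch of what the ahead-of-time lowering flow could look like once these artifacts are built. The `EnnPartitioner` import path and constructor are assumptions based on the directory layout above (a real invocation likely also needs chipset compile specs), not the PR's confirmed API:

```python
import torch
from executorch.exir import to_edge_transform_and_lower

# Hypothetical import path -- the partitioner lives under backends/samsung/partition in this PR.
from executorch.backends.samsung.partition.enn_partitioner import EnnPartitioner

model = torch.nn.Linear(16, 16).eval()
example_inputs = (torch.randn(1, 16),)

# Export, partition supported subgraphs to the ENN backend, and serialize to a .pte file.
exported = torch.export.export(model, example_inputs)
edge = to_edge_transform_and_lower(exported, partitioner=[EnnPartitioner()])
executorch_program = edge.to_executorch()

with open("model_enn.pte", "wb") as f:
    f.write(executorch_program.buffer)
```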

### Build ENN Runtime
```bash
./backends/samsung/build.sh -b android --ndk ${ANDROID_NDK}
```
ANDROID_ABI=arm64-v8a is default, necessary runtime executable generate in `build_exynos_android` directory.
Contributor:
Suggested change:
- ANDROID_ABI=arm64-v8a is default, necessary runtime executable generate in `build_exynos_android` directory.
+ ANDROID_ABI=arm64-v8a is default, necessary runtime executable generated in `build_exynos_android` directory.

Jiseong-oh (Collaborator, Author):
Thanks, it's fixed.

Contributor:
What does it generate?

Jiseong-oh (Collaborator, Author):
The executor runner for Exynos will be generated.
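A hedged example of how the generated runner might be invoked on a connected device. The on-device paths and the `--model_path` flag name are illustrative assumptions, not confirmed by this PR:

```bash
# Push the runner and a lowered program to the device, then run it (illustrative paths).
adb push build_exynos_android/enn_executor_runner /data/local/tmp/
adb push model_enn.pte /data/local/tmp/
adb shell "cd /data/local/tmp && ./enn_executor_runner --model_path ./model_enn.pte"
```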


### Build Android Extension
This is later exposed to the Java app. Please turn on the CMake option `EXECUTORCH_BUILD_ENN`, and the ENN runtime will be added.
```bash
cmake extension/android \
-DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
-DANDROID_ABI="${ANDROID_ABI}" \
-DCMAKE_INSTALL_PREFIX=cmake-android-out \
-Bcmake-android-out/extension/android
Comment on lines +59 to +64
Contributor:
I don't see EXECUTORCH_BUILD_ENN in the CMake options.

Contributor:
cc @Jiseong-oh please take a look at Kimish's question.

Jiseong-oh (Collaborator, Author):
Fixed it.


cmake --build cmake-android-out/extension/android -j8
```
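Per the review thread above, the configure step presumably also needs the backend flag. A hedged sketch: the README names `EXECUTORCH_BUILD_ENN`, while the root CMakeLists.txt in this PR checks `EXECUTORCH_BUILD_SAMSUNG`, so the exact option name may differ in the final version:

```bash
cmake extension/android \
    -DCMAKE_TOOLCHAIN_FILE="${ANDROID_NDK}/build/cmake/android.toolchain.cmake" \
    -DANDROID_ABI="${ANDROID_ABI}" \
    -DEXECUTORCH_BUILD_SAMSUNG=ON \
    -DCMAKE_INSTALL_PREFIX=cmake-android-out \
    -Bcmake-android-out/extension/android
```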

## Examples

Please see this [README.md](../../examples/samsung/README.md).
15 changes: 15 additions & 0 deletions backends/samsung/aot/CMakeLists.txt
@@ -0,0 +1,15 @@
# Copyright (c) 2025 Samsung Electronics Co. LTD
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

target_sources(
PyEnnWrapperAdaptor PUBLIC PyEnnWrapperAdaptor.cpp
PyEnnWrapperAdaptor.h
)
target_sources(
PyGraphWrapperAdaptor PUBLIC PyGraphWrapperAdaptor.cpp
PyGraphWrapperAdaptor.h wrappers/op_param_wrapper.h
wrappers/op_wrapper.h wrappers/tensor_wrapper.h
)
29 changes: 29 additions & 0 deletions backends/samsung/aot/PyEnnWrapperAdaptor.cpp
@@ -0,0 +1,29 @@
/*
* Copyright (c) 2025 Samsung Electronics Co. LTD
* All rights reserved
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*
*/

#include <executorch/backends/samsung/aot/PyEnnWrapperAdaptor.h>
#include <pybind11/pybind11.h>

namespace torch {
namespace executor {
namespace enn {
PYBIND11_MODULE(PyEnnWrapperAdaptor, m) {
pybind11::class_<PyEnnWrapper, std::shared_ptr<PyEnnWrapper>>(m, "EnnWrapper")
.def(pybind11::init())
.def("Init", &PyEnnWrapper::Init)
.def("IsNodeSupportedByBackend", &PyEnnWrapper::IsNodeSupportedByBackend)
.def(
"Compile",
&PyEnnWrapper::Compile,
"Ahead of time compilation for serialized graph.")
.def("Destroy", &PyEnnWrapper::Destroy, "Release resources.");
}
} // namespace enn
} // namespace executor
} // namespace torch
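For context, a hedged sketch of how these bindings might be exercised from Python once the extension is built. The import path is an assumption (the README places pybind artifacts under backends/samsung/python), and the compile-options bytes are left empty as a placeholder:

```python
import numpy as np

# Hypothetical import path for the built pybind11 extension.
from executorch.backends.samsung.python import PyEnnWrapperAdaptor

wrapper = PyEnnWrapperAdaptor.EnnWrapper()
wrapper.Init(b"")  # serialized compile options (see serialization/compile_options_def.fbs)
serialized_graph = np.fromfile("serialized_graph.bin", dtype=np.int8)  # placeholder input
nnc_binary = wrapper.Compile(serialized_graph)  # returns the compiled NNC buffer as a char array
wrapper.Destroy()
```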
79 changes: 79 additions & 0 deletions backends/samsung/aot/PyEnnWrapperAdaptor.h
@@ -0,0 +1,79 @@
/*
* Copyright (c) 2025 Samsung Electronics Co. LTD
* All rights reserved
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*
*/
#pragma once

#include <include/graphgen_c.h>
#include <include/graphgen_common.h>
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>

#include <iostream>
#include <memory>
#include <vector>

namespace py = pybind11;

namespace torch {
namespace executor {
namespace enn {

class PyEnnWrapper {
public:
PyEnnWrapper() {}

void Init(const py::bytes& compile_opts) {
graphgen_instance_ = graphgen_create();
}

bool IsNodeSupportedByBackend() {
return false;
}

py::array_t<char> Compile(const py::array_t<char>& model_buffer) {
if (graphgen_instance_ == nullptr) {
ENN_LOG_ERROR("Please call `Init()` first before compile.");
return py::array_t<char>();
}


auto m_buf_info = model_buffer.request();
auto* model_buf_ptr = reinterpret_cast<uint8_t*>(m_buf_info.ptr);
NNCBuffer* nnc_buffer = nullptr;
if (graphgen_generate(
graphgen_instance_, model_buf_ptr, m_buf_info.size, &nnc_buffer) !=
GraphGenResult::SUCCESS) {
ENN_LOG_ERROR("Compile model failed.");
return py::array_t<char>();
}

auto result = py::array_t<char>({nnc_buffer->size}, {sizeof(char)});
auto result_buf = result.request();
memcpy(result_buf.ptr, nnc_buffer->addr, nnc_buffer->size);

graphgen_release_buffer(graphgen_instance_, nnc_buffer);

return result;
}

void Destroy() {
graphgen_release(graphgen_instance_);
graphgen_instance_ = nullptr;
}

~PyEnnWrapper() {
Destroy();
}

private:
// pointer to enn software entry
void* graphgen_instance_ = nullptr;
};
} // namespace enn
} // namespace executor
} // namespace torch