diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml index 3a5b94ccc..36e44a0c5 100644 --- a/.github/workflows/pull.yml +++ b/.github/workflows/pull.yml @@ -915,9 +915,8 @@ jobs: - if: ${{ steps.install-et.outputs.cache-hit != 'true' }} continue-on-error: true run: | - echo "Intalling ExecuTorch" - export TORCHCHAT_ROOT=${PWD} - bash scripts/install_et.sh + echo "Installing ExecuTorch" + bash scripts/build_native.sh et - name: Install ET pip run: | echo "ET build directory" diff --git a/runner/build_android.sh b/runner/build_android.sh index 1bdc3f0c1..1b25e4531 100755 --- a/runner/build_android.sh +++ b/runner/build_android.sh @@ -30,7 +30,7 @@ export CMAKE_OUT_DIR="cmake-out-android" build_runner_et() { rm -rf cmake-out-android echo "ET BUILD DIR IS ${ET_BUILD_DIR}" - cmake -DET_USE_ADPATIVE_THREADS=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -S . -B cmake-out-android -G Ninja + cmake -DET_USE_ADAPTIVE_THREADS=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_TOOLCHAIN_FILE=$ANDROID_NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -S . -B cmake-out-android -G Ninja cmake --build cmake-out-android/ -j16 --config Release --target et_run } diff --git a/runner/et.cmake b/runner/et.cmake index 8c198ab58..6b233ae9c 100644 --- a/runner/et.cmake +++ b/runner/et.cmake @@ -50,8 +50,8 @@ if(executorch_FOUND) set(_srcs runner/run.cpp) set(_common_compile_options -D__ET__MODEL -D_GLIBCXX_USE_CXX11_ABI=1) - if(ET_USE_ADPATIVE_THREADS) - list(APPEND _common_compile_options -DET_USE_ADPATIVE_THREADS) + if(ET_USE_ADAPTIVE_THREADS) + list(APPEND _common_compile_options -DET_USE_ADAPTIVE_THREADS) set(EXECUTORCH_SRC_ROOT ${TORCHCHAT_ROOT}/${ET_BUILD_DIR}/src/executorch) set(XNNPACK_ROOT ${EXECUTORCH_SRC_ROOT}/backends/xnnpack) diff --git a/runner/run.cpp b/runner/run.cpp index dcbebef4f..e572bfe99 100644 --- a/runner/run.cpp +++ b/runner/run.cpp @@ -33,7 +33,7 @@ torch::Device cpu_device(torch::kCPU); #include #include -#if defined(ET_USE_ADPATIVE_THREADS) +#if defined(ET_USE_ADAPTIVE_THREADS) #include #include #endif @@ -823,7 +823,7 @@ int main(int argc, char* argv[]) { int vocab_size = -1; int llama_ver = 2; -#if defined(ET_USE_ADPATIVE_THREADS) +#if defined(ET_USE_ADAPTIVE_THREADS) uint32_t num_performant_cores = torch::executorch::cpuinfo::get_num_performant_cores(); if (num_performant_cores > 0) { diff --git a/scripts/install_utils.sh b/scripts/install_utils.sh index b09476dc8..3b3ad4926 100644 --- a/scripts/install_utils.sh +++ b/scripts/install_utils.sh @@ -38,7 +38,7 @@ clone_executorch() { install_executorch_python_libs() { if [ ! -d "${TORCHCHAT_ROOT}/${ET_BUILD_DIR}" ]; then echo "Directory ${TORCHCHAT_ROOT}/${ET_BUILD_DIR} does not exist."
- echo "Make sur eyou run clone_executorch" + echo "Make sure you run clone_executorch" exit 1 fi pushd ${TORCHCHAT_ROOT}/${ET_BUILD_DIR}/src @@ -63,7 +63,8 @@ COMMON_CMAKE_ARGS="\ -DEXECUTORCH_BUILD_KERNELS_OPTIMIZED=ON \ -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \ -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \ - -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON" + -DEXECUTORCH_BUILD_KERNELS_QUANTIZED=ON \ + -DEXECUTORCH_BUILD_XNNPACK=ON" install_executorch() { # AOT lib has to be build for model export