Commit a95892a

Introduce CMake workflow
The CMake workflow combines configure and build into one command. Instead of running:

```
cmake --preset llm \
  -DEXECUTORCH_BUILD_CUDA=ON \
  -DCMAKE_INSTALL_PREFIX=cmake-out \
  -DCMAKE_BUILD_TYPE=Release \
  -Bcmake-out -S.
cmake --build cmake-out -j$(nproc) --target install --config Release
```

we can simply run `cmake --workflow llm-release-cuda`, which greatly reduces the burden of typing out these CMake commands. As a next step, I'm going to create workflows for the popular runners (llama, whisper, voxtral, etc.) and simplify the build command further.

ghstack-source-id: d86adc5
Pull Request resolved: #15804
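For reference, a minimal usage sketch (assuming CMake 3.25 or newer, which introduced workflow presets; the shorthand `cmake --workflow <name>` without `--preset`, as used in this commit, additionally requires a more recent CMake release):

```
# From the repository root: list the workflow presets defined in CMakePresets.json
cmake --workflow --list-presets

# Configure, build, and install the CUDA-enabled LLM libraries in one step
cmake --workflow --preset llm-release-cuda
```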
1 parent: 6de1f4e

10 files changed, +326 -85 lines
.ci/scripts/test_llama_lora.sh
Lines changed: 1 addition & 4 deletions

```diff
@@ -12,10 +12,7 @@ source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
 cmake_install_executorch_libraries() {
   echo "Installing libexecutorch.a, libextension_module.so, libportable_ops_lib.a"
   rm -rf cmake-out
-  retry cmake --preset llm \
-    -DCMAKE_INSTALL_PREFIX=cmake-out \
-    -DCMAKE_BUILD_TYPE=Release
-  cmake --build cmake-out -j9 --target install --config Release
+  cmake --workflow llm-release
 }

 cmake_build_llama_runner() {
```

.ci/scripts/test_model_e2e.sh
Lines changed: 3 additions & 6 deletions

```diff
@@ -157,20 +157,17 @@ echo "::endgroup::"
 echo "::group::Build $MODEL_NAME Runner"

 if [ "$DEVICE" = "cuda" ]; then
+  WORKFLOW="llm-release-cuda"
   BUILD_BACKEND="EXECUTORCH_BUILD_CUDA"
 elif [ "$DEVICE" = "metal" ]; then
+  WORKFLOW="llm-release-metal"
   BUILD_BACKEND="EXECUTORCH_BUILD_METAL"
 else
   echo "Error: Unsupported device '$DEVICE'. Must be 'cuda' or 'metal'."
   exit 1
 fi

-cmake --preset llm \
-  -D${BUILD_BACKEND}=ON \
-  -DCMAKE_INSTALL_PREFIX=cmake-out \
-  -DCMAKE_BUILD_TYPE=Release \
-  -Bcmake-out -S.
-cmake --build cmake-out -j$(nproc) --target install --config Release
+cmake --workflow $WORKFLOW

 cmake -D${BUILD_BACKEND}=ON \
   -DCMAKE_BUILD_TYPE=Release \
```

.ci/scripts/test_phi_3_mini.sh
Lines changed: 10 additions & 2 deletions

```diff
@@ -23,8 +23,16 @@ if hash nproc &> /dev/null; then NPROC=$(nproc); fi

 cmake_install_executorch_libraries() {
   rm -rf cmake-out
-  cmake --preset llm -DCMAKE_INSTALL_PREFIX=cmake-out -DCMAKE_BUILD_TYPE=${BUILD_TYPE}
-  cmake --build cmake-out -j16 --target install --config ${BUILD_TYPE}
+
+  # Select workflow preset based on BUILD_TYPE
+  if [[ "${BUILD_TYPE}" == "Debug" ]]; then
+    WORKFLOW_PRESET="llm-debug"
+  else
+    WORKFLOW_PRESET="llm-release"
+  fi
+
+  echo "Using workflow preset: ${WORKFLOW_PRESET}"
+  cmake --workflow --preset ${WORKFLOW_PRESET}
 }

 cmake_build_phi_3_mini() {
```

CMakePresets.json
Lines changed: 259 additions & 37 deletions

```diff
@@ -119,38 +119,118 @@
       }
     },
     {
-      "name": "llm",
-      "displayName": "Build LLM libraries",
-      "inherits": ["common"],
-      "cacheVariables": {
-        "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/llm.cmake",
-        "CMAKE_OSX_DEPLOYMENT_TARGET": "12.0"
-      },
-      "condition": {
-        "type": "inList",
-        "string": "${hostSystemName}",
-        "list": ["Darwin", "Linux", "Windows"]
-      }
+      "name": "llm",
+      "displayName": "Build LLM libraries",
+      "inherits": [
+        "common"
+      ],
+      "cacheVariables": {
+        "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/llm.cmake",
+        "CMAKE_OSX_DEPLOYMENT_TARGET": "12.0"
+      },
+      "condition": {
+        "type": "inList",
+        "string": "${hostSystemName}",
+        "list": ["Darwin", "Linux", "Windows"]
+      }
     },
     {
-      "name": "profiling",
-      "displayName": "Build ExecuTorch with Profiling Enabled",
-      "inherits": [
-        "common"
-      ],
-      "cacheVariables": {
-        "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/profiling.cmake",
-        "CMAKE_OSX_DEPLOYMENT_TARGET": "12.0"
-      },
-      "condition": {
-        "type": "inList",
-        "string": "${hostSystemName}",
-        "list": [
-          "Darwin",
-          "Linux",
-          "Windows"
-        ]
-      }
+      "name": "llm-release",
+      "displayName": "LLM release build",
+      "inherits": [
+        "llm"
+      ],
+      "cacheVariables": {
+        "CMAKE_BUILD_TYPE": "Release",
+        "CMAKE_INSTALL_PREFIX": "${sourceDir}/cmake-out"
+      }
+    },
+    {
+      "name": "llm-release-cuda",
+      "displayName": "LLM release build with CUDA",
+      "inherits": [
+        "llm-release"
+      ],
+      "cacheVariables": {
+        "EXECUTORCH_BUILD_CUDA": "ON"
+      },
+      "condition": {
+        "lhs": "${hostSystemName}",
+        "type": "equals",
+        "rhs": "Linux"
+      }
+    },
+    {
+      "name": "llm-release-metal",
+      "displayName": "LLM release build with Metal",
+      "inherits": [
+        "llm-release"
+      ],
+      "cacheVariables": {
+        "EXECUTORCH_BUILD_METAL": "ON"
+      },
+      "condition": {
+        "lhs": "${hostSystemName}",
+        "type": "equals",
+        "rhs": "Darwin"
+      }
+    },
+    {
+      "name": "llm-debug",
+      "displayName": "LLM debug build",
+      "inherits": [
+        "llm"
+      ],
+      "cacheVariables": {
+        "CMAKE_BUILD_TYPE": "Debug",
+        "CMAKE_INSTALL_PREFIX": "${sourceDir}/cmake-out"
+      }
+    },
+    {
+      "name": "llm-debug-cuda",
+      "displayName": "LLM debug build with CUDA",
+      "inherits": [
+        "llm-debug"
+      ],
+      "cacheVariables": {
+        "EXECUTORCH_BUILD_CUDA": "ON"
+      },
+      "condition": {
+        "lhs": "${hostSystemName}",
+        "type": "equals",
+        "rhs": "Linux"
+      }
+    },
+    {
+      "name": "llm-debug-metal",
+      "displayName": "LLM debug build with Metal",
+      "inherits": [
+        "llm-debug"
+      ],
+      "cacheVariables": {
+        "EXECUTORCH_BUILD_METAL": "ON"
+      },
+      "condition": {
+        "lhs": "${hostSystemName}",
+        "type": "equals",
+        "rhs": "Darwin"
+      }
+    },
+    {
+      "name": "profiling",
+      "displayName": "Build ExecuTorch with Profiling Enabled",
+      "inherits": [
+        "common"
+      ],
+      "cacheVariables": {
+        "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/profiling.cmake",
+        "CMAKE_OSX_DEPLOYMENT_TARGET": "12.0"
+      },
+      "condition": {
+        "type": "inList",
+        "string": "${hostSystemName}",
+        "list": ["Darwin", "Linux", "Windows"]
+      }
     },
     {
       "name": "windows",
@@ -177,13 +257,155 @@
       }
     },
     {
-      "name": "arm-baremetal",
-      "displayName": "Build ExecuTorch for Arm baremetal",
-      "inherits": ["common"],
-      "cacheVariables": {
-        "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/arm_baremetal.cmake",
-        "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/examples/arm/ethos-u-setup/arm-none-eabi-gcc.cmake"
-      }
+      "name": "arm-baremetal",
+      "displayName": "Build ExecuTorch for Arm baremetal",
+      "inherits": ["common"],
+      "cacheVariables": {
+        "EXECUTORCH_BUILD_PRESET_FILE": "${sourceDir}/tools/cmake/preset/arm_baremetal.cmake",
+        "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/examples/arm/ethos-u-setup/arm-none-eabi-gcc.cmake"
+      }
+    }
+  ],
+  "buildPresets": [
+    {
+      "name": "llm-release-install",
+      "displayName": "Build and install LLM extension release artifacts",
+      "configurePreset": "llm-release",
+      "targets": [
+        "install"
+      ],
+      "jobs": 0
+    },
+    {
+      "name": "llm-release-cuda-install",
+      "displayName": "Build and install LLM extension release artifacts (CUDA)",
+      "configurePreset": "llm-release-cuda",
+      "targets": [
+        "install"
+      ],
+      "jobs": 0
+    },
+    {
+      "name": "llm-release-metal-install",
+      "displayName": "Build and install LLM extension release artifacts (Metal)",
+      "configurePreset": "llm-release-metal",
+      "targets": [
+        "install"
+      ],
+      "jobs": 0
+    },
+    {
+      "name": "llm-debug-install",
+      "displayName": "Build and install LLM extension debug artifacts",
+      "configurePreset": "llm-debug",
+      "targets": [
+        "install"
+      ],
+      "jobs": 0
+    },
+    {
+      "name": "llm-debug-cuda-install",
+      "displayName": "Build and install LLM extension debug artifacts (CUDA)",
+      "configurePreset": "llm-debug-cuda",
+      "targets": [
+        "install"
+      ],
+      "jobs": 0
+    },
+    {
+      "name": "llm-debug-metal-install",
+      "displayName": "Build and install LLM extension debug artifacts (Metal)",
+      "configurePreset": "llm-debug-metal",
+      "targets": [
+        "install"
+      ],
+      "jobs": 0
+    }
+  ],
+  "workflowPresets": [
+    {
+      "name": "llm-release",
+      "displayName": "Configure, build and install ExecuTorch LLM extension with default CPU backend",
+      "steps": [
+        {
+          "type": "configure",
+          "name": "llm-release"
+        },
+        {
+          "type": "build",
+          "name": "llm-release-install"
+        }
+      ]
+    },
+    {
+      "name": "llm-release-cuda",
+      "displayName": "Configure, build and install ExecuTorch LLM extension with CUDA enabled",
+      "steps": [
+        {
+          "type": "configure",
+          "name": "llm-release-cuda"
+        },
+        {
+          "type": "build",
+          "name": "llm-release-cuda-install"
+        }
+      ]
+    },
+    {
+      "name": "llm-release-metal",
+      "displayName": "Configure, build and install ExecuTorch LLM extension with Metal enabled",
+      "steps": [
+        {
+          "type": "configure",
+          "name": "llm-release-metal"
+        },
+        {
+          "type": "build",
+          "name": "llm-release-metal-install"
+        }
+      ]
+    },
+    {
+      "name": "llm-debug",
+      "displayName": "Configure, build and install ExecuTorch LLM extension with default CPU backend (Debug)",
+      "steps": [
+        {
+          "type": "configure",
+          "name": "llm-debug"
+        },
+        {
+          "type": "build",
+          "name": "llm-debug-install"
+        }
+      ]
+    },
+    {
+      "name": "llm-debug-cuda",
+      "displayName": "Configure, build and install ExecuTorch LLM extension with CUDA enabled (Debug)",
+      "steps": [
+        {
+          "type": "configure",
+          "name": "llm-debug-cuda"
+        },
+        {
+          "type": "build",
+          "name": "llm-debug-cuda-install"
+        }
+      ]
+    },
+    {
+      "name": "llm-debug-metal",
+      "displayName": "Configure, build and install ExecuTorch LLM extension with Metal enabled (Debug)",
+      "steps": [
+        {
+          "type": "configure",
+          "name": "llm-debug-metal"
+        },
+        {
+          "type": "build",
+          "name": "llm-debug-metal-install"
+        }
+      ]
     }
   ]
 }
```
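The commit message mentions adding workflows for popular runners as a next step. As a rough sketch of how such an addition could look (the `llm-release-vulkan` names below are hypothetical and not part of this commit), a new variant only needs a configure preset, an `install` build preset, and a workflow preset that chains them:

```json
{
  "configurePresets": [
    {
      "name": "llm-release-vulkan",
      "displayName": "LLM release build with Vulkan (hypothetical)",
      "inherits": ["llm-release"],
      "cacheVariables": { "EXECUTORCH_BUILD_VULKAN": "ON" }
    }
  ],
  "buildPresets": [
    {
      "name": "llm-release-vulkan-install",
      "configurePreset": "llm-release-vulkan",
      "targets": ["install"]
    }
  ],
  "workflowPresets": [
    {
      "name": "llm-release-vulkan",
      "steps": [
        { "type": "configure", "name": "llm-release-vulkan" },
        { "type": "build", "name": "llm-release-vulkan-install" }
      ]
    }
  ]
}
```

These fragments would be merged into the corresponding arrays of the existing CMakePresets.json rather than used as a standalone file.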

examples/models/gemma3/README.md
Lines changed: 1 addition & 6 deletions

```diff
@@ -82,12 +82,7 @@ Ensure you have a CUDA-capable GPU and CUDA toolkit installed on your system.
 ./install_executorch.sh

 # Build the multimodal runner with CUDA
-cmake --preset llm \
-  -DEXECUTORCH_BUILD_CUDA=ON \
-  -DCMAKE_INSTALL_PREFIX=cmake-out \
-  -DCMAKE_BUILD_TYPE=Release \
-  -Bcmake-out -S.
-cmake --build cmake-out -j$(nproc) --target install --config Release
+cmake --workflow llm-release-cuda

 # Build the Gemma3 runner
 cmake -DEXECUTORCH_BUILD_CUDA=ON \
```

examples/models/llama/README.md
Lines changed: 3 additions & 5 deletions

````diff
@@ -235,11 +235,9 @@ If you're interested in deploying on non-CPU backends, [please refer the non-cpu
 ## Step 3: Run on your computer to validate

 1. Build executorch with optimized CPU performance as follows. Build options available [here](https://github.com/pytorch/executorch/blob/main/CMakeLists.txt#L59).
-```
-cmake --preset llm -DCMAKE_BUILD_TYPE=Release -DCMAKE_INSTALL_PREFIX=cmake-out
-
-cmake --build cmake-out -j16 --target install --config Release
-```
+```
+cmake --workflow llm-release
+```
 Note for Mac users: There's a known linking issue with Xcode 15.1. Refer to the section of Common Issues and Mitigations below for solutions.

 2. Build llama runner.
````
