Skip to content

Commit 6760651

Browse files
authored
feat(cli): support multi-gpus when running inference locally (#249)
Because - currently, local model inference is limited to a single GPU. This commit - adds support for multiple GPUs when running inference locally.
1 parent e38eefe commit 6760651

File tree

1 file changed

+9
-1
lines changed

1 file changed

+9
-1
lines changed

instill/helpers/cli.py

Lines changed: 9 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -118,6 +118,14 @@ def cli():
118118
action="store_true",
119119
required=False,
120120
)
121+
run_parser.add_argument(
122+
"-ng",
123+
"--num-of-gpus",
124+
help="number of gpus to use if gpu flag is on, default to 1",
125+
type=int,
126+
default=1,
127+
required=False,
128+
)
121129
run_parser.add_argument(
122130
"-t",
123131
"--tag",
@@ -311,7 +319,7 @@ def run(args):
311319
{args.name}:{args.tag} /bin/bash -c \
312320
\"serve build _model:entrypoint -o serve.yaml && \
313321
sed -i 's/app1/default/' serve.yaml && \
314-
sed -i 's/num_cpus: 0.0/num_gpus: 1.0/' serve.yaml && \
322+
sed -i 's/num_cpus: 0.0/num_gpus: {args.num_of_gpus}/' serve.yaml && \
315323
serve run serve.yaml\"",
316324
shell=True,
317325
check=True,

0 commit comments

Comments
 (0)