98 changes: 98 additions & 0 deletions recipes/vllm/kimi-k2.5/disagg-gb200-1p4d-tep4.yaml
@@ -0,0 +1,98 @@
name: "coreai_devtech_all-sa.kimi-vllm-disagg-gb200-1p4d-tep"
⚠️ Potential issue | 🟡 Minor

Recipe name looks truncated (`-tep` vs `-tep4`).

This creates an avoidable mismatch with the filename and can confuse sweep/result identification.

Suggested fix
-name: "coreai_devtech_all-sa.kimi-vllm-disagg-gb200-1p4d-tep"
+name: "coreai_devtech_all-sa.kimi-vllm-disagg-gb200-1p4d-tep4"
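A lightweight guard against this class of drift is to check each recipe's `name` against its filename stem at load time. The sketch below is illustrative only; the `check_recipe_name` helper and the use of PyYAML are assumptions, not part of this repo's tooling:

```python
# Hypothetical helper, not part of the recipe harness: verify that a
# recipe's "name" field ends with the filename stem so sweep/result IDs
# stay aligned with the file on disk.
from pathlib import Path

import yaml  # PyYAML, assumed available


def check_recipe_name(recipe_path: str) -> bool:
    path = Path(recipe_path)
    with path.open() as f:
        recipe = yaml.safe_load(f)
    # Stem of "disagg-gb200-1p4d-tep4.yaml" is "disagg-gb200-1p4d-tep4";
    # the name may carry a namespace prefix, so compare suffixes only.
    return str(recipe.get("name", "")).endswith(path.stem)


# "...-1p4d-tep" does not end with "disagg-gb200-1p4d-tep4", so this
# returns False here and True once the trailing "4" is restored.
```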


model:
path: "kimi-k2.5-nvfp4"
container: "v0.18.0"
precision: "fp4"

dynamo:
version: 1.0.1
install: true

setup_script: vllm-container-deps.sh

resources:
gpu_type: "gb200"
gpus_per_node: 4
prefill_nodes: 1
decode_nodes: 4
prefill_workers: 1
decode_workers: 4
gpus_per_prefill: 4
gpus_per_decode: 4

frontend:
type: dynamo
enable_multiple_frontends: false

backend:
type: vllm
connector: null

prefill_environment:
VLLM_USE_FLASHINFER_MOE_FP4: "1"
VLLM_USE_NCCL_SYMM_MEM: "1"
NCCL_CUMEM_ENABLE: "1"
NCCL_MNNVL_ENABLE: "1"
NCCL_NVLS_ENABLE: "1"

decode_environment:
VLLM_USE_FLASHINFER_MOE_FP4: "1"
VLLM_USE_NCCL_SYMM_MEM: "1"
NCCL_CUMEM_ENABLE: "1"
NCCL_MNNVL_ENABLE: "1"
NCCL_NVLS_ENABLE: "1"

vllm_config:
prefill:
kv-transfer-config: '{"kv_connector": "NixlConnector", "kv_role": "kv_both"}'
served-model-name: "nvidia/Kimi-K2.5-NVFP4"
kv-cache-dtype: "fp8"
tensor-parallel-size: 1
pipeline-parallel-size: 1
data-parallel-size: 4
data-parallel-rpc-port: 13345
enable-expert-parallel: true
max-model-len: 10240
max-num-seqs: 64
enforce-eager: true
compilation-config: '{"custom_ops":["+quant_fp8","+rms_norm","+rotary_embedding"],"pass_config":{"fuse_attn_quant":true,"fuse_allreduce_rms":true}}'
max-num-batched-tokens: 16384
safetensors-load-strategy: "prefetch"
trust-remote-code: true
no-enable-prefix-caching: true
no-enable-chunked-prefill: true
attention-backend: "FLASHINFER_MLA"
block-size: 64
attention-config: '{"use_trtllm_ragged_deepseek_prefill": true}'
all2all-backend: "allgather_reducescatter"
gpu-memory-utilization: 0.9

decode:
kv-transfer-config: '{"kv_connector": "NixlConnector", "kv_role": "kv_both"}'
served-model-name: "nvidia/Kimi-K2.5-NVFP4"
kv-cache-dtype: "fp8"
tensor-parallel-size: 4
pipeline-parallel-size: 1
enable-expert-parallel: true
max-model-len: 10240
max-num-seqs: 16
max-num-batched-tokens: 10240
safetensors-load-strategy: "prefetch"
trust-remote-code: true
no-enable-prefix-caching: true
no-enable-chunked-prefill: true
async-scheduling: true
attention-backend: "FLASHINFER_MLA"
block-size: 64
compilation-config: '{"cudagraph_mode":"FULL_DECODE_ONLY","custom_ops":["+quant_fp8","+rms_norm","+rotary_embedding"],"pass_config":{"fuse_attn_quant":true,"fuse_allreduce_rms":true}}'
gpu-memory-utilization: 0.9
stream-interval: 50
max-cudagraph-capture-size: 16

benchmark:
type: "sa-bench"
isl: 8192
osl: 1024
concurrencies: "4x8x16x32x64x128x256"
req_rate: "inf"
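One property worth sanity-checking across these recipes is that the `resources` block is internally consistent: for each role, `nodes * gpus_per_node` should equal `workers * gpus_per_<role>`. A minimal sketch using the 1p4d-tep4 values above (the check itself is an assumption, not part of the harness):

```python
# Illustrative consistency check for a recipe's resources block; values
# are taken from disagg-gb200-1p4d-tep4.yaml above.
resources = {
    "gpus_per_node": 4,
    "prefill_nodes": 1, "prefill_workers": 1, "gpus_per_prefill": 4,
    "decode_nodes": 4, "decode_workers": 4, "gpus_per_decode": 4,
}

for role in ("prefill", "decode"):
    node_gpus = resources[f"{role}_nodes"] * resources["gpus_per_node"]
    worker_gpus = resources[f"{role}_workers"] * resources[f"gpus_per_{role}"]
    assert node_gpus == worker_gpus, (
        f"{role}: {node_gpus} GPUs from nodes vs {worker_gpus} from workers"
    )
# 1p4d-tep4: prefill 1*4 == 1*4, decode 4*4 == 4*4 -> consistent.
# The same identity holds for the 3p1d recipe (decode 4*4 == 1*16) and
# the 5p1d recipe (decode 2*4 == 1*8) below.
```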
101 changes: 101 additions & 0 deletions recipes/vllm/kimi-k2.5/disagg-gb200-3p1d.yaml
@@ -0,0 +1,101 @@
name: "kimi-vllm-disagg-gb200-3p1d-dep16"

model:
path: "kimi-k2.5-nvfp4"
container: "v0.18.0"
precision: "fp4"

dynamo:
version: 1.0.1
install: true

setup_script: vllm-container-deps.sh

resources:
gpu_type: "gb200"
gpus_per_node: 4
prefill_nodes: 3
decode_nodes: 4
prefill_workers: 3
decode_workers: 1
gpus_per_prefill: 4
gpus_per_decode: 16

frontend:
type: dynamo
enable_multiple_frontends: false

backend:
type: vllm
connector: null

prefill_environment:
VLLM_USE_FLASHINFER_MOE_FP4: "1"
VLLM_USE_NCCL_SYMM_MEM: "1"
NCCL_CUMEM_ENABLE: "1"
NCCL_MNNVL_ENABLE: "1"
NCCL_NVLS_ENABLE: "1"

decode_environment:
VLLM_USE_FLASHINFER_MOE_FP4: "1"
VLLM_USE_NCCL_SYMM_MEM: "1"
NCCL_CUMEM_ENABLE: "1"
NCCL_MNNVL_ENABLE: "1"
NCCL_NVLS_ENABLE: "1"

vllm_config:
prefill:
kv-transfer-config: '{"kv_connector": "NixlConnector", "kv_role": "kv_both"}'
served-model-name: "nvidia/Kimi-K2.5-NVFP4"
kv-cache-dtype: "fp8"
tensor-parallel-size: 1
pipeline-parallel-size: 1
data-parallel-size: 4
data-parallel-rpc-port: 13345
enable-expert-parallel: true
max-model-len: 10240
max-num-seqs: 64
enforce-eager: true
compilation-config: '{"custom_ops":["+quant_fp8","+rms_norm","+rotary_embedding"],"pass_config":{"fuse_attn_quant":true,"fuse_allreduce_rms":true}}'
max-num-batched-tokens: 16384
safetensors-load-strategy: "prefetch"
trust-remote-code: true
no-enable-prefix-caching: true
no-enable-chunked-prefill: true
attention-backend: "FLASHINFER_MLA"
block-size: 64
attention-config: '{"use_trtllm_ragged_deepseek_prefill": true}'
all2all-backend: "allgather_reducescatter"
gpu-memory-utilization: 0.9

decode:
kv-transfer-config: '{"kv_connector": "NixlConnector", "kv_role": "kv_both"}'
served-model-name: "nvidia/Kimi-K2.5-NVFP4"
kv-cache-dtype: "fp8"
tensor-parallel-size: 1
pipeline-parallel-size: 1
data-parallel-size: 16
data-parallel-rpc-port: 13345
enable-expert-parallel: true
max-model-len: 10240
max-num-seqs: 256
max-num-batched-tokens: 10240
safetensors-load-strategy: "prefetch"
trust-remote-code: true
no-enable-prefix-caching: true
no-enable-chunked-prefill: true
async-scheduling: true
attention-backend: "FLASHINFER_MLA"
block-size: 64
all2all-backend: "allgather_reducescatter"
compilation-config: '{"cudagraph_mode":"FULL_DECODE_ONLY","custom_ops":["+quant_fp8","+rms_norm","+rotary_embedding"],"pass_config":{"fuse_attn_quant":true,"fuse_allreduce_rms":true}}'
gpu-memory-utilization: 0.9
stream-interval: 50
max-cudagraph-capture-size: 256

benchmark:
type: "sa-bench"
isl: 8192
osl: 1024
concurrencies: "512x1024"
req_rate: "inf"
101 changes: 101 additions & 0 deletions recipes/vllm/kimi-k2.5/disagg-gb200-5p1d.yaml
@@ -0,0 +1,101 @@
name: "coreai_devtech_all-sa.kimi-vllm-disagg-gb200-5p1d"

model:
path: "kimi-k2.5-nvfp4"
container: "v0.18.0"
precision: "fp4"

dynamo:
version: 1.0.1
install: true

setup_script: vllm-container-deps.sh

resources:
gpu_type: "gb200"
gpus_per_node: 4
prefill_nodes: 5
decode_nodes: 2
prefill_workers: 5
decode_workers: 1
gpus_per_prefill: 4
gpus_per_decode: 8

frontend:
type: dynamo
enable_multiple_frontends: false

backend:
type: vllm
connector: null

prefill_environment:
VLLM_USE_FLASHINFER_MOE_FP4: "1"
VLLM_USE_NCCL_SYMM_MEM: "1"
NCCL_CUMEM_ENABLE: "1"
NCCL_MNNVL_ENABLE: "1"
NCCL_NVLS_ENABLE: "1"

decode_environment:
VLLM_USE_FLASHINFER_MOE_FP4: "1"
VLLM_USE_NCCL_SYMM_MEM: "1"
NCCL_CUMEM_ENABLE: "1"
NCCL_MNNVL_ENABLE: "1"
NCCL_NVLS_ENABLE: "1"

vllm_config:
prefill:
kv-transfer-config: '{"kv_connector": "NixlConnector", "kv_role": "kv_both"}'
served-model-name: "nvidia/Kimi-K2.5-NVFP4"
kv-cache-dtype: "fp8"
tensor-parallel-size: 1
pipeline-parallel-size: 1
data-parallel-size: 4
data-parallel-rpc-port: 13345
enable-expert-parallel: true
max-model-len: 10240
max-num-seqs: 64
enforce-eager: true
compilation-config: '{"custom_ops":["+quant_fp8","+rms_norm","+rotary_embedding"],"pass_config":{"fuse_attn_quant":true,"fuse_allreduce_rms":true}}'
max-num-batched-tokens: 16384
safetensors-load-strategy: "prefetch"
trust-remote-code: true
no-enable-prefix-caching: true
no-enable-chunked-prefill: true
attention-backend: "FLASHINFER_MLA"
block-size: 64
attention-config: '{"use_trtllm_ragged_deepseek_prefill": true}'
all2all-backend: "allgather_reducescatter"
gpu-memory-utilization: 0.9

decode:
kv-transfer-config: '{"kv_connector": "NixlConnector", "kv_role": "kv_both"}'
served-model-name: "nvidia/Kimi-K2.5-NVFP4"
kv-cache-dtype: "fp8"
tensor-parallel-size: 1
pipeline-parallel-size: 1
data-parallel-size: 8
data-parallel-rpc-port: 13345
enable-expert-parallel: true
max-model-len: 10240
max-num-seqs: 512
max-num-batched-tokens: 10240
safetensors-load-strategy: "prefetch"
trust-remote-code: true
no-enable-prefix-caching: true
no-enable-chunked-prefill: true
async-scheduling: true
attention-backend: "FLASHINFER_MLA"
block-size: 64
all2all-backend: "allgather_reducescatter"
compilation-config: '{"cudagraph_mode":"FULL_DECODE_ONLY","custom_ops":["+quant_fp8","+rms_norm","+rotary_embedding"],"pass_config":{"fuse_attn_quant":true,"fuse_allreduce_rms":true}}'
gpu-memory-utilization: 0.9
stream-interval: 50
max-cudagraph-capture-size: 512

benchmark:
type: "sa-bench"
isl: 8192
osl: 1024
concurrencies: "2048"
req_rate: "inf"
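All three benchmark blocks encode the concurrency sweep as an "x"-delimited string (`"4x8x16x32x64x128x256"`, `"512x1024"`, `"2048"`). How sa-bench actually parses it is not shown in this diff, but the natural reading is a list of concurrent-client levels, sketched below (the parser is an assumption):

```python
# Illustrative parser for the benchmark "concurrencies" field; the real
# sa-bench parsing logic is not part of this change.
def parse_concurrencies(spec: str) -> list[int]:
    return [int(level) for level in spec.split("x")]


print(parse_concurrencies("4x8x16x32x64x128x256"))  # [4, 8, 16, 32, 64, 128, 256]
print(parse_concurrencies("2048"))                  # [2048] (single level)
```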