Merged
.github/configs/nvidia-master.yaml (4 changes: 3 additions & 1 deletion)
@@ -2250,7 +2250,7 @@ glm5-fp4-b300-sglang-mtp:
      - { tp: 4, ep: 1, conc-start: 4, conc-end: 256, spec-decoding: mtp }

qwen3.5-fp8-b200-sglang-mtp:
-  image: lmsysorg/sglang:v0.5.9-cu130
+  image: lmsysorg/sglang:nightly-dev-20260422-de962f32
  model: Qwen/Qwen3.5-397B-A17B-FP8
  model-prefix: qwen3.5
  runner: b200
@@ -2262,10 +2262,12 @@ qwen3.5-fp8-b200-sglang-mtp:
    - isl: 1024
      osl: 1024
      search-space:
+      - { tp: 8, ep: 1, conc-start: 4, conc-end: 4, spec-decoding: mtp }
      - { tp: 4, ep: 1, conc-start: 4, conc-end: 256, spec-decoding: mtp }
    - isl: 8192
      osl: 1024
      search-space:
+      - { tp: 8, ep: 1, conc-start: 4, conc-end: 4, spec-decoding: mtp }
      - { tp: 4, ep: 1, conc-start: 4, conc-end: 256, spec-decoding: mtp }


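For orientation: each search-space entry defines a concurrency sweep from conc-start to conc-end, so the new TP8 points (conc-start and conc-end both 4) pin a single low-concurrency run, while the existing TP4 points sweep from 4 up to 256. A minimal sketch of how one point might expand into individual runs, assuming power-of-two stepping (the stepping rule is not shown on this page):

# Illustrative only: expand a search-space point, assuming the harness
# doubles concurrency from conc-start to conc-end (an assumption).
TP=4; EP=1; CONC=4; CONC_END=256
while [[ $CONC -le $CONC_END ]]; do
  echo "benchmark run: tp=$TP ep=$EP conc=$CONC spec-decoding=mtp"
  CONC=$((CONC * 2))
done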
benchmarks/single_node/qwen3.5_fp8_b200_mtp.sh (60 changes: 24 additions & 36 deletions)
@@ -20,55 +20,43 @@ nvidia-smi

hf download "$MODEL"

-export NCCL_NVLS_ENABLE=1
-export SGLANG_ENABLE_JIT_DEEPGEMM=false
export PYTHONUNBUFFERED=1

SERVER_LOG=/workspace/server.log
PORT=${PORT:-8888}

-# Default: recv every ~10 requests; if CONC >= 16, relax to ~30 requests between scheduler recv polls.
-if [[ $CONC -ge 16 ]]; then
-    SCHEDULER_RECV_INTERVAL=30
-else
-    SCHEDULER_RECV_INTERVAL=10
-fi

-MEM_FRAC_STATIC=0.8
-CHUNKED_PREFILL_SIZE=32768
-MAX_PREFILL_TOKENS=32768
-CUDA_GRAPH_MAX_BATCH_SIZE=$CONC
-MAX_RUNNING_REQUESTS=$CONC
CONTEXT_LENGTH=$((ISL + OSL + 20))

-# MTP (Multi-Token Prediction) Config - EAGLE speculative decoding
-SPECULATIVE_NUM_STEPS=3
-SPECULATIVE_DRAFT_TOKENS=4
-SPECULATIVE_EAGLE_TOPK=1

echo "SCHEDULER_RECV_INTERVAL: $SCHEDULER_RECV_INTERVAL, CONC: $CONC, ISL: $ISL, OSL: $OSL"

if [ "${EVAL_ONLY}" = "true" ]; then
setup_eval_context
CONTEXT_LENGTH="$EVAL_MAX_MODEL_LEN"
fi

# Start GPU monitoring (power, temperature, clocks every second)
start_gpu_monitor

set -x
-PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path=$MODEL --host=0.0.0.0 --port=$PORT \
+SGLANG_ENABLE_SPEC_V2=1 PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path=$MODEL --host=0.0.0.0 --port=$PORT \
--trust-remote-code \
-    --tensor-parallel-size=$TP --data-parallel-size=1 --ep-size $EP_SIZE \
-    --quantization fp8 --kv-cache-dtype fp8_e4m3 --mamba-ssm-dtype bfloat16 \
-    --cuda-graph-max-bs $CUDA_GRAPH_MAX_BATCH_SIZE --max-running-requests $MAX_RUNNING_REQUESTS \
-    --mem-fraction-static $MEM_FRAC_STATIC --chunked-prefill-size $CHUNKED_PREFILL_SIZE --max-prefill-tokens $MAX_PREFILL_TOKENS \
-    --context-length $CONTEXT_LENGTH --disable-radix-cache \
-    --fp8-gemm-backend=flashinfer_trtllm \
-    --attention-backend trtllm_mha --moe-runner-backend flashinfer_trtllm \
-    --enable-flashinfer-allreduce-fusion --scheduler-recv-interval $SCHEDULER_RECV_INTERVAL \
-    --tokenizer-worker-num 6 --stream-interval 30 \
-    --speculative-algorithm EAGLE --speculative-num-steps $SPECULATIVE_NUM_STEPS --speculative-eagle-topk $SPECULATIVE_EAGLE_TOPK --speculative-num-draft-tokens $SPECULATIVE_DRAFT_TOKENS \
-    > $SERVER_LOG 2>&1 &
+    --tensor-parallel-size=$TP --data-parallel-size=1 --expert-parallel-size=$EP_SIZE \
+    --enable-symm-mem \
+    --disable-radix-cache \
+    --quantization fp8 \
+    --kv-cache-dtype fp8_e4m3 \
+    --mamba-ssm-dtype bfloat16 \
+    --attention-backend trtllm_mha \
+    --moe-runner-backend flashinfer_trtllm \
+    --cuda-graph-max-bs $CONC \
+    --max-running-requests $CONC \
+    --max-prefill-tokens 16384 \
+    --chunked-prefill-size 16384 \
+    --mem-fraction-static 0.8 \
+    --stream-interval 50 \
+    --scheduler-recv-interval $( [[ $CONC -gt 4 ]] && echo 30 || echo 10 ) \
+    --tokenizer-worker-num 6 \
+    --tokenizer-path $MODEL \
+    --speculative-algorithm EAGLE \
+    --speculative-num-steps 3 \
+    --speculative-eagle-topk 1 \
+    --speculative-num-draft-tokens 4 \
+    --context-length $CONTEXT_LENGTH > $SERVER_LOG 2>&1 &

A review comment on the launch command:

Please change:
--scheduler-recv-interval $( [[ $CONC -gt 4 ]] && echo 30 || echo 10 )
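A minimal refactor matching this request, assuming the reviewer wants the conditional hoisted out of the command line as the previous revision of this script did (the variable name is illustrative):

# Compute the interval once, then pass the variable to the flag;
# assumes $CONC is already set by the harness.
if [[ $CONC -gt 4 ]]; then
    SCHEDULER_RECV_INTERVAL=30
else
    SCHEDULER_RECV_INTERVAL=10
fi
# ...
#    --scheduler-recv-interval $SCHEDULER_RECV_INTERVAL \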

SERVER_PID=$!

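Note that the inline expression also moves the relaxation threshold: the removed if-block switched to a 30-request interval at CONC >= 16, while the new inline form switches at CONC > 4, so mid-range concurrencies now poll the scheduler less often. A standalone comparison of the two policies (illustrative only):

# Print old vs. new scheduler-recv-interval at the sweep's power-of-two points.
for CONC in 4 8 16 32; do
  old=$([[ $CONC -ge 16 ]] && echo 30 || echo 10)   # removed if-block
  new=$([[ $CONC -gt 4 ]] && echo 30 || echo 10)    # new inline expression
  echo "CONC=$CONC old=$old new=$new"               # only CONC=8 differs
done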
perf-changelog.yaml (10 changes: 10 additions & 0 deletions)
@@ -2187,3 +2187,13 @@
- "Use vllm/vllm-openai:v0.20.1-ubuntu2404 directly for GB200 MTP2 instead of upgrading vLLM inside the v0.20.0 container"
- "Fix applies to all 7 multinode launch scripts, the benchmark-multinode-tmpl workflow, and process_result.py"
pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1256

+- config-keys:
+  - qwen3.5-fp8-b200-sglang-mtp
+  description:
+    - "Update image to lmsysorg/sglang:nightly-dev-20260422-de962f32"
+    - "Add TP8 search-space point (conc 4) for 1k1k and 8k1k"
+    - "Dynamic scheduler-recv-interval: 30 for CONC>4, 10 otherwise"
+    - "Align B200 flags with B300: SGLANG_ENABLE_SPEC_V2=1, --enable-symm-mem, --expert-parallel-size"
+    - "Reduce prefill tokens from 32768 to 16384, drop flashinfer_allreduce_fusion"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1065