diff --git a/.github/configs/nvidia-master.yaml b/.github/configs/nvidia-master.yaml
index 3cbd9c49c..64c48477f 100644
--- a/.github/configs/nvidia-master.yaml
+++ b/.github/configs/nvidia-master.yaml
@@ -2065,7 +2065,7 @@ qwen3.5-fp4-b200-sglang:
     - { tp: 2, ep: 1, conc-start: 4, conc-end: 128 }
 
 qwen3.5-fp4-b200-sglang-mtp:
-  image: lmsysorg/sglang:nightly-dev-20260402-d7256eb6
+  image: lmsysorg/sglang:nightly-dev-20260422-de962f32
   model: nvidia/Qwen3.5-397B-A17B-NVFP4
   model-prefix: qwen3.5
   runner: b200
@@ -2077,11 +2077,13 @@ qwen3.5-fp4-b200-sglang-mtp:
   - isl: 1024
     osl: 1024
     search-space:
-    - { tp: 4, ep: 1, conc-start: 4, conc-end: 128, spec-decoding: mtp }
+    - { tp: 4, ep: 1, conc-start: 4, conc-end: 4, spec-decoding: mtp }
+    - { tp: 2, ep: 1, conc-start: 4, conc-end: 64, spec-decoding: mtp }
   - isl: 8192
     osl: 1024
     search-space:
-    - { tp: 4, ep: 1, conc-start: 4, conc-end: 128, spec-decoding: mtp }
+    - { tp: 4, ep: 1, conc-start: 4, conc-end: 4, spec-decoding: mtp }
+    - { tp: 2, ep: 1, conc-start: 4, conc-end: 64, spec-decoding: mtp }
 
 glm5-fp8-b200-sglang:
   image: lmsysorg/sglang:nightly-dev-cu13-20260317-1eea7448
diff --git a/benchmarks/single_node/qwen3.5_fp4_b200_mtp.sh b/benchmarks/single_node/qwen3.5_fp4_b200_mtp.sh
index 72e2f9c39..961e3b710 100755
--- a/benchmarks/single_node/qwen3.5_fp4_b200_mtp.sh
+++ b/benchmarks/single_node/qwen3.5_fp4_b200_mtp.sh
@@ -20,61 +20,43 @@
 nvidia-smi
 
 hf download "$MODEL"
 
-export NCCL_NVLS_ENABLE=1
-export SGL_ENABLE_JIT_DEEPGEMM=false
-export SGLANG_ENABLE_FLASHINFER_GEMM=true
-export PYTHONUNBUFFERED=1
-
 SERVER_LOG=/workspace/server.log
 PORT=${PORT:-8888}
 
-# Default: recv every ~10 requests; if CONC >= 16, relax to ~30 requests between scheduler recv polls.
-if [[ $CONC -ge 16 ]]; then
-  SCHEDULER_RECV_INTERVAL=30
-else
-  SCHEDULER_RECV_INTERVAL=10
-fi
-
-MEM_FRAC_STATIC=0.85
-CHUNKED_PREFILL_SIZE=32768
-MAX_PREFILL_TOKENS=32768
-CUDA_GRAPH_MAX_BATCH_SIZE=$CONC
-MAX_RUNNING_REQUESTS=128
 CONTEXT_LENGTH=$((ISL + OSL + 20))
 
 if [ "${EVAL_ONLY}" = "true" ]; then
   setup_eval_context
   CONTEXT_LENGTH="$EVAL_MAX_MODEL_LEN"
 fi
 
-if [[ $TP -eq 8 ]]; then
-  EXTRA_ARGS="--enable-flashinfer-allreduce-fusion"
-else
-  EXTRA_ARGS=""
-fi
-
-echo "SCHEDULER_RECV_INTERVAL: $SCHEDULER_RECV_INTERVAL, CONC: $CONC, ISL: $ISL, OSL: $OSL"
-
 # Start GPU monitoring (power, temperature, clocks every second)
 start_gpu_monitor
 set -x
 
-PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path=$MODEL --host=0.0.0.0 --port=$PORT \
+SGLANG_ENABLE_SPEC_V2=1 PYTHONNOUSERSITE=1 python3 -m sglang.launch_server --model-path=$MODEL --host=0.0.0.0 --port=$PORT \
 --trust-remote-code \
---tensor-parallel-size=$TP --data-parallel-size=1 --ep-size $EP_SIZE \
---quantization modelopt_fp4 --fp4-gemm-backend flashinfer_cutlass \
+--tensor-parallel-size=$TP --data-parallel-size=1 --expert-parallel-size=$EP_SIZE \
+--enable-symm-mem \
+--disable-radix-cache \
+--quantization modelopt_fp4 \
 --kv-cache-dtype fp8_e4m3 \
 --mamba-ssm-dtype bfloat16 \
---cuda-graph-max-bs $CUDA_GRAPH_MAX_BATCH_SIZE --max-running-requests $MAX_RUNNING_REQUESTS \
---mem-fraction-static $MEM_FRAC_STATIC --chunked-prefill-size $CHUNKED_PREFILL_SIZE --max-prefill-tokens $MAX_PREFILL_TOKENS \
---context-length $CONTEXT_LENGTH --disable-radix-cache \
---attention-backend trtllm_mha --moe-runner-backend flashinfer_trtllm \
-$EXTRA_ARGS --scheduler-recv-interval $SCHEDULER_RECV_INTERVAL \
---tokenizer-worker-num 6 --stream-interval 30 \
+--attention-backend trtllm_mha \
+--moe-runner-backend flashinfer_trtllm \
+--cuda-graph-max-bs $CONC \
+--max-running-requests $CONC \
+--max-prefill-tokens 16384 \
+--chunked-prefill-size 16384 \
+--mem-fraction-static 0.8 \
+--stream-interval 50 \
+--scheduler-recv-interval $( [[ $CONC -gt 4 ]] && echo 30 || echo 10 ) \
+--tokenizer-worker-num 6 \
+--tokenizer-path $MODEL \
 --speculative-algorithm EAGLE \
 --speculative-num-steps 3 \
 --speculative-eagle-topk 1 \
 --speculative-num-draft-tokens 4 \
-> $SERVER_LOG 2>&1 &
+--context-length $CONTEXT_LENGTH > $SERVER_LOG 2>&1 &
 SERVER_PID=$!
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index 98002a100..d34b93dba 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -2221,3 +2221,13 @@
   - "Update the TensorRT-LLM DeepSeek-V4-Pro image to ghcr.io/semianalysisai/trtllm-deepseek-v4:feat-deepseek_v4-9aa3715"
   - "Enable TRTLLM fused MHC by default with the DeepSeek-V4 feature image"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1270
+
+- config-keys:
+  - qwen3.5-fp4-b200-sglang-mtp
+  description:
+  - "Update image to lmsysorg/sglang:nightly-dev-20260422-de962f32"
+  - "Add tp:2 ep:1 conc 4-64 search-space for 1k1k and 8k1k; cap the tp:4 sweep at conc 4"
+  - "Align server flags with FP4 B200 STP: --enable-symm-mem, --expert-parallel-size, dynamic scheduler-recv-interval (30 when conc > 4, else 10)"
+  - "Add SGLANG_ENABLE_SPEC_V2=1 on top of the existing EAGLE speculative-decoding flags (steps=3, topk=1, draft=4)"
+  - "Reduce --max-prefill-tokens and --chunked-prefill-size from 32768 to 16384, and --mem-fraction-static from 0.85 to 0.8"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1257
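
For reference, a minimal standalone sketch of the scheduler-recv-interval selection that the new launch line performs inline via command substitution; the CONC default and the echo line are illustrative only and not part of the benchmark script:

#!/usr/bin/env bash
# Poll the scheduler roughly every 30 requests at higher concurrency
# (CONC > 4), otherwise roughly every 10, mirroring the inline
# $( [[ $CONC -gt 4 ]] && echo 30 || echo 10 ) used in the launch command.
CONC=${CONC:-4}
SCHEDULER_RECV_INTERVAL=$( [[ $CONC -gt 4 ]] && echo 30 || echo 10 )
echo "CONC=$CONC -> --scheduler-recv-interval $SCHEDULER_RECV_INTERVAL"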