diff --git a/.github/configs/amd-master.yaml b/.github/configs/amd-master.yaml
index 3ec6b95c9..de7f5e62a 100644
--- a/.github/configs/amd-master.yaml
+++ b/.github/configs/amd-master.yaml
@@ -495,6 +495,28 @@ minimaxm2.5-fp8-mi355x-atom:
         - { tp: 4, conc-start: 4, conc-end: 128 }
         - { tp: 8, ep: 8, conc-start: 32, conc-end: 256 }
 
+minimaxm2.5-fp4-mi355x-vllm:
+  image: vllm/vllm-openai-rocm:v0.19.1
+  model: amd/MiniMax-M2.5-MXFP4
+  model-prefix: minimaxm2.5
+  runner: mi355x
+  precision: fp4
+  framework: vllm
+  multinode: false
+  seq-len-configs:
+    - isl: 1024
+      osl: 1024
+      search-space:
+        - { tp: 1, conc-start: 4, conc-end: 32 }
+        - { tp: 2, conc-start: 4, conc-end: 64 }
+        - { tp: 4, conc-start: 4, conc-end: 64 }
+    - isl: 8192
+      osl: 1024
+      search-space:
+        - { tp: 1, conc-start: 4, conc-end: 32 }
+        - { tp: 2, conc-start: 4, conc-end: 64 }
+        - { tp: 4, conc-start: 4, conc-end: 64 }
+
 minimaxm2.5-fp8-mi300x-vllm:
   image: vllm/vllm-openai-rocm:v0.16.0
   model: MiniMaxAI/MiniMax-M2.5
diff --git a/benchmarks/single_node/minimaxm2.5_fp4_mi355x.sh b/benchmarks/single_node/minimaxm2.5_fp4_mi355x.sh
new file mode 100755
index 000000000..116a1c325
--- /dev/null
+++ b/benchmarks/single_node/minimaxm2.5_fp4_mi355x.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+
+source "$(dirname "$0")/../benchmark_lib.sh"
+
+check_env_vars \
+    MODEL \
+    TP \
+    EP_SIZE \
+    CONC \
+    ISL \
+    OSL \
+    MAX_MODEL_LEN \
+    RANDOM_RANGE_RATIO \
+    RESULT_FILENAME
+
+if [[ -n "$SLURM_JOB_ID" ]]; then
+    echo "JOB $SLURM_JOB_ID running on $SLURMD_NODENAME"
+fi
+
+hf download "$MODEL"
+
+# Set HIP_VISIBLE_DEVICES to match ROCR_VISIBLE_DEVICES for Ray compatibility in vLLM 0.14+
+if [ -n "$ROCR_VISIBLE_DEVICES" ]; then
+    export HIP_VISIBLE_DEVICES="$ROCR_VISIBLE_DEVICES"
+fi
+
+export VLLM_ROCM_USE_AITER=1
+EXTRA_VLLM_ARGS=""
+# if [ "$TP" -ge 4 ]; then
+#     # AITER CK fused MoE kernels lack compiled tiles for N=intermediate_size/TP
+#     # when TP>=4 (TP=4, N=384). Disable AITER MoE to fall back to triton, but keep
+#     # AITER attention. See: https://github.com/vllm-project/vllm/issues/35637
+#     export VLLM_ROCM_USE_AITER_MOE=0
+#     EXTRA_VLLM_ARGS="--attention-backend ROCM_AITER_UNIFIED_ATTN"
+#     pip install amd-quark 2>/dev/null || true
+# fi
+
+SERVER_LOG=/workspace/server.log
+PORT=${PORT:-8888}
+
+if [ "${EVAL_ONLY}" = "true" ]; then
+    setup_eval_context
+    MAX_MODEL_LEN="$EVAL_MAX_MODEL_LEN"
+fi
+
+if [ "$EP_SIZE" -gt 1 ]; then
+    EP=" --enable-expert-parallel"
+else
+    EP=""
+fi
+
+# Start GPU monitoring (power, temperature, clocks every second)
+start_gpu_monitor
+
+set -x
+vllm serve $MODEL --port $PORT \
+    --tensor-parallel-size=$TP \
+    $EP \
+    --gpu-memory-utilization 0.95 \
+    --max-model-len $MAX_MODEL_LEN \
+    --kv-cache-dtype fp8 \
+    --block-size=32 \
+    --no-enable-prefix-caching \
+    --attention-backend "ROCM_AITER_FA" \
+    --trust-remote-code \
+    $EXTRA_VLLM_ARGS > $SERVER_LOG 2>&1 &
+
+SERVER_PID=$!
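+# $! holds the PID of the backgrounded `vllm serve`; the readiness check below
+# also takes it, presumably so it can fail fast if the server process dies.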
+
+# Wait for server to be ready
+wait_for_server_ready --port "$PORT" --server-log "$SERVER_LOG" --server-pid "$SERVER_PID"
+
+run_benchmark_serving \
+    --model "$MODEL" \
+    --port "$PORT" \
+    --backend vllm \
+    --input-len "$ISL" \
+    --output-len "$OSL" \
+    --random-range-ratio "$RANDOM_RANGE_RATIO" \
+    --num-prompts "$((CONC * 10))" \
+    --max-concurrency "$CONC" \
+    --result-filename "$RESULT_FILENAME" \
+    --result-dir /workspace/ \
+    --trust-remote-code
+
+# After throughput, run evaluation only if RUN_EVAL is true
+if [ "${RUN_EVAL}" = "true" ]; then
+    run_eval --framework lm-eval --port "$PORT"
+    append_lm_eval_summary
+fi
+
+# Stop GPU monitoring
+stop_gpu_monitor
+set +x
diff --git a/perf-changelog.yaml b/perf-changelog.yaml
index c0d1c1333..6bd6c92e3 100644
--- a/perf-changelog.yaml
+++ b/perf-changelog.yaml
@@ -1282,6 +1282,16 @@
     - "Upgrade vLLM image to v0.19.0"
     - "Enable FP8 KV cache + AITER FA for minimaxm2.5-fp8-mi355x-vllm"
   pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/1003
+
+- config-keys:
+    - minimaxm2.5-fp4-mi355x-vllm
+  description:
+    - "Add MiniMax M2.5 MXFP4 vLLM benchmark for MI355X"
+    - "Model: amd/MiniMax-M2.5-MXFP4 with --trust-remote-code and --block-size=32"
+    - "Image: vllm/vllm-openai-rocm:v0.19.1"
+    - "Environment: VLLM_ROCM_USE_AITER=1"
+    - "TP=1 (concurrency 4-32), TP=2 and TP=4 (concurrency 4-64) for 1k1k and 8k1k sequence lengths"
+  pr-link: https://github.com/SemiAnalysisAI/InferenceX/pull/827
 
 - config-keys:
     - qwen3.5-fp8-h200-sglang-mtp
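
For reference, the new script is driven entirely by environment variables (the names come from its check_env_vars list). A minimal manual invocation might look like the sketch below; the concrete values are illustrative assumptions chosen to match one point of the 8k1k search space (TP=2, concurrency 16), and the result filename is hypothetical, not a setting from this PR:

    # Hypothetical manual run; values are illustrative, not taken from CI.
    MODEL=amd/MiniMax-M2.5-MXFP4 TP=2 EP_SIZE=1 CONC=16 \
      ISL=8192 OSL=1024 MAX_MODEL_LEN=10240 RANDOM_RANGE_RATIO=0.8 \
      RESULT_FILENAME=minimaxm2.5_fp4_tp2_8k1k.json \
      ./benchmarks/single_node/minimaxm2.5_fp4_mi355x.sh

MAX_MODEL_LEN is set here to 10240 simply so it covers ISL + OSL (8192 + 1024) with headroom; any value at or above that sum should work.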