diff --git a/.buildkite/test-pipeline.yaml b/.buildkite/test-pipeline.yaml index 14eb4ec21b44..9a26dc611515 100644 --- a/.buildkite/test-pipeline.yaml +++ b/.buildkite/test-pipeline.yaml @@ -1406,3 +1406,19 @@ steps: working_dir: "/vllm-workspace" commands: - bash .buildkite/scripts/scheduled_integration_test/qwen30b_a3b_fp8_block_ep_eplb.sh 0.8 200 8020 2 1 + +##### MoE Refactor (Temporary) Tests ##### + +- label: MoE Refactor Integration Test (H100 - TEMPORARY) # optional + gpu: h100 + optional: true + num_gpus: 2 + commands: + - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=evals/gsm8k/configs/moe-refactor/config-h100.txt + +- label: MoE Refactor Integration Test (B200 - TEMPORARY) # optional + gpu: b200 + optional: true + num_gpus: 2 + commands: + - pytest -s -v evals/gsm8k/test_gsm8k_correctness.py --config-list-file=evals/gsm8k/configs/moe-refactor/config-b200.txt diff --git a/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-fi-cutlass.yaml b/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-fi-cutlass.yaml new file mode 100644 index 000000000000..4c9a01274d99 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-fi-cutlass.yaml @@ -0,0 +1,8 @@ +model_name: "nvidia/Llama-4-Scout-17B-16E-Instruct-FP8" +accuracy_threshold: 0.92 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_FLASHINFER_MOE_FP8: "1" + VLLM_FLASHINFER_MOE_BACKEND: "throughput" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-fi-trtllm.yaml b/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-fi-trtllm.yaml new file mode 100644 index 000000000000..17f067215eb5 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-fi-trtllm.yaml @@ -0,0 +1,8 @@ +model_name: "nvidia/Llama-4-Scout-17B-16E-Instruct-FP8" +accuracy_threshold: 0.92 +num_questions: 1319 
+num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_FLASHINFER_MOE_FP8: "1" + VLLM_FLASHINFER_MOE_BACKEND: "latency" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-marlin.yaml b/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-marlin.yaml new file mode 100644 index 000000000000..be8192f2a89a --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-marlin.yaml @@ -0,0 +1,7 @@ +model_name: "nvidia/Llama-4-Scout-17B-16E-Instruct-FP8" +accuracy_threshold: 0.92 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_TEST_FORCE_FP8_MARLIN: "1" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-triton.yaml b/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-triton.yaml new file mode 100644 index 000000000000..80e279edc971 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Llama-4-Scout-Fp8-ModelOpt-triton.yaml @@ -0,0 +1,5 @@ +model_name: "nvidia/Llama-4-Scout-17B-16E-Instruct-FP8" +accuracy_threshold: 0.92 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Mixtral-8x7B-Fp8-AutoFp8-fi-cutlass.yaml b/tests/evals/gsm8k/configs/moe-refactor/Mixtral-8x7B-Fp8-AutoFp8-fi-cutlass.yaml new file mode 100644 index 000000000000..b9c6a1997dc3 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Mixtral-8x7B-Fp8-AutoFp8-fi-cutlass.yaml @@ -0,0 +1,9 @@ +# TODO(rob): enable +# model_name: "amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV" +# accuracy_threshold: 0.62 +# num_questions: 1319 +# num_fewshot: 5 +# server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +# env: +# VLLM_USE_FLASHINFER_MOE_FP8: "1" +# VLLM_FLASHINFER_MOE_BACKEND: "throughput" diff --git 
a/tests/evals/gsm8k/configs/moe-refactor/Mixtral-8x7B-Fp8-AutoFp8-triton.yaml b/tests/evals/gsm8k/configs/moe-refactor/Mixtral-8x7B-Fp8-AutoFp8-triton.yaml new file mode 100644 index 000000000000..f730e2e2fb1a --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Mixtral-8x7B-Fp8-AutoFp8-triton.yaml @@ -0,0 +1,5 @@ +model_name: "amd/Mixtral-8x7B-Instruct-v0.1-FP8-KV" +accuracy_threshold: 0.62 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-deepgemm.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-deepgemm.yaml new file mode 100644 index 000000000000..b6cff0abc9d3 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-deepgemm.yaml @@ -0,0 +1,8 @@ +model_name: "Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_DEEP_GEMM: "1" + VLLM_USE_DEEP_GEMM_MOE: "1" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-fi-cutlass.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-fi-cutlass.yaml new file mode 100644 index 000000000000..080c8d338e58 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-fi-cutlass.yaml @@ -0,0 +1,10 @@ +model_name: "Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_DEEP_GEMM: "0" + VLLM_USE_DEEP_GEMM_MOE: "0" + VLLM_USE_FLASHINFER_MOE_FP8: "1" + VLLM_FLASHINFER_MOE_BACKEND: "throughput" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-fi-trtllm.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-fi-trtllm.yaml new file mode 100644 index 
000000000000..a656cc7c37f1 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-fi-trtllm.yaml @@ -0,0 +1,10 @@ +model_name: "Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_DEEP_GEMM: "0" + VLLM_USE_DEEP_GEMM_MOE: "0" + VLLM_USE_FLASHINFER_MOE_FP8: "1" + VLLM_FLASHINFER_MOE_BACKEND: "latency" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-marlin.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-marlin.yaml new file mode 100644 index 000000000000..f2273bf2c96c --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-marlin.yaml @@ -0,0 +1,9 @@ +model_name: "Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_DEEP_GEMM: "0" + VLLM_USE_DEEP_GEMM_MOE: "0" + VLLM_TEST_FORCE_FP8_MARLIN: "1" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-triton.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-triton.yaml new file mode 100644 index 000000000000..ed61e9b89978 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-AutoFp8-triton.yaml @@ -0,0 +1,8 @@ +model_name: "Qwen/Qwen3-Coder-30B-A3B-Instruct-FP8" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_DEEP_GEMM: "0" + VLLM_USE_DEEP_GEMM_MOE: "0" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-deepgemm.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-deepgemm.yaml new file mode 100644 index 000000000000..f7ddd30342b3 --- /dev/null +++ 
b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-deepgemm.yaml @@ -0,0 +1,8 @@ +model_name: "RedHatAI/Qwen3-30B-A3B-FP8-block" +accuracy_threshold: 0.85 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_DEEP_GEMM: "1" + VLLM_USE_DEEP_GEMM_MOE: "1" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-fi-cutlass.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-fi-cutlass.yaml new file mode 100644 index 000000000000..db18dd01bb23 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-fi-cutlass.yaml @@ -0,0 +1,10 @@ +model_name: "RedHatAI/Qwen3-30B-A3B-FP8-block" +accuracy_threshold: 0.85 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_DEEP_GEMM: "0" + VLLM_USE_DEEP_GEMM_MOE: "0" + VLLM_USE_FLASHINFER_MOE_FP8: "1" + VLLM_FLASHINFER_MOE_BACKEND: "throughput" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-marlin.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-marlin.yaml new file mode 100644 index 000000000000..3d82d2e22c1a --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-marlin.yaml @@ -0,0 +1,9 @@ +model_name: "RedHatAI/Qwen3-30B-A3B-FP8-block" +accuracy_threshold: 0.85 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_DEEP_GEMM: "0" + VLLM_USE_DEEP_GEMM_MOE: "0" + VLLM_TEST_FORCE_FP8_MARLIN: "1" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-vllm-cutlass.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-vllm-cutlass.yaml new file mode 100644 index 000000000000..5621217de83a --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Block-vllm-cutlass.yaml @@ 
-0,0 +1,8 @@ +model_name: "RedHatAI/Qwen3-30B-A3B-FP8-block" +accuracy_threshold: 0.85 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_DEEP_GEMM: "0" + VLLM_USE_DEEP_GEMM_MOE: "0" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Channel-marlin.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Channel-marlin.yaml new file mode 100644 index 000000000000..8ed6410c36b5 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Channel-marlin.yaml @@ -0,0 +1,7 @@ +model_name: "RedHatAI/Qwen3-30B-A3B-FP8-dynamic" +accuracy_threshold: 0.85 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_TEST_FORCE_FP8_MARLIN: "1" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Channel-vllm-cutlass.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Channel-vllm-cutlass.yaml new file mode 100644 index 000000000000..d6adbfc5fba0 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-Fp8-CT-Channel-vllm-cutlass.yaml @@ -0,0 +1,5 @@ +model_name: "RedHatAI/Qwen3-30B-A3B-FP8-dynamic" +accuracy_threshold: 0.85 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-fi-cutlass-dp-ep.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-fi-cutlass-dp-ep.yaml new file mode 100644 index 000000000000..53fd62bac839 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-fi-cutlass-dp-ep.yaml @@ -0,0 +1,8 @@ +model_name: "RedHatAI/Qwen3-30B-A3B-NVFP4" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --data-parallel-size 2 --enable-expert-parallel" +env: + VLLM_USE_FLASHINFER_MOE_FP4: "1" 
+ VLLM_FLASHINFER_MOE_BACKEND: "throughput" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-fi-cutlass.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-fi-cutlass.yaml new file mode 100644 index 000000000000..6edacc32975c --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-fi-cutlass.yaml @@ -0,0 +1,8 @@ +model_name: "RedHatAI/Qwen3-30B-A3B-NVFP4" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_FLASHINFER_MOE_FP4: "1" + VLLM_FLASHINFER_MOE_BACKEND: "throughput" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-fi-trtllm.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-fi-trtllm.yaml new file mode 100644 index 000000000000..8e0b155fa70d --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-fi-trtllm.yaml @@ -0,0 +1,8 @@ +model_name: "RedHatAI/Qwen3-30B-A3B-NVFP4" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_FLASHINFER_MOE_FP4: "1" + VLLM_FLASHINFER_MOE_BACKEND: "latency" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-marlin.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-marlin.yaml new file mode 100644 index 000000000000..8199e6563495 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-marlin.yaml @@ -0,0 +1,7 @@ +model_name: "RedHatAI/Qwen3-30B-A3B-NVFP4" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_TEST_FORCE_FP8_MARLIN: "1" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-vllm-cutlass.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-vllm-cutlass.yaml new file mode 
100644 index 000000000000..b1ccadeddbba --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-CT-vllm-cutlass.yaml @@ -0,0 +1,5 @@ +model_name: "RedHatAI/Qwen3-30B-A3B-NVFP4" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-fi-cutlass-dp-ep.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-fi-cutlass-dp-ep.yaml new file mode 100644 index 000000000000..44f8700e4b46 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-fi-cutlass-dp-ep.yaml @@ -0,0 +1,8 @@ +model_name: "nvidia/Qwen3-30B-A3B-NVFP4" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --data-parallel-size 2 --enable-expert-parallel" +env: + VLLM_USE_FLASHINFER_MOE_FP4: "1" + VLLM_FLASHINFER_MOE_BACKEND: "throughput" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-fi-cutlass.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-fi-cutlass.yaml new file mode 100644 index 000000000000..09e76e21ab43 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-fi-cutlass.yaml @@ -0,0 +1,8 @@ +model_name: "nvidia/Qwen3-30B-A3B-NVFP4" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_FLASHINFER_MOE_FP4: "1" + VLLM_FLASHINFER_MOE_BACKEND: "throughput" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-fi-trtllm.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-fi-trtllm.yaml new file mode 100644 index 000000000000..a98afafbcde9 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-fi-trtllm.yaml @@ -0,0 +1,8 @@ +model_name: 
"nvidia/Qwen3-30B-A3B-NVFP4" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_USE_FLASHINFER_MOE_FP4: "1" + VLLM_FLASHINFER_MOE_BACKEND: "latency" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-marlin.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-marlin.yaml new file mode 100644 index 000000000000..4156cec89761 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-marlin.yaml @@ -0,0 +1,7 @@ +model_name: "nvidia/Qwen3-30B-A3B-NVFP4" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" +env: + VLLM_TEST_FORCE_FP8_MARLIN: "1" diff --git a/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-vllm-cutlass.yaml b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-vllm-cutlass.yaml new file mode 100644 index 000000000000..49a1589fcfea --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/Qwen3-30B-A3B-NvFp4-ModelOpt-vllm-cutlass.yaml @@ -0,0 +1,5 @@ +model_name: "nvidia/Qwen3-30B-A3B-NVFP4" +accuracy_threshold: 0.88 +num_questions: 1319 +num_fewshot: 5 +server_args: "--enforce-eager --max-model-len 8192 --tensor-parallel-size 2" diff --git a/tests/evals/gsm8k/configs/moe-refactor/config-b200.txt b/tests/evals/gsm8k/configs/moe-refactor/config-b200.txt new file mode 100644 index 000000000000..bf02f1363be3 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/config-b200.txt @@ -0,0 +1,12 @@ +Llama-4-Scout-Fp8-ModelOpt-fi-trtllm.yaml +Qwen3-30B-A3B-Fp8-AutoFp8-fi-trtllm.yaml +Qwen3-30B-A3B-NvFp4-CT-vllm-cutlass.yaml +Qwen3-30B-A3B-NvFp4-CT-marlin.yaml +Qwen3-30B-A3B-NvFp4-CT-fi-trtllm.yaml +Qwen3-30B-A3B-NvFp4-CT-fi-cutlass.yaml +Qwen3-30B-A3B-NvFp4-CT-fi-cutlass-dp-ep.yaml +Qwen3-30B-A3B-NvFp4-ModelOpt-vllm-cutlass.yaml 
+Qwen3-30B-A3B-NvFp4-ModelOpt-marlin.yaml +Qwen3-30B-A3B-NvFp4-ModelOpt-fi-trtllm.yaml +Qwen3-30B-A3B-NvFp4-ModelOpt-fi-cutlass.yaml +Qwen3-30B-A3B-NvFp4-ModelOpt-fi-cutlass-dp-ep.yaml diff --git a/tests/evals/gsm8k/configs/moe-refactor/config-h100.txt b/tests/evals/gsm8k/configs/moe-refactor/config-h100.txt new file mode 100644 index 000000000000..9725db7c8be2 --- /dev/null +++ b/tests/evals/gsm8k/configs/moe-refactor/config-h100.txt @@ -0,0 +1,14 @@ +Mixtral-8x7B-Fp8-AutoFp8-triton.yaml +Qwen3-30B-A3B-Fp8-AutoFp8-deepgemm.yaml +Qwen3-30B-A3B-Fp8-AutoFp8-fi-cutlass.yaml +Qwen3-30B-A3B-Fp8-AutoFp8-marlin.yaml +Qwen3-30B-A3B-Fp8-AutoFp8-triton.yaml +Qwen3-30B-A3B-Fp8-CT-Block-deepgemm.yaml +Qwen3-30B-A3B-Fp8-CT-Block-fi-cutlass.yaml +Qwen3-30B-A3B-Fp8-CT-Block-marlin.yaml +Qwen3-30B-A3B-Fp8-CT-Block-vllm-cutlass.yaml +Qwen3-30B-A3B-Fp8-CT-Channel-marlin.yaml +Qwen3-30B-A3B-Fp8-CT-Channel-vllm-cutlass.yaml +Llama-4-Scout-Fp8-ModelOpt-fi-cutlass.yaml +Llama-4-Scout-Fp8-ModelOpt-marlin.yaml +Llama-4-Scout-Fp8-ModelOpt-triton.yaml