diff --git a/.github/workflows/checkpoint_converter.yml b/.github/workflows/checkpoint_converter.yml deleted file mode 100644 index 4820497f79c..00000000000 --- a/.github/workflows/checkpoint_converter.yml +++ /dev/null @@ -1,175 +0,0 @@ -# # Tests layout - -# Each folder under tests/ corresponds to a test category for a sub-namespace in verl. For instance: -# - `tests/trainer` for testing functionality related to `verl/trainer` -# - `tests/models` for testing functionality related to `verl/models` -# - ... - -# There are a few folders with `special_` prefix, created for special purposes: -# - `special_distributed`: unit tests that must run with multiple GPUs -# - `special_e2e`: end-to-end tests with training/generation scripts -# - `special_npu`: tests for NPUs -# - `special_sanity`: a suite of quick sanity tests -# - `special_standalone`: a set of test that are designed to run in dedicated environments - -# Accelerators for tests -# - By default tests are run with GPU available, except for the ones under `special_npu`, and any test script whose name ends with `on_cpu.py`. -# - For test scripts with `on_cpu.py` name suffix would be tested on CPU resources in linux environment. - -# # Workflow layout - -# All CI tests are configured by yaml files in `.github/workflows/`. Here's an overview of all test configs: -# 1. A list of always triggered CPU sanity tests: `check-pr-title.yml`, `secrets_scan.yml`, `check-pr-title,yml`, `pre-commit.yml`, `doc.yml` -# 2. Some heavy multi-GPU unit tests, such as `model.yml`, `vllm.yml`, `sgl.yml` -# 3. End-to-end tests: `e2e_*.yml` -# 4. Unit tests -# - `cpu_unit_tests.yml`, run pytest on all scripts with file name pattern `tests/**/test_*_on_cpu.py` -# - `gpu_unit_tests.yml`, run pytest on all scripts with file without the `on_cpu.py` suffix. 
-# - Since cpu/gpu unit tests by default runs all tests under `tests`, please make sure tests are manually excluded in them when -# - new workflow yaml is added to `.github/workflows` -# - new tests are added to workflow mentioned in 2. - -name: checkpoint_converter -# latest version: Megatron-LM core_v0.14.0 https://github.com/NVIDIA/Megatron-LM/tree/core_v0.14.0 - -on: - # Trigger the workflow on push or pull request, - # but only for the main branch - push: - branches: - - main - - v0.* - pull_request: - branches: - - main - - v0.* - paths: - - "**/*.py" - # Other entrypoints - - "!examples/**" - - "!tests/**" - - "!verl/trainer/main_*.py" - - "!verl/trainer/fsdp_sft_trainer.py" - # Recipes - - "!recipe/**" - # FSDP - - "!verl/workers/**/*dp_*.py" - # Entrypoints - - ".github/workflows/checkpoint_converter.yml" - - ".github/workflows/e2e_ppo_trainer_megatron.yml" - - "examples/data_preprocess/gsm8k.py" - - "tests/special_e2e/run_ppo_trainer_megatron.sh" - - "verl/trainer/main_ppo.py" - - "verl/trainer/config/ppo_megatron_trainer.yaml" - -# Cancel jobs on the same ref if a new one is triggered -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: ${{ github.ref != 'refs/heads/main' }} - -# Declare permissions just read content. 
-permissions: - contents: read - -env: - IMAGE: "verl-ci-cn-beijing.cr.volces.com/verlai/verl:sgl055.dev2" - DYNAMIC_RUNNER_ENDPOINT: "https://sd10g3clalm04ug7alq90.apigateway-cn-beijing.volceapi.com/runner" - -jobs: - setup: - if: github.repository_owner == 'volcengine' - runs-on: ubuntu-latest - outputs: - runner-label: ${{ steps.create-runner.outputs.runner-label }} - mlp-task-id: ${{ steps.create-runner.outputs.mlp-task-id }} - steps: - - uses: actions/checkout@v4 - - id: create-runner - uses: volcengine/vemlp-github-runner@v1 - with: - mode: "create" - faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}" - mlp-image: "${{ env.IMAGE }}" - - checkpoint_converter: - needs: setup - runs-on: [ "${{ needs.setup.outputs.runner-label || 'L20x8' }}" ] - timeout-minutes: 20 # Increase this timeout value as needed - env: - HTTP_PROXY: ${{ secrets.PROXY_HTTP }} - HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }} - NO_PROXY: "localhost,127.0.0.1" - HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - fetch-depth: 0 - - name: Install the current repository - run: | - pip3 install -e .[test] -# - name: Download Model to Use -# run: | -# huggingface-cli download Qwen/Qwen2.5-0.5B --local-dir ${HOME}/models/Qwen/Qwen2.5-0.5B -# huggingface-cli download deepseek-ai/deepseek-coder-1.3b-instruct --local-dir ${HOME}/models/deepseek-ai/deepseek-coder-1.3b-instruct -# export HF_HUB_OFFLINE=1 - - name: Running Huggingface to Megatron dist_ckpt converter (Qwen/Qwen2.5-0.5B) - run: | - ray stop --force - python scripts/converter_hf_to_mcore.py --hf_model_path=${HOME}/models/Qwen/Qwen2.5-0.5B --output_path checkpoints/Qwen/Qwen2.5-0.5B --test - - name: Running Huggingface to Megatron dist_ckpt converter (deepseek-ai/deepseek-coder-1.3b-instruct) - run: | - ray stop --force - python scripts/converter_hf_to_mcore.py --hf_model_path=${HOME}/models/deepseek-ai/deepseek-coder-1.3b-instruct --output_path 
checkpoints/deepseek-ai/deepseek-coder-1.3b-instruct --test - - name: Clean up - run: | - rm -rf checkpoints - - checkpoint_converter_large_moe_models: - needs: setup - runs-on: [ "${{ needs.setup.outputs.runner-label || 'L20x8' }}" ] - timeout-minutes: 30 # Increase this timeout value as needed - env: - HTTP_PROXY: ${{ secrets.PROXY_HTTP }} - HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }} - NO_PROXY: "localhost,127.0.0.1" - HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable - HF_ENDPOINT: "https://hf-mirror.com" - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - fetch-depth: 0 - - name: Install the current repository - run: | - pip3 install -e .[test] -# - name: Download Model to Use -# run: | -# huggingface-cli download Qwen/Qwen1.5-MoE-A2.7B-Chat --local-dir ${HOME}/models/Qwen/Qwen1.5-MoE-A2.7B-Chat -# export HF_HUB_OFFLINE=1 - - name: Running Huggingface to Megatron dist_ckpt CPU converter (Qwen/Qwen1.5-MoE-A2.7B-Chat) - run: | - ray stop --force - python scripts/converter_hf_to_mcore.py --hf_model_path=${HOME}/models/Qwen/Qwen1.5-MoE-A2.7B-Chat --output_path checkpoints/Qwen/Qwen1.5-MoE-A2.7B-Chat --use_cpu_initialization - - name: Running distributed Huggingface to Megatron dist_ckpt CPU converter (Qwen/Qwen1.5-MoE-A2.7B-Chat) - run: | - ray stop --force - torchrun --nproc_per_node 8 --nnodes 1 scripts/converter_hf_to_mcore.py --hf_model_path=${HOME}/models/Qwen/Qwen1.5-MoE-A2.7B-Chat --output_path checkpoints/Qwen/Qwen1.5-MoE-A2.7B-Chat_dist --use_cpu_initialization - - name: clean up - run: | - rm -rf checkpoints - - cleanup: - runs-on: ubuntu-latest - needs: - [ - setup, - checkpoint_converter, - checkpoint_converter_large_moe_models - ] - if: always() - steps: - - id: destroy-runner - uses: volcengine/vemlp-github-runner@v1 - with: - mode: "destroy" - faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}" - mlp-task-id: "${{ needs.setup.outputs.mlp-task-id }}" \ No newline at end of file diff --git 
a/.github/workflows/e2e_ppo_trainer_megatron_sglang.yml b/.github/workflows/e2e_ppo_trainer_megatron_sglang.yml index ccdc7c9c15d..df049bb0871 100644 --- a/.github/workflows/e2e_ppo_trainer_megatron_sglang.yml +++ b/.github/workflows/e2e_ppo_trainer_megatron_sglang.yml @@ -136,11 +136,6 @@ jobs: export VLLM_USE_V1=1 ray start --head ENGINE=sglang MODE=async RESUME_MODE=auto MODEL_ID=deepseek-ai/deepseek-coder-1.3b-instruct TOTAL_TRAIN_STEPS=2 bash tests/special_e2e/run_ppo_trainer_megatron.sh - - name: Test Megatron checkpoints merging function (DeepSeek Actor and Critic) - run: | - exp_name="deepseek-coder-1.3b-instruct-megatron-gsm8k-minimal" - python -m verl.model_merger test --backend megatron --local_dir checkpoints/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface - python -m verl.model_merger test --backend megatron --is-value-model --local_dir checkpoints/verl-test/${exp_name}/global_step_1/critic --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/critic/huggingface - name: Profiling GRPO GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (Deepseek) run: | ray stop --force @@ -181,11 +176,6 @@ jobs: run: | ray stop --force ALL_OFFLOAD=True VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 LR_WARMUP_STEPS=1 TOTAL_TRAIN_STEPS=2 MODEL_ID=Qwen/Qwen3-0.6B bash tests/special_e2e/run_ppo_trainer_megatron.sh - - name: Test Megatron checkpoints merging function (Qwen3 Actor and Critic) - run: | - exp_name="qwen3-0.6b-megatron-gsm8k-minimal" - python -m verl.model_merger test --backend megatron --tie-word-embedding --local_dir checkpoints/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface - python -m verl.model_merger test --backend megatron --is-value-model --local_dir checkpoints/verl-test/${exp_name}/global_step_1/critic --test_hf_dir 
checkpoints/verl-test/${exp_name}/global_step_1/critic/huggingface - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with FP8 rollout run: | ray stop --force diff --git a/.github/workflows/e2e_ppo_trainer_megatron_sglang_2.yml b/.github/workflows/e2e_ppo_trainer_megatron_sglang_2.yml index ccc503b0d58..e738fde2f8b 100644 --- a/.github/workflows/e2e_ppo_trainer_megatron_sglang_2.yml +++ b/.github/workflows/e2e_ppo_trainer_megatron_sglang_2.yml @@ -105,37 +105,6 @@ jobs: faas-url: "${{ env.DYNAMIC_RUNNER_ENDPOINT }}" mlp-image: "${{ env.IMAGE }}" - e2e_ppo_trainer_megatron-qwen2_5vl-3b: - needs: setup - runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"] - timeout-minutes: 60 # Increase this timeout value as needed - env: - HTTP_PROXY: ${{ secrets.PROXY_HTTP }} - HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }} - NO_PROXY: "localhost,127.0.0.1,hf-mirror.com" - HF_ENDPOINT: "https://hf-mirror.com" - HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - fetch-depth: 0 - - name: Install the current repository - run: | - pip3 install --no-deps -e .[test] - - name: Prepare Geo3k dataset - run: | - python3 examples/data_preprocess/geo3k.py --local_dataset_path ${HOME}/models/hf_data/hiyouga/geometry3k/ - - name: Prepare dist_ckpt of Qwen2.5-VL-3B, only supports dist_ckpt - run: | - python3 scripts/converter_hf_to_mcore.py --hf_model_path ${HOME}/models/Qwen/Qwen2.5-VL-3B-Instruct --output_path checkpoints/verl-test/qwen2.5-vl-3b-megatron - - name: Running Geo3k E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen) - run: | - ray stop --force - ENGINE=sglang ROLLOUT_MODE=async TRAIN_FILES=${HOME}/data/geo3k/train.parquet VAL_FILES=${HOME}/data/geo3k/test.parquet MAX_PROMPT_LENGTH=1024 MAX_RESPONSE_LENGTH=2048 MODEL_ID=Qwen/Qwen2.5-VL-3B-Instruct ADV_ESTIMATOR=grpo USE_DYNAMIC_BSZ=False SKIP_SAVE_HF_MODEL=1 COMMON_PP=4 COMMON_VPP=null 
COMMON_CP=1 COMMON_TP=2 USE_DIST_CKPT=true DIST_CKPT_PATH=checkpoints/verl-test/qwen2.5-vl-3b-megatron bash tests/special_e2e/run_ppo_trainer_megatron.sh - - name: clean up - run: | - rm -rf checkpoints - e2e_ppo_trainer_fsdp_sglang: needs: setup runs-on: [ "${{ needs.setup.outputs.runner-label || 'L20x8' }}" ] @@ -221,7 +190,6 @@ jobs: needs: [ setup, - e2e_ppo_trainer_megatron-qwen2_5vl-3b, e2e_ppo_trainer_fsdp-qwen2_5vl-3b, e2e_ppo_trainer_fsdp_sglang, ] diff --git a/.github/workflows/e2e_ppo_trainer_megatron_vllm.yml b/.github/workflows/e2e_ppo_trainer_megatron_vllm.yml index 5dfaa4776b1..f329ae9b7aa 100644 --- a/.github/workflows/e2e_ppo_trainer_megatron_vllm.yml +++ b/.github/workflows/e2e_ppo_trainer_megatron_vllm.yml @@ -186,11 +186,6 @@ jobs: run: | ray stop --force ALL_OFFLOAD=True VAL_BEFORE_TRAIN=True TEST_FREQ=1 SAVE_FREQ=1 LR_WARMUP_STEPS=1 TOTAL_TRAIN_STEPS=2 MODEL_ID=Qwen/Qwen3-0.6B bash tests/special_e2e/run_ppo_trainer_megatron.sh - - name: Test Megatron checkpoints merging function (Qwen3 Actor and Critic) - run: | - exp_name="qwen3-0.6b-megatron-gsm8k-minimal" - python -m verl.model_merger test --backend megatron --tie-word-embedding --local_dir checkpoints/verl-test/${exp_name}/global_step_1/actor --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/actor/huggingface - python -m verl.model_merger test --backend megatron --is-value-model --local_dir checkpoints/verl-test/${exp_name}/global_step_1/critic --test_hf_dir checkpoints/verl-test/${exp_name}/global_step_1/critic/huggingface - name: Running GSM8K E2E training tests with 3D parallelism on 8 L20 GPUs with FP8 rollout run: | ray stop --force diff --git a/.github/workflows/e2e_ppo_trainer_megatron_vllm_2.yml b/.github/workflows/e2e_ppo_trainer_megatron_vllm_2.yml index 2d5e0821d48..a35756dd224 100644 --- a/.github/workflows/e2e_ppo_trainer_megatron_vllm_2.yml +++ b/.github/workflows/e2e_ppo_trainer_megatron_vllm_2.yml @@ -153,42 +153,6 @@ jobs: run: | rm -rf checkpoints - 
e2e_ppo_trainer_megatron-qwen2_5vl-3b: - needs: setup - runs-on: ["${{ needs.setup.outputs.runner-label || 'L20x8' }}"] - timeout-minutes: 60 # Increase this timeout value as needed - env: - HTTP_PROXY: ${{ secrets.PROXY_HTTP }} - HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }} - NO_PROXY: "localhost,127.0.0.1,hf-mirror.com" - HF_ENDPOINT: "https://hf-mirror.com" - HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - fetch-depth: 0 - - name: Install the current repository - run: | - pip3 install --no-deps -e .[test] - pip3 install transformers==$TRANSFORMERS_VERSION - - name: Prepare Geo3k dataset - run: | - python3 examples/data_preprocess/geo3k.py --local_dataset_path ${HOME}/models/hf_data/hiyouga/geometry3k/ - - name: Prepare dist_ckpt of Qwen2.5-VL-3B, only supports dist_ckpt - run: | - python3 scripts/converter_hf_to_mcore.py --hf_model_path ${HOME}/models/Qwen/Qwen2.5-VL-3B-Instruct --output_path checkpoints/verl-test/qwen2.5-vl-3b-megatron - - name: Running Geo3k E2E training tests with 3D parallelism on 8 L20 GPUs with Megatron (Qwen) - run: | - ray stop --force - TRAIN_FILES=${HOME}/data/geo3k/train.parquet VAL_FILES=${HOME}/data/geo3k/test.parquet \ - MAX_PROMPT_LENGTH=1024 MAX_RESPONSE_LENGTH=2048 MODEL_ID=Qwen/Qwen2.5-VL-3B-Instruct ADV_ESTIMATOR=grpo \ - USE_DYNAMIC_BSZ=False USE_FUSED_KERNELS=True SKIP_SAVE_HF_MODEL=1 \ - COMMON_PP=4 COMMON_VPP=null COMMON_CP=1 COMMON_TP=2 USE_DIST_CKPT=true \ - DIST_CKPT_PATH=checkpoints/verl-test/qwen2.5-vl-3b-megatron bash tests/special_e2e/run_ppo_trainer_megatron.sh - - name: clean up - run: | - rm -rf checkpoints - e2e_ppo_trainer_fsdp_vllm: needs: setup runs-on: [ "${{ needs.setup.outputs.runner-label || 'L20x8' }}" ] @@ -330,7 +294,6 @@ jobs: [ setup, e2e_ppo_trainer_megatron-moe-expert-parallel, - e2e_ppo_trainer_megatron-qwen2_5vl-3b, e2e_ppo_trainer_fsdp-qwen2_5vl-3b, e2e_ppo_trainer_fsdp_vllm, ] diff --git 
a/.github/workflows/model.yml b/.github/workflows/model.yml index cab35a68d96..c9f1f2deac2 100644 --- a/.github/workflows/model.yml +++ b/.github/workflows/model.yml @@ -48,7 +48,6 @@ on: # Entrypoints - ".github/workflows/model.yml" - "tests/special_distributed/test_fsdp_ckpt.py" - - "tests/special_distributed/test_mcore_config_converter.py" - "tests/special_distributed/test_tensor_dict.py" - "tests/models/**" - "tests/special_distributed/run_all.sh" @@ -144,34 +143,6 @@ jobs: run: | STRATEGY=fsdp2 torchrun --nproc_per_node=8 tests/special_distributed/test_fsdp_ckpt.py - mcore_config_converter: - needs: setup - runs-on: [ "${{ needs.setup.outputs.runner-label || 'L20x8' }}" ] - timeout-minutes: 20 # Increase this timeout value as needed - env: - HTTP_PROXY: ${{ secrets.PROXY_HTTP }} - HTTPS_PROXY: ${{ secrets.PROXY_HTTPS }} - NO_PROXY: "localhost,127.0.0.1,hf-mirror.com" - HF_ENDPOINT: "https://hf-mirror.com" - HF_HUB_ENABLE_HF_TRANSFER: "0" # This is more stable - steps: - - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - with: - fetch-depth: 0 - - name: Install the current repository - run: | - pip3 install -e .[test] -# - name: Download model config files -# run: | -# hf download Qwen/Qwen2.5-7B config.json --local-dir $HOME/configs/Qwen/Qwen2.5-7B -# hf download Qwen/Qwen3-8B config.json --local-dir $HOME/configs/Qwen/Qwen3-8B -# hf download deepseek-ai/deepseek-coder-1.3b-instruct config.json --local-dir $HOME/configs/deepseek-ai/deepseek-coder-1.3b-instruct -# hf download Qwen/Qwen2-57B-A14B config.json --local-dir $HOME/configs/Qwen/Qwen2-57B-A14B -# hf download Qwen/Qwen3-30B-A3B config.json --local-dir $HOME/configs/Qwen/Qwen3-30B-A3B -# hf download deepseek-ai/DeepSeek-V3-Base config.json --local-dir $HOME/configs/deepseek-ai/DeepSeek-V3-Base - - name: Running mcore config converter tests on 8 L20 GPUs - run: | - torchrun --nproc_per_node=8 tests/special_distributed/test_mcore_config_converter.py model_engine: needs: setup @@ 
-206,7 +177,6 @@ jobs: setup, model_rmpad, model_rmpad_fsdp2_unstable, - mcore_config_converter, model_engine ] if: always() diff --git a/docs/advance/checkpoint.rst b/docs/advance/checkpoint.rst index 56bec4a75c3..9782af951d9 100644 --- a/docs/advance/checkpoint.rst +++ b/docs/advance/checkpoint.rst @@ -137,32 +137,8 @@ Current implementation use solution 2. HuggingFace to Megatron DistCheckpoint details ---------------------------------------------- -If your model is quite huge, we recommend you to use Megatron dist-checkpoint to load the model. -Megatron dist-checkpoint supports loading with different kinds of model parallelism, -and it is much faster than the original checkpoint loading. - -To convert original HuggingFace model to Megatron dist-checkpoint, -you can use the ``scripts/converter_hf_to_mcore.py`` script. Large MoE models are temporarily supported with CPU initialization, -which is a little slower. While we are working on a better solution to support large models. - -Example command to convert the model is as follows: - -.. code:: bash - - python scripts/converter_hf_to_mcore.py \ - --hf_model_path Qwen/Qwen1.5-MoE-A2.7B-Chat \ - --output_path /mnt/disk/Qwen/Qwen1.5-MoE-A2.7B-Chat \ - --use_cpu_initialization # Only work for MoE models - - -Example command to distributed convert the huge model like deepseekv3 671B is as follows: - -.. code:: bash - - torchrun --nproc_per_node 1 --nnodes 8 --node_rank ${RANK} scripts/converter_hf_to_mcore.py \ - --hf_model_path deepseek-ai/DeepSeek-V3 \ - --output_path /mnt/disk/deepseek-ai/DeepSeek-V3 \ - --use_cpu_initialization # Only work for MoE models +Through ``mbridge``, we can directly save the mcore model to huggingface format during training. +No need to convert the model to Megatron dist-checkpoint format. 
Original Checkpoint Utils ------------------------- diff --git a/docs/perf/best_practices.rst b/docs/perf/best_practices.rst index d7ff382c250..69d8286710a 100644 --- a/docs/perf/best_practices.rst +++ b/docs/perf/best_practices.rst @@ -110,6 +110,10 @@ Parameter Reference Path to the actor checkpoint in HuggingFace-compatible format. - ``actor_rollout_ref.actor.megatron.use_mbridge``: Enable mbridge format conversion when the model was trained with Megatron. Use the latest mbridge release: https://github.com/ISEEKYAN/mbridge. + Now it must be True. + - ``actor_rollout_ref.actor.megatron.vanilla_mbridge``: + If set to True, use mbridge; otherwise use Megatron-Bridge https://github.com/NVIDIA-NeMo/Megatron-Bridge. + Now it is True by default, and it will default to False in a future release (v0.8). :math:`\pi` - ``actor_rollout_ref.rollout.name``: