From 1aa088b60bd8c50fb96d705d7f618156e6ca05c6 Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Tue, 5 Aug 2025 16:30:39 -0700 Subject: [PATCH 01/10] chore: relocate deepscaler configs + add deepscaler CI tests Signed-off-by: Terry Kong --- docs/guides/grpo-deepscaler.md | 10 ++-- .../llm}/grpo-deepscaler-1.5b-16K.yaml | 0 .../llm}/grpo-deepscaler-1.5b-24K.yaml | 0 .../llm}/grpo-deepscaler-1.5b-8K.yaml | 0 .../llm/grpo-deepscaler-1.5b-16K.sh | 58 +++++++++++++++++++ .../llm/grpo-deepscaler-1.5b-24K.sh | 58 +++++++++++++++++++ .../llm/grpo-deepscaler-1.5b-8K.sh | 51 ++++++++++++++++ 7 files changed, 172 insertions(+), 5 deletions(-) rename examples/configs/{ => recipes/llm}/grpo-deepscaler-1.5b-16K.yaml (100%) rename examples/configs/{ => recipes/llm}/grpo-deepscaler-1.5b-24K.yaml (100%) rename examples/configs/{ => recipes/llm}/grpo-deepscaler-1.5b-8K.yaml (100%) create mode 100755 tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh create mode 100755 tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh create mode 100755 tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh diff --git a/docs/guides/grpo-deepscaler.md b/docs/guides/grpo-deepscaler.md index 42f9029230..03ec680560 100644 --- a/docs/guides/grpo-deepscaler.md +++ b/docs/guides/grpo-deepscaler.md @@ -5,12 +5,12 @@ This guide explains how to use NeMo RL to train long Chain of Thought (CoT) reas ## Train the Model We follow the DeepScaleR recipe and train the model in three stages. In the first stage, we train with an 8K context window. In the second stage, we train with a 16K context window. In the third stage, we train with a 24K context window. -To train the model using NeMo RL, use the `examples/configs/grpo-deepscaler-1.5b-8K.yaml` config file. This file closely matches the experiment settings in the original DeepScaleR recipe. We then train with `examples/configs/grpo-deepscaler-1.5b-16K.yaml` and `examples/configs/grpo-deepscaler-1.5b-24K.yaml` for the second and third stages, respectively. +To train the model using NeMo RL, use the `examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml` config file. This file closely matches the experiment settings in the original DeepScaleR recipe. We then train with `examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml` and `examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml` for the second and third stages, respectively. ```sh -uv run examples/run_grpo_math.py --config=examples/configs/grpo-deepscaler-1.5b-8K.yaml -uv run examples/run_grpo_math.py --config=examples/configs/grpo-deepscaler-1.5b-16K.yaml policy.model_name=/path/to/8K/checkpoint/hf -uv run examples/run_grpo_math.py --config=examples/configs/grpo-deepscaler-1.5b-24K.yaml policy.model_name=/path/to/16K/checkpoint/hf +uv run examples/run_grpo_math.py --config=examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml +uv run examples/run_grpo_math.py --config=examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml policy.model_name=/path/to/8K/checkpoint/hf +uv run examples/run_grpo_math.py --config=examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml policy.model_name=/path/to/16K/checkpoint/hf ``` At the end of each stage, you need to specify the Hugging Face checkpoint to continue training with. 
To get this checkpoint, we convert a model checkpoint to a Hugging Face checkpoint with the following command: @@ -19,7 +19,7 @@ At the end of each stage, you need to specify the Hugging Face checkpoint to con uv run examples/converters/convert_dcp_to_hf.py --config=results/grpo-deepscaler-1.5b-8K/step_240/config.yaml --dcp-ckpt-path=results/grpo-deepscaler-1.5b-8K/step_240/policy/weights --hf-ckpt-path=results/grpo-deepscaler-1.5b-8K/step_240/hf ``` -When running the next command, we use the Hugging Face checkpoint as the initial checkpoint. We train with an 8K context window for 240 steps, a 16K context window for 290 steps, and a 24K context window for 50 steps. The 8K and 16K steps can be run on a single 8XH100 80GB node, while the 24K step requires four nodes. If you're running on 8XA100 80GB, you will need at least 1 node for 8K training and four nodes for 16-24k training. +When running the next command, we use the Hugging Face checkpoint as the initial checkpoint. We train with an 8K context window for 240 steps, a 16K context window for 290 steps, and a 24K context window for 50 steps. We run all experiments on a single 8XH100 80GB node. If you're running on 8XA100 80GB, you will need at least 1 node for 8K training and 2 nodes for 16-24k training. ## Training Curve When using the above commands, we get the following training curve: diff --git a/examples/configs/grpo-deepscaler-1.5b-16K.yaml b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml similarity index 100% rename from examples/configs/grpo-deepscaler-1.5b-16K.yaml rename to examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml diff --git a/examples/configs/grpo-deepscaler-1.5b-24K.yaml b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml similarity index 100% rename from examples/configs/grpo-deepscaler-1.5b-24K.yaml rename to examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml diff --git a/examples/configs/grpo-deepscaler-1.5b-8K.yaml b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml similarity index 100% rename from examples/configs/grpo-deepscaler-1.5b-8K.yaml rename to examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh new file mode 100755 index 0000000000..e8beffa8ed --- /dev/null +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh @@ -0,0 +1,58 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=$((530-240)) +MAX_STEPS=$((530-240)) +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=240 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Use checkpoint created from the 8K checkpoint in grpo-deepscaler-1.5b-8K.sh +if [[ -z "$CACHED_MODEL_PATH" ]]; then + echo "Need to set CACHED_MODEL_PATH to the path to the trained 8K checkpoint" + exit 1 +fi + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + policy.model_name=$CACHED_MODEL_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py 
$LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'mean(data["train/token_mult_prob_error"]) < 1.1' \ + 'data["train/token_mult_prob_error"]["290"] < 1.1' +fi + +# Convert 16k checkpoint +uv run examples/converters/convert_dcp_to_hf.py \ + --config=results/grpo/step_290/config.yaml \ + --dcp-ckpt-path=results/grpo/step_290/policy/weights \ + --hf-ckpt-path=results/grpo-deepscaler-16k-290-hf + +# Run eval +uv run examples/run_eval.py \ + generation.model_name=results/grpo-deepscaler-16k-290-hf \ + data.prompt_file=examples/prompts/cot.txt \ + generation.vllm_cfg.max_model_len=32768 \ No newline at end of file diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh new file mode 100755 index 0000000000..9213f2139c --- /dev/null +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh @@ -0,0 +1,58 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=4 +STEPS_PER_RUN=$((630-530)) +MAX_STEPS=$((630-530)) +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up +NUM_MINUTES=240 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Use checkpoint created from the 16K checkpoint in grpo-deepscaler-1.5b-16K.sh +if [[ -z "$CACHED_MODEL_PATH" ]]; then + echo "Need to set CACHED_MODEL_PATH to the path to the trained 16K checkpoint" + exit 1 +fi + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + policy.model_name=$CACHED_MODEL_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'mean(data["train/token_mult_prob_error"]) < 1.1' \ + 'data["train/token_mult_prob_error"]["100"] < 1.1' +fi + +# Convert 16k checkpoint +uv run examples/converters/convert_dcp_to_hf.py \ + --config=results/grpo/step_100/config.yaml \ + --dcp-ckpt-path=results/grpo/step_100/policy/weights \ + --hf-ckpt-path=results/grpo-deepscaler-24k-100-hf + +# Run eval +uv run examples/run_eval.py \ + generation.model_name=results/grpo-deepscaler-24k-100-hf \ + data.prompt_file=examples/prompts/cot.txt \ + generation.vllm_cfg.max_model_len=32768 diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh new file mode 100755 index 0000000000..997ffa0584 --- /dev/null +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh @@ -0,0 +1,51 @@ +#!/bin/bash +SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd) +source $SCRIPT_DIR/common.env + +# ===== BEGIN CONFIG ===== +NUM_NODES=1 +STEPS_PER_RUN=240 +MAX_STEPS=240 +NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) 
# Round up +NUM_MINUTES=240 +# ===== END CONFIG ===== + +exit_if_max_steps_reached + +# Run the experiment +cd $PROJECT_ROOT +uv run examples/run_grpo_math.py \ + --config $CONFIG_PATH \ + grpo.max_num_steps=$MAX_STEPS \ + logger.log_dir=$LOG_DIR \ + logger.wandb_enabled=True \ + logger.wandb.project=nemo-rl \ + logger.wandb.name=$EXP_NAME \ + logger.monitor_gpus=True \ + logger.tensorboard_enabled=True \ + checkpointing.enabled=True \ + checkpointing.checkpoint_dir=$CKPT_DIR \ + $@ \ + 2>&1 | tee $RUN_LOG + +# Convert tensorboard logs to json +uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS + +# Only run metrics if the target step is reached +if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then + uv run tests/check_metrics.py $JSON_METRICS \ + 'mean(data["train/token_mult_prob_error"]) < 1.1' \ + 'data["train/token_mult_prob_error"]["240"] < 1.1' +fi + +# Convert 8k checkpoint +uv run examples/converters/convert_dcp_to_hf.py \ + --config=results/grpo/step_240/config.yaml \ + --dcp-ckpt-path=results/grpo/step_240/policy/weights \ + --hf-ckpt-path=results/grpo-deepscaler-8k-240-hf + +# Run eval +uv run examples/run_eval.py \ + generation.model_name=results/grpo-deepscaler-8k-240-hf \ + data.prompt_file=examples/prompts/cot.txt \ + generation.vllm_cfg.max_model_len=32768 \ No newline at end of file From b1ff1f8d5c90cd869f158249517742de62e9cb58 Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Thu, 7 Aug 2025 02:39:41 +0000 Subject: [PATCH 02/10] updated recipes + hacks Signed-off-by: Terry Kong --- nemo_rl/data/processors.py | 2 ++ nemo_rl/evals/eval.py | 2 +- .../llm/grpo-deepscaler-1.5b-16K.sh | 18 ++++++---- .../llm/grpo-deepscaler-1.5b-24K.sh | 20 +++++++---- .../llm/grpo-deepscaler-1.5b-8K.sh | 33 ++++++++++++++----- 5 files changed, 53 insertions(+), 22 deletions(-) diff --git a/nemo_rl/data/processors.py b/nemo_rl/data/processors.py index 67e3658882..7fb54ceceb 100644 --- a/nemo_rl/data/processors.py +++ b/nemo_rl/data/processors.py @@ -65,6 +65,8 @@ def math_data_processor( add_special_tokens=False, ) user_message["token_ids"] = tokenizer(message, return_tensors="pt")["input_ids"][0] + if tokenizer.bos_token_id == user_message["token_ids"][0] == user_message["token_ids"][1]: + user_message["token_ids"] = tokenizer(message, return_tensors="pt", add_special_tokens=False)["input_ids"][0] user_message["content"] = message message_log.append(user_message) diff --git a/nemo_rl/evals/eval.py b/nemo_rl/evals/eval.py index 9f5be0dbf7..285c901e77 100644 --- a/nemo_rl/evals/eval.py +++ b/nemo_rl/evals/eval.py @@ -427,7 +427,7 @@ def _save_evaluation_data_to_json(evaluation_data, master_config, save_path): "model_name": master_config["generation"]["model_name"], "dataset_name": master_config["data"]["dataset_name"], "metric": master_config["eval"]["metric"], - "pass_k_value": master_config["eval"]["pass_k_value"], + "k_value": master_config["eval"]["k_value"], "num_tests_per_prompt": master_config["eval"]["num_tests_per_prompt"], "temperature": master_config["generation"]["temperature"], "top_p": master_config["generation"]["top_p"], diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh index e8beffa8ed..56bc935bdc 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh @@ -42,17 +42,23 @@ uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS if [[ $(jq 
'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then uv run tests/check_metrics.py $JSON_METRICS \ 'mean(data["train/token_mult_prob_error"]) < 1.1' \ - 'data["train/token_mult_prob_error"]["290"] < 1.1' + "data['train/token_mult_prob_error']['$MAX_STEPS'] < 1.1" fi # Convert 16k checkpoint uv run examples/converters/convert_dcp_to_hf.py \ - --config=results/grpo/step_290/config.yaml \ - --dcp-ckpt-path=results/grpo/step_290/policy/weights \ - --hf-ckpt-path=results/grpo-deepscaler-16k-290-hf + --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ + --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ + --hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-16k-${MAX_STEPS}-hf # Run eval uv run examples/run_eval.py \ - generation.model_name=results/grpo-deepscaler-16k-290-hf \ + generation.model_name=$CKPT_DIR/grpo-deepscaler-16k-${MAX_STEPS}-hf \ data.prompt_file=examples/prompts/cot.txt \ - generation.vllm_cfg.max_model_len=32768 \ No newline at end of file + generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-16k + +cat ${RUN_LOG}.aime-16k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-16k-metric.json + +uv run tests/check_metrics.py ${RUN_LOG}-16k-metric.json \ + 'data["score"] >= 0.25' \ + diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh index 9213f2139c..6156c0e9f1 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh @@ -42,17 +42,23 @@ uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then uv run tests/check_metrics.py $JSON_METRICS \ 'mean(data["train/token_mult_prob_error"]) < 1.1' \ - 'data["train/token_mult_prob_error"]["100"] < 1.1' + "data['train/token_mult_prob_error']['$MAX_STEPS'] < 1.1" fi -# Convert 16k checkpoint +# Convert 24k checkpoint uv run examples/converters/convert_dcp_to_hf.py \ - --config=results/grpo/step_100/config.yaml \ - --dcp-ckpt-path=results/grpo/step_100/policy/weights \ - --hf-ckpt-path=results/grpo-deepscaler-24k-100-hf + --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ + --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ + --hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-24k-${MAX_STEPS}-hf # Run eval uv run examples/run_eval.py \ - generation.model_name=results/grpo-deepscaler-24k-100-hf \ + generation.model_name=$CKPT_DIR/grpo-deepscaler-24k-${MAX_STEPS}-hf \ data.prompt_file=examples/prompts/cot.txt \ - generation.vllm_cfg.max_model_len=32768 + generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-24k + +cat ${RUN_LOG}.aime-24k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-24k-metric.json + +uv run tests/check_metrics.py ${RUN_LOG}-24k-metric.json \ + 'data["score"] >= 0.25' \ + diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh index 997ffa0584..b76964c956 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh @@ -4,11 +4,13 @@ source $SCRIPT_DIR/common.env # ===== BEGIN CONFIG ===== NUM_NODES=1 -STEPS_PER_RUN=240 -MAX_STEPS=240 +STEPS_PER_RUN=40 +MAX_STEPS=40 NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up NUM_MINUTES=240 # ===== END CONFIG ===== +STEPS_PER_RUN=240 
+MAX_STEPS=240 exit_if_max_steps_reached @@ -35,17 +37,32 @@ uv run tests/json_dump_tb_logs.py $LOG_DIR --output_path $JSON_METRICS if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | map(tonumber) | max' $JSON_METRICS) -ge $MAX_STEPS ]]; then uv run tests/check_metrics.py $JSON_METRICS \ 'mean(data["train/token_mult_prob_error"]) < 1.1' \ - 'data["train/token_mult_prob_error"]["240"] < 1.1' + "data['train/token_mult_prob_error']['$MAX_STEPS'] < 1.1" fi # Convert 8k checkpoint uv run examples/converters/convert_dcp_to_hf.py \ - --config=results/grpo/step_240/config.yaml \ - --dcp-ckpt-path=results/grpo/step_240/policy/weights \ - --hf-ckpt-path=results/grpo-deepscaler-8k-240-hf + --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ + --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ + --hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-8k-${MAX_STEPS}-hf # Run eval uv run examples/run_eval.py \ - generation.model_name=results/grpo-deepscaler-8k-240-hf \ + generation.model_name=$CKPT_DIR/grpo-deepscaler-8k-${MAX_STEPS}-hf \ data.prompt_file=examples/prompts/cot.txt \ - generation.vllm_cfg.max_model_len=32768 \ No newline at end of file + generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-8k + +cat ${RUN_LOG}.aime-8k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-8k-metric.json + +uv run tests/check_metrics.py ${RUN_LOG}-8k-metric.json \ + 'data["score"] >= 0.25' \ + +#uv run examples/run_eval.py \ +# generation.model_name=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \ +# data.prompt_file=examples/prompts/cot.txt \ +# generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-baseline + +#cat ${RUN_LOG}.aime-baseline | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-baseline-metric.json + +#uv run tests/check_metrics.py ${RUN_LOG}-baseline-metric.json \ +# 'data["score"] == 0.2' \ From ea59442ddb29800d2983319c27f807716cded225 Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Thu, 7 Aug 2025 00:33:20 -0700 Subject: [PATCH 03/10] better for memory Signed-off-by: Terry Kong --- examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml | 1 + examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml index 866b365da4..570fecb1b9 100644 --- a/examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml +++ b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml @@ -8,6 +8,7 @@ loss_fn: policy: max_total_sequence_length: 16384 + logprob_batch_size: 2 dtensor_cfg: enabled: true diff --git a/examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml index 52d1ed2018..2ab0304118 100644 --- a/examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml +++ b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml @@ -8,6 +8,7 @@ loss_fn: policy: max_total_sequence_length: 24576 + logprob_batch_size: 2 dtensor_cfg: enabled: true From 5eae133cf7341d3c1e8728649d8ed8859dc4282e Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Thu, 7 Aug 2025 00:36:17 -0700 Subject: [PATCH 04/10] adjust step time Signed-off-by: Terry Kong --- tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh | 2 +- tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh | 2 +- tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh | 4 +--- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git 
a/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh index 56bc935bdc..416ef96d10 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh @@ -4,7 +4,7 @@ source $SCRIPT_DIR/common.env # ===== BEGIN CONFIG ===== NUM_NODES=1 -STEPS_PER_RUN=$((530-240)) +STEPS_PER_RUN=30 MAX_STEPS=$((530-240)) NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up NUM_MINUTES=240 diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh index 6156c0e9f1..f0cf01d280 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh @@ -4,7 +4,7 @@ source $SCRIPT_DIR/common.env # ===== BEGIN CONFIG ===== NUM_NODES=4 -STEPS_PER_RUN=$((630-530)) +STEPS_PER_RUN=30 MAX_STEPS=$((630-530)) NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up NUM_MINUTES=240 diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh index b76964c956..17620c3b04 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh @@ -5,12 +5,10 @@ source $SCRIPT_DIR/common.env # ===== BEGIN CONFIG ===== NUM_NODES=1 STEPS_PER_RUN=40 -MAX_STEPS=40 +MAX_STEPS=240 NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up NUM_MINUTES=240 # ===== END CONFIG ===== -STEPS_PER_RUN=240 -MAX_STEPS=240 exit_if_max_steps_reached From 0e60a394f0a2d3683902dfbd5ec93ff1746ef93b Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Thu, 7 Aug 2025 15:40:58 -0700 Subject: [PATCH 05/10] fix steps Signed-off-by: Terry Kong --- tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh | 2 +- tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh | 2 +- tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh index 416ef96d10..2470e551b7 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh @@ -5,7 +5,7 @@ source $SCRIPT_DIR/common.env # ===== BEGIN CONFIG ===== NUM_NODES=1 STEPS_PER_RUN=30 -MAX_STEPS=$((530-240)) +MAX_STEPS=30 NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up NUM_MINUTES=240 # ===== END CONFIG ===== diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh index f0cf01d280..cf4f81e683 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh @@ -5,7 +5,7 @@ source $SCRIPT_DIR/common.env # ===== BEGIN CONFIG ===== NUM_NODES=4 STEPS_PER_RUN=30 -MAX_STEPS=$((630-530)) +MAX_STEPS=30 NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up NUM_MINUTES=240 # ===== END CONFIG ===== diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh index 17620c3b04..ea4d51e3b1 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh @@ -5,7 +5,7 @@ source $SCRIPT_DIR/common.env # ===== BEGIN CONFIG ===== NUM_NODES=1 STEPS_PER_RUN=40 -MAX_STEPS=240 +MAX_STEPS=40 NUM_RUNS=$(( (MAX_STEPS + STEPS_PER_RUN - 1) / STEPS_PER_RUN )) # Round up NUM_MINUTES=240 # ===== END CONFIG ===== From 
000ec413543ce789f9eb49d7e0646670ac73c69e Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Thu, 7 Aug 2025 15:42:38 -0700 Subject: [PATCH 06/10] disable eval this round Signed-off-by: Terry Kong --- .../llm/grpo-deepscaler-1.5b-16K.sh | 33 +++++++------ .../llm/grpo-deepscaler-1.5b-24K.sh | 33 +++++++------ .../llm/grpo-deepscaler-1.5b-8K.sh | 49 ++++++++++--------- 3 files changed, 59 insertions(+), 56 deletions(-) diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh index 2470e551b7..284ffd5c0f 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh @@ -45,20 +45,21 @@ if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | ma "data['train/token_mult_prob_error']['$MAX_STEPS'] < 1.1" fi -# Convert 16k checkpoint -uv run examples/converters/convert_dcp_to_hf.py \ - --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ - --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ - --hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-16k-${MAX_STEPS}-hf - -# Run eval -uv run examples/run_eval.py \ - generation.model_name=$CKPT_DIR/grpo-deepscaler-16k-${MAX_STEPS}-hf \ - data.prompt_file=examples/prompts/cot.txt \ - generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-16k - -cat ${RUN_LOG}.aime-16k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-16k-metric.json - -uv run tests/check_metrics.py ${RUN_LOG}-16k-metric.json \ - 'data["score"] >= 0.25' \ +# TODO: enable in subsequent PR to do a quick accuracy check +## Convert 16k checkpoint +#uv run examples/converters/convert_dcp_to_hf.py \ +# --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ +# --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ +# --hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-16k-${MAX_STEPS}-hf +# +## Run eval +#uv run examples/run_eval.py \ +# generation.model_name=$CKPT_DIR/grpo-deepscaler-16k-${MAX_STEPS}-hf \ +# data.prompt_file=examples/prompts/cot.txt \ +# generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-16k +# +#cat ${RUN_LOG}.aime-16k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-16k-metric.json +# +#uv run tests/check_metrics.py ${RUN_LOG}-16k-metric.json \ +# 'data["score"] >= 0.25' \ diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh index cf4f81e683..f269e56977 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh @@ -45,20 +45,21 @@ if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | ma "data['train/token_mult_prob_error']['$MAX_STEPS'] < 1.1" fi -# Convert 24k checkpoint -uv run examples/converters/convert_dcp_to_hf.py \ - --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ - --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ - --hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-24k-${MAX_STEPS}-hf - -# Run eval -uv run examples/run_eval.py \ - generation.model_name=$CKPT_DIR/grpo-deepscaler-24k-${MAX_STEPS}-hf \ - data.prompt_file=examples/prompts/cot.txt \ - generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-24k - -cat ${RUN_LOG}.aime-24k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-24k-metric.json - -uv run tests/check_metrics.py ${RUN_LOG}-24k-metric.json \ - 'data["score"] >= 0.25' \ +# TODO: enable in subsequent PR to do a quick accuracy check +## Convert 24k checkpoint +#uv run 
examples/converters/convert_dcp_to_hf.py \ +# --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ +# --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ +# --hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-24k-${MAX_STEPS}-hf +# +## Run eval +#uv run examples/run_eval.py \ +# generation.model_name=$CKPT_DIR/grpo-deepscaler-24k-${MAX_STEPS}-hf \ +# data.prompt_file=examples/prompts/cot.txt \ +# generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-24k +# +#cat ${RUN_LOG}.aime-24k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-24k-metric.json +# +#uv run tests/check_metrics.py ${RUN_LOG}-24k-metric.json \ +# 'data["score"] >= 0.25' \ diff --git a/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh index ea4d51e3b1..d3b0f689a7 100755 --- a/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh +++ b/tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh @@ -38,29 +38,30 @@ if [[ $(jq 'to_entries | .[] | select(.key == "train/loss") | .value | keys | ma "data['train/token_mult_prob_error']['$MAX_STEPS'] < 1.1" fi -# Convert 8k checkpoint -uv run examples/converters/convert_dcp_to_hf.py \ - --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ - --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ - --hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-8k-${MAX_STEPS}-hf - -# Run eval -uv run examples/run_eval.py \ - generation.model_name=$CKPT_DIR/grpo-deepscaler-8k-${MAX_STEPS}-hf \ - data.prompt_file=examples/prompts/cot.txt \ - generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-8k - -cat ${RUN_LOG}.aime-8k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-8k-metric.json - -uv run tests/check_metrics.py ${RUN_LOG}-8k-metric.json \ - 'data["score"] >= 0.25' \ - +# TODO: enable in subsequent PR to do a quick accuracy check +## Convert 8k checkpoint +#uv run examples/converters/convert_dcp_to_hf.py \ +# --config=$CKPT_DIR/step_${MAX_STEPS}/config.yaml \ +# --dcp-ckpt-path=$CKPT_DIR/step_${MAX_STEPS}/policy/weights \ +# --hf-ckpt-path=$CKPT_DIR/grpo-deepscaler-8k-${MAX_STEPS}-hf +# +## Run eval #uv run examples/run_eval.py \ -# generation.model_name=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \ +# generation.model_name=$CKPT_DIR/grpo-deepscaler-8k-${MAX_STEPS}-hf \ # data.prompt_file=examples/prompts/cot.txt \ -# generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-baseline - -#cat ${RUN_LOG}.aime-baseline | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-baseline-metric.json - -#uv run tests/check_metrics.py ${RUN_LOG}-baseline-metric.json \ -# 'data["score"] == 0.2' \ +# generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-8k +# +#cat ${RUN_LOG}.aime-8k | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-8k-metric.json +# +#uv run tests/check_metrics.py ${RUN_LOG}-8k-metric.json \ +# 'data["score"] >= 0.25' \ +# +##uv run examples/run_eval.py \ +## generation.model_name=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B \ +## data.prompt_file=examples/prompts/cot.txt \ +## generation.vllm_cfg.max_model_len=32768 2>&1 | tee ${RUN_LOG}.aime-baseline +# +##cat ${RUN_LOG}.aime-baseline | grep "score=" | sed 's/.*score=\([^ ]*\).*/{"score": \1}/' > ${RUN_LOG}-baseline-metric.json +# +##uv run tests/check_metrics.py ${RUN_LOG}-baseline-metric.json \ +## 'data["score"] == 0.2' \ From 13daaff40576bf3cca153d6c9daa29c62240d40f Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Thu, 7 Aug 2025 15:47:26 -0700 Subject: [PATCH 07/10] comment 
Signed-off-by: Terry Kong --- nemo_rl/data/processors.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nemo_rl/data/processors.py b/nemo_rl/data/processors.py index 7fb54ceceb..33c972d631 100644 --- a/nemo_rl/data/processors.py +++ b/nemo_rl/data/processors.py @@ -66,7 +66,10 @@ def math_data_processor( ) user_message["token_ids"] = tokenizer(message, return_tensors="pt")["input_ids"][0] if tokenizer.bos_token_id == user_message["token_ids"][0] == user_message["token_ids"][1]: + # This is an attempt to remove bos if two are detected. + # General solution is tracked here https://github.com/NVIDIA-NeMo/RL/issues/855 user_message["token_ids"] = tokenizer(message, return_tensors="pt", add_special_tokens=False)["input_ids"][0] + assert tokenizer.bos_token_id == user_message["token_ids"][0] == user_message["token_ids"][1], f"Still encountering double-bos. Please raise your tokenizer in https://github.com/NVIDIA-NeMo/RL/issues/855." user_message["content"] = message message_log.append(user_message) From a40aa788e34d073e3a84959abe2e54c5f416d2da Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Thu, 7 Aug 2025 22:49:00 +0000 Subject: [PATCH 08/10] lint Signed-off-by: Terry Kong --- nemo_rl/data/processors.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/nemo_rl/data/processors.py b/nemo_rl/data/processors.py index 33c972d631..1ac5a981bf 100644 --- a/nemo_rl/data/processors.py +++ b/nemo_rl/data/processors.py @@ -65,11 +65,23 @@ def math_data_processor( add_special_tokens=False, ) user_message["token_ids"] = tokenizer(message, return_tensors="pt")["input_ids"][0] - if tokenizer.bos_token_id == user_message["token_ids"][0] == user_message["token_ids"][1]: + if ( + tokenizer.bos_token_id + == user_message["token_ids"][0] + == user_message["token_ids"][1] + ): # This is an attempt to remove bos if two are detected. # General solution is tracked here https://github.com/NVIDIA-NeMo/RL/issues/855 - user_message["token_ids"] = tokenizer(message, return_tensors="pt", add_special_tokens=False)["input_ids"][0] - assert tokenizer.bos_token_id == user_message["token_ids"][0] == user_message["token_ids"][1], f"Still encountering double-bos. Please raise your tokenizer in https://github.com/NVIDIA-NeMo/RL/issues/855." + user_message["token_ids"] = tokenizer( + message, return_tensors="pt", add_special_tokens=False + )["input_ids"][0] + assert ( + tokenizer.bos_token_id + == user_message["token_ids"][0] + == user_message["token_ids"][1] + ), ( + "Still encountering double-bos. Please raise your tokenizer in https://github.com/NVIDIA-NeMo/RL/issues/855." + ) user_message["content"] = message message_log.append(user_message) From 8be415dd21cf5ba4b81a31ed37b5b336413e0817 Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Fri, 8 Aug 2025 00:44:15 +0000 Subject: [PATCH 09/10] revert Signed-off-by: Terry Kong --- nemo_rl/data/processors.py | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/nemo_rl/data/processors.py b/nemo_rl/data/processors.py index 1ac5a981bf..67e3658882 100644 --- a/nemo_rl/data/processors.py +++ b/nemo_rl/data/processors.py @@ -65,23 +65,6 @@ def math_data_processor( add_special_tokens=False, ) user_message["token_ids"] = tokenizer(message, return_tensors="pt")["input_ids"][0] - if ( - tokenizer.bos_token_id - == user_message["token_ids"][0] - == user_message["token_ids"][1] - ): - # This is an attempt to remove bos if two are detected. 
- # General solution is tracked here https://github.com/NVIDIA-NeMo/RL/issues/855 - user_message["token_ids"] = tokenizer( - message, return_tensors="pt", add_special_tokens=False - )["input_ids"][0] - assert ( - tokenizer.bos_token_id - == user_message["token_ids"][0] - == user_message["token_ids"][1] - ), ( - "Still encountering double-bos. Please raise your tokenizer in https://github.com/NVIDIA-NeMo/RL/issues/855." - ) user_message["content"] = message message_log.append(user_message) From a46c60cff3ec1cf2f83d3861a4caa2e0ea092180 Mon Sep 17 00:00:00 2001 From: Terry Kong Date: Fri, 8 Aug 2025 17:03:16 +0000 Subject: [PATCH 10/10] fix some config issues uncovered by placing configs in the correct dir Signed-off-by: Terry Kong --- examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml | 3 --- examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml | 3 --- tests/test_suites/nightly.txt | 5 +++++ tests/unit/test_recipes_and_test_suites.py | 4 +++- 4 files changed, 8 insertions(+), 7 deletions(-) diff --git a/examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml index 2ab0304118..c48b54996d 100644 --- a/examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml +++ b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-24K.yaml @@ -45,9 +45,6 @@ policy: gpu_memory_utilization: 0.8 enforce_eager: True max_model_len: ${policy.max_total_sequence_length} - # For most cases, use "dummy" to load the initial weights, since they will be overwritten during refit - # For Gemma models, we need to use "auto" due to a vllm bug - load_format: dummy cluster: gpus_per_node: 8 diff --git a/examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml index 69976b5cb5..a8a21fac83 100644 --- a/examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml +++ b/examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml @@ -103,9 +103,6 @@ policy: gpu_memory_utilization: 0.6 max_model_len: ${policy.max_total_sequence_length} enforce_eager: True - # For most cases, use "dummy" to load the initial weights, since they will be overwritten during refit - # For Gemma models, we need to use "auto" due to a vllm bug - load_format: dummy colocated: # true: generation shares training GPUs # false: uses dedicated generation resources diff --git a/tests/test_suites/nightly.txt b/tests/test_suites/nightly.txt index d28e61a8e6..07c3eb5b9c 100644 --- a/tests/test_suites/nightly.txt +++ b/tests/test_suites/nightly.txt @@ -13,6 +13,11 @@ tests/test_suites/llm/grpo-qwen2.5-7b-instruct-4n8g-fsdp2tp4sp.v3.sh # Functional 32b run tests/test_suites/llm/grpo-qwen2.5-32b-32n8g-fsdp2tp8sp-actckpt.v3.sh +# Deepscaler (short tests) +tests/test_suites/llm/grpo-deepscaler-1.5b-16K.sh +tests/test_suites/llm/grpo-deepscaler-1.5b-24K.sh +tests/test_suites/llm/grpo-deepscaler-1.5b-8K.sh + ####### # SFT # ####### diff --git a/tests/unit/test_recipes_and_test_suites.py b/tests/unit/test_recipes_and_test_suites.py index 47d1d2f45b..c79dc5fbed 100644 --- a/tests/unit/test_recipes_and_test_suites.py +++ b/tests/unit/test_recipes_and_test_suites.py @@ -283,6 +283,8 @@ def test_all_recipes_can_merge_configs_with_base_config( ): from omegaconf import OmegaConf + from nemo_rl.utils.config import load_config + base_yaml = os.path.join(project_root, algo_base_yaml) base_config = OmegaConf.load(base_yaml) # Would result in an error if we couldn't merge our config with the recipe's config @@ -293,7 +295,7 @@ def 
test_all_recipes_can_merge_configs_with_base_config(
             # test_all_recipes_start_with_algo_hyphen()
             continue
         recipe_yaml_path = os.path.join(recipes_dir, recipe_yaml)
-        recipe_config = OmegaConf.load(recipe_yaml_path)
+        recipe_config = load_config(recipe_yaml_path)
         OmegaConf.set_struct(recipe_config, True)
         # This will raise an error if the config can't be merged
         print(f"Merging {recipe_yaml} with {base_yaml}")
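
For reference, the three training stages that these relocated recipes and test scripts exercise chain together through Hugging Face checkpoint conversion. The following is a minimal sketch assembled from the commands already shown in `docs/guides/grpo-deepscaler.md`; the `results/...` paths and step counts (240 steps at 8K, 290 at 16K) follow the guide's defaults and should be adjusted to match your own `logger.log_dir` and `checkpointing.checkpoint_dir` settings.

```sh
# Stage 1: train with an 8K context window (240 steps).
uv run examples/run_grpo_math.py \
    --config=examples/configs/recipes/llm/grpo-deepscaler-1.5b-8K.yaml

# Convert the stage-1 DCP checkpoint to a Hugging Face checkpoint.
uv run examples/converters/convert_dcp_to_hf.py \
    --config=results/grpo-deepscaler-1.5b-8K/step_240/config.yaml \
    --dcp-ckpt-path=results/grpo-deepscaler-1.5b-8K/step_240/policy/weights \
    --hf-ckpt-path=results/grpo-deepscaler-1.5b-8K/step_240/hf

# Stage 2: train with a 16K context window, initialized from the stage-1 checkpoint.
uv run examples/run_grpo_math.py \
    --config=examples/configs/recipes/llm/grpo-deepscaler-1.5b-16K.yaml \
    policy.model_name=results/grpo-deepscaler-1.5b-8K/step_240/hf

# Stage 3 (24K) repeats the convert-then-train pattern with the stage-2 checkpoint.
```

The CI scripts encode the same dependency: `grpo-deepscaler-1.5b-16K.sh` and `grpo-deepscaler-1.5b-24K.sh` exit early unless `CACHED_MODEL_PATH` points at the converted checkpoint from the previous stage.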