feat: add custom judge type support for external repo integration #1274
Changes from all commits
@@ -41,177 +41,6 @@ class SingleNodeMode(str, enum.Enum):

```python
    parallel = "parallel"


def _create_comet_judge_tasks(
    exp,
    expname,
    benchmark,
    judge_pipeline_args,
    rerun_done,
    log_dir,
    server_parameters,
    cluster_config,
    judge_server_gpus,
    judge_server_nodes,
    partition,
    run_after,
    reuse_code_exp,
    reuse_code,
    dependent_tasks,
    all_tasks,
    _task_dependencies,
    installation_command,
    skip_hf_home_check,
    sbatch_kwargs,
):
    """Create tasks for Comet judge evaluation."""
    from nemo_skills.pipeline.utils.generation import get_remaining_jobs

    output_dir_path = judge_pipeline_args.get("output_dir")
    input_file = judge_pipeline_args.get("input_file")
    comet_model_path = judge_pipeline_args.get("judge_model")

    # Determine seeds to check
    if input_file is None:
        num_seeds = judge_pipeline_args.get("num_random_seeds", 1)
        random_seeds = list(range(num_seeds))
    else:
        random_seeds = [None]

    remaining_jobs = get_remaining_jobs(
        cluster_config=cluster_config,
        output_dir=output_dir_path,
        random_seeds=random_seeds,
        chunk_ids=[None],  # No chunking for judge task
        rerun_done=rerun_done,
    )

    if not remaining_jobs or all(not chunks for chunks in remaining_jobs.values()):
        LOG.info(f"Skipping Comet judge for {benchmark} - all output files and .done markers exist")
        return []

    # Build command to run xCOMET-XXL judge script
    script_args = [f"--output-dir {output_dir_path} --comet-model-path {comet_model_path}"]

    if input_file is None:
        input_dir = judge_pipeline_args.get("input_dir")
        script_args.append(f"--input-dir {input_dir}")
        script_args.append(f"--num-seeds {num_seeds}")
    else:
        script_args.append(f"--input-file {input_file}")

    run_cmd = f"pip install unbabel-comet && python3 -I /nemo_run/code/nemo_skills/evaluation/evaluator/comet.py {' '.join(script_args)}"

    # Create task with GPU support for Comet
    judge_task = pipeline_utils.add_task(
        exp,
        cmd=run_cmd,
        task_name=f"{expname}-{benchmark}-comet-judge",
        log_dir=log_dir + "/judge",
        container=cluster_config["containers"]["vllm"],
        cluster_config=cluster_config,
        num_gpus=judge_server_gpus or 1,
        num_nodes=judge_server_nodes or 1,
        partition=partition,
        run_after=run_after,
        reuse_code_exp=reuse_code_exp,
        reuse_code=reuse_code,
        task_dependencies=(
            dependent_tasks if cluster_config["executor"] == "slurm" else all_tasks + _task_dependencies
        ),
        installation_command=installation_command,
        skip_hf_home_check=skip_hf_home_check,
        sbatch_kwargs=sbatch_kwargs,
    )
    return [judge_task]


def _create_nvembed_judge_tasks(
    exp,
    expname,
    benchmark,
    judge_pipeline_args,
    rerun_done,
    log_dir,
    server_parameters,
    cluster_config,
    judge_server_gpus,
    judge_server_nodes,
    partition,
    run_after,
    reuse_code_exp,
    reuse_code,
    dependent_tasks,
    all_tasks,
    _task_dependencies,
    installation_command,
    skip_hf_home_check,
    sbatch_kwargs,
):
    """Create tasks for NVEmbed judge evaluation."""
    from nemo_skills.pipeline.utils.generation import get_remaining_jobs

    output_dir_path = judge_pipeline_args.get("output_dir")
    input_file = judge_pipeline_args.get("input_file")

    # Determine seeds to check
    if input_file is None:
        num_seeds = judge_pipeline_args.get("num_random_seeds", 1)
        random_seeds = list(range(num_seeds))
    else:
        random_seeds = [None]

    remaining_jobs = get_remaining_jobs(
        cluster_config=cluster_config,
        output_dir=output_dir_path,
        random_seeds=random_seeds,
        chunk_ids=[None],  # No chunking for judge task
        rerun_done=rerun_done,
    )

    if not remaining_jobs or all(not chunks for chunks in remaining_jobs.values()):
        LOG.info(f"Skipping NVEmbed judge for {benchmark} - all output files and .done markers exist")
        return []

    # Build command to run NVEmbed judge script
    script_args = [f"--output-dir {output_dir_path}"]

    if input_file is None:
        input_dir = judge_pipeline_args.get("input_dir")
        script_args.append(f"--input-dir {input_dir}")
        script_args.append(f"--num-seeds {num_seeds}")
    else:
        script_args.append(f"--input-file {input_file}")

    # Add skip-existing flag unless rerun_done is set
    if not rerun_done:
        script_args.append("--skip-existing")

    run_cmd = f"python3 -I /nemo_run/code/nemo_skills/evaluation/evaluator/nvembed_judge.py {' '.join(script_args)}"

    # Create task with GPU support for NVEmbed
    judge_task = pipeline_utils.add_task(
        exp,
        cmd=run_cmd,
        task_name=f"{expname}-{benchmark}-nvembed-judge",
        log_dir=log_dir + "/judge",
        container=cluster_config["containers"]["vllm"],
        cluster_config=cluster_config,
        num_gpus=judge_server_gpus or 1,
        num_nodes=judge_server_nodes or 1,
        partition=partition,
        run_after=run_after,
        reuse_code_exp=reuse_code_exp,
        reuse_code=reuse_code,
        task_dependencies=(
            dependent_tasks if cluster_config["executor"] == "slurm" else all_tasks + _task_dependencies
        ),
        installation_command=installation_command,
        skip_hf_home_check=skip_hf_home_check,
        sbatch_kwargs=sbatch_kwargs,
    )
    return [judge_task]


def _create_llm_judge_tasks(
    ctx,
    expname,
```
@@ -325,7 +154,11 @@ def eval(

```python
        help="Path to the entrypoint of the server. "
        "If not specified, will use the default entrypoint for the server type.",
    ),
    judge_type: str = typer.Option("llm", help="Type of judge to use: 'llm' (default) or 'nvembed'"),
    judge_step_fn: str = typer.Option(
        None,
        help="Path to the judge step creator function to use for the judge (locate() convention). "
        "Eg: nemo_skills.pipeline.judges.nvembed_judge::create_judge_tasks. Can also accept callable directly.",
    ),
    judge_model: str = typer.Option(None, help="Path to the model to be used as a judge (if applicable)"),
    judge_server_address: str = typer.Option(None, help="Address of the server hosting the judge model"),
    judge_server_type: pipeline_utils.SupportedServers = typer.Option(
```
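As an aside for readers unfamiliar with the locate() convention mentioned in the help text above, here is a rough, simplified sketch of how a "module.path::function" string could resolve to a callable. The real helper lives in nemo_skills.dataset.utils and may differ in details; treat this as an illustration only.

```python
# Simplified stand-in for the locate() convention described in the help text.
# Assumption: the real nemo_skills.dataset.utils.locate may behave differently.
import importlib


def locate_sketch(path_or_callable):
    """Resolve 'pkg.module::func' to the function object, or pass callables through."""
    if callable(path_or_callable):
        return path_or_callable
    module_path, func_name = path_or_callable.split("::")
    return getattr(importlib.import_module(module_path), func_name)


# e.g. locate_sketch("nemo_skills.pipeline.judges.nvembed_judge::create_judge_tasks")
```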
@@ -519,7 +352,7 @@ def eval(

```python
        "generation_type": judge_generation_type,
        "generation_module": judge_generation_module,
    }
    eval_requires_judge = any(param_value for param_value in cli_judge_pipeline_args.values()) or judge_type != "llm"
    eval_requires_judge = any(param_value for param_value in cli_judge_pipeline_args.values()) or judge_step_fn

    # Prepare cluster config and mount paths
    cluster_config = pipeline_utils.get_cluster_config(cluster, config_dir)
```
@@ -643,43 +476,34 @@ def eval(

```python
                benchmark_args.eval_subfolder = benchmark_args.eval_subfolder[4:]
            judge_pipeline_args["output_dir"] = str(Path(output_dir) / benchmark_args.eval_subfolder)

            # Check for per-benchmark judge_type, fall back to global judge_type
            benchmark_judge_type = judge_pipeline_args.pop("judge_type", judge_type)
            # judge_step_fn is a :: path to the judge creator function (locate() convention).
            # Could be set directly in JUDGE_PIPELINE_ARGS; falls back to None for LLM judge.
            judge_step_fn = judge_pipeline_args.pop("judge_step_fn", judge_step_fn)

            # Create judge tasks based on judge type
            if benchmark_judge_type == "nvembed":
                judge_tasks = _create_nvembed_judge_tasks(
                    exp=exp,
                    expname=expname,
                    benchmark=benchmark,
                    judge_pipeline_args=judge_pipeline_args,
                    rerun_done=rerun_done,
                    log_dir=log_dir,
                    server_parameters=server_parameters,
                    cluster_config=cluster_config,
                    judge_server_gpus=judge_server_gpus,
                    judge_server_nodes=judge_server_nodes,
                    partition=partition,
                    run_after=run_after,
                    reuse_code_exp=reuse_code_exp,
                    reuse_code=reuse_code,
                    dependent_tasks=dependent_tasks,
                    all_tasks=all_tasks,
                    _task_dependencies=_task_dependencies,
                    installation_command=installation_command,
                    skip_hf_home_check=skip_hf_home_check,
                    sbatch_kwargs=sbatch_kwargs,
                )
            elif benchmark_judge_type == "comet":
                judge_pipeline_args["judge_model"] = judge_model
                judge_tasks = _create_comet_judge_tasks(
            # TODO: we should rework the interface here to have consistent parameters between main llm and custom
            # judge creation steps. E.g. things like judge_model assignment below shouldn't be necessary

            if judge_step_fn:
                has_tasks = True
                if not callable(judge_step_fn):
                    # Use locate() to dynamically load judge creator function
                    from nemo_skills.dataset.utils import locate

                    judge_step_fn = locate(judge_step_fn)
```
Comment on lines +479 to +492

Contributor

Bug: on line 481, `judge_step_fn` is reassigned inside the per-benchmark loop, so a value popped from one benchmark's `judge_pipeline_args` becomes the fallback for every subsequent benchmark. Example: if benchmark A's `JUDGE_PIPELINE_ARGS` sets `judge_step_fn`, benchmark B inherits A's value instead of the CLI default. Save the original CLI value before the loop and use it as the fallback:

Proposed fix

```diff
 all_tasks = []
 if _task_dependencies is None:
     _task_dependencies = []
+cli_judge_step_fn = judge_step_fn
 with pipeline_utils.get_exp(expname, cluster_config, _reuse_exp) as exp:
     # scheduling main eval jobs
     ...
     # scheduling judge jobs if needed
     for idx, (benchmark, benchmark_args) in enumerate(benchmarks_dict.items()):
         ...
-        judge_step_fn = judge_pipeline_args.pop("judge_step_fn", judge_step_fn)
+        benchmark_judge_step_fn = judge_pipeline_args.pop("judge_step_fn", cli_judge_step_fn)
         ...
-        if judge_step_fn:
+        if benchmark_judge_step_fn:
             has_tasks = True
-            if not callable(judge_step_fn):
+            if not callable(benchmark_judge_step_fn):
                 from nemo_skills.dataset.utils import locate
-                judge_step_fn = locate(judge_step_fn)
+                benchmark_judge_step_fn = locate(benchmark_judge_step_fn)
         ...
-        judge_tasks = judge_step_fn(
+        judge_tasks = benchmark_judge_step_fn(
```
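To make the leak concrete, a tiny self-contained sketch (the benchmark names and the step-fn path are made up for illustration):

```python
# Illustration of the fallback leak: using the same variable as both the
# per-iteration value and the fallback lets one benchmark's setting persist.
cli_value = None  # what the user passed on the CLI (here: nothing)
per_benchmark_args = {
    "benchA": {"judge_step_fn": "my_repo.judges::create_judge_tasks"},
    "benchB": {},
}

judge_step_fn = cli_value
for name, args in per_benchmark_args.items():
    judge_step_fn = args.pop("judge_step_fn", judge_step_fn)
    print(name, judge_step_fn)
# benchA my_repo.judges::create_judge_tasks
# benchB my_repo.judges::create_judge_tasks   <- inherited from benchA, not the CLI default
```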
```python
                # Pass judge_model through so judge implementations can access it if needed (e.g. comet)
                if judge_model:
                    judge_pipeline_args.setdefault("judge_model", judge_model)

                # Call with standardized parameters
                judge_tasks = judge_step_fn(
                    exp=exp,
                    expname=expname,
                    benchmark=benchmark,
                    judge_pipeline_args=judge_pipeline_args,
                    rerun_done=rerun_done,
                    log_dir=log_dir,
                    server_parameters=server_parameters,
                    output_dir=output_dir,
                    cluster_config=cluster_config,
                    judge_server_gpus=judge_server_gpus,
                    judge_server_nodes=judge_server_nodes,
```
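For context on how an external repo could plug into this hook, below is a rough sketch of a custom judge step creator, modeled on the removed _create_nvembed_judge_tasks helper above. The module path, script path, and task name are hypothetical, and the exact set of standardized parameters should be taken from the call shown in the diff rather than from this sketch.

```python
# Hypothetical custom judge step creator for an external repo, modeled on the
# removed nvembed/comet helpers. The pipeline_utils import mirrors how eval.py
# uses it and is assumed here; script and task names are placeholders.
from nemo_skills.pipeline import utils as pipeline_utils


def create_judge_tasks(
    exp,
    expname,
    benchmark,
    judge_pipeline_args,
    rerun_done,
    log_dir,
    cluster_config,
    judge_server_gpus,
    judge_server_nodes,
    partition,
    run_after,
    reuse_code_exp,
    reuse_code,
    dependent_tasks,
    all_tasks,
    _task_dependencies,
    installation_command,
    skip_hf_home_check,
    sbatch_kwargs,
    **kwargs,  # remaining standardized parameters (server_parameters, output_dir, ...)
):
    """Schedule the external judge and return the created tasks ([] if nothing to do)."""
    # A real implementation would likely first check for existing outputs / .done markers,
    # as the removed comet/nvembed helpers did, and return [] when everything is finished.
    run_cmd = (
        "python3 -I /nemo_run/code/my_repo/my_judge.py "
        f"--output-dir {judge_pipeline_args['output_dir']}"  # placeholder script and args
    )

    judge_task = pipeline_utils.add_task(
        exp,
        cmd=run_cmd,
        task_name=f"{expname}-{benchmark}-custom-judge",
        log_dir=log_dir + "/judge",
        container=cluster_config["containers"]["vllm"],
        cluster_config=cluster_config,
        num_gpus=judge_server_gpus or 1,
        num_nodes=judge_server_nodes or 1,
        partition=partition,
        run_after=run_after,
        reuse_code_exp=reuse_code_exp,
        reuse_code=reuse_code,
        task_dependencies=(
            dependent_tasks if cluster_config["executor"] == "slurm" else all_tasks + _task_dependencies
        ),
        installation_command=installation_command,
        skip_hf_home_check=skip_hf_home_check,
        sbatch_kwargs=sbatch_kwargs,
    )
    return [judge_task]
```

Such a function could then be referenced via the new option, e.g. --judge_step_fn my_repo.judges::create_judge_tasks (hypothetical path), or passed as a callable when driving the pipeline from Python.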
@@ -0,0 +1,15 @@

```python
# Copyright (c) 2025, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Judge implementations for evaluation pipeline."""
```
`has_tasks = True` is set before confirming the judge creator returns tasks. If `judge_step_fn()` returns `[]` (e.g., all outputs already exist), `has_tasks` is still `True`. In an edge case where this is the only scheduled work, `run_exp` would be called on an experiment with no tasks. Consider moving `has_tasks = True` inside the `if judge_tasks:` guard at line 554, consistent with how `_generate` returning `None` is handled.
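A small sketch of the reordering this comment suggests; the helper name and the `standardized_kwargs` placeholder are hypothetical, and the bookkeeping inside the guard is assumed from the diff context:

```python
def schedule_judge_step(judge_step_fn, all_tasks, standardized_kwargs):
    """Hypothetical helper illustrating the suggested ordering (not actual eval.py code)."""
    judge_tasks = judge_step_fn(**standardized_kwargs)  # may return [] when outputs already exist
    has_tasks = False
    if judge_tasks:
        has_tasks = True  # flag work only after tasks were actually created
        all_tasks.extend(judge_tasks)  # assumed bookkeeping of the existing `if judge_tasks:` block
    return has_tasks
```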