Running the following command fails:
```shell
MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
MODEL_ARGS="pretrained=$MODEL,dtype=bfloat16,max_model_length=32768,gpu_memory_utilization=0.8,generation_parameters={max_new_tokens:32768,temperature:0.6,top_p:0.95}"
OUTPUT_DIR=data/evals/$MODEL
# AIME 2024
TASK=aime24
lighteval vllm $MODEL_ARGS "custom|$TASK|0|0" \
    --custom-tasks src/open_r1/evaluate.py \
    --use-chat-template \
    --output-dir $OUTPUT_DIR
```
Here is the error message:
```
[2025-03-04 17:43:42,101] [ INFO]: PyTorch version 2.5.1 available. (config.py:54)
INFO 03-04 17:43:49 __init__.py:190] Automatically detected platform cuda.
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ /data1/liuyang/code/lighteval-main/src/lighteval/main_vllm.py:105 in vllm │
│ │
│ 102 │ from lighteval.logging.evaluation_tracker import EvaluationTracker │
│ 103 │ from lighteval.models.model_input import GenerationParameters │
│ 104 │ from lighteval.models.vllm.vllm_model import VLLMModelConfig │
│ ❱ 105 │ from lighteval.pipeline import EnvConfig, ParallelismManager, Pipeline, PipelinePara │
│ 106 │ │
│ 107 │ TOKEN = os.getenv("HF_TOKEN") │
│ 108 │
│ │
│ /data1/liuyang/code/lighteval-main/src/lighteval/pipeline.py:48 in <module> │
│ │
│ 45 │ ModelResponse, │
│ 46 ) │
│ 47 from lighteval.tasks.lighteval_task import LightevalTask, create_requests_from_tasks │
│ ❱ 48 from lighteval.tasks.registry import Registry, taskinfo_selector │
│ 49 from lighteval.tasks.requests import RequestType, SampleUid │
│ 50 from lighteval.utils.imports import ( │
│ 51 │ NO_ACCELERATE_ERROR_MSG, │
│ │
│ /data1/liuyang/code/lighteval-main/src/lighteval/tasks/registry.py:36 in <module> │
│ │
│ 33 from datasets.load import dataset_module_factory │
│ 34 │
│ 35 import lighteval.tasks.default_tasks as default_tasks │
│ ❱ 36 from lighteval.tasks.extended import AVAILABLE_EXTENDED_TASKS_MODULES │
│ 37 from lighteval.tasks.lighteval_task import LightevalTask, LightevalTaskConfig │
│ 38 from lighteval.utils.imports import CANNOT_USE_EXTENDED_TASKS_MSG, can_load_extended_tas │
│ 39 │
│ │
│ /data1/liuyang/code/lighteval-main/src/lighteval/tasks/extended/__init__.py:29 in <module> │
│ │
│ 26 if can_load_extended_tasks(): │
│ 27 │ import lighteval.tasks.extended.hle.main as hle │
│ 28 │ import lighteval.tasks.extended.ifeval.main as ifeval │
│ ❱ 29 │ import lighteval.tasks.extended.lcb.main as lcb │
│ 30 │ import lighteval.tasks.extended.mix_eval.main as mix_eval │
│ 31 │ import lighteval.tasks.extended.mt_bench.main as mt_bench │
│ 32 │ import lighteval.tasks.extended.olympiade_bench.main as olympiad_bench │
│ │
│ /data1/liuyang/code/lighteval-main/src/lighteval/tasks/extended/lcb/main.py:118 in <module> │
│ │
│ 115 │
│ 116 extend_enum(Metrics, "lcb_codegen_metric", lcb_codegen_metric) │
│ 117 │
│ ❱ 118 configs = get_dataset_config_names("livecodebench/code_generation_lite", trust_remote_co │
│ 119 │
│ 120 tasks = [] │
│ 121 │
│ │
│ /data1/liuyang/xmodel-r1/openr1/lib/python3.11/site-packages/datasets/inspect.py:174 in │
│ get_dataset_config_names │
│ │
│ 171 │ │ **download_kwargs, │
│ 172 │ ) │
│ 173 │ builder_cls = get_dataset_builder_class(dataset_module, dataset_name=os.path.basenam │
│ ❱ 174 │ return list(builder_cls.builder_configs.keys()) or [ │
│ 175 │ │ dataset_module.builder_kwargs.get("config_name", builder_cls.DEFAULT_CONFIG_NAME │
│ 176 │ ] │
│ 177 │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
AttributeError: 'NoneType' object has no attribute 'builder_configs'
```