@@ -46,23 +46,6 @@ def ifeval_prompt(line, task_name: str = None):
     )
 
 
-# We create the task config
-ifeval = LightevalTaskConfig(
-    name="ifeval",
-    prompt_function=ifeval_prompt,
-    suite=["extended"],
-    hf_repo="wis-k/instruction-following-eval",
-    hf_subset="default",
-    metric=["ifeval_metric"],
-    hf_avail_splits=["train"],
-    evaluation_splits=["train"],
-    few_shots_split="train",
-    few_shots_select="random_sampling",
-    generation_size=1280,
-    stop_sequence=[],  # no stop sequence, will use eot token
-)
-
-
 submetric_names = [
     "prompt_level_strict_acc",
     "inst_level_strict_acc",
@@ -156,6 +139,22 @@ def agg_inst_level_acc(items):
     },
 )
 
+# We create the task config
+ifeval = LightevalTaskConfig(
+    name="ifeval",
+    prompt_function=ifeval_prompt,
+    suite=["extended"],
+    hf_repo="wis-k/instruction-following-eval",
+    hf_subset="default",
+    metric=[ifeval_metrics],
+    hf_avail_splits=["train"],
+    evaluation_splits=["train"],
+    few_shots_split="train",
+    few_shots_select="random_sampling",
+    generation_size=1280,
+    stop_sequence=[],  # no stop sequence, will use eot token
+)
+
 
 TASKS_TABLE = [ifeval]
 
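The net effect of the diff is to move the `LightevalTaskConfig` block below the metric definition and to replace the string entry `metric=["ifeval_metric"]` with the metric object `metric=[ifeval_metrics]` defined just above it. Because Python executes module-level statements in source order, the config can only reference `ifeval_metrics` once that name is already bound. A minimal sketch of this ordering constraint, using hypothetical stand-ins rather than the real lighteval classes:

# Minimal sketch of the definition-order constraint; TaskConfig and the
# ifeval_metrics value below are stand-ins, not the lighteval API.

class TaskConfig:
    """Stand-in for LightevalTaskConfig: stores a task name and its metrics."""
    def __init__(self, name, metric):
        self.name = name
        self.metric = metric

# Referencing the metric object before it is defined fails at import time:
#   ifeval = TaskConfig(name="ifeval", metric=[ifeval_metrics])  # NameError

ifeval_metrics = {"metrics": ["prompt_level_strict_acc", "inst_level_strict_acc"]}

# Defined after the metric object, the reference resolves as expected.
ifeval = TaskConfig(name="ifeval", metric=[ifeval_metrics])

TASKS_TABLE = [ifeval]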