Merged
7 changes: 0 additions & 7 deletions mteb/benchmarks/_create_table.py

@@ -344,13 +344,6 @@ def _create_summary_table_mean_public_private(
         ),
     )
 
-    # Add zero-shot percentage
-    tasks = get_tasks(tasks=list(data["task_name"].unique()))
-    joint_table.insert(
-        1, "Zero-shot", model_metas.map(lambda m: m.zero_shot_percentage(tasks))
-    )
-    joint_table["Zero-shot"] = joint_table["Zero-shot"].fillna(-1)
-
     # Clean up model names (remove HF organization)
     joint_table["model_name"] = joint_table["model_name"].map(
         lambda name: name.split("/")[-1]
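For context: the deleted block unconditionally attached a "Zero-shot" column to this summary table, using each model's zero_shot_percentage over the benchmark's tasks and filling missing values with -1 ("unknown"). A rough standalone sketch of what such a percentage computes, using a hypothetical trained_on set in place of mteb's real per-model training-data metadata:

from __future__ import annotations


def zero_shot_percentage(trained_on: set[str], task_names: list[str]) -> float | None:
    """Share of benchmark tasks absent from the model's training data, in percent."""
    if not task_names:
        return None  # the old table rendered unknowns as -1 via fillna(-1)
    unseen = sum(name not in trained_on for name in task_names)
    return 100 * unseen / len(task_names)


print(zero_shot_percentage({"MSMARCO"}, ["MSMARCO", "NFCorpus"]))  # 50.0

Because this table no longer carries the column, the styling code in mteb/leaderboard/table.py below is updated to treat "Zero-shot" as optional.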
14 changes: 14 additions & 0 deletions mteb/leaderboard/app.py

@@ -16,6 +16,7 @@
 
 import mteb
 from mteb.abstasks.TaskMetadata import TASK_DOMAIN, TASK_TYPE
+from mteb.benchmarks.benchmark import RtebBenchmark
 from mteb.custom_validators import MODALITIES
 from mteb.leaderboard.benchmark_selector import (
     DEFAULT_BENCHMARK_NAME,
@@ -196,6 +197,14 @@ def filter_models(
     return list(models_to_keep)
 
 
+def should_show_zero_shot_filter(benchmark_name: str) -> bool:
+    benchmark = mteb.get_benchmark(benchmark_name)
+
+    if isinstance(benchmark, RtebBenchmark):
+        return False
+    return True
+
+
 def get_leaderboard_app() -> gr.Blocks:
     logger.info("Loading all benchmark results")
     all_results = load_results()
@@ -479,13 +488,16 @@ def on_benchmark_select(benchmark_name):
         benchmark_results = all_benchmark_results[benchmark_name]
         scores = benchmark_results.get_scores(format="long")
         logger.debug(f"on_benchmark_select callback: {elapsed}s")
+        show_zero_shot = should_show_zero_shot_filter(benchmark_name)
+
         return (
             languages,
             domains,
             types,
             modalities,
             sorted([task.metadata.name for task in benchmark.tasks]),
             scores,
+            gr.update(visible=show_zero_shot),
         )
 
     benchmark_select.change(
@@ -498,6 +510,7 @@ def on_benchmark_select(benchmark_name):
             modality_select,
             task_select,
             scores,
+            zero_shot,
         ],
     )
 
@@ -839,6 +852,7 @@ def update_tables(
         bench_modalities,
         bench_tasks,
         bench_scores,
+        zero_shot,
     ) = on_benchmark_select(benchmark.name)
     filtered_models = update_models(
         bench_scores,
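The wiring above follows a standard Gradio pattern: the benchmark dropdown's change callback gains one extra output, a gr.update(visible=...), which hides the zero-shot control whenever the selected benchmark is an RtebBenchmark. A minimal self-contained sketch of that pattern (not the leaderboard's actual layout; the component names and the RETRIEVAL_ONLY stand-in are made up for illustration):

import gradio as gr

# Hypothetical stand-in for "is this an RtebBenchmark?" in the real app.
RETRIEVAL_ONLY = {"RTEB(beta)"}


def on_select(benchmark_name: str):
    # Mirrors should_show_zero_shot_filter: hide the control for RTEB benchmarks.
    return gr.update(visible=benchmark_name not in RETRIEVAL_ONLY)


with gr.Blocks() as demo:
    benchmark = gr.Dropdown(["MTEB(eng, v2)", "RTEB(beta)"], label="Benchmark")
    zero_shot = gr.Checkbox(label="Zero-shot only", visible=True)
    benchmark.change(on_select, inputs=[benchmark], outputs=[zero_shot])

demo.launch()

Returning gr.update from the callback keeps the layout static while toggling visibility per selection, so no components need to be rebuilt on a benchmark switch.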
3 changes: 2 additions & 1 deletion mteb/leaderboard/table.py

@@ -138,7 +138,8 @@ def _apply_summary_table_styling(joint_table: pd.DataFrame) -> gr.DataFrame:
     numeric_data = joint_table.copy()
 
     # Format data for display
-    joint_table["Zero-shot"] = joint_table["Zero-shot"].apply(format_zero_shot)
+    if "Zero-shot" in joint_table.columns:
+        joint_table["Zero-shot"] = joint_table["Zero-shot"].apply(format_zero_shot)
     joint_table[score_columns] = joint_table[score_columns].map(format_scores)
 
     joint_table_style = joint_table.style.format(
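The guard is the usual defensive pattern for optional DataFrame columns: since _create_summary_table_mean_public_private no longer inserts "Zero-shot", the styling helper must only format the column when it exists. A small sketch with a simplified stand-in formatter (the real format_zero_shot may render differently):

import pandas as pd


def format_zero_shot(value: float) -> str:
    # Simplified stand-in; -1 marked "unknown" in the old fillna(-1) convention.
    return "unknown" if value < 0 else f"{value:.0f}%"


table = pd.DataFrame({"model_name": ["a", "b"], "Mean": [0.61, 0.58]})
if "Zero-shot" in table.columns:  # absent here, so this is a no-op
    table["Zero-shot"] = table["Zero-shot"].apply(format_zero_shot)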