131 changes: 127 additions & 4 deletions mteb/benchmarks/_create_table.py
@@ -270,7 +270,6 @@ def _create_summary_table_mean_public_private(
Returns:
DataFrame with model summaries, ready for styling in the leaderboard
"""
print("all tasks:", benchmark_results.task_names)
data = benchmark_results.to_dataframe(format="long")

if data.empty:
@@ -279,12 +278,9 @@
)
return no_results_frame
public_task_name = benchmark_results.filter_tasks(is_public=True).task_names
print("Public tasks:", public_task_name)
private_task_name = benchmark_results.filter_tasks(is_public=False).task_names
print("Private tasks:", private_task_name)
# Convert to DataFrame and pivot
per_task = data.pivot(index="model_name", columns="task_name", values="score")
print(per_task.columns)

# Remove models with no scores
to_remove = per_task.isna().all(axis="columns")
@@ -384,3 +380,130 @@ def _create_summary_table_mean_public_private(
joint_table.insert(0, "Rank (Borda)", joint_table.pop("borda_rank"))

return joint_table


def _create_summary_table_mean_subset(
benchmark_results: BenchmarkResults,
) -> pd.DataFrame:
"""Create summary table from BenchmarkResults.

Returns a DataFrame with one row per model containing summary statistics
and task type averages. Calculates means where each task-language subset
is weighted equally.

Args:
benchmark_results: BenchmarkResults object containing model results

Returns:
DataFrame with model summaries, ready for styling in the leaderboard
"""
data = benchmark_results.to_dataframe(format="long")

if data.empty:
no_results_frame = pd.DataFrame(
{"No results": ["You can try relaxing your criteria"]}
)
return no_results_frame

# Convert to DataFrame and pivot
per_task = data.pivot(index="model_name", columns="task_name", values="score")

# Remove models with no scores
to_remove = per_task.isna().all(axis="columns")
if to_remove.all():
no_results_frame = pd.DataFrame(
{"No results": ["You can try relaxing your criteria"]}
)
return no_results_frame

models_to_remove = list(per_task[to_remove].index)
per_task = per_task.drop(models_to_remove, axis=0)

# Calculate means by task type
mean_per_type = _get_means_per_types(per_task)
mean_per_type = mean_per_type.pivot(
index="model_name", columns="task_type", values="score"
)
mean_per_type.columns = [
_split_on_capital(column) for column in mean_per_type.columns
]

# Calculate subset means (each task-language combination weighted equally)
detailed_data = benchmark_results.to_dataframe(
aggregation_level="subset", format="long"
)
overall_subset_mean = detailed_data.groupby("model_name")["score"].mean()

per_subset = detailed_data.pivot(
index="model_name", columns=["task_name", "subset"], values="score"
)

# Build joint table
joint_table = mean_per_type.copy()
joint_table = joint_table.drop(models_to_remove, axis=0)
joint_table.insert(0, "mean(subset)", overall_subset_mean)
joint_table["borda_rank"] = _get_borda_rank(per_subset)
joint_table = joint_table.sort_values("mean(subset)", ascending=False)
joint_table = joint_table.reset_index()

# Add model metadata
model_metas = joint_table["model_name"].map(_failsafe_get_model_meta)
joint_table = joint_table[model_metas.notna()]
joint_table["model_link"] = model_metas.map(lambda m: m.reference)

# Insert model metadata columns
joint_table.insert(
1,
"Max Tokens",
model_metas.map(lambda m: _format_max_tokens(m.max_tokens)),
)
joint_table.insert(
1,
"Embedding Dimensions",
model_metas.map(lambda m: str(int(m.embed_dim)) if m.embed_dim else "Unknown"),
)
joint_table.insert(
1,
"Number of Parameters",
model_metas.map(lambda m: _format_n_parameters(m.n_parameters)),
)
joint_table.insert(
1,
"Memory Usage (MB)",
model_metas.map(
lambda m: str(int(m.memory_usage_mb)) if m.memory_usage_mb else "Unknown"
),
)

# Add zero-shot percentage
tasks = get_tasks(tasks=list(data["task_name"].unique()))
joint_table.insert(
1, "Zero-shot", model_metas.map(lambda m: m.zero_shot_percentage(tasks))
)
joint_table["Zero-shot"] = joint_table["Zero-shot"].fillna(-1)

# Clean up model names (remove HF organization)
joint_table["model_name"] = joint_table["model_name"].map(
lambda name: name.split("/")[-1]
)

# Add markdown links to model names
name_w_link = (
"[" + joint_table["model_name"] + "](" + joint_table["model_link"] + ")"
)
joint_table["model_name"] = joint_table["model_name"].mask(
joint_table["model_link"].notna(), name_w_link
)
joint_table = joint_table.drop(columns=["model_link"])

# Rename columns
rename_dict = {
"model_name": "Model",
"mean(subset)": "Mean (Subset)",
}
joint_table = joint_table.rename(columns=rename_dict)

# Move borda rank to front
joint_table.insert(0, "Rank (Borda)", joint_table.pop("borda_rank"))

return joint_table
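
The aggregation here differs from the public/private summary above: scores are pulled at aggregation_level="subset" and averaged so that every task-language subset counts once, rather than averaging per-task scores. A minimal pandas sketch of that distinction, with made-up scores (the column names mirror the long-format frame; the model, task, and subset values are purely illustrative):

import pandas as pd

# Illustrative long-format results: one task with two language subsets,
# one task with a single subset.
detailed = pd.DataFrame(
    {
        "model_name": ["m1", "m1", "m1"],
        "task_name": ["STS22", "STS22", "Banking77"],
        "subset": ["en", "ar", "default"],
        "score": [0.80, 0.40, 0.90],
    }
)

# Subset-weighted mean (what _create_summary_table_mean_subset computes):
# each task-language pair counts once -> (0.80 + 0.40 + 0.90) / 3 = 0.70
mean_subset = detailed.groupby("model_name")["score"].mean()

# Task-weighted mean, for comparison: subsets are first averaged within a task
# -> STS22 = 0.60, Banking77 = 0.90, overall mean = 0.75
per_task = detailed.groupby(["model_name", "task_name"])["score"].mean()
mean_task = per_task.groupby("model_name").mean()

print(mean_subset["m1"], mean_task["m1"])  # 0.7 0.75
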
9 changes: 9 additions & 0 deletions mteb/benchmarks/benchmark.py
@@ -11,6 +11,7 @@
_create_per_task_table_from_benchmark_results,
_create_summary_table_from_benchmark_results,
_create_summary_table_mean_public_private,
_create_summary_table_mean_subset,
)
from mteb.load_results.load_results import load_results

@@ -98,3 +99,11 @@ def _create_summary_table(
) -> pd.DataFrame:
"""Create summary table. Called by the leaderboard app."""
return _create_summary_table_mean_public_private(benchmark_results)


class HUMEBenchmark(Benchmark):
def _create_summary_table(
self, benchmark_results: BenchmarkResults
) -> pd.DataFrame:
"""Create summary table. Called by the leaderboard app."""
return _create_summary_table_mean_subset(benchmark_results)
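
Because HUMEBenchmark only overrides _create_summary_table, the leaderboard code can stay agnostic about which aggregation it receives. A rough usage sketch, assuming the singular get_benchmark accessor and a load_results() helper on the benchmark object (both are assumptions outside this diff, not guaranteed APIs):

import mteb

benchmark = mteb.get_benchmark("HUME(v1)")        # should resolve to the HUMEBenchmark instance
results = benchmark.load_results()                # assumed helper returning BenchmarkResults
table = benchmark._create_summary_table(results)  # dispatches to _create_summary_table_mean_subset
print(table[["Rank (Borda)", "Model", "Mean (Subset)"]].head())
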
12 changes: 9 additions & 3 deletions mteb/benchmarks/benchmarks/benchmarks.py
@@ -4,7 +4,7 @@

from pydantic import AnyUrl, BeforeValidator, TypeAdapter

from mteb.benchmarks.benchmark import Benchmark
from mteb.benchmarks.benchmark import Benchmark, HUMEBenchmark
from mteb.overview import MTEBTasks, get_task, get_tasks

if TYPE_CHECKING:
@@ -2303,7 +2303,7 @@
)


HUME = Benchmark(
HUME = HUMEBenchmark(
name="HUME(v1)",
display_name="Human Benchmark",
# icon="https://raw.githubusercontent.com/huggingface/benchmarks/main/benchmarks/assets/hume.png",
@@ -2326,7 +2326,13 @@
"HUMESTSBenchmark",
"HUMESTS22",
],
languages=["eng-Latn", "ara-Arab", "rus-Cyrl", "dan-Latn", "nob-Latn"],
languages=[
"eng-Latn",
"ara-Arab",
"rus-Cyrl",
"dan-Latn",
"nob-Latn",
],
),
description="The HUME benchmark is designed to evaluate the performance of text embedding models and humans on a comparable set of tasks. This captures areas where models perform better than human annotators and the reverse. In the paper, we go further into the analysis and what conclusions can be drawn.",
reference="Coming soon (in review)",
4 changes: 3 additions & 1 deletion mteb/leaderboard/benchmark_selector.py
@@ -34,7 +34,9 @@ class MenuEntry:
name="General Purpose",
description="",
open=False,
benchmarks=mteb.get_benchmarks(["MTEB(Multilingual, v2)", "MTEB(eng, v2)"])
benchmarks=mteb.get_benchmarks(
["MTEB(Multilingual, v2)", "MTEB(eng, v2)", "HUME(v1)"]
)
+ [
MenuEntry(
"Image",
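
A quick illustrative check (not part of the PR) that the new menu entry resolves through the same registry call used above, and that the HUME entry comes back as the new subclass:

import mteb

benchmarks = mteb.get_benchmarks(["MTEB(Multilingual, v2)", "MTEB(eng, v2)", "HUME(v1)"])
print([type(b).__name__ for b in benchmarks])  # the HUME(v1) entry should be a HUMEBenchmark
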
Changes in an additional file (path not shown in this view)
@@ -7,7 +7,7 @@
_LANGUAGES = {
"eng": ["eng-Latn"],
"ara": ["ara-Arab"],
"nor": ["nor-Latn"],
"nor": ["nob-Latn"],
"rus": ["rus-Cyrl"],
}
