mteb/model_meta.py (3 additions, 1 deletion)

@@ -66,7 +66,8 @@ class ModelMeta(BaseModel):
         release_date: The date the model's revision was released.
         license: The license under which the model is released. Required if open_weights is True.
         open_weights: Whether the model is open source or proprietary.
-        public_training_code: Whether the code used to train the model is publicly available.
+        public_training_code: A link to the publicly available training code. If None, it is assumed that the training code is not publicly available.
+        public_training_data: A link to the publicly available training data. If None, it is assumed that the training data is not publicly available.
         similarity_fn_name: The distance metric used by the model.
         framework: The framework the model is implemented in, can be a list of frameworks e.g. `["Sentence Transformers", "PyTorch"]`.
         reference: A URL to the model's page on huggingface or another source.
@@ -94,6 +95,7 @@
     license: str | None
    open_weights: bool | None
    public_training_code: str | None
+    public_training_data: str | None
    framework: list[FRAMEWORKS]
    reference: STR_URL | None = None
    similarity_fn_name: DISTANCE_METRICS | None
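Note: the new field follows the same None-vs-URL convention as public_training_code. A minimal sketch of that convention in plain Python (illustrative only; it does not import mteb, and the URL shown is the BAAI-MTP link used later in this PR):

from typing import Optional

def describe_artifact(label: str, link: Optional[str]) -> str:
    """Render a metadata field the way the new docstring defines it:
    a string is a link to the public artifact, None means not public."""
    if link is None:
        return f"{label}: not publicly available"
    return f"{label}: available at {link}"

# One non-public and one public artifact:
print(describe_artifact("training code", None))
print(describe_artifact("training data", "https://data.baai.ac.cn/details/BAAI-MTP"))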
mteb/models/arctic_models.py (15 additions, 7 deletions)

@@ -102,7 +102,8 @@
     use_instructions=True,
     adapted_from="sentence-transformers/all-MiniLM-L6-v2",
     superseded_by=None,
-    public_training_code=None, # couldn't find
+    public_training_code=None,
+    public_training_data=None,
     training_datasets={
         # source: https://arxiv.org/pdf/2405.05374
         # splits not specified, so assuming everything
@@ -151,7 +152,8 @@
     use_instructions=True,
     adapted_from="intfloat/e5-small-unsupervised",
     superseded_by=None,
-    public_training_code=None, # couldn't find
+    public_training_code=None,
+    public_training_data=None, # couldn't find
     training_datasets={
         # source: https://arxiv.org/pdf/2405.05374
         # splits not specified, so assuming everything
@@ -200,7 +202,8 @@
     use_instructions=True,
     adapted_from="intfloat/e5-base-unsupervised",
     superseded_by="Snowflake/snowflake-arctic-embed-m-v1.5",
-    public_training_code=None, # couldn't find
+    public_training_code=None,
+    public_training_data=None, # couldn't find
     training_datasets={
         # source: https://arxiv.org/pdf/2405.05374
         # splits not specified, so assuming everything
@@ -249,7 +252,8 @@
     use_instructions=True,
     adapted_from="nomic-ai/nomic-embed-text-v1-unsupervised",
     superseded_by="Snowflake/snowflake-arctic-embed-m-v2.0",
-    public_training_code=None, # couldn't find
+    public_training_code=None,
+    public_training_data=None, # couldn't find
     training_datasets={
         # source: https://arxiv.org/pdf/2405.05374
         # splits not specified, so assuming everything
@@ -298,7 +302,8 @@
     use_instructions=True,
     adapted_from="intfloat/e5-base-unsupervised",
     superseded_by="Snowflake/snowflake-arctic-embed-l-v2.0",
-    public_training_code=None, # couldn't find
+    public_training_code=None,
+    public_training_data=None, # couldn't find
     training_datasets={
         # source: https://arxiv.org/pdf/2405.05374
         # splits not specified, so assuming everything
@@ -350,6 +355,7 @@
     adapted_from=None,
     superseded_by="Snowflake/snowflake-arctic-embed-m-v2.0",
     public_training_code=None,
+    public_training_data=None,
     training_datasets=None,
 )

@@ -375,7 +381,8 @@
     use_instructions=True,
     adapted_from="Alibaba-NLP/gte-multilingual-base",
     superseded_by=None,
-    public_training_code=None, # couldn't find
+    public_training_code=None,
+    public_training_data=None, # couldn't find
     training_datasets={
         # source: https://arxiv.org/pdf/2405.05374
         # splits not specified, so assuming everything
@@ -423,7 +430,8 @@
     use_instructions=True,
     adapted_from="BAAI/bge-m3-retromae",
     superseded_by=None,
-    public_training_code=None, # couldn't find
+    public_training_code=None,
+    public_training_data=None, # couldn't find
     training_datasets={
         # source: https://arxiv.org/pdf/2405.05374
         # splits not specified, so assuming everything
mteb/models/bge_models.py (12 additions, 4 deletions)

@@ -372,7 +372,8 @@
     similarity_fn_name="cosine",
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=True,
-    public_training_code=None, # seemingly released (at least for some models), but the link is broken
+    public_training_code=None,
+    public_training_data="https://data.baai.ac.cn/details/BAAI-MTP",
     training_datasets=bge_training_data,
 )

@@ -397,6 +398,7 @@
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=True,
     public_training_code=None, # seemingly released (at least for some models), but the link is broken
+    public_training_data="https://data.baai.ac.cn/details/BAAI-MTP",
     training_datasets=bge_training_data,
 )

@@ -421,6 +423,7 @@
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=True,
     public_training_code=None, # seemingly released (at least for some models), but the link is broken
+    public_training_data="https://data.baai.ac.cn/details/BAAI-MTP",
     training_datasets=bge_training_data,
 )

@@ -444,7 +447,8 @@
     similarity_fn_name="cosine",
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=True,
-    public_training_code=None, # seemingly released (at least for some models), but the link is broken
+    public_training_code=None,
+    public_training_data=None,
     training_datasets=bge_chinese_training_data,
 )

@@ -468,7 +472,8 @@
     similarity_fn_name="cosine",
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=True,
-    public_training_code=None, # seemingly released (at least for some models), but the link is broken
+    public_training_code=None,
+    public_training_data=None,
     training_datasets=bge_chinese_training_data,
 )

@@ -492,7 +497,8 @@
     similarity_fn_name="cosine",
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=True,
-    public_training_code=None, # seemingly released (at least for some models), but the link is broken
+    public_training_code=None,
+    public_training_data=None,
     training_datasets=bge_chinese_training_data,
 )

@@ -516,6 +522,7 @@
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=False,
     public_training_code=None,
+    public_training_data=None,
     training_datasets=bgem3_training_data,
 )

@@ -549,5 +556,6 @@
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=False,
     public_training_code=None,
+    public_training_data=None,
     training_datasets=None, # not disclosed
 )
mteb/models/bm25.py (1 addition)

@@ -139,5 +139,6 @@ def encode(self, texts: list[str], **kwargs):
     framework=[],
     use_instructions=False,
     public_training_code="https://github.com/xhluca/bm25s",
+    public_training_data=None,
     training_datasets=None,
 )
mteb/models/cohere_models.py (8 additions, 4 deletions)

@@ -234,7 +234,8 @@ def encode(
     similarity_fn_name="cosine",
     framework=["API"],
     use_instructions=True,
-    public_training_code=None, # assumed
+    public_training_code=None,
+    public_training_data=None, # assumed
     training_datasets=None,
 )

@@ -257,7 +258,8 @@ def encode(
     similarity_fn_name="cosine",
     framework=["API"],
     use_instructions=True,
-    public_training_code=None, # assumed
+    public_training_code=None,
+    public_training_data=None, # assumed
     training_datasets=None,
 )

@@ -280,7 +282,8 @@ def encode(
     similarity_fn_name="cosine",
     framework=["API"],
     use_instructions=True,
-    public_training_code=None, # assumed
+    public_training_code=None,
+    public_training_data=None, # assumed
     training_datasets=None,
 )

@@ -303,6 +306,7 @@ def encode(
     similarity_fn_name="cosine",
     framework=["API"],
     use_instructions=True,
-    public_training_code=None, # assumed
+    public_training_code=None,
+    public_training_data=None, # assumed
     training_datasets=None,
 )
mteb/models/colbert_models.py (2 additions)

@@ -153,6 +153,7 @@ def similarity(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
     open_weights=True,
     revision="c1e84128e85ef755c096a95bdb06b47793b13acf",
     public_training_code=None,
+    public_training_data=None,
     release_date="2024-09-21",
     n_parameters=110 * 1e6,
     max_tokens=180, # Reduced for Benchmarking - see ColBERT paper
@@ -205,6 +206,7 @@ def similarity(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
     open_weights=True,
     revision="4cf816e5e2b03167b132a3c847a9ecd48ba708e1",
     public_training_code=None,
+    public_training_data=None,
     release_date="2024-08-16",
     n_parameters=559 * 1e6,
     max_tokens=8192,
mteb/models/e5_instruct.py (12 additions)

@@ -15,6 +15,16 @@
 E5_INSTRUCTION = "Instruct: {instruction}\nQuery: "


+E5_MISTRAL_TRAINING_DATA = {
+    **E5_TRAINING_DATA,
+    "FEVER": ["train"],
+    "FEVERHardNegatives": ["train"],
+    "FEVER-PL": ["train"], # translation, not trained on
+    "HotpotQA": ["train"],
+    "HotpotQAHardNegatives": ["train"],
+    "HotpotQA-PL": ["train"], # translation, not trained on
+}
+
 e5_instruct = ModelMeta(
     loader=partial( # type: ignore
         instruct_wrapper,
@@ -40,6 +50,7 @@
     license="mit",
     max_tokens=514,
     public_training_code=None,
+    public_training_data=None,
     training_datasets=E5_TRAINING_DATA,
 )

@@ -70,5 +81,6 @@
     license="mit",
     max_tokens=32768,
     public_training_code=None,
+    public_training_data=None,
     training_datasets=E5_TRAINING_DATA,
 )
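Note: E5_MISTRAL_TRAINING_DATA extends E5_TRAINING_DATA via dict unpacking: the `**` spread copies every base entry, and the keys listed afterwards are added (or would override a base key of the same name). A self-contained sketch of the pattern with stand-in data (the real E5_TRAINING_DATA lives in mteb/models/e5_models.py):

# Stand-in for E5_TRAINING_DATA; the real dict maps dataset names to splits.
base_training_data = {"MSMARCO": ["train"], "NQ": ["train"]}

extended_training_data = {
    **base_training_data,  # copy all base dataset/split entries
    "FEVER": ["train"],    # then add the model-specific datasets
    "HotpotQA": ["train"],
}

assert extended_training_data["MSMARCO"] == ["train"]  # base entries survive
assert set(extended_training_data) == {"MSMARCO", "NQ", "FEVER", "HotpotQA"}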
mteb/models/e5_models.py (22 additions, 3 deletions)

@@ -126,6 +126,16 @@
     "NQ-PL": ["train"], # translation, not trained on
 }

+ME5_TRAINING_DATA = {
+    **E5_TRAINING_DATA,
+    "FEVER": ["train"],
+    "FEVERHardNegatives": ["train"],
+    "FEVER-PL": ["train"], # translation, not trained on
+    "HotpotQA": ["train"],
+    "HotpotQAHardNegatives": ["train"],
+    "HotpotQA-PL": ["train"], # translation, not trained on
+}
+
 e5_mult_small = ModelMeta(
     loader=partial( # type: ignore
         sentence_transformers_loader,
@@ -147,7 +157,8 @@
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=True,
     public_training_code=None,
-    training_datasets=E5_TRAINING_DATA,
+    public_training_data=None,
+    training_datasets=ME5_TRAINING_DATA,
 )

 e5_mult_base = ModelMeta(
@@ -170,7 +181,8 @@
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=True,
     public_training_code=None,
-    training_datasets=E5_TRAINING_DATA,
+    public_training_data=None,
+    training_datasets=ME5_TRAINING_DATA,
 )

 e5_mult_large = ModelMeta(
@@ -194,7 +206,8 @@
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=True,
     public_training_code=None,
-    training_datasets=E5_TRAINING_DATA,
+    public_training_data=None,
+    training_datasets=ME5_TRAINING_DATA,
 )

 e5_eng_small_v2 = ModelMeta(
@@ -217,6 +230,7 @@
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=True,
     public_training_code=None,
+    public_training_data=None,
     training_datasets=E5_TRAINING_DATA,
 )

@@ -241,6 +255,7 @@
     framework=["Sentence Transformers", "PyTorch"],
     use_instructions=True,
     public_training_code=None,
+    public_training_data=None,
     training_datasets=E5_TRAINING_DATA,
 )

@@ -267,6 +282,7 @@
     superseded_by=None,
     adapted_from=None,
     public_training_code=None,
+    public_training_data=None,
     training_datasets=E5_TRAINING_DATA,
 )

@@ -293,6 +309,7 @@
     superseded_by=None,
     adapted_from=None,
     public_training_code=None,
+    public_training_data=None,
     training_datasets=E5_TRAINING_DATA,
 )

@@ -319,6 +336,7 @@
     superseded_by="intfloat/e5-large-v2",
     adapted_from=None,
     public_training_code=None,
+    public_training_data=None,
     training_datasets=E5_TRAINING_DATA,
 )

@@ -345,5 +363,6 @@
     superseded_by="intfloat/e5-base-v2",
     adapted_from=None,
     public_training_code=None,
+    public_training_data=None,
     training_datasets=E5_TRAINING_DATA,
 )
mteb/models/google_models.py (6 additions, 3 deletions)

@@ -151,7 +151,8 @@ def encode(
     similarity_fn_name="cosine", # assumed
     framework=["API"],
     use_instructions=True,
-    public_training_code=None, # assumed
+    public_training_code=None,
+    public_training_data=None, # assumed
     training_datasets=None,
 )

@@ -173,7 +174,8 @@ def encode(
     similarity_fn_name="cosine", # assumed
     framework=["API"],
     use_instructions=True,
-    public_training_code=None, # assumed
+    public_training_code=None,
+    public_training_data=None, # assumed
     training_datasets=None,
 )

@@ -195,6 +197,7 @@ def encode(
     similarity_fn_name="cosine", # assumed
     framework=["API"],
     use_instructions=True,
-    public_training_code=None, # assumed
+    public_training_code=None,
+    public_training_data=None, # assumed
     training_datasets=None,
 )
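Note: one way the new field can be consumed downstream is an audit of which registered models disclose their training data. A sketch under the assumption that mteb exposes a get_model_metas() accessor (the accessor name may differ by version; check the actual public API before relying on it):

import mteb  # assumes mteb is installed

# get_model_metas() is assumed here, returning ModelMeta objects.
for meta in mteb.get_model_metas():
    if meta.public_training_data is None:
        print(f"{meta.name}: training data not public")
    else:
        print(f"{meta.name}: {meta.public_training_data}")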