diff --git a/Makefile b/Makefile
index 3c68c9e0dc..7d8ca4d74f 100644
--- a/Makefile
+++ b/Makefile
@@ -5,7 +5,7 @@ install:
 install-for-tests:
 	@echo "--- 🚀 Installing project dependencies for test ---"
 	@echo "This ensures that the project is not installed in editable mode"
-	pip install ".[dev,speedtask]"
+	pip install ".[dev,speedtask,bm25s,pylate]"

 lint:
 	@echo "--- 🧹 Running linters ---"
diff --git a/mteb/evaluation/MTEB.py b/mteb/evaluation/MTEB.py
index 3c94f24785..377f8b72eb 100644
--- a/mteb/evaluation/MTEB.py
+++ b/mteb/evaluation/MTEB.py
@@ -20,7 +20,10 @@
 from mteb.abstasks.AbsTask import ScoresDict
 from mteb.encoder_interface import Encoder
 from mteb.model_meta import ModelMeta
-from mteb.models import model_meta_from_sentence_transformers
+from mteb.models import (
+    model_meta_from_cross_encoder,
+    model_meta_from_sentence_transformers,
+)

 from ..abstasks.AbsTask import AbsTask
 from ..load_results.task_results import TaskResult
@@ -495,7 +498,7 @@ def create_model_meta(model: Encoder) -> ModelMeta:
             meta = model.mteb_model_meta  # type: ignore
         else:
             try:
-                meta = model_meta_from_sentence_transformers(model)  # type: ignore
+                meta = MTEB._get_model_meta(model)
             except AttributeError:
                 logger.warning(
                     "Could not find model metadata. Please set the model.mteb_model_meta attribute or if you are using "
@@ -597,3 +600,11 @@ def _get_missing_evaluations(
             missing_evaluations[split]["missing_subsets"] = missing_subsets

         return missing_evaluations
+
+    @staticmethod
+    def _get_model_meta(model: Encoder) -> ModelMeta:
+        if isinstance(model, CrossEncoder):
+            meta = model_meta_from_cross_encoder(model)
+        else:
+            meta = model_meta_from_sentence_transformers(model)
+        return meta
diff --git a/mteb/model_meta.py b/mteb/model_meta.py
index fee525cba1..eed74c5b49 100644
--- a/mteb/model_meta.py
+++ b/mteb/model_meta.py
@@ -32,7 +32,7 @@
     "PyLate",
     "ColBERT",
 ]
-DISTANCE_METRICS = Literal["cosine", "max_sim", "dot"]
+DISTANCE_METRICS = Literal["cosine", "MaxSim", "dot"]


 def sentence_transformers_loader(
@@ -111,7 +111,7 @@ def get_similarity_function(self) -> Callable[[np.ndarray, np.ndarray], np.ndarr
             return cos_sim
         elif self.similarity_fn_name == "dot":
             return dot_score
-        elif self.similarity_fn_name == "max_sim":
+        elif self.similarity_fn_name == "MaxSim":
             return max_sim
         elif self.similarity_fn_name is None:
             raise ValueError("Similarity function not specified.")
diff --git a/mteb/models/__init__.py b/mteb/models/__init__.py
index 1c70b528ce..1389e23982 100644
--- a/mteb/models/__init__.py
+++ b/mteb/models/__init__.py
@@ -6,6 +6,7 @@
     get_model,
     get_model_meta,
     get_model_metas,
+    model_meta_from_cross_encoder,
     model_meta_from_sentence_transformers,
 )
 from mteb.models.sentence_transformer_wrapper import SentenceTransformerWrapper
@@ -17,5 +18,6 @@
     "get_model_meta",
     "get_model_metas",
     "model_meta_from_sentence_transformers",
+    "model_meta_from_cross_encoder",
     "SentenceTransformerWrapper",
 ]
diff --git a/mteb/models/colbert_models.py b/mteb/models/colbert_models.py
index f4baca3586..0a8c0e4a57 100644
--- a/mteb/models/colbert_models.py
+++ b/mteb/models/colbert_models.py
@@ -161,7 +161,7 @@ def similarity(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
     max_tokens=180,  # Reduced for Benchmarking - see ColBERT paper
     embed_dim=None,  # Bag of Embeddings (128) for each token
     license="mit",
-    similarity_fn_name="max_sim",
+    similarity_fn_name="MaxSim",
     framework=["PyLate", "ColBERT"],
     reference="https://huggingface.co/colbert-ir/colbertv2.0",
     use_instructions=False,
@@ -213,7 +213,7 @@ def similarity(self, a: np.ndarray, b: np.ndarray) -> np.ndarray:
     max_tokens=8192,
     embed_dim=None,  # Bag of Embeddings (128) for each token
     license="cc-by-nc-4.0",
-    similarity_fn_name="max_sim",
+    similarity_fn_name="MaxSim",
     framework=["PyLate", "ColBERT"],
     reference="https://huggingface.co/jinaai/jina-colbert-v2",
     use_instructions=False,
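Note on the rename: the value stored in `ModelMeta.similarity_fn_name` changes from `"max_sim"` to `"MaxSim"`, so the `DISTANCE_METRICS` Literal, the ColBERT model entries, and the dispatch in `get_similarity_function` all have to agree. A minimal sketch of that dispatch, illustrative only and not part of the patch; the keyword arguments are copied from the `ModelMeta(...)` calls that appear elsewhere in this diff:

```python
# Illustrative sketch, not part of the patch: the "MaxSim" string must match
# the DISTANCE_METRICS Literal for get_similarity_function() to resolve it.
from mteb.model_meta import ModelMeta

meta = ModelMeta(
    name="colbert-ir/colbertv2.0",
    revision=None,
    release_date=None,
    languages=None,
    n_parameters=None,
    max_tokens=None,
    embed_dim=None,
    license="mit",
    open_weights=True,
    public_training_code=None,
    similarity_fn_name="MaxSim",  # previously "max_sim"
    use_instructions=False,
    training_datasets=None,
    framework=["PyLate", "ColBERT"],
)

max_sim_fn = meta.get_similarity_function()  # dispatches to max_sim for "MaxSim"
```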
diff --git a/mteb/models/overview.py b/mteb/models/overview.py
index 93eaa9ab5a..c72fe2ed89 100644
--- a/mteb/models/overview.py
+++ b/mteb/models/overview.py
@@ -6,7 +6,7 @@
 from typing import Any

 from huggingface_hub import ModelCard
-from sentence_transformers import SentenceTransformer
+from sentence_transformers import CrossEncoder, SentenceTransformer

 from mteb.abstasks.AbsTask import AbsTask
 from mteb.encoder_interface import Encoder
@@ -172,6 +172,11 @@ def get_model(model_name: str, revision: str | None = None, **kwargs: Any) -> En

         if not meta.similarity_fn_name:
             meta.similarity_fn_name = _meta.similarity_fn_name
+    elif isinstance(model, CrossEncoder):
+        _meta = model_meta_from_cross_encoder(model.model)
+        if meta.revision is None:
+            meta.revision = _meta.revision if _meta.revision else meta.revision
+

     model.mteb_model_meta = meta  # type: ignore
     return model
@@ -251,6 +256,49 @@ def model_meta_from_hf_hub(model_name: str) -> ModelMeta:
         )


+def model_meta_from_cross_encoder(model: CrossEncoder) -> ModelMeta:
+    try:
+        name = model.model.name_or_path
+
+        meta = ModelMeta(
+            name=name,
+            revision=model.config._commit_hash,
+            release_date=None,
+            languages=None,
+            framework=["Sentence Transformers"],
+            similarity_fn_name=None,
+            n_parameters=None,
+            max_tokens=None,
+            embed_dim=None,
+            license=None,
+            open_weights=True,
+            public_training_code=None,
+            use_instructions=None,
+            training_datasets=None,
+        )
+    except AttributeError as e:
+        logger.warning(
+            f"Failed to extract metadata from model: {e}. Upgrading to sentence-transformers v3.0.0 or above is recommended."
+        )
+        meta = ModelMeta(
+            name=None,
+            revision=None,
+            languages=None,
+            release_date=None,
+            n_parameters=None,
+            max_tokens=None,
+            embed_dim=None,
+            license=None,
+            open_weights=True,
+            public_training_code=None,
+            similarity_fn_name=None,
+            use_instructions=None,
+            training_datasets=None,
+            framework=[],
+        )
+    return meta
+
+
 def model_meta_from_sentence_transformers(model: SentenceTransformer) -> ModelMeta:
     try:
         name = (
@@ -263,6 +311,7 @@ def model_meta_from_sentence_transformers(model: SentenceTransformer) -> ModelMe
             if isinstance(model.model_card_data.language, str)
             else model.model_card_data.language
         )
+        embeddings_dim = model.get_sentence_embedding_dimension()
         meta = ModelMeta(
             name=name,
             revision=model.model_card_data.base_model_revision,
@@ -272,7 +321,7 @@ def model_meta_from_sentence_transformers(model: SentenceTransformer) -> ModelMe
             similarity_fn_name=model.similarity_fn_name,
             n_parameters=None,
             max_tokens=None,
-            embed_dim=None,
+            embed_dim=embeddings_dim,
             license=None,
             open_weights=True,
             public_training_code=None,
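For reference, a hedged sketch of how the new helper might be exercised once this lands. The model id is the one used in the tests below; the sketch assumes a recent sentence-transformers release (v3.0.0 or newer, per the warning above) so that the CrossEncoder exposes `.model.name_or_path` and `.config._commit_hash`:

```python
# Illustrative usage of the new helper, not part of the patch.
from sentence_transformers import CrossEncoder

from mteb.models import model_meta_from_cross_encoder

ce = CrossEncoder("cross-encoder/ms-marco-TinyBERT-L-2-v2")
meta = model_meta_from_cross_encoder(ce)
print(meta.name)      # "cross-encoder/ms-marco-TinyBERT-L-2-v2"
print(meta.revision)  # commit hash resolved from the downloaded checkpoint
```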
diff --git a/tests/test_benchmark/test_benchmark.py b/tests/test_benchmark/test_benchmark.py
index 1393d46f12..37a226f737 100644
--- a/tests/test_benchmark/test_benchmark.py
+++ b/tests/test_benchmark/test_benchmark.py
@@ -41,7 +41,7 @@
 def test_mulitple_mteb_tasks(tasks: list[AbsTask], model: mteb.Encoder, tmp_path: Path):
     """Test that multiple tasks can be run"""
     eval = mteb.MTEB(tasks=tasks)
-    eval.run(model, output_folder=str(tmp_path), overwrite_results=True)
+    eval.run(model, output_folder=tmp_path.as_posix(), overwrite_results=True)

     # ensure that we can generate a readme from the output folder
     generate_readme(tmp_path)
@@ -56,7 +56,9 @@ def test_mulitple_mteb_tasks(tasks: list[AbsTask], model: mteb.Encoder, tmp_path
         MockTorchbf16Encoder(),
     ],
 )
-def test_benchmark_encoders_on_task(task: str | AbsTask, model: mteb.Encoder):
+def test_benchmark_encoders_on_task(
+    task: str | AbsTask, model: mteb.Encoder, tmp_path: Path
+):
     """Test that a task can be fetched and run using a variety of encoders"""
     if isinstance(task, str):
         tasks = mteb.get_tasks(tasks=[task])
@@ -64,7 +66,7 @@ def test_benchmark_encoders_on_task(task: str | AbsTask, model: mteb.Encoder):
         tasks = [task]

     eval = mteb.MTEB(tasks=tasks)
-    eval.run(model, output_folder="tests/results", overwrite_results=True)
+    eval.run(model, output_folder=tmp_path.as_posix())


 @pytest.mark.parametrize("task", [MockMultilingualRetrievalTask()])
@@ -72,7 +74,9 @@ def test_benchmark_encoders_on_task(task: str | AbsTask, model: mteb.Encoder):
     "model",
     [MockSentenceTransformer()],
 )
-def test_run_eval_without_co2_tracking(task: str | AbsTask, model: mteb.Encoder):
+def test_run_eval_without_co2_tracking(
+    task: str | AbsTask, model: mteb.Encoder, tmp_path: Path
+):
     """Test that a task can be fetched and run without CO2 tracking"""
     if isinstance(task, str):
         tasks = mteb.get_tasks(tasks=[task])
@@ -80,9 +84,7 @@ def test_run_eval_without_co2_tracking(task: str | AbsTask, model: mteb.Encoder)
         tasks = [task]

     eval = mteb.MTEB(tasks=tasks)
-    eval.run(
-        model, output_folder="tests/results", overwrite_results=True, co2_tracker=False
-    )
+    eval.run(model, output_folder=tmp_path.as_posix(), co2_tracker=False)


 @pytest.mark.parametrize("task", MOCK_TASK_TEST_GRID[:1])
@@ -95,20 +97,22 @@ def test_reload_results(task: str | AbsTask, model: mteb.Encoder, tmp_path: Path
         tasks = [task]

     eval = mteb.MTEB(tasks=tasks)
-    results = eval.run(model, output_folder=str(tmp_path), overwrite_results=True)
+    results = eval.run(model, output_folder=tmp_path.as_posix(), overwrite_results=True)

     assert isinstance(results, list)
     assert isinstance(results[0], mteb.TaskResult)

     # reload the results
-    results = eval.run(model, output_folder=str(tmp_path), overwrite_results=False)
+    results = eval.run(
+        model, output_folder=tmp_path.as_posix(), overwrite_results=False
+    )

     assert isinstance(results, list)
     assert isinstance(results[0], mteb.TaskResult)


 @pytest.mark.parametrize("task_name", MOCK_TASK_TEST_GRID)
-def test_prompt_name_passed_to_all_encodes(task_name: str | AbsTask):
+def test_prompt_name_passed_to_all_encodes(task_name: str | AbsTask, tmp_path: Path):
     """Test that all tasks correctly pass down the prompt_name to the encoder which supports it,
     and that the encoder which does not support it does not receive it.
     """
@@ -141,17 +145,17 @@ def encode(self, sentences, **kwargs):
     eval.run(
         model,
-        output_folder="tests/results",
+        output_folder=tmp_path.as_posix(),
         overwrite_results=True,
     )

     # Test that the task_name is not passed down to the encoder
     model = EncoderWithoutInstructions("average_word_embeddings_levy_dependency")
     assert model.prompts == {}, "The encoder should not have any prompts"
-    eval.run(model, output_folder="tests/results", overwrite_results=True)
+    eval.run(model, output_folder=tmp_path.as_posix(), overwrite_results=True)


 @pytest.mark.parametrize("task_name", MOCK_TASK_TEST_GRID)
-def test_encode_kwargs_passed_to_all_encodes(task_name: str | AbsTask):
+def test_encode_kwargs_passed_to_all_encodes(task_name: str | AbsTask, tmp_path: Path):
     """Test that all tasks correctly pass down the encode_kwargs to the encoder."""

     my_encode_kwargs = {"no_one_uses_this_args": "but_its_here"}
@@ -175,14 +179,14 @@ def encode(self, sentences, task_name: str | None = None, **kwargs):
     model = MockEncoderWithKwargs()
     eval.run(
         model,
-        output_folder="tests/results",
+        output_folder=tmp_path.as_posix(),
         overwrite_results=True,
         encode_kwargs=my_encode_kwargs,
     )


 @pytest.mark.parametrize("model", [MockNumpyEncoder()])
-def test_run_using_benchmark(model: mteb.Encoder):
+def test_run_using_benchmark(model: mteb.Encoder, tmp_path: Path):
     """Test that a benchmark object can be run using the MTEB class."""
     bench = Benchmark(
         name="test_bench", tasks=mteb.get_tasks(tasks=["STS12", "SummEval"])
@@ -190,12 +194,12 @@ def test_run_using_benchmark(model: mteb.Encoder):

     eval = mteb.MTEB(tasks=[bench])
     eval.run(
-        model, output_folder="tests/results", overwrite_results=True
+        model, output_folder=tmp_path.as_posix(), overwrite_results=True
     )  # we just want to test that it runs


 @pytest.mark.parametrize("model", [MockNumpyEncoder()])
-def test_run_using_list_of_benchmark(model: mteb.Encoder):
+def test_run_using_list_of_benchmark(model: mteb.Encoder, tmp_path: Path):
     """Test that a list of benchmark objects can be run using the MTEB class."""
     bench = [
         Benchmark(name="test_bench", tasks=mteb.get_tasks(tasks=["STS12", "SummEval"]))
@@ -203,7 +207,7 @@ def test_run_using_list_of_benchmark(model: mteb.Encoder):

     eval = mteb.MTEB(tasks=bench)
     eval.run(
-        model, output_folder="tests/results", overwrite_results=True
+        model, output_folder=tmp_path.as_posix()
     )  # we just want to test that it runs


@@ -229,7 +233,7 @@ def test_get_benchmark(name):
 @pytest.mark.parametrize("task", MOCK_TASK_TEST_GRID)
 @pytest.mark.parametrize("is_task_name", [True, False])
 def test_prompt_name_passed_to_all_encodes_with_prompts(
-    task: AbsTask | str, is_task_name: bool
+    task: AbsTask | str, is_task_name: bool, tmp_path: Path
 ):
     """Test that all tasks and task_types correctly pass down the prompt_name to the encoder with prompts."""
     _task_name = task.metadata.name if isinstance(task, AbsTask) else task
@@ -258,8 +262,7 @@ def encode(self, sentences, prompt_name: str | None = None, **kwargs):
         )
         eval.run(
             model,
-            output_folder="tests/results",
-            overwrite_results=True,
+            output_folder=tmp_path.as_posix(),
         )

     class MockEncoderWithExistingPrompts(mteb.Encoder):
@@ -275,7 +278,7 @@ def encode(self, sentences, prompt_name: str | None = None, **kwargs):
     model = MockSentenceTransformerWrapper(MockEncoderWithExistingPrompts())
     eval.run(
         model,
-        output_folder="tests/results",
+        output_folder=tmp_path.as_posix(),
         overwrite_results=True,
     )

@@ -292,7 +295,9 @@ def encode(self, sentences, prompt_name: str | None = None, **kwargs):
     ],
 )
 @pytest.mark.parametrize("is_task_name", [True, False])
-def test_model_query_passage_prompts_task_type(task: AbsTask | str, is_task_name: bool):
+def test_model_query_passage_prompts_task_type(
+    task: AbsTask | str, is_task_name: bool, tmp_path: Path
+):
     """Test that the model with prompts is correctly called."""
     tasks = [task]

@@ -331,8 +336,7 @@ def encode(self, sentences, prompt_name: str | None = None, *args, **kwargs):
     eval.run(
         model,
         model_prompts=prompt_list,
-        output_folder="tests/results",
-        overwrite_results=True,
+        output_folder=tmp_path.as_posix(),
     )
     model = MockSentenceTransformerWrapper(
         MockSentenceEncoderWithPrompts(), model_prompts=prompt_list
@@ -341,6 +345,5 @@ def encode(self, sentences, prompt_name: str | None = None, *args, **kwargs):
     eval.run(
         model,
         model_prompts=prompt_list,
-        output_folder="tests/results",
-        overwrite_results=True,
+        output_folder=tmp_path.as_posix(),
     )
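The recurring change in this test module is that output is written to pytest's per-test `tmp_path` fixture instead of the shared `tests/results` directory, which keeps test runs isolated and removes the need for `overwrite_results=True` as a cleanup mechanism. A minimal sketch of the pattern, assuming nothing beyond pytest itself; the test name and file contents are hypothetical:

```python
# Illustrative pattern, not part of the patch: tmp_path is a unique pathlib.Path
# per test invocation, and .as_posix() yields the string form passed to eval.run().
from pathlib import Path


def test_example_uses_isolated_output_folder(tmp_path: Path) -> None:
    output_folder = tmp_path.as_posix()
    # eval.run(model, output_folder=output_folder)  # as in the tests above
    (tmp_path / "dummy_result.json").write_text("{}")
    assert (tmp_path / "dummy_result.json").exists()
```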
diff --git a/tests/test_benchmark/test_benchmark_integration_with_datasets.py b/tests/test_benchmark/test_benchmark_integration_with_datasets.py
index 81d4c6b676..8288680c3c 100644
--- a/tests/test_benchmark/test_benchmark_integration_with_datasets.py
+++ b/tests/test_benchmark/test_benchmark_integration_with_datasets.py
@@ -3,6 +3,7 @@
 from __future__ import annotations

 import logging
+from pathlib import Path

 import pytest

@@ -18,7 +19,7 @@

 @pytest.mark.parametrize("task", TASK_TEST_GRID)
 @pytest.mark.parametrize("model", [MockNumpyEncoder()])
-def test_benchmark_datasets(task: str | AbsTask, model: mteb.Encoder):
+def test_benchmark_datasets(task: str | AbsTask, model: mteb.Encoder, tmp_path: Path):
     """Test that a task can be fetched and run"""
     eval = MTEB(tasks=[task])
-    eval.run(model, output_folder="tests/results", overwrite_results=True)
+    eval.run(model, output_folder=tmp_path.as_posix(), overwrite_results=True)
diff --git a/tests/test_benchmark/test_benchmark_integration_with_sentencetransformers.py b/tests/test_benchmark/test_benchmark_integration_with_sentencetransformers.py
index 4ca0056cd7..e79515be56 100644
--- a/tests/test_benchmark/test_benchmark_integration_with_sentencetransformers.py
+++ b/tests/test_benchmark/test_benchmark_integration_with_sentencetransformers.py
@@ -3,6 +3,7 @@
 from __future__ import annotations

 import logging
+from pathlib import Path

 import pytest
 from sentence_transformers import SentenceTransformer
@@ -22,9 +23,11 @@
         "average_word_embeddings_levy_dependency",
     ],
 )
-def test_benchmark_sentence_transformer(task: str | AbsTask, model_name: str):
+def test_benchmark_sentence_transformer(
+    task: str | AbsTask, model_name: str, tmp_path: Path
+):
     """Test that a task can be fetched and run"""
     if isinstance(model_name, str):
         model = SentenceTransformer(model_name)
     eval = MTEB(tasks=[task])
-    eval.run(model, output_folder="tests/results", overwrite_results=True)
+    eval.run(model, output_folder=tmp_path.as_posix(), overwrite_results=True)
diff --git a/tests/test_benchmark/test_models.py b/tests/test_benchmark/test_models.py
index ee5bed091b..5d6cc1a022 100644
--- a/tests/test_benchmark/test_models.py
+++ b/tests/test_benchmark/test_models.py
@@ -1,5 +1,8 @@
 from __future__ import annotations

+import sys
+from pathlib import Path
+
 import pytest

 import mteb
@@ -9,9 +12,10 @@
 from .mock_tasks import MockRetrievalTask


+@pytest.mark.skipif(sys.version_info < (3, 10), reason="Requires Python 3.10 or higher")
 @pytest.mark.parametrize("model", ["colbert-ir/colbertv2.0"])
 @pytest.mark.parametrize("task", [MockRetrievalTask()])
-def test_colbert_model_e2e(task: AbsTask, model: str):
+def test_colbert_model_e2e(task: AbsTask, model: str, tmp_path: Path):
     pytest.importorskip("pylate", reason="pylate not installed")
     eval_splits = ["test"]
     model = mteb.get_model(model)
@@ -21,13 +25,14 @@ def test_colbert_model_e2e(task: AbsTask, model: str):
         model,
         eval_splits=eval_splits,
         corpus_chunk_size=500,
+        output_folder=tmp_path.as_posix(),
     )
     result = results[0]

     assert result.scores["test"][0]["ndcg_at_1"] == 1.0


-def test_bm25s_e2e():
+def test_bm25s_e2e(tmp_path: Path):
     # fails for dataset smaller then 1000
     pytest.importorskip("bm25s", reason="bm25s not installed")
     pytest.importorskip("Stemmer", reason="PyStemmer not installed")
@@ -38,7 +43,9 @@ def test_bm25s_e2e():

     evaluation = MTEB(tasks=tasks)

-    results = evaluation.run(model, eval_splits=eval_splits)
+    results = evaluation.run(
+        model, eval_splits=eval_splits, output_folder=tmp_path.as_posix()
+    )
     result = results[0]

     assert result.scores["test"][0]["ndcg_at_1"] == 0.42879
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 7c71528f0d..fc4a468112 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -50,12 +50,13 @@ def test_run_task(
     model_name: str,
     task_name: str,
     model_revision: str,
+    tmp_path: Path,
 ):
     args = Namespace(
         model=model_name,
         tasks=[task_name],
         model_revision=model_revision,
-        output_folder="tests/results/test_model",
+        output_folder=tmp_path.as_posix(),
         verbosity=3,
         device=None,
         categories=None,
@@ -71,9 +72,7 @@ def test_run_task(
     run(args)

     model_name_as_path = model_name.replace("/", "__").replace(" ", "_")
-    results_path = Path(
-        f"tests/results/test_model/{model_name_as_path}/{model_revision}"
-    )
+    results_path = tmp_path / model_name_as_path / model_revision
     assert results_path.exists(), "Output folder not created"
     assert "model_meta.json" in [
         f.name for f in list(results_path.glob("*.json"))
@@ -122,7 +121,7 @@ def test_create_meta():
         ), f"Value for {key} does not match"

     # ensure that the command line interface works as well
-    command = f"{sys.executable} -m mteb create_meta --results_folder {results} --output_path {output_path} --overwrite"
+    command = f"{sys.executable} -m mteb create_meta --results_folder {results.as_posix()} --output_path {output_path.as_posix()} --overwrite"
     result = subprocess.run(command, shell=True, capture_output=True, text=True)

     assert result.returncode == 0, "Command failed"
@@ -134,14 +133,16 @@ def test_create_meta():
         ("model_card_without_frontmatter.md", "model_card_gold_without_frontmatter.md"),
     ],
 )
-def test_create_meta_from_existing(existing_readme_name: str, gold_readme_name: str):
+def test_create_meta_from_existing(
+    existing_readme_name: str, gold_readme_name: str, tmp_path: Path
+):
     """Test create_meta function directly as well as through the command line interface"""
     test_folder = Path(__file__).parent
     output_folder = test_folder / "create_meta"
     results = (
         output_folder / "all-MiniLM-L6-v2" / "8b3219a92973c328a8e22fadcfa821b5dc75636a"
     )
-    output_path = output_folder / "model_card.md"
+    output_path = tmp_path / "model_card.md"
     existing_readme = output_folder / existing_readme_name

     args = Namespace(
@@ -183,7 +184,7 @@ def test_create_meta_from_existing(existing_readme_name: str, gold_readme_name:
         ), f"Value for {key} does not match"
     assert readme_output == gold_readme
     # ensure that the command line interface works as well
-    command = f"{sys.executable} -m mteb create_meta --results_folder {results} --output_path {output_path} --from_existing {existing_readme} --overwrite"
+    command = f"{sys.executable} -m mteb create_meta --results_folder {results.as_posix()} --output_path {output_path.as_posix()} --from_existing {existing_readme.as_posix()} --overwrite"
     result = subprocess.run(command, shell=True, capture_output=True, text=True)

     assert result.returncode == 0, "Command failed"
interface""" test_folder = Path(__file__).parent output_folder = test_folder / "create_meta" results = ( output_folder / "all-MiniLM-L6-v2" / "8b3219a92973c328a8e22fadcfa821b5dc75636a" ) - output_path = output_folder / "model_card.md" + output_path = tmp_path / "model_card.md" existing_readme = output_folder / existing_readme_name args = Namespace( @@ -183,7 +184,7 @@ def test_create_meta_from_existing(existing_readme_name: str, gold_readme_name: ), f"Value for {key} does not match" assert readme_output == gold_readme # ensure that the command line interface works as well - command = f"{sys.executable} -m mteb create_meta --results_folder {results} --output_path {output_path} --from_existing {existing_readme} --overwrite" + command = f"{sys.executable} -m mteb create_meta --results_folder {results.as_posix()} --output_path {output_path.as_posix()} --from_existing {existing_readme.as_posix()} --overwrite" result = subprocess.run(command, shell=True, capture_output=True, text=True) assert result.returncode == 0, "Command failed" diff --git a/tests/test_model_meta/test_model_meta.py b/tests/test_model_meta/test_model_meta.py new file mode 100644 index 0000000000..2d23bc66cb --- /dev/null +++ b/tests/test_model_meta/test_model_meta.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +import sys +from pathlib import Path + +import pytest +from sentence_transformers import CrossEncoder, SentenceTransformer + +from mteb import MTEB +from mteb.abstasks import AbsTask +from tests.test_benchmark.mock_tasks import MockRetrievalTask + + +def test_create_model_meta_from_sentence_transformers(): + model_name = "sentence-transformers/average_word_embeddings_levy_dependency" + revision = "6d9c09a789ad5dd126b476323fccfeeafcd90509" + model = SentenceTransformer(model_name, revision=revision) + + meta = MTEB.create_model_meta(model) + + assert meta.similarity_fn_name == "cosine" + assert meta.embed_dim == model.get_sentence_embedding_dimension() + assert type(meta.framework) is list + assert meta.framework[0] == "Sentence Transformers" + assert meta.name == model_name + assert meta.revision == revision + + +def test_create_model_meta_from_cross_encoder(): + model_name = "cross-encoder/ms-marco-TinyBERT-L-2-v2" + revision = "841d331b6f34b15d6ac0ab366ae3a3b36eeac691" + model = CrossEncoder(model_name, revision=revision) + + meta = MTEB.create_model_meta(model) + + assert meta.name == model_name + assert meta.revision == revision + + return meta + + +@pytest.mark.parametrize("task", [MockRetrievalTask()]) +def test_output_folder_model_meta(task: AbsTask, tmp_path: Path): + mteb = MTEB(tasks=[task]) + model_name = "cross-encoder/ms-marco-TinyBERT-L-2-v2" + model = CrossEncoder(model_name) + meta = mteb.create_model_meta(model) + output_path = mteb.create_output_folder( + model_meta=meta, output_folder=tmp_path.as_posix() + ) + + output_path = Path(output_path) + assert output_path.exists() + assert output_path.is_dir() + assert output_path.name == model.config._commit_hash + assert output_path.parent.name == "cross-encoder__ms-marco-TinyBERT-L-2-v2" + assert output_path.parent.parent == tmp_path + + +@pytest.mark.skipif(sys.version_info < (3, 10), reason="Requires Python 3.10 or higher") +def test_model_meta_colbert(): + model_name = "colbert-ir/colbertv2.0" + colbert_model = pytest.importorskip("pylate.models", reason="pylate not installed") + revision = "c1e84128e85ef755c096a95bdb06b47793b13acf" + model = colbert_model.ColBERT(model_name, revision=revision) + + meta = MTEB.create_model_meta(model) + + # 
diff --git a/tests/test_reproducible_workflow.py b/tests/test_reproducible_workflow.py
index 1c7536076e..1973072bab 100644
--- a/tests/test_reproducible_workflow.py
+++ b/tests/test_reproducible_workflow.py
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import logging
+from pathlib import Path

 import pytest

@@ -18,7 +19,9 @@
 @pytest.mark.parametrize("task_name", ["BornholmBitextMining"])
 @pytest.mark.parametrize("model_name", ["sentence-transformers/all-MiniLM-L6-v2"])
 @pytest.mark.parametrize("model_revision", ["8b3219a92973c328a8e22fadcfa821b5dc75636a"])
-def test_reproducibility_workflow(task_name: str, model_name: str, model_revision: str):
+def test_reproducibility_workflow(
+    task_name: str, model_name: str, model_revision: str, tmp_path: Path
+):
     """Test that a model and a task can be fetched and run in a reproducible fashion."""
     model_meta = mteb.get_model_meta(model_name, revision=model_revision)
     task = mteb.get_task(task_name)
@@ -30,7 +33,7 @@ def test_reproducibility_workflow(task_name: str, model_name: str, model_revisio
     assert isinstance(model, Encoder)

     eval = MTEB(tasks=[task])
-    eval.run(model, output_folder="tests/results", overwrite_results=True)
+    eval.run(model, output_folder=tmp_path.as_posix(), overwrite_results=True)


 @pytest.mark.parametrize(
diff --git a/tests/test_tasks/test_mteb_rerank.py b/tests/test_tasks/test_mteb_rerank.py
index effd76829d..4a535bebbd 100644
--- a/tests/test_tasks/test_mteb_rerank.py
+++ b/tests/test_tasks/test_mteb_rerank.py
@@ -339,17 +339,16 @@ def test_mteb_rerank(tmp_path: Path):

     eval.run(
         model,  # type: ignore
-        output_folder="tests/results",
+        output_folder=tmp_path.as_posix(),
         overwrite_results=True,
         eval_splits=["test"],
         top_k=2,
         previous_results=tmp_file,
         save_predictions=True,
     )
-    tmp_file.unlink()

     # read in the results
-    with open("tests/results/SciFact_default_predictions.json") as f:
+    with (tmp_path / "SciFact_default_predictions.json").open() as f:
         results = json.load(f)

     # check that only the top two results are re-orderd
@@ -358,7 +357,7 @@ def test_mteb_rerank(tmp_path: Path):
     assert "18670" in results["1"]


-def test_reranker_same_ndcg1():
+def test_reranker_same_ndcg1(tmp_path: Path):
     de_name = "average_word_embeddings_komninos"
     revision = "21eec43590414cb8e3a6f654857abed0483ae36e"
     de = SentenceTransformer(de_name, revision=revision)
@@ -382,32 +381,35 @@ def test_reranker_same_ndcg1():
         framework=["Sentence Transformers", "PyTorch"],
     )
     eval = MTEB(tasks=mteb.get_tasks(["SciFact"]))
+    stage1_path = tmp_path / "stage1"
     eval.run(
         de,
-        output_folder="tests/results/stage1",
+        output_folder=stage1_path.as_posix(),
         overwrite_results=True,
         save_predictions=True,
         eval_splits=["test"],
     )

+    stage2_path = tmp_path / "stage2"
     eval.run(
         ce,  # type: ignore
-        output_folder="tests/results/stage2",
+        output_folder=stage2_path.as_posix(),
         overwrite_results=True,
-        previous_results="tests/results/stage1/SciFact_default_predictions.json",
+        previous_results=(stage1_path / "SciFact_default_predictions.json"),
         save_predictions=False,
         eval_splits=["test"],
         top_k=1,  # don't allow it to rerank more than 1 so we can check for top_1 being the same
     )

     # read in stage 1 and stage two and check ndcg@1 is the same
-    with open(
-        f"tests/results/stage1/sentence-transformers__{de_name}/{revision}/SciFact.json"
-    ) as f:
+    with (
+        stage1_path / f"sentence-transformers__{de_name}/{revision}/SciFact.json"
+    ).open() as f:
         stage1 = json.load(f)
-    with open(
-        f"tests/results/stage2/cross-encoder__ms-marco-TinyBERT-L-2-v2/{ce_revision}/SciFact.json"
-    ) as f:
+    with (
+        stage2_path
+        / f"cross-encoder__ms-marco-TinyBERT-L-2-v2/{ce_revision}/SciFact.json"
+    ).open() as f:
         stage2 = json.load(f)

     assert (