From 91ad5650c473a5b7d7e96f4f1ccdb9f96787af21 Mon Sep 17 00:00:00 2001 From: gowitheflow-1998 Date: Mon, 23 Sep 2024 17:04:30 +0100 Subject: [PATCH 1/6] fix ImageTextPair dataloading for large datasets; more compositionality evaluation datasets --- {mieb-docs => docs/mieb-docs}/README.md | 0 .../AbsTaskImageTextPairClassification.py | 20 +-- .../ImageTextPairClassificationEvaluator.py | 156 +++++++++++++----- .../AROCocoOrder.py | 57 +++++++ .../AROFlickrOrder.py | 57 +++++++ .../ImageTextPairClassification/__init__.py | 2 + .../AROCocoOrder.json | 21 +++ .../AROFlickrOrder.json | 21 +++ .../AROCocoOrder.json | 21 +++ .../AROFlickrOrder.json | 21 +++ .../AROCocoOrder.json | 21 +++ .../AROFlickrOrder.json | 21 +++ 12 files changed, 357 insertions(+), 61 deletions(-) rename {mieb-docs => docs/mieb-docs}/README.md (100%) create mode 100644 mteb/tasks/Image/ImageTextPairClassification/AROCocoOrder.py create mode 100644 mteb/tasks/Image/ImageTextPairClassification/AROFlickrOrder.py create mode 100644 results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/AROCocoOrder.json create mode 100644 results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/AROFlickrOrder.json create mode 100644 results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/AROCocoOrder.json create mode 100644 results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/AROFlickrOrder.json create mode 100644 results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/AROCocoOrder.json create mode 100644 results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/AROFlickrOrder.json diff --git a/mieb-docs/README.md b/docs/mieb-docs/README.md similarity index 100% rename from mieb-docs/README.md rename to docs/mieb-docs/README.md diff --git a/mteb/abstasks/Image/AbsTaskImageTextPairClassification.py b/mteb/abstasks/Image/AbsTaskImageTextPairClassification.py index c6d4a6a2d..49523c58f 100644 --- a/mteb/abstasks/Image/AbsTaskImageTextPairClassification.py +++ b/mteb/abstasks/Image/AbsTaskImageTextPairClassification.py @@ -4,7 +4,6 @@ from typing import Any from datasets import Dataset -from tqdm import tqdm from ...encoder_interface import Encoder, EncoderWithQueryCorpusEncode from ...evaluation.evaluators import ImageTextPairClassificationEvaluator @@ -32,18 +31,6 @@ class AbsTaskImageTextPairClassification(AbsTask): def __init__(self, **kwargs): super().__init__(**kwargs) - def _preprocess_column( - self, dataset: Dataset, column_names: str | list[str] - ) -> list[list[Any]]: - """Group examples from the columns into a list of examples.""" - if isinstance(column_names, str): - return dataset[column_names] - - return [ - [example[col] for col in column_names] - for example in tqdm(dataset, desc=f"Processing columns {column_names}") - ] - def _add_main_score(self, scores) -> None: scores["main_score"] = scores[self.metadata.main_score] @@ -60,11 +47,10 @@ def _evaluate_subset( encode_kwargs: dict[str, Any] = {}, **kwargs, ) -> ScoresDict: - images = self._preprocess_column(dataset, self.images_column_names) - texts = self._preprocess_column(dataset, self.texts_column_names) evaluator = ImageTextPairClassificationEvaluator( - images, - texts, + dataset, + images_column_names=self.images_column_names, + texts_column_names=self.texts_column_names, task_name=self.metadata.name, **kwargs, ) diff --git a/mteb/evaluation/evaluators/Image/ImageTextPairClassificationEvaluator.py 
b/mteb/evaluation/evaluators/Image/ImageTextPairClassificationEvaluator.py index 403b3758f..b548da365 100644 --- a/mteb/evaluation/evaluators/Image/ImageTextPairClassificationEvaluator.py +++ b/mteb/evaluation/evaluators/Image/ImageTextPairClassificationEvaluator.py @@ -1,18 +1,58 @@ from __future__ import annotations -import itertools import logging from typing import Any import torch import torch.nn.functional as F -from PIL import Image +from torch.utils.data import DataLoader +from torchvision import transforms from mteb.encoder_interface import Encoder, EncoderWithSimilarity from mteb.evaluation.evaluators.Evaluator import Evaluator logger = logging.getLogger(__name__) +transform = transforms.Compose([transforms.PILToTensor()]) + + +class ImageTextDataset(torch.utils.data.Dataset): + def __init__( + self, hf_dataset, images_column_names, texts_column_names, transform=None + ): + self.dataset = hf_dataset + self.transform = transform + self.images_column_names = images_column_names + self.texts_column_names = texts_column_names + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, idx): + data = self.dataset[idx] + + # Get images + if isinstance(self.images_column_names, str): + images = data[self.images_column_names] + else: + images = [data[col] for col in self.images_column_names] + + # Apply transforms to images + if self.transform is not None: + images = [self.transform(img) for img in images] + + # Get texts + if isinstance(self.texts_column_names, str): + texts = data[self.texts_column_names] + else: + texts = [data[col] for col in self.texts_column_names] + + return images, texts + + +def custom_collate_fn(batch): + return batch + class ImageTextPairClassificationEvaluator(Evaluator): """Evaluate a model based on the similarity of the embeddings by calculating the accuracy of @@ -30,21 +70,22 @@ class ImageTextPairClassificationEvaluator(Evaluator): def __init__( self, - images: list[list[Image.Image]], - texts: list[list[str]], + dataset, + images_column_names: str | list[str], + texts_column_names: str | list[str], task_name: str | None = None, + transform=None, limit: int | None = None, **kwargs, ): super().__init__(**kwargs) if limit: - images = images[:limit] - texts = texts[:limit] - self.images = images - self.texts = texts + dataset = dataset.select(range(limit)) + self.dataset = dataset + self.images_column_names = images_column_names + self.texts_column_names = texts_column_names self.task_name = task_name - - assert len(self.images) == len(self.texts) + self.transform = transform def __call__( self, @@ -54,21 +95,31 @@ def __call__( if "batch_size" not in encode_kwargs: encode_kwargs["batch_size"] = 64 - num_samples = len(self.images) - num_images_per_sample = len(self.images[0]) - num_texts_per_sample = len(self.texts[0]) - - images = list(itertools.chain.from_iterable(self.images)) - texts = list(itertools.chain.from_iterable(self.texts)) - - image_embeddings = F.normalize( - model.get_image_embeddings(images, batch_size=encode_kwargs["batch_size"]), - dim=-1, - ).view(num_samples, num_images_per_sample, -1) - text_embeddings = F.normalize( - model.get_text_embeddings(texts, batch_size=encode_kwargs["batch_size"]), - dim=-1, - ).view(num_samples, num_texts_per_sample, -1) + data_loader = DataLoader( + ImageTextDataset( + self.dataset, + self.images_column_names, + self.texts_column_names, + transform=self.transform, + ), + batch_size=encode_kwargs["batch_size"], + shuffle=False, + # collate_fn=lambda x: x, # Identity collate function + 
collate_fn=custom_collate_fn, + num_workers=4, + ) + + num_images_per_sample = ( + len(self.images_column_names) + if isinstance(self.images_column_names, list) + else 1 + ) + num_texts_per_sample = ( + len(self.texts_column_names) + if isinstance(self.texts_column_names, list) + else 1 + ) + img_ground_truths = torch.arange(num_images_per_sample) caption_ground_truths = torch.arange(num_texts_per_sample) @@ -76,25 +127,42 @@ def __call__( text_score = [] score = [] - for i in range(num_samples): - images_emb = image_embeddings[i] - texts_emb = text_embeddings[i] - scores = ( - images_emb @ texts_emb.t() - ) # shape = (num_images_per_sample x num_texts_per_sample) - - image_closest_text = scores.argmax(dim=1) # shape = (num_images_per_sample) - text_closest_image = scores.argmax(dim=0) # shape = (num_texts_per_sample) - pred_text_is_correct = ( - (image_closest_text == img_ground_truths).all().item() - ) - pred_image_is_correct = ( - (text_closest_image == caption_ground_truths).all().item() - ) - all_correct = pred_text_is_correct and pred_image_is_correct - image_score.append(pred_image_is_correct) - text_score.append(pred_text_is_correct) - score.append(all_correct) + for batch in data_loader: + images_list, texts_list = zip(*batch) + images = [img for images in images_list for img in images] + texts = [txt for texts in texts_list for txt in texts] + images_emb = F.normalize( + model.get_image_embeddings(images, batch_size=len(images)), + dim=-1, + ).view(len(batch), num_images_per_sample, -1) + texts_emb = F.normalize( + model.get_text_embeddings(texts, batch_size=len(texts)), + dim=-1, + ).view(len(batch), num_texts_per_sample, -1) + for i in range(len(batch)): + img_emb = images_emb[i] + txt_emb = texts_emb[i] + + scores = ( + img_emb @ txt_emb.t() + ) # shape = (num_images_per_sample x num_texts_per_sample) + + image_closest_text = scores.argmax( + dim=1 + ) # shape = (num_images_per_sample) + text_closest_image = scores.argmax( + dim=0 + ) # shape = (num_texts_per_sample) + pred_text_is_correct = ( + (image_closest_text == img_ground_truths).all().item() + ) + pred_image_is_correct = ( + (text_closest_image == caption_ground_truths).all().item() + ) + all_correct = pred_text_is_correct and pred_image_is_correct + image_score.append(pred_image_is_correct) + text_score.append(pred_text_is_correct) + score.append(all_correct) metrics = {} metrics["image_acc"] = torch.Tensor(image_score).float().mean().item() diff --git a/mteb/tasks/Image/ImageTextPairClassification/AROCocoOrder.py b/mteb/tasks/Image/ImageTextPairClassification/AROCocoOrder.py new file mode 100644 index 000000000..bc9c50a3b --- /dev/null +++ b/mteb/tasks/Image/ImageTextPairClassification/AROCocoOrder.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from mteb.abstasks.Image.AbsTaskImageTextPairClassification import ( + AbsTaskImageTextPairClassification, +) +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class AROCocoOrder(AbsTaskImageTextPairClassification): + images_column_names = ["images"] + texts_column_names = [ + "correct_caption", + "hard_text_1", + "hard_text_2", + "hard_text_3", + "hard_text_4", + ] + + metadata = TaskMetadata( + name="AROCocoOrder", + description="Compositionality Evaluation of images to their captions." 
+ + "Each capation has four hard negatives created by order permutations.", + reference="https://proceedings.neurips.cc/paper_files/paper/2023/hash/63461de0b4cb760fc498e85b18a7fe81-Abstract-Datasets_and_Benchmarks.html", + dataset={ + "path": "gowitheflow/ARO-COCO-order", + "revision": "853ec8757226585a38a80886c51fe0f3f268787c", + }, + type="ImageTextPairClassification", + category="i2t", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="text_acc", + date=( + "2022-01-01", + "2022-12-31", + ), # Estimated range for the collection of data + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Caption Pairing"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + dialect=[], + modalities=["text", "image"], + sample_creation="created", + bibtex_citation="""@article{hsieh2024sugarcrepe, + title={Sugarcrepe: Fixing hackable benchmarks for vision-language compositionality}, + author={Hsieh, Cheng-Yu and Zhang, Jieyu and Ma, Zixian and Kembhavi, Aniruddha and Krishna, Ranjay}, + journal={Advances in neural information processing systems}, + volume={36}, + year={2024} +}""", + descriptive_stats={ + "n_samples": {"test": 25010}, + "avg_character_length": {"test": 1}, + }, + ) diff --git a/mteb/tasks/Image/ImageTextPairClassification/AROFlickrOrder.py b/mteb/tasks/Image/ImageTextPairClassification/AROFlickrOrder.py new file mode 100644 index 000000000..b423861f2 --- /dev/null +++ b/mteb/tasks/Image/ImageTextPairClassification/AROFlickrOrder.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from mteb.abstasks.Image.AbsTaskImageTextPairClassification import ( + AbsTaskImageTextPairClassification, +) +from mteb.abstasks.TaskMetadata import TaskMetadata + + +class AROFlickrOrder(AbsTaskImageTextPairClassification): + images_column_names = ["images"] + texts_column_names = [ + "correct_caption", + "hard_text_1", + "hard_text_2", + "hard_text_3", + "hard_text_4", + ] + + metadata = TaskMetadata( + name="AROFlickrOrder", + description="Compositionality Evaluation of images to their captions." 
+ + "Each capation has four hard negatives created by order permutations.", + reference="https://proceedings.neurips.cc/paper_files/paper/2023/hash/63461de0b4cb760fc498e85b18a7fe81-Abstract-Datasets_and_Benchmarks.html", + dataset={ + "path": "gowitheflow/ARO-Flickr-Order", + "revision": "1f9485f69c87947812378a1aedf86410c86a0aa8", + }, + type="ImageTextPairClassification", + category="i2t", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="text_acc", + date=( + "2022-01-01", + "2022-12-31", + ), # Estimated range for the collection of data + form=["written"], + domains=["Encyclopaedic"], + task_subtypes=["Caption Pairing"], + license="Not specified", + socioeconomic_status="mixed", + annotations_creators="expert-annotated", + dialect=[], + modalities=["text", "image"], + sample_creation="created", + bibtex_citation="""@article{hsieh2024sugarcrepe, + title={Sugarcrepe: Fixing hackable benchmarks for vision-language compositionality}, + author={Hsieh, Cheng-Yu and Zhang, Jieyu and Ma, Zixian and Kembhavi, Aniruddha and Krishna, Ranjay}, + journal={Advances in neural information processing systems}, + volume={36}, + year={2024} +}""", + descriptive_stats={ + "n_samples": {"test": 5000}, + "avg_character_length": {"test": 1}, + }, + ) diff --git a/mteb/tasks/Image/ImageTextPairClassification/__init__.py b/mteb/tasks/Image/ImageTextPairClassification/__init__.py index d35de07c2..69f0a9fbc 100644 --- a/mteb/tasks/Image/ImageTextPairClassification/__init__.py +++ b/mteb/tasks/Image/ImageTextPairClassification/__init__.py @@ -1,5 +1,7 @@ from __future__ import annotations +from .AROCocoOrder import * +from .AROFlickrOrder import * from .AROVisualAttribution import * from .AROVisualRelation import * from .SugarCrepe import * diff --git a/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/AROCocoOrder.json b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/AROCocoOrder.json new file mode 100644 index 000000000..8924145dd --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/AROCocoOrder.json @@ -0,0 +1,21 @@ +{ + "dataset_revision": "853ec8757226585a38a80886c51fe0f3f268787c", + "evaluation_time": 207.2516052722931, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.0, + "hf_subset": "default", + "image_acc": 0.0, + "languages": [ + "eng-Latn" + ], + "main_score": 0.4812075197696686, + "text_acc": 0.4812075197696686 + } + ] + }, + "task_name": "AROCocoOrder" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/AROFlickrOrder.json b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/AROFlickrOrder.json new file mode 100644 index 000000000..b69a24d89 --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/AROFlickrOrder.json @@ -0,0 +1,21 @@ +{ + "dataset_revision": "1f9485f69c87947812378a1aedf86410c86a0aa8", + "evaluation_time": 37.06100630760193, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.0, + "hf_subset": "default", + "image_acc": 0.0, + "languages": [ + "eng-Latn" + ], + "main_score": 0.5583999752998352, + "text_acc": 0.5583999752998352 + } + ] + }, + "task_name": "AROFlickrOrder" +} \ No newline at end of file diff --git 
a/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/AROCocoOrder.json b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/AROCocoOrder.json new file mode 100644 index 000000000..ed54c3847 --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/AROCocoOrder.json @@ -0,0 +1,21 @@ +{ + "dataset_revision": "853ec8757226585a38a80886c51fe0f3f268787c", + "evaluation_time": 158.77182126045227, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.0, + "hf_subset": "default", + "image_acc": 0.0, + "languages": [ + "eng-Latn" + ], + "main_score": 0.46009597182273865, + "text_acc": 0.46009597182273865 + } + ] + }, + "task_name": "AROCocoOrder" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/AROFlickrOrder.json b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/AROFlickrOrder.json new file mode 100644 index 000000000..6cdefe46f --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/AROFlickrOrder.json @@ -0,0 +1,21 @@ +{ + "dataset_revision": "1f9485f69c87947812378a1aedf86410c86a0aa8", + "evaluation_time": 29.562106609344482, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.0, + "hf_subset": "default", + "image_acc": 0.0, + "languages": [ + "eng-Latn" + ], + "main_score": 0.5612000226974487, + "text_acc": 0.5612000226974487 + } + ] + }, + "task_name": "AROFlickrOrder" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/AROCocoOrder.json b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/AROCocoOrder.json new file mode 100644 index 000000000..5d7cf611c --- /dev/null +++ b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/AROCocoOrder.json @@ -0,0 +1,21 @@ +{ + "dataset_revision": "853ec8757226585a38a80886c51fe0f3f268787c", + "evaluation_time": 432.3775689601898, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.0, + "hf_subset": "default", + "image_acc": 0.0, + "languages": [ + "eng-Latn" + ], + "main_score": 0.4538184702396393, + "text_acc": 0.4538184702396393 + } + ] + }, + "task_name": "AROCocoOrder" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/AROFlickrOrder.json b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/AROFlickrOrder.json new file mode 100644 index 000000000..b67b51a5c --- /dev/null +++ b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/AROFlickrOrder.json @@ -0,0 +1,21 @@ +{ + "dataset_revision": "1f9485f69c87947812378a1aedf86410c86a0aa8", + "evaluation_time": 84.40965294837952, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.0, + "hf_subset": "default", + "image_acc": 0.0, + "languages": [ + "eng-Latn" + ], + "main_score": 0.5440000295639038, + "text_acc": 0.5440000295639038 + } + ] + }, + "task_name": "AROFlickrOrder" +} \ No newline at end of file From e42e8680bd36b55090f02eb06970d124b81b5080 Mon Sep 17 00:00:00 2001 From: gowitheflow-1998 Date: Mon, 23 Sep 2024 17:51:19 +0100 Subject: [PATCH 2/6] fix 
meta data --- mteb/tasks/Image/Any2AnyRetrieval/eng/CIRRIT2IRetrieval.py | 2 -- .../Image/Any2AnyRetrieval/eng/Fashion200kI2TRetrieval.py | 1 - .../Image/Any2AnyRetrieval/eng/Fashion200kT2IRetrieval.py | 1 - .../Image/Any2AnyRetrieval/eng/FashionIQIT2IRetrieval.py | 1 - .../Image/Any2AnyRetrieval/eng/HatefulMemesI2TRetrieval.py | 2 -- .../Image/Any2AnyRetrieval/eng/HatefulMemesT2IRetrieval.py | 2 -- .../Image/Any2AnyRetrieval/eng/InfoSeekIT2ITRetrieval.py | 2 -- .../tasks/Image/Any2AnyRetrieval/eng/InfoSeekIT2TRetrieval.py | 2 -- mteb/tasks/Image/Any2AnyRetrieval/eng/MSCOCOI2TRetrieval.py | 2 -- mteb/tasks/Image/Any2AnyRetrieval/eng/MSCOCOT2IRetrieval.py | 1 - mteb/tasks/Image/Any2AnyRetrieval/eng/MemotionI2TRetrieval.py | 2 -- mteb/tasks/Image/Any2AnyRetrieval/eng/MemotionT2IRetrieval.py | 2 -- mteb/tasks/Image/Any2AnyRetrieval/eng/NIGHTSI2IRetrieval.py | 2 -- mteb/tasks/Image/Any2AnyRetrieval/eng/OVENIT2ITRetrieval.py | 2 -- mteb/tasks/Image/Any2AnyRetrieval/eng/OVENIT2TRetrieval.py | 2 -- mteb/tasks/Image/Any2AnyRetrieval/eng/SciMMIRI2TRetrieval.py | 2 -- mteb/tasks/Image/Any2AnyRetrieval/eng/SciMMIRT2IRetrieval.py | 2 -- mteb/tasks/Image/Any2AnyRetrieval/eng/TUBerlinT2IRetrieval.py | 2 -- .../Image/Any2AnyRetrieval/eng/VisualNewsI2TRetrieval.py | 2 -- .../Image/Any2AnyRetrieval/eng/VisualNewsT2IRetrieval.py | 2 -- mteb/tasks/Image/Any2AnyRetrieval/eng/WebQAT2ITRetrieval.py | 2 -- mteb/tasks/Image/Any2AnyRetrieval/eng/WebQAT2TRetrieval.py | 2 -- mteb/tasks/Image/Clustering/eng/CIFAR.py | 1 - mteb/tasks/Image/Clustering/eng/ImageNet.py | 1 - mteb/tasks/Image/Clustering/eng/TinyImageNet.py | 2 -- .../Image/ImageClassification/eng/BirdsnapClassification.py | 1 - mteb/tasks/Image/ImageClassification/eng/CIFAR.py | 1 - .../Image/ImageClassification/eng/Caltech101Classification.py | 1 - .../Image/ImageClassification/eng/Country211Classification.py | 1 - mteb/tasks/Image/ImageClassification/eng/DTDClassification.py | 1 - .../Image/ImageClassification/eng/EuroSATClassification.py | 1 - .../Image/ImageClassification/eng/FER2013Classification.py | 1 - .../ImageClassification/eng/FGVCAircraftClassification.py | 1 - .../Image/ImageClassification/eng/Food101Classification.py | 1 - .../Image/ImageClassification/eng/GTSRBClassification.py | 1 - mteb/tasks/Image/ImageClassification/eng/Imagenet1k.py | 1 - .../Image/ImageClassification/eng/MNISTClassification.py | 1 - .../ImageClassification/eng/OxfordFlowersClassification.py | 1 - .../Image/ImageClassification/eng/OxfordPetsClassification.py | 1 - .../ImageClassification/eng/PatchCamelyonClassification.py | 1 - .../Image/ImageClassification/eng/RESISC45Classification.py | 1 - .../Image/ImageClassification/eng/STL10Classification.py | 1 - .../Image/ImageClassification/eng/SUN397Classification.py | 1 - .../Image/ImageClassification/eng/UCF101Classification.py | 1 - .../Image/ImageMultilabelClassification/eng/PascalVOC2007.py | 1 - mteb/tasks/Image/ImageTextPairClassification/AROCocoOrder.py | 4 +--- .../tasks/Image/ImageTextPairClassification/AROFlickrOrder.py | 4 +--- .../Image/ImageTextPairClassification/AROVisualAttribution.py | 4 +--- .../Image/ImageTextPairClassification/AROVisualRelation.py | 4 +--- mteb/tasks/Image/ImageTextPairClassification/SugarCrepe.py | 4 +--- mteb/tasks/Image/ImageTextPairClassification/Winoground.py | 4 +--- 51 files changed, 6 insertions(+), 82 deletions(-) diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/CIRRIT2IRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/CIRRIT2IRetrieval.py index 417e5d6ca..2e45933ea 100644 
--- a/mteb/tasks/Image/Any2AnyRetrieval/eng/CIRRIT2IRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/CIRRIT2IRetrieval.py @@ -20,11 +20,9 @@ class CIRRIT2IRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2018-01-01", "2018-12-31"), - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/Fashion200kI2TRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/Fashion200kI2TRetrieval.py index f0e095423..3e24c8691 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/Fashion200kI2TRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/Fashion200kI2TRetrieval.py @@ -23,7 +23,6 @@ class Fashion200kI2TRetrieval(AbsTaskAny2AnyRetrieval): domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="Apache-2.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/Fashion200kT2IRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/Fashion200kT2IRetrieval.py index 868266d67..f54a3a38b 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/Fashion200kT2IRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/Fashion200kT2IRetrieval.py @@ -23,7 +23,6 @@ class Fashion200kT2IRetrieval(AbsTaskAny2AnyRetrieval): domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="Apache-2.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/FashionIQIT2IRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/FashionIQIT2IRetrieval.py index a58ed15dd..6072354fe 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/FashionIQIT2IRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/FashionIQIT2IRetrieval.py @@ -23,7 +23,6 @@ class FashionIQIT2IRetrieval(AbsTaskAny2AnyRetrieval): domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="Apache-2.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/HatefulMemesI2TRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/HatefulMemesI2TRetrieval.py index 817ea1c67..c92a49791 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/HatefulMemesI2TRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/HatefulMemesI2TRetrieval.py @@ -74,11 +74,9 @@ class HatefulMemesI2TRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2020-01-01", "2020-12-31"), - form=["found"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="MIT", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/HatefulMemesT2IRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/HatefulMemesT2IRetrieval.py index 0a55e446e..067396752 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/HatefulMemesT2IRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/HatefulMemesT2IRetrieval.py @@ -74,11 +74,9 @@ class HatefulMemesT2IRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2020-01-01", "2020-12-31"), - form=["found"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="MIT", - 
socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/InfoSeekIT2ITRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/InfoSeekIT2ITRetrieval.py index f7cb041bc..e35da59fc 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/InfoSeekIT2ITRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/InfoSeekIT2ITRetrieval.py @@ -20,11 +20,9 @@ class InfoSeekIT2ITRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2023-01-01", "2023-12-31"), - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/InfoSeekIT2TRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/InfoSeekIT2TRetrieval.py index cc2b23ea8..4d88a7ac8 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/InfoSeekIT2TRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/InfoSeekIT2TRetrieval.py @@ -20,11 +20,9 @@ class InfoSeekIT2TRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2023-01-01", "2023-12-31"), - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/MSCOCOI2TRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/MSCOCOI2TRetrieval.py index 4155a3d31..dff57f5a5 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/MSCOCOI2TRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/MSCOCOI2TRetrieval.py @@ -20,11 +20,9 @@ class MSCOCOI2TRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2018-01-01", "2018-12-31"), - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/MSCOCOT2IRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/MSCOCOT2IRetrieval.py index 1c5e440e2..9ce5fd839 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/MSCOCOT2IRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/MSCOCOT2IRetrieval.py @@ -23,7 +23,6 @@ class MSCOCOT2IRetrieval(AbsTaskAny2AnyRetrieval): domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/MemotionI2TRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/MemotionI2TRetrieval.py index 9247a12f8..5eda9cd29 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/MemotionI2TRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/MemotionI2TRetrieval.py @@ -103,11 +103,9 @@ class MemotionI2TRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2020-01-01", "2020-12-31"), - form=["found"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="MIT", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/MemotionT2IRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/MemotionT2IRetrieval.py index 
f214bd2ea..b82b6367a 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/MemotionT2IRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/MemotionT2IRetrieval.py @@ -102,11 +102,9 @@ class MemotionT2IRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2020-01-01", "2020-12-31"), - form=["found"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="MIT", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/NIGHTSI2IRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/NIGHTSI2IRetrieval.py index 73d3f7c28..3c7798c77 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/NIGHTSI2IRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/NIGHTSI2IRetrieval.py @@ -19,11 +19,9 @@ class NIGHTSI2IRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2023-01-01", "2023-12-31"), - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Duplicate Image Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/OVENIT2ITRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/OVENIT2ITRetrieval.py index 0f53eb7e6..0a720ec99 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/OVENIT2ITRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/OVENIT2ITRetrieval.py @@ -19,11 +19,9 @@ class OVENIT2ITRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2023-01-01", "2023-12-31"), - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["image", "text"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/OVENIT2TRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/OVENIT2TRetrieval.py index 3df5b9262..2c171c778 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/OVENIT2TRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/OVENIT2TRetrieval.py @@ -19,11 +19,9 @@ class OVENIT2TRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2023-01-01", "2023-12-31"), - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/SciMMIRI2TRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/SciMMIRI2TRetrieval.py index eb2c24aeb..cc96d134a 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/SciMMIRI2TRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/SciMMIRI2TRetrieval.py @@ -79,11 +79,9 @@ class SciMMIRI2TRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2023-01-01", "2023-12-31"), - form=["found"], domains=["Academic"], task_subtypes=["Image Text Retrieval"], license="MIT", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/SciMMIRT2IRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/SciMMIRT2IRetrieval.py index e92bd637f..41c2c98e7 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/SciMMIRT2IRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/SciMMIRT2IRetrieval.py @@ -79,11 +79,9 @@ class 
SciMMIRT2IRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2023-01-01", "2023-12-31"), - form=["found"], domains=["Academic"], task_subtypes=["Image Text Retrieval"], license="MIT", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/TUBerlinT2IRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/TUBerlinT2IRetrieval.py index 7c7bddfe4..754fa1491 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/TUBerlinT2IRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/TUBerlinT2IRetrieval.py @@ -20,11 +20,9 @@ class TUBerlinT2IRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2012-01-01", "2012-12-31"), - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/VisualNewsI2TRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/VisualNewsI2TRetrieval.py index 2de171309..bf99c199a 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/VisualNewsI2TRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/VisualNewsI2TRetrieval.py @@ -19,11 +19,9 @@ class VisualNewsI2TRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2020-01-01", "2020-12-31"), - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["image", "text"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/VisualNewsT2IRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/VisualNewsT2IRetrieval.py index 091d7a7f0..8bd3f8278 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/VisualNewsT2IRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/VisualNewsT2IRetrieval.py @@ -19,11 +19,9 @@ class VisualNewsT2IRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2020-01-01", "2020-12-31"), - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["image", "text"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/WebQAT2ITRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/WebQAT2ITRetrieval.py index 50725b79b..b3f21869e 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/WebQAT2ITRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/WebQAT2ITRetrieval.py @@ -19,11 +19,9 @@ class WebQAT2ITRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2022-01-01", "2022-12-31"), - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["image", "text"], diff --git a/mteb/tasks/Image/Any2AnyRetrieval/eng/WebQAT2TRetrieval.py b/mteb/tasks/Image/Any2AnyRetrieval/eng/WebQAT2TRetrieval.py index 14c9c0214..f53415087 100644 --- a/mteb/tasks/Image/Any2AnyRetrieval/eng/WebQAT2TRetrieval.py +++ b/mteb/tasks/Image/Any2AnyRetrieval/eng/WebQAT2TRetrieval.py @@ -19,11 +19,9 @@ class WebQAT2TRetrieval(AbsTaskAny2AnyRetrieval): eval_langs=["eng-Latn"], main_score="ndcg_at_10", date=("2022-01-01", "2022-12-31"), - form=["written"], 
domains=["Encyclopaedic"], task_subtypes=["Image Text Retrieval"], license="CC BY-SA 4.0", - socioeconomic_status="medium", annotations_creators="derived", dialect=[], modalities=["text"], diff --git a/mteb/tasks/Image/Clustering/eng/CIFAR.py b/mteb/tasks/Image/Clustering/eng/CIFAR.py index e7f7a1d63..61250cc3f 100644 --- a/mteb/tasks/Image/Clustering/eng/CIFAR.py +++ b/mteb/tasks/Image/Clustering/eng/CIFAR.py @@ -25,7 +25,6 @@ class CIFAR10Clustering(AbsTaskImageClustering): domains=["Web"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/Clustering/eng/ImageNet.py b/mteb/tasks/Image/Clustering/eng/ImageNet.py index dcf858732..daf8ab8da 100644 --- a/mteb/tasks/Image/Clustering/eng/ImageNet.py +++ b/mteb/tasks/Image/Clustering/eng/ImageNet.py @@ -22,7 +22,6 @@ class ImageNetDog15Clustering(AbsTaskImageClustering): domains=["Web"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/Clustering/eng/TinyImageNet.py b/mteb/tasks/Image/Clustering/eng/TinyImageNet.py index 9bafc348a..14123e211 100644 --- a/mteb/tasks/Image/Clustering/eng/TinyImageNet.py +++ b/mteb/tasks/Image/Clustering/eng/TinyImageNet.py @@ -22,11 +22,9 @@ class TinyImageNet(AbsTaskImageClustering): "2012-01-01", "2015-12-31", ), # Estimated range for the collection of reviews - form=["written"], domains=["Reviews"], task_subtypes=["Sentiment/Hate speech"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/BirdsnapClassification.py b/mteb/tasks/Image/ImageClassification/eng/BirdsnapClassification.py index 38016e5e7..f29259ae2 100644 --- a/mteb/tasks/Image/ImageClassification/eng/BirdsnapClassification.py +++ b/mteb/tasks/Image/ImageClassification/eng/BirdsnapClassification.py @@ -25,7 +25,6 @@ class BirdsnapClassification(AbsTaskImageClassification): domains=["Encyclopaedic"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/CIFAR.py b/mteb/tasks/Image/ImageClassification/eng/CIFAR.py index 9b4f45e38..2fe4fc280 100644 --- a/mteb/tasks/Image/ImageClassification/eng/CIFAR.py +++ b/mteb/tasks/Image/ImageClassification/eng/CIFAR.py @@ -25,7 +25,6 @@ class CIFAR10Classification(AbsTaskImageClassification): domains=["Web"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/Caltech101Classification.py b/mteb/tasks/Image/ImageClassification/eng/Caltech101Classification.py index fe62f955b..5c79a4104 100644 --- a/mteb/tasks/Image/ImageClassification/eng/Caltech101Classification.py +++ b/mteb/tasks/Image/ImageClassification/eng/Caltech101Classification.py @@ -26,7 +26,6 @@ class Caltech101Classification(AbsTaskImageClassification): domains=["Encyclopaedic"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/Country211Classification.py 
b/mteb/tasks/Image/ImageClassification/eng/Country211Classification.py index f02f0e319..5f34c09a1 100644 --- a/mteb/tasks/Image/ImageClassification/eng/Country211Classification.py +++ b/mteb/tasks/Image/ImageClassification/eng/Country211Classification.py @@ -25,7 +25,6 @@ class Country211Classification(AbsTaskImageClassification): domains=["Scene"], task_subtypes=["Scene recognition"], license="CC BY-SA 4.0", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/DTDClassification.py b/mteb/tasks/Image/ImageClassification/eng/DTDClassification.py index 25f6ba040..aabb03f02 100644 --- a/mteb/tasks/Image/ImageClassification/eng/DTDClassification.py +++ b/mteb/tasks/Image/ImageClassification/eng/DTDClassification.py @@ -25,7 +25,6 @@ class DTDClassification(AbsTaskImageClassification): domains=["Encyclopaedic"], task_subtypes=["Textures recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/EuroSATClassification.py b/mteb/tasks/Image/ImageClassification/eng/EuroSATClassification.py index 4930c13d1..6ef26a0db 100644 --- a/mteb/tasks/Image/ImageClassification/eng/EuroSATClassification.py +++ b/mteb/tasks/Image/ImageClassification/eng/EuroSATClassification.py @@ -25,7 +25,6 @@ class EuroSATClassification(AbsTaskImageClassification): domains=["Encyclopaedic"], task_subtypes=["Scene recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/FER2013Classification.py b/mteb/tasks/Image/ImageClassification/eng/FER2013Classification.py index 9db8b017f..ea987fb4e 100644 --- a/mteb/tasks/Image/ImageClassification/eng/FER2013Classification.py +++ b/mteb/tasks/Image/ImageClassification/eng/FER2013Classification.py @@ -25,7 +25,6 @@ class FER2013Classification(AbsTaskImageClassification): domains=["Encyclopaedic"], task_subtypes=["Emotion recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/FGVCAircraftClassification.py b/mteb/tasks/Image/ImageClassification/eng/FGVCAircraftClassification.py index 9b061e6dd..74659b5e9 100644 --- a/mteb/tasks/Image/ImageClassification/eng/FGVCAircraftClassification.py +++ b/mteb/tasks/Image/ImageClassification/eng/FGVCAircraftClassification.py @@ -25,7 +25,6 @@ class FGVCAircraftClassification(AbsTaskImageClassification): domains=["Encyclopaedic"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/Food101Classification.py b/mteb/tasks/Image/ImageClassification/eng/Food101Classification.py index 04389db8f..34b2592e2 100644 --- a/mteb/tasks/Image/ImageClassification/eng/Food101Classification.py +++ b/mteb/tasks/Image/ImageClassification/eng/Food101Classification.py @@ -25,7 +25,6 @@ class Food101Classification(AbsTaskImageClassification): domains=["Web"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/GTSRBClassification.py 
b/mteb/tasks/Image/ImageClassification/eng/GTSRBClassification.py index e1a222d24..659615132 100644 --- a/mteb/tasks/Image/ImageClassification/eng/GTSRBClassification.py +++ b/mteb/tasks/Image/ImageClassification/eng/GTSRBClassification.py @@ -25,7 +25,6 @@ class GTSRBClassification(AbsTaskImageClassification): task_subtypes=["Activity recognition"], domains=["Scene"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/Imagenet1k.py b/mteb/tasks/Image/ImageClassification/eng/Imagenet1k.py index d15116039..d3b847480 100644 --- a/mteb/tasks/Image/ImageClassification/eng/Imagenet1k.py +++ b/mteb/tasks/Image/ImageClassification/eng/Imagenet1k.py @@ -25,7 +25,6 @@ class Imagenet1kClassification(AbsTaskImageClassification): domains=["Scene"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="human-annotated", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/MNISTClassification.py b/mteb/tasks/Image/ImageClassification/eng/MNISTClassification.py index f3831abdb..4ea68ddea 100644 --- a/mteb/tasks/Image/ImageClassification/eng/MNISTClassification.py +++ b/mteb/tasks/Image/ImageClassification/eng/MNISTClassification.py @@ -25,7 +25,6 @@ class MNISTClassification(AbsTaskImageClassification): domains=["Encyclopaedic"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/OxfordFlowersClassification.py b/mteb/tasks/Image/ImageClassification/eng/OxfordFlowersClassification.py index c0a10de48..d07badc30 100644 --- a/mteb/tasks/Image/ImageClassification/eng/OxfordFlowersClassification.py +++ b/mteb/tasks/Image/ImageClassification/eng/OxfordFlowersClassification.py @@ -25,7 +25,6 @@ class OxfordFlowersClassification(AbsTaskImageClassification): domains=["Reviews"], task_subtypes=["Sentiment/Hate speech"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/OxfordPetsClassification.py b/mteb/tasks/Image/ImageClassification/eng/OxfordPetsClassification.py index cf537648e..603dad127 100644 --- a/mteb/tasks/Image/ImageClassification/eng/OxfordPetsClassification.py +++ b/mteb/tasks/Image/ImageClassification/eng/OxfordPetsClassification.py @@ -25,7 +25,6 @@ class OxfordPetsClassification(AbsTaskImageClassification): domains=["Encyclopaedic"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/PatchCamelyonClassification.py b/mteb/tasks/Image/ImageClassification/eng/PatchCamelyonClassification.py index 6998c7628..a6f946667 100644 --- a/mteb/tasks/Image/ImageClassification/eng/PatchCamelyonClassification.py +++ b/mteb/tasks/Image/ImageClassification/eng/PatchCamelyonClassification.py @@ -25,7 +25,6 @@ class PatchCamelyonClassification(AbsTaskImageClassification): domains=["Medical"], task_subtypes=["Tumor detection"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/RESISC45Classification.py 
b/mteb/tasks/Image/ImageClassification/eng/RESISC45Classification.py index afbc8fe1d..c767e3b33 100644 --- a/mteb/tasks/Image/ImageClassification/eng/RESISC45Classification.py +++ b/mteb/tasks/Image/ImageClassification/eng/RESISC45Classification.py @@ -25,7 +25,6 @@ class RESISC45Classification(AbsTaskImageClassification): domains=["Encyclopaedic"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/STL10Classification.py b/mteb/tasks/Image/ImageClassification/eng/STL10Classification.py index 9531e1c1f..02593fe4e 100644 --- a/mteb/tasks/Image/ImageClassification/eng/STL10Classification.py +++ b/mteb/tasks/Image/ImageClassification/eng/STL10Classification.py @@ -25,7 +25,6 @@ class STL10Classification(AbsTaskImageClassification): domains=["Encyclopaedic"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/SUN397Classification.py b/mteb/tasks/Image/ImageClassification/eng/SUN397Classification.py index eef0ccbfc..d23844ec4 100644 --- a/mteb/tasks/Image/ImageClassification/eng/SUN397Classification.py +++ b/mteb/tasks/Image/ImageClassification/eng/SUN397Classification.py @@ -25,7 +25,6 @@ class SUN397Classification(AbsTaskImageClassification): domains=["Encyclopaedic"], task_subtypes=["Scene recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageClassification/eng/UCF101Classification.py b/mteb/tasks/Image/ImageClassification/eng/UCF101Classification.py index 5a7640910..ef82d99d9 100644 --- a/mteb/tasks/Image/ImageClassification/eng/UCF101Classification.py +++ b/mteb/tasks/Image/ImageClassification/eng/UCF101Classification.py @@ -29,7 +29,6 @@ class UCF101Classification(AbsTaskImageClassification): domains=["Scene"], task_subtypes=["Activity recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageMultilabelClassification/eng/PascalVOC2007.py b/mteb/tasks/Image/ImageMultilabelClassification/eng/PascalVOC2007.py index a29108cf6..fa0628b35 100644 --- a/mteb/tasks/Image/ImageMultilabelClassification/eng/PascalVOC2007.py +++ b/mteb/tasks/Image/ImageMultilabelClassification/eng/PascalVOC2007.py @@ -28,7 +28,6 @@ class VOC2007Classification(AbsTaskImageMultilabelClassification): domains=["Encyclopaedic"], task_subtypes=["Object recognition"], license="Not specified", - socioeconomic_status="mixed", annotations_creators="derived", dialect=[], modalities=["image"], diff --git a/mteb/tasks/Image/ImageTextPairClassification/AROCocoOrder.py b/mteb/tasks/Image/ImageTextPairClassification/AROCocoOrder.py index bc9c50a3b..c72ef004b 100644 --- a/mteb/tasks/Image/ImageTextPairClassification/AROCocoOrder.py +++ b/mteb/tasks/Image/ImageTextPairClassification/AROCocoOrder.py @@ -34,11 +34,9 @@ class AROCocoOrder(AbsTaskImageTextPairClassification): "2022-01-01", "2022-12-31", ), # Estimated range for the collection of data - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Caption Pairing"], - license="Not specified", - socioeconomic_status="mixed", + license="MIT", annotations_creators="expert-annotated", dialect=[], modalities=["text", "image"], 
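For reference, the per-sample scoring that the rewritten ImageTextPairClassificationEvaluator applies to these order-permutation tasks can be sketched as below. This is a minimal, self-contained approximation rather than part of the patch: random tensors stand in for real CLIP image/text embeddings, each sample pairs one image with five candidate captions (the correct caption first), and accuracy counts samples where images and captions are matched index-for-index in both directions.

# Sketch of the evaluator's scoring loop, with dummy embeddings in place of
# model.get_image_embeddings / model.get_text_embeddings.
import torch
import torch.nn.functional as F

num_samples, num_images, num_texts, dim = 4, 1, 5, 16
image_emb = F.normalize(torch.randn(num_samples, num_images, dim), dim=-1)
text_emb = F.normalize(torch.randn(num_samples, num_texts, dim), dim=-1)

image_score, text_score, score = [], [], []
for img, txt in zip(image_emb, text_emb):
    sims = img @ txt.t()  # (num_images, num_texts) similarity matrix for one sample
    # ground truth is index-aligned: image i should be closest to caption i,
    # and the correct caption sits at index 0 of the candidate list
    text_correct = (sims.argmax(dim=1) == torch.arange(num_images)).all().item()
    image_correct = (sims.argmax(dim=0) == torch.arange(num_texts)).all().item()
    text_score.append(text_correct)
    image_score.append(image_correct)
    score.append(text_correct and image_correct)

print({
    "image_acc": torch.tensor(image_score).float().mean().item(),
    "text_acc": torch.tensor(text_score).float().mean().item(),
    "accuracy": torch.tensor(score).float().mean().item(),
})

With a single image per sample, the image-side check cannot align all five captions to distinct images, which is consistent with the image_acc of 0.0 in the result files above and with text_acc serving as the main score.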
diff --git a/mteb/tasks/Image/ImageTextPairClassification/AROFlickrOrder.py b/mteb/tasks/Image/ImageTextPairClassification/AROFlickrOrder.py index b423861f2..bd8ec152b 100644 --- a/mteb/tasks/Image/ImageTextPairClassification/AROFlickrOrder.py +++ b/mteb/tasks/Image/ImageTextPairClassification/AROFlickrOrder.py @@ -34,11 +34,9 @@ class AROFlickrOrder(AbsTaskImageTextPairClassification): "2022-01-01", "2022-12-31", ), # Estimated range for the collection of data - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Caption Pairing"], - license="Not specified", - socioeconomic_status="mixed", + license="MIT", annotations_creators="expert-annotated", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/ImageTextPairClassification/AROVisualAttribution.py b/mteb/tasks/Image/ImageTextPairClassification/AROVisualAttribution.py index 7d49f49d2..83755ca48 100644 --- a/mteb/tasks/Image/ImageTextPairClassification/AROVisualAttribution.py +++ b/mteb/tasks/Image/ImageTextPairClassification/AROVisualAttribution.py @@ -27,11 +27,9 @@ class AROVisualAttribution(AbsTaskImageTextPairClassification): "2022-01-01", "2022-12-31", ), # Estimated range for the collection of data - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Caption Pairing"], - license="Not specified", - socioeconomic_status="mixed", + license="MIT", annotations_creators="expert-annotated", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/ImageTextPairClassification/AROVisualRelation.py b/mteb/tasks/Image/ImageTextPairClassification/AROVisualRelation.py index 980638ee9..6d222b90b 100644 --- a/mteb/tasks/Image/ImageTextPairClassification/AROVisualRelation.py +++ b/mteb/tasks/Image/ImageTextPairClassification/AROVisualRelation.py @@ -27,11 +27,9 @@ class AROVisualRelation(AbsTaskImageTextPairClassification): "2022-01-01", "2022-12-31", ), # Estimated range for the collection of data - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Caption Pairing"], - license="Not specified", - socioeconomic_status="mixed", + license="MIT", annotations_creators="expert-annotated", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/ImageTextPairClassification/SugarCrepe.py b/mteb/tasks/Image/ImageTextPairClassification/SugarCrepe.py index 1ecdf253c..1db8fd656 100644 --- a/mteb/tasks/Image/ImageTextPairClassification/SugarCrepe.py +++ b/mteb/tasks/Image/ImageTextPairClassification/SugarCrepe.py @@ -29,11 +29,9 @@ class SugarCrepe(AbsTaskImageTextPairClassification): "2022-01-01", "2022-12-31", ), # Estimated range for the collection of data - form=["written"], domains=["Encyclopaedic"], task_subtypes=["Caption Pairing"], - license="Not specified", - socioeconomic_status="mixed", + license="MIT", annotations_creators="expert-annotated", dialect=[], modalities=["text", "image"], diff --git a/mteb/tasks/Image/ImageTextPairClassification/Winoground.py b/mteb/tasks/Image/ImageTextPairClassification/Winoground.py index c8efea08e..e598b9f4c 100644 --- a/mteb/tasks/Image/ImageTextPairClassification/Winoground.py +++ b/mteb/tasks/Image/ImageTextPairClassification/Winoground.py @@ -28,11 +28,9 @@ class Winoground(AbsTaskImageTextPairClassification): "2022-01-01", "2022-04-07", ), # Estimated range for the collection of data - form=["written"], domains=["Social"], # Getty Images. Could be constructed? 
task_subtypes=["Caption Pairing"], - license="Not specified", - socioeconomic_status="mixed", + license="META Images Reseaerch License", annotations_creators="expert-annotated", dialect=[], modalities=["text", "image"], From 6b76812f936f2b9f758736ff4944479598ed5a96 Mon Sep 17 00:00:00 2001 From: Isaac Chung Date: Thu, 26 Sep 2024 15:04:17 +0000 Subject: [PATCH 3/6] fix validate points --- docs/mmteb/validate_points.py | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/mmteb/validate_points.py b/docs/mmteb/validate_points.py index 2a898de9b..5b2a4614a 100644 --- a/docs/mmteb/validate_points.py +++ b/docs/mmteb/validate_points.py @@ -2,6 +2,7 @@ import logging import os +from typing import Optional from jsonlines import Reader from pydantic import BaseModel, ConfigDict, Field, ValidationError, conint, constr From aabf0d4aed6cd48878c5453112fdfe60abe7939f Mon Sep 17 00:00:00 2001 From: gowitheflow-1998 Date: Thu, 10 Oct 2024 18:52:51 +0100 Subject: [PATCH 4/6] CV-Bench --- .../Image/AbsTaskAny2TextMultipleChoice.py | 65 +++++ mteb/abstasks/TaskMetadata.py | 1 + mteb/abstasks/__init__.py | 1 + .../Image/Any2TextMultipleChoiceEvaluator.py | 98 +++++++ mteb/evaluation/evaluators/__init__.py | 1 + .../Image/Any2TextMultipleChoice/__init__.py | 3 + .../Any2TextMultipleChoice/eng/CVBench.py | 266 ++++++++++++++++++ .../Any2TextMultipleChoice/eng/__init__.py | 0 mteb/tasks/Image/__init__.py | 1 + .../CVBenchCount.json | 19 ++ .../CVBenchDepth.json | 19 ++ .../CVBenchDistance.json | 19 ++ .../CVBenchRelation.json | 19 ++ .../CVBenchCount.json | 19 ++ .../CVBenchDepth.json | 19 ++ .../CVBenchDistance.json | 19 ++ .../CVBenchRelation.json | 19 ++ .../CVBenchCount.json | 19 ++ .../CVBenchDepth.json | 19 ++ .../CVBenchDistance.json | 19 ++ .../CVBenchRelation.json | 19 ++ 21 files changed, 664 insertions(+) create mode 100644 mteb/abstasks/Image/AbsTaskAny2TextMultipleChoice.py create mode 100644 mteb/evaluation/evaluators/Image/Any2TextMultipleChoiceEvaluator.py create mode 100644 mteb/tasks/Image/Any2TextMultipleChoice/__init__.py create mode 100644 mteb/tasks/Image/Any2TextMultipleChoice/eng/CVBench.py create mode 100644 mteb/tasks/Image/Any2TextMultipleChoice/eng/__init__.py create mode 100644 results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchCount.json create mode 100644 results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchDepth.json create mode 100644 results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchDistance.json create mode 100644 results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchRelation.json create mode 100644 results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchCount.json create mode 100644 results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchDepth.json create mode 100644 results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchDistance.json create mode 100644 results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchRelation.json create mode 100644 results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchCount.json create mode 100644 results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchDepth.json create mode 100644 results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchDistance.json 
 create mode 100644 results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchRelation.json

diff --git a/mteb/abstasks/Image/AbsTaskAny2TextMultipleChoice.py b/mteb/abstasks/Image/AbsTaskAny2TextMultipleChoice.py
new file mode 100644
index 000000000..6172eae3f
--- /dev/null
+++ b/mteb/abstasks/Image/AbsTaskAny2TextMultipleChoice.py
@@ -0,0 +1,65 @@
+from __future__ import annotations
+
+import logging
+from typing import Any
+
+from datasets import Dataset
+
+from ...encoder_interface import Encoder, EncoderWithQueryCorpusEncode
+from ...evaluation.evaluators import Any2TextMultipleChoiceEvaluator
+from ...load_results.mteb_results import ScoresDict
+from ..AbsTask import AbsTask
+
+logger = logging.getLogger(__name__)
+
+
+class AbsTaskAny2TextMultipleChoice(AbsTask):
+    """Abstract class for Any to Text Multiple Choice tasks,
+    where the queries can be either text or image, or both.
+    This task assesses interleaved encoding of queries;
+    the similarity computed between the queries and the candidate choices is ranked.
+
+    self.load_data() must generate a huggingface dataset with a split matching self.metadata_dict["eval_splits"], and assign it to self.dataset.
+    """
+
+    query_modalities: list[str] | str = ["image", "text"]
+    query_column_names: dict = {"image": "image", "text": "question"}
+    label_column_name: str = "answer"
+    choices_column_name: str = "choices"
+
+    def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+
+    def _add_main_score(self, scores) -> None:
+        scores["main_score"] = scores[self.metadata.main_score]
+
+    def _calculate_metrics_from_split(
+        self, split: str, hf_subset: str | None = None, compute_overall: bool = False
+    ):
+        pass
+
+    def _evaluate_subset(
+        self,
+        model: Encoder | EncoderWithQueryCorpusEncode,
+        dataset: Dataset,
+        *,
+        encode_kwargs: dict[str, Any] = {},
+        **kwargs,
+    ) -> ScoresDict:
+        for modality in self.query_modalities:
+            if modality not in self.query_column_names:
+                raise KeyError(
+                    f"query column name of modality {modality} is not defined"
+                )
+        evaluator = Any2TextMultipleChoiceEvaluator(
+            dataset,
+            query_modalities=self.query_modalities,
+            query_column_names=self.query_column_names,
+            label_column_name=self.label_column_name,
+            choices_column_name=self.choices_column_name,
+            task_name=self.metadata.name,
+            **kwargs,
+        )
+        scores = evaluator(model, encode_kwargs=encode_kwargs)
+        self._add_main_score(scores)
+        return scores
diff --git a/mteb/abstasks/TaskMetadata.py b/mteb/abstasks/TaskMetadata.py
index 290932dd2..09582d779 100644
--- a/mteb/abstasks/TaskMetadata.py
+++ b/mteb/abstasks/TaskMetadata.py
@@ -100,6 +100,7 @@
     "Speed",
     "ZeroShotClassification",
     "ImageTextPairClassification",
+    "Any2TextMutipleChoice",
 ]

 TASK_CATEGORY = Literal[
diff --git a/mteb/abstasks/__init__.py b/mteb/abstasks/__init__.py
index f188430f4..f70cbd532 100644
--- a/mteb/abstasks/__init__.py
+++ b/mteb/abstasks/__init__.py
@@ -14,6 +14,7 @@
 from .AbsTaskSTS import *
 from .AbsTaskSummarization import *
 from .Image.AbsTaskAny2AnyRetrieval import *
+from .Image.AbsTaskAny2TextMultipleChoice import *
 from .Image.AbsTaskImageClassification import *
 from .Image.AbsTaskImageClustering import *
 from .Image.AbsTaskImageMultilabelClassification import *
diff --git a/mteb/evaluation/evaluators/Image/Any2TextMultipleChoiceEvaluator.py b/mteb/evaluation/evaluators/Image/Any2TextMultipleChoiceEvaluator.py
new file mode 100644
index 000000000..8af1b1d37
--- /dev/null
+++ b/mteb/evaluation/evaluators/Image/Any2TextMultipleChoiceEvaluator.py
@@ -0,0 +1,98 @@ +from __future__ import annotations + +import logging +from typing import Any + +import numpy as np +import torch +from sklearn.metrics import accuracy_score +from sklearn.metrics.pairwise import cosine_similarity +from torchvision import transforms +from tqdm import tqdm + +from mteb.encoder_interface import Encoder, EncoderWithSimilarity +from mteb.evaluation.evaluators.Evaluator import Evaluator + +logger = logging.getLogger(__name__) + +transform = transforms.Compose([transforms.PILToTensor()]) + + +class Any2TextMultipleChoiceEvaluator(Evaluator): + """Evaluate a model based on the similarity of queries (can be interleaved) and candidate answers. + The goal is to find the correct text in multiple candidates that + forms the correct answer of the interleaved query. + + Args: + images: Each row is a list of images. + texts: Each row is a list of captions. + batch_size: Batch size used to compute embeddings + """ + + def __init__( + self, + dataset, + query_modalities: str | list[str], + query_column_names: dict, + label_column_name: str, + choices_column_name: str, + task_name: str | None = None, + transform=None, + limit: int | None = None, + **kwargs, + ): + super().__init__(**kwargs) + if limit: + dataset = dataset.select(range(limit)) + self.dataset = dataset + self.query_modalities = query_modalities + self.query_column_names = query_column_names + self.label_column_name = label_column_name + self.choices_column_name = choices_column_name + self.task_name = task_name + self.transform = transform + + def __call__( + self, + model: Encoder | EncoderWithSimilarity, + encode_kwargs: dict[str, Any] = {}, + ): + if "batch_size" not in encode_kwargs: + encode_kwargs["batch_size"] = 64 + + label_list = list( + set([x for n in self.dataset[self.choices_column_name] for x in n]) + ) + label_embeddings = model.get_text_embeddings(label_list) + label_embedding_dict = {} + for label, embedding in zip(label_list, label_embeddings): + label_embedding_dict[label] = embedding + + if "text" in self.query_modalities: + questions = self.dataset[self.query_column_names["text"]] + else: + questions = None + if "image" in self.query_modalities: + images = self.dataset[self.query_column_names["image"]] + query_embeddings = model.get_fused_embeddings( + texts=questions, + images=images, + batch_size=encode_kwargs["batch_size"], + ) + + answers = self.dataset[self.label_column_name] + choices = self.dataset[self.choices_column_name] + + # note that answers are the indeces + predictions = [] + for q_embedding, choice in tqdm(zip(query_embeddings, choices)): + choice_embeddings = torch.vstack( + [label_embedding_dict[c] for c in choice] + ) # (choice_size, embedding_dim) + q_embedding = q_embedding[np.newaxis, :] + cos_sim = cosine_similarity(q_embedding, choice_embeddings) + predictions.append(np.argmax(cos_sim)) + + metrics = {} + metrics["accuracy"] = accuracy_score(predictions, answers) + return metrics diff --git a/mteb/evaluation/evaluators/__init__.py b/mteb/evaluation/evaluators/__init__.py index ce7da0db5..2fb90b655 100644 --- a/mteb/evaluation/evaluators/__init__.py +++ b/mteb/evaluation/evaluators/__init__.py @@ -4,6 +4,7 @@ from .ClassificationEvaluator import * from .ClusteringEvaluator import * from .Image.Any2AnyRetrievalEvaluator import * +from .Image.Any2TextMultipleChoiceEvaluator import * from .Image.ClassificationEvaluator import * from .Image.ClusteringEvaluator import * from .Image.ImageTextPairClassificationEvaluator import * diff --git 
a/mteb/tasks/Image/Any2TextMultipleChoice/__init__.py b/mteb/tasks/Image/Any2TextMultipleChoice/__init__.py new file mode 100644 index 000000000..e1433ec94 --- /dev/null +++ b/mteb/tasks/Image/Any2TextMultipleChoice/__init__.py @@ -0,0 +1,3 @@ +from __future__ import annotations + +from .eng.CVBench import * diff --git a/mteb/tasks/Image/Any2TextMultipleChoice/eng/CVBench.py b/mteb/tasks/Image/Any2TextMultipleChoice/eng/CVBench.py new file mode 100644 index 000000000..147c9041f --- /dev/null +++ b/mteb/tasks/Image/Any2TextMultipleChoice/eng/CVBench.py @@ -0,0 +1,266 @@ +from __future__ import annotations + +import datasets + +from mteb.abstasks.Image.AbsTaskAny2TextMultipleChoice import ( + AbsTaskAny2TextMultipleChoice, +) +from mteb.abstasks.TaskMetadata import TaskMetadata + + +def transform_choices(example): + mapping = {"(A)": 0, "(B)": 1, "(C)": 2, "(D)": 3, "(E)": 4, "(F)": 5} + example["answer"] = mapping[example["answer"]] + return example + + +class CVBenchCount(AbsTaskAny2TextMultipleChoice): + metadata = TaskMetadata( + name="CVBenchCount", + description="count the number of objects in the image.", + reference="https://arxiv.org/pdf/2406.16860", + dataset={ + "path": "nyu-visionx/CV-Bench", + "revision": "22409a927ab5cf68e3655023d51694587455fc99", + }, + type="Any2TextMutipleChoice", + category="it2t", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2024-01-01", "2024-12-31"), + domains=["Academic"], + task_subtypes=["Question answering"], + license="MIT", + annotations_creators="derived", + dialect=[], + modalities=["text", "image"], + sample_creation="found", + bibtex_citation="""@article{wu2024scimmir, + title={placeholder}, + author={placeholder and others}, + journal={arXiv preprint arXiv:2401.13478}, + year={2024} +}""", + descriptive_stats={ + "n_samples": {"test": 788}, + "avg_character_length": { + "test": { + # to do + } + }, + }, + ) + + def load_data(self, **kwargs): + # if self.data_loaded: + # return + self.dataset = datasets.load_dataset(**self.metadata_dict["dataset"]) + self.dataset_transform() + self.dataset = self.dataset.filter(lambda example: example["task"] == "Count") + self.dataset = self.dataset.map( + transform_choices, + remove_columns=[ + "idx", + "type", + "filename", + "source", + "source_dataset", + "source_filename", + "target_class", + "target_size", + "bbox", + "prompt", + ], + ) + self.data_loaded = True + + +class CVBenchRelation(AbsTaskAny2TextMultipleChoice): + metadata = TaskMetadata( + name="CVBenchRelation", + description="decide the relation of the objects in the image.", + reference="https://arxiv.org/pdf/2406.16860", + dataset={ + "path": "nyu-visionx/CV-Bench", + "revision": "22409a927ab5cf68e3655023d51694587455fc99", + }, + type="Any2TextMutipleChoice", + category="it2t", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2024-01-01", "2024-12-31"), + domains=["Academic"], + task_subtypes=["Question answering"], + license="MIT", + annotations_creators="derived", + dialect=[], + modalities=["text", "image"], + sample_creation="found", + bibtex_citation="""@article{wu2024scimmir, + title={placeholder}, + author={placeholder and others}, + journal={arXiv preprint arXiv:2401.13478}, + year={2024} +}""", + descriptive_stats={ + "n_samples": {"test": 650}, + "avg_character_length": { + "test": { + # to do + } + }, + }, + ) + + def load_data(self, **kwargs): + # if self.data_loaded: + # return + self.dataset = datasets.load_dataset(**self.metadata_dict["dataset"]) + 
self.dataset_transform() + self.dataset = self.dataset.filter( + lambda example: example["task"] == "Relation" + ) + self.dataset = self.dataset.map( + transform_choices, + remove_columns=[ + "idx", + "type", + "filename", + "source", + "source_dataset", + "source_filename", + "target_class", + "target_size", + "bbox", + "prompt", + ], + ) + self.data_loaded = True + + +class CVBenchDepth(AbsTaskAny2TextMultipleChoice): + metadata = TaskMetadata( + name="CVBenchDepth", + description="judge the depth of the objects in the image with similarity matching.", + reference="https://arxiv.org/pdf/2406.16860", + dataset={ + "path": "nyu-visionx/CV-Bench", + "revision": "22409a927ab5cf68e3655023d51694587455fc99", + }, + type="Any2TextMutipleChoice", + category="it2t", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2024-01-01", "2024-12-31"), + domains=["Academic"], + task_subtypes=["Question answering"], + license="MIT", + annotations_creators="derived", + dialect=[], + modalities=["text", "image"], + sample_creation="found", + bibtex_citation="""@article{wu2024scimmir, + title={placeholder}, + author={placeholder and others}, + journal={arXiv preprint arXiv:2401.13478}, + year={2024} +}""", + descriptive_stats={ + "n_samples": {"test": 600}, + "avg_character_length": { + "test": { + # to do + } + }, + }, + ) + + def load_data(self, **kwargs): + # if self.data_loaded: + # return + self.dataset = datasets.load_dataset(**self.metadata_dict["dataset"]) + self.dataset_transform() + self.dataset = self.dataset.filter(lambda example: example["task"] == "Depth") + self.dataset = self.dataset.map( + transform_choices, + remove_columns=[ + "idx", + "type", + "filename", + "source", + "source_dataset", + "source_filename", + "target_class", + "target_size", + "bbox", + "prompt", + ], + ) + self.data_loaded = True + + +class CVBenchDistance(AbsTaskAny2TextMultipleChoice): + metadata = TaskMetadata( + name="CVBenchDistance", + description="judge the distance of the objects in the image with similarity matching.", + reference="https://arxiv.org/pdf/2406.16860", + dataset={ + "path": "nyu-visionx/CV-Bench", + "revision": "22409a927ab5cf68e3655023d51694587455fc99", + }, + type="Any2TextMutipleChoice", + category="it2t", + eval_splits=["test"], + eval_langs=["eng-Latn"], + main_score="accuracy", + date=("2024-01-01", "2024-12-31"), + domains=["Academic"], + task_subtypes=["Question answering"], + license="MIT", + annotations_creators="derived", + dialect=[], + modalities=["text", "image"], + sample_creation="found", + bibtex_citation="""@article{tong2024cambrian, + title={Cambrian-1: A fully open, vision-centric exploration of multimodal llms}, + author={Tong, Shengbang and Brown, Ellis and Wu, Penghao and Woo, Sanghyun and Middepogu, Manoj and Akula, Sai Charitha and Yang, Jihan and Yang, Shusheng and Iyer, Adithya and Pan, Xichen and others}, + journal={arXiv preprint arXiv:2406.16860}, + year={2024} +}""", + descriptive_stats={ + "n_samples": {"test": 600}, + "avg_character_length": { + "test": { + # to do + } + }, + }, + ) + + def load_data(self, **kwargs): + # if self.data_loaded: + # return + self.dataset = datasets.load_dataset(**self.metadata_dict["dataset"]) + self.dataset_transform() + self.dataset = self.dataset.filter( + lambda example: example["task"] == "Distance" + ) + self.dataset = self.dataset.map( + transform_choices, + remove_columns=[ + "idx", + "type", + "filename", + "source", + "source_dataset", + "source_filename", + "target_class", + "target_size", + 
"bbox", + "prompt", + ], + ) + self.data_loaded = True diff --git a/mteb/tasks/Image/Any2TextMultipleChoice/eng/__init__.py b/mteb/tasks/Image/Any2TextMultipleChoice/eng/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/mteb/tasks/Image/__init__.py b/mteb/tasks/Image/__init__.py index cf632fe73..845cc136f 100644 --- a/mteb/tasks/Image/__init__.py +++ b/mteb/tasks/Image/__init__.py @@ -1,6 +1,7 @@ from __future__ import annotations from .Any2AnyRetrieval import * +from .Any2TextMultipleChoice import * from .Clustering import * from .ImageClassification import * from .ImageMultilabelClassification import * diff --git a/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchCount.json b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchCount.json new file mode 100644 index 000000000..2a11efd94 --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchCount.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 12.502729654312134, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.2017766497461929, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.2017766497461929 + } + ] + }, + "task_name": "CVBenchCount" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchDepth.json b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchDepth.json new file mode 100644 index 000000000..b420a0688 --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchDepth.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 9.667015790939331, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.5116666666666667, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.5116666666666667 + } + ] + }, + "task_name": "CVBenchDepth" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchDistance.json b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchDistance.json new file mode 100644 index 000000000..742d2cc2d --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchDistance.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 9.924455404281616, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.4633333333333333, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.4633333333333333 + } + ] + }, + "task_name": "CVBenchDistance" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchRelation.json b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchRelation.json new file mode 100644 index 000000000..7c9c4d123 --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch16/57c216476eefef5ab752ec549e440a49ae4ae5f3/CVBenchRelation.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 6.079412221908569, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + 
"accuracy": 0.5030769230769231, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.5030769230769231 + } + ] + }, + "task_name": "CVBenchRelation" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchCount.json b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchCount.json new file mode 100644 index 000000000..a888119c3 --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchCount.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 11.933090686798096, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.06598984771573604, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.06598984771573604 + } + ] + }, + "task_name": "CVBenchCount" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchDepth.json b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchDepth.json new file mode 100644 index 000000000..cb647a340 --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchDepth.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 8.507234334945679, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.4533333333333333, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.4533333333333333 + } + ] + }, + "task_name": "CVBenchDepth" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchDistance.json b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchDistance.json new file mode 100644 index 000000000..d8246da01 --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchDistance.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 8.697332382202148, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.46166666666666667, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.46166666666666667 + } + ] + }, + "task_name": "CVBenchDistance" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchRelation.json b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchRelation.json new file mode 100644 index 000000000..02145addc --- /dev/null +++ b/results-mieb/openai__clip-vit-base-patch32/3d74acf9a28c67741b2f4f2ea7635f0aaf6f0268/CVBenchRelation.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 4.9446187019348145, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.4907692307692308, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.4907692307692308 + } + ] + }, + "task_name": "CVBenchRelation" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchCount.json b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchCount.json new 
file mode 100644 index 000000000..d69a7b62f --- /dev/null +++ b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchCount.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 18.0276620388031, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.027918781725888325, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.027918781725888325 + } + ] + }, + "task_name": "CVBenchCount" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchDepth.json b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchDepth.json new file mode 100644 index 000000000..f6e87d0e0 --- /dev/null +++ b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchDepth.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 14.530367136001587, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.5266666666666666, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.5266666666666666 + } + ] + }, + "task_name": "CVBenchDepth" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchDistance.json b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchDistance.json new file mode 100644 index 000000000..f8d0a0fc6 --- /dev/null +++ b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchDistance.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 14.808384895324707, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.47, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.47 + } + ] + }, + "task_name": "CVBenchDistance" +} \ No newline at end of file diff --git a/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchRelation.json b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchRelation.json new file mode 100644 index 000000000..21cf37be5 --- /dev/null +++ b/results-mieb/openai__clip-vit-large-patch14/32bd64288804d66eefd0ccbe215aa642df71cc41/CVBenchRelation.json @@ -0,0 +1,19 @@ +{ + "dataset_revision": "default", + "evaluation_time": 11.672912359237671, + "kg_co2_emissions": null, + "mteb_version": "1.14.15", + "scores": { + "test": [ + { + "accuracy": 0.5092307692307693, + "hf_subset": "default", + "languages": [ + "eng-Latn" + ], + "main_score": 0.5092307692307693 + } + ] + }, + "task_name": "CVBenchRelation" +} \ No newline at end of file From d951d259049638fb629933f9a0ace167baf345ca Mon Sep 17 00:00:00 2001 From: gowitheflow-1998 Date: Thu, 10 Oct 2024 19:14:49 +0100 Subject: [PATCH 5/6] evaluator args comment --- .../evaluators/Image/Any2TextMultipleChoiceEvaluator.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mteb/evaluation/evaluators/Image/Any2TextMultipleChoiceEvaluator.py b/mteb/evaluation/evaluators/Image/Any2TextMultipleChoiceEvaluator.py index 8af1b1d37..f682225ba 100644 --- a/mteb/evaluation/evaluators/Image/Any2TextMultipleChoiceEvaluator.py +++ b/mteb/evaluation/evaluators/Image/Any2TextMultipleChoiceEvaluator.py @@ -24,9 +24,10 @@ class 
Any2TextMultipleChoiceEvaluator(Evaluator):
     forms the correct answer of the interleaved query.

     Args:
-        images: Each row is a list of images.
-        texts: Each row is a list of captions.
-        batch_size: Batch size used to compute embeddings
+        query_modalities: the modalities of the queries; currently supports image, text, or both.
+        query_column_names: column names of the queries; should align with the query modalities.
+        label_column_name: column name of the labels.
+        choices_column_name: column name of the candidate choices.
     """

     def __init__(

From e817e3f65e470936108001b25beec18af7ce6117 Mon Sep 17 00:00:00 2001
From: gowitheflow-1998
Date: Thu, 10 Oct 2024 20:44:10 +0100
Subject: [PATCH 6/6] fix

---
 .../Image/Any2TextMultipleChoice/eng/CVBench.py | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/mteb/tasks/Image/Any2TextMultipleChoice/eng/CVBench.py b/mteb/tasks/Image/Any2TextMultipleChoice/eng/CVBench.py
index 147c9041f..e42ec28f7 100644
--- a/mteb/tasks/Image/Any2TextMultipleChoice/eng/CVBench.py
+++ b/mteb/tasks/Image/Any2TextMultipleChoice/eng/CVBench.py
@@ -28,7 +28,7 @@ class CVBenchCount(AbsTaskAny2TextMultipleChoice):
         eval_splits=["test"],
         eval_langs=["eng-Latn"],
         main_score="accuracy",
-        date=("2024-01-01", "2024-12-31"),
+        date=("2024-01-01", "2024-06-24"),
         domains=["Academic"],
         task_subtypes=["Question answering"],
         license="MIT",
@@ -53,8 +53,6 @@ class CVBenchCount(AbsTaskAny2TextMultipleChoice):
     )

     def load_data(self, **kwargs):
-        # if self.data_loaded:
-        # return
         self.dataset = datasets.load_dataset(**self.metadata_dict["dataset"])
         self.dataset_transform()
         self.dataset = self.dataset.filter(lambda example: example["task"] == "Count")
@@ -90,7 +88,7 @@ class CVBenchRelation(AbsTaskAny2TextMultipleChoice):
         eval_splits=["test"],
         eval_langs=["eng-Latn"],
         main_score="accuracy",
-        date=("2024-01-01", "2024-12-31"),
+        date=("2024-01-01", "2024-06-24"),
         domains=["Academic"],
         task_subtypes=["Question answering"],
         license="MIT",
@@ -115,8 +113,6 @@ class CVBenchRelation(AbsTaskAny2TextMultipleChoice):
     )

     def load_data(self, **kwargs):
-        # if self.data_loaded:
-        # return
         self.dataset = datasets.load_dataset(**self.metadata_dict["dataset"])
         self.dataset_transform()
         self.dataset = self.dataset.filter(
@@ -154,7 +150,7 @@ class CVBenchDepth(AbsTaskAny2TextMultipleChoice):
         eval_splits=["test"],
         eval_langs=["eng-Latn"],
         main_score="accuracy",
-        date=("2024-01-01", "2024-12-31"),
+        date=("2024-01-01", "2024-06-24"),
         domains=["Academic"],
         task_subtypes=["Question answering"],
         license="MIT",
@@ -179,8 +175,6 @@ class CVBenchDepth(AbsTaskAny2TextMultipleChoice):
     )

     def load_data(self, **kwargs):
-        # if self.data_loaded:
-        # return
         self.dataset = datasets.load_dataset(**self.metadata_dict["dataset"])
         self.dataset_transform()
         self.dataset = self.dataset.filter(lambda example: example["task"] == "Depth")
@@ -216,7 +210,7 @@ class CVBenchDistance(AbsTaskAny2TextMultipleChoice):
         eval_splits=["test"],
         eval_langs=["eng-Latn"],
         main_score="accuracy",
-        date=("2024-01-01", "2024-12-31"),
+        date=("2024-01-01", "2024-06-24"),
         domains=["Academic"],
         task_subtypes=["Question answering"],
         license="MIT",
@@ -241,8 +235,6 @@ class CVBenchDistance(AbsTaskAny2TextMultipleChoice):
     )

     def load_data(self, **kwargs):
-        # if self.data_loaded:
-        # return
         self.dataset = datasets.load_dataset(**self.metadata_dict["dataset"])
         self.dataset_transform()
         self.dataset = self.dataset.filter(