26 changes: 26 additions & 0 deletions docs/mieb-docs/run_vista.md
@@ -0,0 +1,26 @@
## Set up VISTA

The latest FlagEmbedding repo no longer supports VISTA, so we pin an older version:
```
git clone --no-checkout https://github.com/FlagOpen/FlagEmbedding.git
cd FlagEmbedding
git checkout 5c9260277977f8f8e256e56a8e12387552693af9
pip install -e .
pip install torchvision timm einops ftfy
```
Download the vision tower for bge-base:
```
wget https://huggingface.co/BAAI/bge-visualized/resolve/main/Visualized_base_en_v1.5.pth?download=true
```
Rename it to `visualized_base_en_V1.5.pth`:
```
mv Visualized_base_en_v1.5.pth?download=true visualized_base_en_V1.5.pth
```
Download the vision tower for bge-m3:
```
wget https://huggingface.co/BAAI/bge-visualized/resolve/main/Visualized_m3.pth?download=true
```
Rename it to `visualized_m3.pth`:
```
mv Visualized_m3.pth?download=true visualized_m3.pth
```
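To verify the setup, a minimal smoke test along these lines should work. This assumes the pinned commit exposes `Visualized_BGE` as documented on the BAAI/bge-visualized model card; adjust the weight path to wherever you saved the renamed checkpoint.
```
import torch
from FlagEmbedding.visual.modeling import Visualized_BGE

# Assumed API from the bge-visualized model card; the weight path is the
# renamed checkpoint from the step above.
model = Visualized_BGE(
    model_name_bge="BAAI/bge-base-en-v1.5",
    model_weight="visualized_base_en_V1.5.pth",
)
model.eval()
with torch.no_grad():
    emb = model.encode(text="a photo of a cat")
print(emb.shape)
```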
@@ -4,6 +4,7 @@
import io
import json
import logging
import math
import os
from collections import defaultdict
from typing import Any
@@ -132,7 +133,7 @@ def search(
batch_size=self.encode_kwargs["batch_size"],
shuffle=False,
collate_fn=custom_collate_fn,
num_workers=max(1, os.cpu_count() // 2),
num_workers=min(math.floor(os.cpu_count() / 2), 16),
)
if q_modality == "image":
query_embeddings = self.model.get_image_embeddings(
@@ -182,7 +183,7 @@ def search(
batch_size=self.encode_kwargs["batch_size"],
shuffle=False,
collate_fn=custom_collate_fn,
num_workers=max(1, os.cpu_count() // 2),
num_workers=min(math.floor(os.cpu_count() / 2), 16),
)
if corpus_modality == "image":
sub_corpus_embeddings = self.model.get_image_embeddings(
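For context, the replacement heuristic halves the visible CPUs but caps DataLoader workers at 16, so large hosts no longer oversubscribe loader processes. A standalone sketch of the same logic (the helper name is ours, not the PR's):
```
import math
import os

def dataloader_workers(cap: int = 16) -> int:
    # Half the visible CPUs, capped; os.cpu_count() can return None,
    # so fall back to 1 before dividing.
    cpus = os.cpu_count() or 1
    return min(math.floor(cpus / 2), cap)

print(dataloader_workers())  # 16 on a 64-core host, 4 on an 8-core one
```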
5 changes: 3 additions & 2 deletions mteb/evaluation/evaluators/Image/Any2AnyRetrievalEvaluator.py
@@ -4,6 +4,7 @@
import io
import json
import logging
import math
import os
from collections import defaultdict
from typing import Any
@@ -131,7 +132,7 @@ def search(
batch_size=self.encode_kwargs["batch_size"],
shuffle=False,
collate_fn=custom_collate_fn,
num_workers=max(1, os.cpu_count() // 2),
num_workers=min(math.floor(os.cpu_count() / 2), 16),
)
if q_modality == "image":
query_embeddings = self.model.get_image_embeddings(
@@ -181,7 +182,7 @@ def search(
batch_size=self.encode_kwargs["batch_size"],
shuffle=False,
collate_fn=custom_collate_fn,
num_workers=max(1, os.cpu_count() // 2),
num_workers=min(math.floor(os.cpu_count() / 2), 16),
)
if corpus_modality == "image":
sub_corpus_embeddings = self.model.get_image_embeddings(
@@ -1,6 +1,8 @@
from __future__ import annotations

import logging
import math
import os
from typing import Any

import torch
@@ -106,7 +108,7 @@ def __call__(
shuffle=False,
# collate_fn=lambda x: x, # Identity collate function
collate_fn=custom_collate_fn,
num_workers=4,
num_workers=min(math.floor(os.cpu_count() / 2), 16),
)

num_images_per_sample = (
4 changes: 2 additions & 2 deletions mteb/evaluation/evaluators/Image/VisualSTSEvaluator.py
@@ -78,14 +78,14 @@ def __call__(
batch_size=encode_kwargs["batch_size"],
shuffle=False,
collate_fn=custom_collate_fn,
num_workers=math.floor(os.cpu_count() / 2),
num_workers=min(math.floor(os.cpu_count() / 2), 16),
)
sentence2_dataloader = DataLoader(
self.sentence2_dataset,
batch_size=encode_kwargs["batch_size"],
shuffle=False,
collate_fn=custom_collate_fn,
num_workers=math.floor(os.cpu_count() / 2),
num_workers=min(math.floor(os.cpu_count() / 2), 16),
)

embeddings1 = model.get_image_embeddings(
@@ -1,6 +1,8 @@
from __future__ import annotations

import logging
import math
import os
from typing import Any

import torch
@@ -66,7 +68,7 @@ def __call__(self, model: Encoder, *, encode_kwargs: dict[str, Any] = {}):
batch_size=encode_kwargs["batch_size"],
shuffle=False,
collate_fn=custom_collate_fn,
num_workers=16,
num_workers=min(math.floor(os.cpu_count() / 2), 16),
)

text_embeddings = model.get_text_embeddings(
9 changes: 5 additions & 4 deletions mteb/models/cohere_v.py
@@ -1,5 +1,9 @@
from __future__ import annotations

import base64
import io
import os
import time
from functools import partial
from typing import Any

@@ -8,11 +12,8 @@
from torch.utils.data import DataLoader
from torchvision import transforms
from tqdm import tqdm
import os
import io
import base64

import mteb
import time
from mteb.model_meta import ModelMeta

api_key = os.getenv("COHERE_API_KEY")
2 changes: 2 additions & 0 deletions mteb/models/e5_v.py
@@ -21,6 +21,8 @@ def __init__(
):
self.model_name = model_name
self.processor = LlavaNextProcessor.from_pretrained(model_name)
if "device" in kwargs:
self.device = kwargs.pop("device")
self.model = LlavaNextForConditionalGeneration.from_pretrained(
model_name, **kwargs
)
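The `device` pop keeps the kwarg out of `from_pretrained`, which takes `device_map` rather than a bare `device` argument. A hypothetical standalone version of the pattern (the wrapper presumably moves the model to the stored device later; that step is not shown in this hunk):
```
import torch
from transformers import LlavaNextForConditionalGeneration

def load_e5v(model_name: str, **kwargs):
    # Capture `device` before from_pretrained sees it, then move explicitly.
    device = kwargs.pop("device", "cuda" if torch.cuda.is_available() else "cpu")
    model = LlavaNextForConditionalGeneration.from_pretrained(model_name, **kwargs)
    return model.to(device)
```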
2 changes: 1 addition & 1 deletion mteb/models/evaclip_models.py
@@ -13,8 +13,8 @@

def evaclip_loader(**kwargs):
try:
import sys
import os
import sys

sys.path.insert(0, os.path.join(os.getcwd(), "EVA/EVA-CLIP/rei"))

8 changes: 8 additions & 0 deletions scripts/run_mieb.py
@@ -43,6 +43,14 @@
"laion/CLIP-ViT-B-32-laion2B-s34B-b79K",
"TIGER-Lab/VLM2Vec-LoRA",
"TIGER-Lab/VLM2Vec-Full",
"Salesforce/blip-itm-base-coco",
"Salesforce/blip-itm-large-coco",
"Salesforce/blip-itm-base-flickr",
"Salesforce/blip-itm-large-flickr",
"EVA02-CLIP-B-16",
"EVA02-CLIP-L-14",
"EVA02-CLIP-bigE-14",
"EVA02-CLIP-bigE-14-plus",
# "embed-english-v3.0-v", # not feasible to run due to the 40 images/min constraint
]:
model = mteb.get_model(model_name)
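With the new entries in place, each name goes through the same loop; a hypothetical single-model run mirroring it would look like this (the task name is a placeholder, not taken from the script):
```
import mteb

model = mteb.get_model("Salesforce/blip-itm-base-coco")
tasks = mteb.get_tasks(tasks=["SomeImageTextTask"])  # placeholder task name
evaluation = mteb.MTEB(tasks=tasks)
evaluation.run(model, output_folder="results")
```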