Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
109 changes: 109 additions & 0 deletions mteb/models/kalm_models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
from __future__ import annotations

from functools import partial

from mteb.model_meta import ModelMeta
from mteb.models.instruct_wrapper import InstructSentenceTransformerWrapper


# Instruction prefixes prepended to the query side of each MTEB task type.
# Classification-style tasks (including clustering) share one instruction and
# retrieval/reranking queries share another; only query-side prompts are
# defined here. The exact strings (including the " \n " spacing) must match
# what the model was trained with, so they are kept verbatim.
_CLASSIFY_PROMPT = "Instruct: classify the query into different classes. \n Query: "
_RETRIEVE_PROMPT = "Instruct: Given a query, retrieve documents that answer the query. \n Query: "

MODEL_PROMPTS = {
    "Classification": _CLASSIFY_PROMPT,
    "MultilabelClassification": _CLASSIFY_PROMPT,
    "Clustering": _CLASSIFY_PROMPT,
    "Reranking-query": _RETRIEVE_PROMPT,
    "Retrieval-query": _RETRIEVE_PROMPT,
}

# Training datasets of the KaLM embedding models, taken from the technical
# report. Only datasets that exist in MTEB are listed as keys; every one of
# them was used via its "train" split, so the mapping is built uniformly.
#
# Training data from the report that is NOT part of MTEB (recorded here for
# completeness, not machine-readable):
#   ExpertQA, MEDI2BGE, OpenOrca, PAQ, PubMedQA, SearchQA, arxiv_qa,
#   rag-dataset-12000, CC-News, SQuAD 2.0, TriviaQA, WebGPT Comparisons,
#   MultiNLI, NLLB, WikiAnswers, SimCSE NLI, SNLI, Aya Dataset, eli5
kalm_training_data = {
    task_name: ["train"]
    for task_name in (
        "CodeFeedbackMT",
        "CodeFeedbackST",
        "ArxivClusteringP2P",
        "ArxivClusteringS2S",
        "ArxivClusteringP2P.v2",
        "TRECCOVID",
        "DBPedia",
        "ESCIReranking",
        "FEVER",
        "FiQA2018",
        "FEVERHardNegatives",
        "NanoFEVERRetrieval",
        "HotpotQAHardNegatives",
        "MultiLongDocRetrieval",
        "MSMARCO",
        "MSMARCOHardNegatives",
        "NanoMSMARCORetrieval",
        "MSMARCOv2",
        "NFCorpus",
        "SciFact",
        "NQ",
        "NQHardNegatives",
        "NanoNQRetrieval",
        "QuoraRetrieval",
        "NanoQuoraRetrieval",
        "BiorxivClusteringP2P.v2",
        "BiorxivClusteringS2S.v2",
        "MedrxivClusteringP2P.v2",
        "MedrxivClusteringS2S.v2",
        "Banking77Classification",
        "AmazonPolarityClassification",
        "ImdbClassification",
        "EmotionClassification",
        "TweetSentimentExtractionClassification",
        "ToxicConversationsClassification",
        "MIRACLRetrieval",
        "MIRACLRetrievalHardNegatives",
        "MIRACLReranking",
        "MrTidyRetrieval",
        "PawsXPairClassification",
        "AmazonReviewsClassification",
        "AmazonCounterfactualClassification",
        "MultilingualSentiment",
        "MassiveIntentClassification",
        "MassiveScenarioClassification",
        "MTOPDomainClassification",
        "MTOPIntentClassification",
    )
}

# Metadata entry for the closed-weights KaLM-Embedding-X (2025-06-05
# snapshot). `loader` is None because the weights are not publicly
# released (open_weights=False); the entry exists so MTEB can attribute
# leaderboard results and track training-data overlap via
# `training_datasets`.
#
# NOTE(review): the original paste had a GitHub PR conversation spliced
# into the middle of this call, which made the module syntactically
# invalid; the call below is the reconstructed, contiguous definition.
KaLM_Embedding_X_0605 = ModelMeta(
    name="KaLM-Team/KaLM-Embedding-X-0605",
    loader=None,  # no runnable implementation registered for this closed model
    languages=None,
    open_weights=False,
    revision="1",
    release_date="2025-06-05",
    n_parameters=9.24 * 1e9,  # 9.24B parameters
    memory_usage_mb=35254,
    max_tokens=8192,
    embed_dim=3584,
    license=None,
    reference="https://github.com/KaLM-Team/KaLM-Embedding-X",
    similarity_fn_name="cosine",
    framework=["Sentence Transformers", "PyTorch"],
    use_instructions=True,
    public_training_code="https://github.com/HITsz-TMG/KaLM-Embedding",
    public_training_data=None,
    training_datasets=kalm_training_data,
)
4 changes: 2 additions & 2 deletions mteb/models/misc_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -243,7 +243,7 @@
similarity_fn_name="cosine",
use_instructions=None,
training_datasets=kalm_training_data,
adapted_from="/mnt/shgeminicephfs/wx-dc-plt-hpc/xinshuohu/Output/Embedding/Qwen2-0.5B-eos_mean_pretrain_0806_1e-4_uen_sft_1022_filtered_v2_inst_3node_g8_1e-5_sin-0.1_mrl",
adapted_from="Qwen/Qwen2-0.5B",
superseded_by=None,
)
HIT_TMG__KaLM_embedding_multilingual_mini_v1 = ModelMeta(
Expand All @@ -265,7 +265,7 @@
similarity_fn_name="cosine",
use_instructions=None,
training_datasets=kalm_training_data,
adapted_from="/mnt/shgeminicephfs/wx-dc-plt-hpc/xinshuohu/Output/Embedding/Qwen2-0.5B-eos_mean_pretrain_0806_1e-4_uen_sft_0902_filtered_v2_3node_g8_1e-5_sin-0.1",
adapted_from="Qwen/Qwen2-0.5B",
superseded_by=None,
)
Hum_Works__lodestone_base_4096_v1 = ModelMeta(
Expand Down
2 changes: 2 additions & 0 deletions mteb/models/overview.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,6 +47,7 @@
jasper_models,
jina_clip,
jina_models,
kalm_models,
lens_models,
linq_models,
llm2clip_models,
Expand Down Expand Up @@ -122,6 +123,7 @@
jasper_models,
jina_models,
jina_clip,
kalm_models,
lens_models,
linq_models,
llm2clip_models,
Expand Down