diff --git a/mteb/descriptive_stats/Retrieval/NanoClimateFEVER-VN.json b/mteb/descriptive_stats/Retrieval/NanoClimateFEVER-VN.json new file mode 100644 index 0000000000..70b7e21eeb --- /dev/null +++ b/mteb/descriptive_stats/Retrieval/NanoClimateFEVER-VN.json @@ -0,0 +1,30 @@ +{ + "test": { + "num_samples": 102198, + "number_of_characters": 47870352, + "documents_text_statistics": { + "total_text_length": 47719757, + "min_text_length": 9, + "average_text_length": 472.01951591046225, + "max_text_length": 8686, + "unique_texts": 101097 + }, + "documents_image_statistics": null, + "queries_text_statistics": { + "total_text_length": 150595, + "min_text_length": 30, + "average_text_length": 136.78019981834694, + "max_text_length": 404, + "unique_texts": 1099 + }, + "queries_image_statistics": null, + "relevant_docs_statistics": { + "num_relevant_docs": 3401, + "min_relevant_docs_per_query": 1, + "average_relevant_docs_per_query": 3.089009990917348, + "max_relevant_docs_per_query": 5, + "unique_relevant_docs": 1123 + }, + "top_ranked_statistics": null + } +} diff --git a/mteb/descriptive_stats/Retrieval/NanoDBPedia-VN.json b/mteb/descriptive_stats/Retrieval/NanoDBPedia-VN.json new file mode 100644 index 0000000000..8c93cd39bf --- /dev/null +++ b/mteb/descriptive_stats/Retrieval/NanoDBPedia-VN.json @@ -0,0 +1,30 @@ +{ + "test": { + "num_samples": 132137, + "number_of_characters": 43323279, + "documents_text_statistics": { + "total_text_length": 43311486, + "min_text_length": 11, + "average_text_length": 328.5778249819823, + "max_text_length": 8576, + "unique_texts": 131814 + }, + "documents_image_statistics": null, + "queries_text_statistics": { + "total_text_length": 11793, + "min_text_length": 6, + "average_text_length": 36.62422360248447, + "max_text_length": 100, + "unique_texts": 321 + }, + "queries_image_statistics": null, + "relevant_docs_statistics": { + "num_relevant_docs": 11620, + "min_relevant_docs_per_query": 31, + "average_relevant_docs_per_query": 
36.08695652173913, + "max_relevant_docs_per_query": 1288, + "unique_relevant_docs": 32537 + }, + "top_ranked_statistics": null + } +} diff --git a/mteb/descriptive_stats/Retrieval/NanoFEVER-VN.json b/mteb/descriptive_stats/Retrieval/NanoFEVER-VN.json new file mode 100644 index 0000000000..7bc61e06b6 --- /dev/null +++ b/mteb/descriptive_stats/Retrieval/NanoFEVER-VN.json @@ -0,0 +1,30 @@ +{ + "test": { + "num_samples": 106558, + "number_of_characters": 48164581, + "documents_text_statistics": { + "total_text_length": 47886101, + "min_text_length": 9, + "average_text_length": 472.6783768310499, + "max_text_length": 8689, + "unique_texts": 101308 + }, + "documents_image_statistics": null, + "queries_text_statistics": { + "total_text_length": 278480, + "min_text_length": 11, + "average_text_length": 53.04380952380952, + "max_text_length": 196, + "unique_texts": 5124 + }, + "queries_image_statistics": null, + "relevant_docs_statistics": { + "num_relevant_docs": 6254, + "min_relevant_docs_per_query": 1, + "average_relevant_docs_per_query": 1.1912380952380952, + "max_relevant_docs_per_query": 15, + "unique_relevant_docs": 1324 + }, + "top_ranked_statistics": null + } +} diff --git a/mteb/descriptive_stats/Retrieval/NanoHotpotQA-VN.json b/mteb/descriptive_stats/Retrieval/NanoHotpotQA-VN.json new file mode 100644 index 0000000000..90c8884bb4 --- /dev/null +++ b/mteb/descriptive_stats/Retrieval/NanoHotpotQA-VN.json @@ -0,0 +1,30 @@ +{ + "test": { + "num_samples": 117974, + "number_of_characters": 35927363, + "documents_text_statistics": { + "total_text_length": 35335613, + "min_text_length": 22, + "average_text_length": 316.47705838625023, + "max_text_length": 4105, + "unique_texts": 111651 + }, + "documents_image_statistics": null, + "queries_text_statistics": { + "total_text_length": 591750, + "min_text_length": 21, + "average_text_length": 93.61651637399146, + "max_text_length": 280, + "unique_texts": 6321 + }, + "queries_image_statistics": null, + 
"relevant_docs_statistics": { + "num_relevant_docs": 12642, + "min_relevant_docs_per_query": 2, + "average_relevant_docs_per_query": 2.0, + "max_relevant_docs_per_query": 2, + "unique_relevant_docs": 11874 + }, + "top_ranked_statistics": null + } +} diff --git a/mteb/descriptive_stats/Retrieval/NanoMSMARCO-VN.json b/mteb/descriptive_stats/Retrieval/NanoMSMARCO-VN.json new file mode 100644 index 0000000000..37c72f1bd6 --- /dev/null +++ b/mteb/descriptive_stats/Retrieval/NanoMSMARCO-VN.json @@ -0,0 +1,30 @@ +{ + "dev": { + "num_samples": 107153, + "number_of_characters": 33316879, + "documents_text_statistics": { + "total_text_length": 33200903, + "min_text_length": 2, + "average_text_length": 320.30199218561575, + "max_text_length": 1712, + "unique_texts": 103641 + }, + "documents_image_statistics": null, + "queries_text_statistics": { + "total_text_length": 115976, + "min_text_length": 8, + "average_text_length": 33.15494568324757, + "max_text_length": 190, + "unique_texts": 3498 + }, + "queries_image_statistics": null, + "relevant_docs_statistics": { + "num_relevant_docs": 3700, + "min_relevant_docs_per_query": 1, + "average_relevant_docs_per_query": 1.0577472841623785, + "max_relevant_docs_per_query": 4, + "unique_relevant_docs": 3698 + }, + "top_ranked_statistics": null + } +} diff --git a/mteb/descriptive_stats/Retrieval/NanoNQ-VN.json b/mteb/descriptive_stats/Retrieval/NanoNQ-VN.json new file mode 100644 index 0000000000..801bc2626e --- /dev/null +++ b/mteb/descriptive_stats/Retrieval/NanoNQ-VN.json @@ -0,0 +1,30 @@ +{ + "test": { + "num_samples": 104095, + "number_of_characters": 52312680, + "documents_text_statistics": { + "total_text_length": 52220289, + "min_text_length": 10, + "average_text_length": 510.98673124908265, + "max_text_length": 10245, + "unique_texts": 102181 + }, + "documents_image_statistics": null, + "queries_text_statistics": { + "total_text_length": 92391, + "min_text_length": 22, + "average_text_length": 48.62684210526316, + 
"max_text_length": 113, + "unique_texts": 1900 + }, + "queries_image_statistics": null, + "relevant_docs_statistics": { + "num_relevant_docs": 2283, + "min_relevant_docs_per_query": 1, + "average_relevant_docs_per_query": 1.201578947368421, + "max_relevant_docs_per_query": 4, + "unique_relevant_docs": 2283 + }, + "top_ranked_statistics": null + } +} diff --git a/mteb/descriptive_stats/Retrieval/TVPLRetrieval.json b/mteb/descriptive_stats/Retrieval/TVPLRetrieval.json new file mode 100644 index 0000000000..b0594b891d --- /dev/null +++ b/mteb/descriptive_stats/Retrieval/TVPLRetrieval.json @@ -0,0 +1,30 @@ +{ + "test": { + "num_samples": 20561, + "number_of_characters": 10832770, + "documents_text_statistics": { + "total_text_length": 9929303, + "min_text_length": 9, + "average_text_length": 938.8524016641452, + "max_text_length": 6319, + "unique_texts": 10573 + }, + "documents_image_statistics": null, + "queries_text_statistics": { + "total_text_length": 903467, + "min_text_length": 13, + "average_text_length": 90.48242363545317, + "max_text_length": 228, + "unique_texts": 9985 + }, + "queries_image_statistics": null, + "relevant_docs_statistics": { + "num_relevant_docs": 11158, + "min_relevant_docs_per_query": 1, + "average_relevant_docs_per_query": 1.1174762143214823, + "max_relevant_docs_per_query": 8, + "unique_relevant_docs": 10576 + }, + "top_ranked_statistics": null + } +} \ No newline at end of file diff --git a/mteb/tasks/retrieval/vie/__init__.py b/mteb/tasks/retrieval/vie/__init__.py index 8956f3ad6b..8e7a49624d 100644 --- a/mteb/tasks/retrieval/vie/__init__.py +++ b/mteb/tasks/retrieval/vie/__init__.py @@ -1,5 +1,5 @@ from .argu_ana_vn_retrieval import ArguAnaVN -from .climate_fevervn_retrieval import ClimateFEVERVN +from .climate_fevervn_retrieval import ClimateFEVERVN, NanoClimateFEVERVN from .cqa_dupstack_android_vn_retrieval import CQADupstackAndroidVN from .cqa_dupstack_gis_vn_retrieval import CQADupstackGisVN from 
.cqa_dupstack_mathematica_vn_retrieval import CQADupstackMathematicaVN @@ -10,19 +10,20 @@ from .cqa_dupstack_unix_vn_retrieval import CQADupstackUnixVN from .cqa_dupstack_webmasters_vn_retrieval import CQADupstackWebmastersVN from .cqa_dupstack_wordpress_vn_retrieval import CQADupstackWordpressVN -from .db_pedia_vn_retrieval import DBPediaVN -from .fevervn_retrieval import FEVERVN +from .db_pedia_vn_retrieval import DBPediaVN, NanoDBPediaVN +from .fevervn_retrieval import FEVERVN, NanoFEVERVN from .fi_qa2018_vn_retrieval import FiQA2018VN from .green_node_table_markdown_retrieval import GreenNodeTableMarkdownRetrieval -from .hotpot_qavn_retrieval import HotpotQAVN -from .msmarcovn_retrieval import MSMARCOVN +from .hotpot_qavn_retrieval import HotpotQAVN, NanoHotpotQAVN +from .msmarcovn_retrieval import MSMARCOVN, NanoMSMARCOVN from .nf_corpus_vn_retrieval import NFCorpusVN -from .nqvn_retrieval import NQVN +from .nqvn_retrieval import NQVN, NanoNQVN from .quora_vn_retrieval import QuoraVN from .sci_fact_vn_retrieval import SciFactVN from .scidocsvn_retrieval import SCIDOCSVN from .touche2020_vn_retrieval import Touche2020VN from .treccovidvn_retrieval import TRECCOVIDVN +from .tvpl_retrieval import TVPLRetrieval from .vie_qu_ad_retrieval import VieQuADRetrieval from .zac_legal_text_retrieval import ZacLegalTextRetrieval @@ -49,8 +50,15 @@ "GreenNodeTableMarkdownRetrieval", "HotpotQAVN", "NFCorpusVN", + "NanoClimateFEVERVN", + "NanoDBPediaVN", + "NanoFEVERVN", + "NanoHotpotQAVN", + "NanoMSMARCOVN", + "NanoNQVN", "QuoraVN", "SciFactVN", + "TVPLRetrieval", "Touche2020VN", "VieQuADRetrieval", "ZacLegalTextRetrieval", diff --git a/mteb/tasks/retrieval/vie/climate_fevervn_retrieval.py b/mteb/tasks/retrieval/vie/climate_fevervn_retrieval.py index 0338ac4bdd..83a409579a 100644 --- a/mteb/tasks/retrieval/vie/climate_fevervn_retrieval.py +++ b/mteb/tasks/retrieval/vie/climate_fevervn_retrieval.py @@ -36,3 +36,42 @@ class ClimateFEVERVN(AbsTaskRetrieval): """, 
adapted_from=["ClimateFEVER"], ) + + +class NanoClimateFEVERVN(AbsTaskRetrieval): + metadata = TaskMetadata( + name="NanoClimateFEVER-VN", + description="NanoClimateFEVERVN is a small version of A translated dataset from CLIMATE-FEVER is a dataset adopting the FEVER methodology that consists of 1,535 real-world claims regarding climate-change. The process of creating the VN-MTEB (Vietnamese Massive Text Embedding Benchmark) from English samples involves a new automated system: - The system uses large language models (LLMs), specifically Coherence's Aya model, for translation. - Applies advanced embedding models to filter the translations. - Use LLM-as-a-judge to scoring the quality of the samples base on multiple criteria.", + reference="https://www.sustainablefinance.uzh.ch/en/research/climate-fever.html", + dataset={ + "path": "GreenNode/nano-climate-fever-vn", + "revision": "1852e852f07403d4529a8520d52b91ff6d57869b", + }, + type="Retrieval", + category="t2t", + eval_splits=["test"], + eval_langs=["vie-Latn"], + main_score="ndcg_at_10", + date=("2025-07-29", "2025-07-30"), + license="cc-by-sa-4.0", + annotations_creators="derived", + dialect=[], + sample_creation="machine-translated and LM verified", + domains=["Encyclopaedic", "Written"], + task_subtypes=["Claim verification"], + bibtex_citation=r""" +@misc{pham2025vnmtebvietnamesemassivetext, + archiveprefix = {arXiv}, + author = {Loc Pham and Tung Luu and Thu Vo and Minh Nguyen and Viet Hoang}, + eprint = {2507.21500}, + primaryclass = {cs.CL}, + title = {VN-MTEB: Vietnamese Massive Text Embedding Benchmark}, + url = {https://arxiv.org/abs/2507.21500}, + year = {2025}, +} +""", + prompt={ + "query": "Given a claim about climate change, retrieve documents that support or refute the claim" + }, + adapted_from=["ClimateFEVER-VN"], + ) diff --git a/mteb/tasks/retrieval/vie/db_pedia_vn_retrieval.py b/mteb/tasks/retrieval/vie/db_pedia_vn_retrieval.py index 75d696b3a9..42e3b13568 100644 --- 
a/mteb/tasks/retrieval/vie/db_pedia_vn_retrieval.py +++ b/mteb/tasks/retrieval/vie/db_pedia_vn_retrieval.py @@ -36,3 +36,42 @@ class DBPediaVN(AbsTaskRetrieval): """, adapted_from=["DBPedia"], ) + + +class NanoDBPediaVN(AbsTaskRetrieval): + metadata = TaskMetadata( + name="NanoDBPedia-VN", + description="NanoDBPediaVN is a small version of A translated dataset from DBpedia-Entity is a standard test collection for entity search over the DBpedia knowledge base The process of creating the VN-MTEB (Vietnamese Massive Text Embedding Benchmark) from English samples involves a new automated system: - The system uses large language models (LLMs), specifically Coherence's Aya model, for translation. - Applies advanced embedding models to filter the translations. - Use LLM-as-a-judge to scoring the quality of the samples base on multiple criteria.", + reference="https://github.com/iai-group/DBpedia-Entity/", + dataset={ + "path": "GreenNode/nano-dbpedia-vn", + "revision": "bbc3259bc63bf1e250d7034024092cc3230d5850", + }, + type="Retrieval", + category="t2t", + eval_splits=["test"], + eval_langs=["vie-Latn"], + main_score="ndcg_at_10", + date=("2025-07-29", "2025-07-30"), + license="cc-by-sa-4.0", + annotations_creators="derived", + dialect=[], + sample_creation="machine-translated and LM verified", + domains=["Written", "Encyclopaedic"], + task_subtypes=[], + bibtex_citation=r""" +@misc{pham2025vnmtebvietnamesemassivetext, + archiveprefix = {arXiv}, + author = {Loc Pham and Tung Luu and Thu Vo and Minh Nguyen and Viet Hoang}, + eprint = {2507.21500}, + primaryclass = {cs.CL}, + title = {VN-MTEB: Vietnamese Massive Text Embedding Benchmark}, + url = {https://arxiv.org/abs/2507.21500}, + year = {2025}, +} +""", + prompt={ + "query": "Given a query, retrieve relevant entity descriptions from DBPedia" + }, + adapted_from=["DBPedia-VN"], + ) diff --git a/mteb/tasks/retrieval/vie/fevervn_retrieval.py b/mteb/tasks/retrieval/vie/fevervn_retrieval.py index 9e4d9fa46f..e4e4a9de83 
100644 --- a/mteb/tasks/retrieval/vie/fevervn_retrieval.py +++ b/mteb/tasks/retrieval/vie/fevervn_retrieval.py @@ -36,3 +36,42 @@ class FEVERVN(AbsTaskRetrieval): """, adapted_from=["FEVER"], ) + + +class NanoFEVERVN(AbsTaskRetrieval): + metadata = TaskMetadata( + name="NanoFEVER-VN", + dataset={ + "path": "GreenNode/nano-fever-vn", + "revision": "457ca6b058ed19b28f2359e2d816d7527af6bef8", + }, + description="NanoFEVERVN is a small version of A translated dataset from FEVER (Fact Extraction and VERification) consists of 185,445 claims generated by altering sentences extracted from Wikipedia and subsequently verified without knowledge of the sentence they were derived from. The process of creating the VN-MTEB (Vietnamese Massive Text Embedding Benchmark) from English samples involves a new automated system: - The system uses large language models (LLMs), specifically Coherence's Aya model, for translation. - Applies advanced embedding models to filter the translations. - Use LLM-as-a-judge to scoring the quality of the samples base on multiple criteria.", + reference="https://fever.ai/", + type="Retrieval", + category="t2t", + eval_splits=["test"], + eval_langs=["vie-Latn"], + main_score="ndcg_at_10", + date=("2025-07-29", "2025-07-30"), + license="cc-by-sa-4.0", + annotations_creators="derived", + dialect=[], + sample_creation="machine-translated and LM verified", + domains=["Encyclopaedic", "Written"], + task_subtypes=["Claim verification"], + bibtex_citation=r""" +@misc{pham2025vnmtebvietnamesemassivetext, + archiveprefix = {arXiv}, + author = {Loc Pham and Tung Luu and Thu Vo and Minh Nguyen and Viet Hoang}, + eprint = {2507.21500}, + primaryclass = {cs.CL}, + title = {VN-MTEB: Vietnamese Massive Text Embedding Benchmark}, + url = {https://arxiv.org/abs/2507.21500}, + year = {2025}, +} +""", + prompt={ + "query": "Given a claim, retrieve documents that support or refute the claim" + }, + adapted_from=["FEVER-VN"], + ) diff --git 
a/mteb/tasks/retrieval/vie/hotpot_qavn_retrieval.py b/mteb/tasks/retrieval/vie/hotpot_qavn_retrieval.py index 8c055d9c28..deeb60ce50 100644 --- a/mteb/tasks/retrieval/vie/hotpot_qavn_retrieval.py +++ b/mteb/tasks/retrieval/vie/hotpot_qavn_retrieval.py @@ -36,3 +36,42 @@ class HotpotQAVN(AbsTaskRetrieval): """, adapted_from=["HotpotQA"], ) + + +class NanoHotpotQAVN(AbsTaskRetrieval): + metadata = TaskMetadata( + name="NanoHotpotQA-VN", + dataset={ + "path": "GreenNode/nano-hotpotqa-vn", + "revision": "f4de19a2fae1a582de114e5bcd178bb262183113", + }, + description="NanoHotpotQAVN is a small version of A translated dataset from HotpotQA is a question answering dataset featuring natural, multi-hop questions, with strong supervision for supporting facts to enable more explainable question answering systems. The process of creating the VN-MTEB (Vietnamese Massive Text Embedding Benchmark) from English samples involves a new automated system: - The system uses large language models (LLMs), specifically Coherence's Aya model, for translation. - Applies advanced embedding models to filter the translations. 
- Use LLM-as-a-judge to scoring the quality of the samples base on multiple criteria.", + reference="https://hotpotqa.github.io/", + type="Retrieval", + category="t2t", + eval_splits=["test"], + eval_langs=["vie-Latn"], + main_score="ndcg_at_10", + date=("2025-07-29", "2025-07-30"), + license="cc-by-sa-4.0", + annotations_creators="derived", + dialect=[], + sample_creation="machine-translated and LM verified", + domains=["Web", "Written"], + task_subtypes=["Question answering"], + bibtex_citation=r""" +@misc{pham2025vnmtebvietnamesemassivetext, + archiveprefix = {arXiv}, + author = {Loc Pham and Tung Luu and Thu Vo and Minh Nguyen and Viet Hoang}, + eprint = {2507.21500}, + primaryclass = {cs.CL}, + title = {VN-MTEB: Vietnamese Massive Text Embedding Benchmark}, + url = {https://arxiv.org/abs/2507.21500}, + year = {2025}, +} +""", + prompt={ + "query": "Given a multi-hop question, retrieve documents that can help answer the question" + }, + adapted_from=["HotpotQA-VN"], + ) diff --git a/mteb/tasks/retrieval/vie/msmarcovn_retrieval.py b/mteb/tasks/retrieval/vie/msmarcovn_retrieval.py index 43d429a803..22b50f592b 100644 --- a/mteb/tasks/retrieval/vie/msmarcovn_retrieval.py +++ b/mteb/tasks/retrieval/vie/msmarcovn_retrieval.py @@ -47,3 +47,51 @@ class MSMARCOVN(AbsTaskRetrieval): """, adapted_from=["MSMARCO"], ) + + +class NanoMSMARCOVN(AbsTaskRetrieval): + metadata = TaskMetadata( + name="NanoMSMARCO-VN", + dataset={ + "path": "GreenNode/nano-msmarco-vn", + "revision": "f149369c82ec228b05b0f6677699ab4bfbab73f6", + }, + description="NanoMSMARCOVN is a small version of A translated dataset from MS MARCO is a collection of datasets focused on deep learning in search The process of creating the VN-MTEB (Vietnamese Massive Text Embedding Benchmark) from English samples involves a new automated system: - The system uses large language models (LLMs), specifically Coherence's Aya model, for translation. - Applies advanced embedding models to filter the translations. 
- Use LLM-as-a-judge to scoring the quality of the samples base on multiple criteria.", + reference="https://microsoft.github.io/msmarco/", + type="Retrieval", + category="t2t", + eval_splits=["dev"], + eval_langs=["vie-Latn"], + main_score="ndcg_at_10", + date=("2025-07-29", "2025-07-30"), + license="cc-by-sa-4.0", + annotations_creators="derived", + dialect=[], + sample_creation="machine-translated and LM verified", + domains=[ + "Encyclopaedic", + "Academic", + "Blog", + "News", + "Medical", + "Government", + "Reviews", + "Non-fiction", + "Social", + "Web", + ], + task_subtypes=["Question answering"], + bibtex_citation=r""" +@misc{pham2025vnmtebvietnamesemassivetext, + archiveprefix = {arXiv}, + author = {Loc Pham and Tung Luu and Thu Vo and Minh Nguyen and Viet Hoang}, + eprint = {2507.21500}, + primaryclass = {cs.CL}, + title = {VN-MTEB: Vietnamese Massive Text Embedding Benchmark}, + url = {https://arxiv.org/abs/2507.21500}, + year = {2025}, +} +""", + prompt={"query": "Given a query, retrieve relevant documents from MS MARCO-VN"}, + adapted_from=["MSMARCO-VN"], + ) diff --git a/mteb/tasks/retrieval/vie/nqvn_retrieval.py b/mteb/tasks/retrieval/vie/nqvn_retrieval.py index f992f9b1dd..3417adeac2 100644 --- a/mteb/tasks/retrieval/vie/nqvn_retrieval.py +++ b/mteb/tasks/retrieval/vie/nqvn_retrieval.py @@ -36,3 +36,42 @@ class NQVN(AbsTaskRetrieval): """, adapted_from=["NQ"], ) + + +class NanoNQVN(AbsTaskRetrieval): + metadata = TaskMetadata( + name="NanoNQ-VN", + dataset={ + "path": "GreenNode/nano-nq-vn", + "revision": "1ad4d6556fe0e5314994839089ce070fb0db8b19", + }, + description="NanoNQVN is a small version of A translated dataset from Natural Questions: A Benchmark for Question Answering Research. The process of creating the VN-MTEB (Vietnamese Massive Text Embedding Benchmark) from English samples involves a new automated system: - The system uses large language models (LLMs), specifically Coherence's Aya model, for translation. 
- Applies advanced embedding models to filter the translations. - Use LLM-as-a-judge to scoring the quality of the samples base on multiple criteria.", + reference="https://ai.google.com/research/NaturalQuestions/", + type="Retrieval", + category="t2t", + eval_splits=["test"], + eval_langs=["vie-Latn"], + main_score="ndcg_at_10", + date=("2025-07-29", "2025-07-30"), + license="cc-by-sa-4.0", + annotations_creators="derived", + dialect=[], + sample_creation="machine-translated and LM verified", + domains=["Written", "Encyclopaedic"], + task_subtypes=["Question answering"], + bibtex_citation=r""" +@misc{pham2025vnmtebvietnamesemassivetext, + archiveprefix = {arXiv}, + author = {Loc Pham and Tung Luu and Thu Vo and Minh Nguyen and Viet Hoang}, + eprint = {2507.21500}, + primaryclass = {cs.CL}, + title = {VN-MTEB: Vietnamese Massive Text Embedding Benchmark}, + url = {https://arxiv.org/abs/2507.21500}, + year = {2025}, +} +""", + prompt={ + "query": "Given a question, retrieve Wikipedia passages that answer the question" + }, + adapted_from=["NQ-VN"], + ) diff --git a/mteb/tasks/retrieval/vie/tvpl_retrieval.py b/mteb/tasks/retrieval/vie/tvpl_retrieval.py new file mode 100644 index 0000000000..5e46d109a4 --- /dev/null +++ b/mteb/tasks/retrieval/vie/tvpl_retrieval.py @@ -0,0 +1,42 @@ +from mteb.abstasks.retrieval import AbsTaskRetrieval +from mteb.abstasks.task_metadata import TaskMetadata + +TEST_SAMPLES = 2048 + + +class TVPLRetrieval(AbsTaskRetrieval): + metadata = TaskMetadata( + name="TVPLRetrieval", + description="A Vietnamese dataset for evaluating legal text retrieval. 
From Thu vien phap luat (TVPL) dataset: Optimizing Answer Generator in Vietnamese Legal Question Answering Systems Using Language Models.", + reference="https://doi.org/10.1145/3732938", + dataset={ + "path": "GreenNode/TVPL-Retrieval-VN", + "revision": "6661dba4dfedff606537732d9f35f2c3738b081a", + }, + type="Retrieval", + category="t2t", + modalities=["text"], + eval_splits=["test"], + eval_langs=["vie-Latn"], + main_score="ndcg_at_10", + date=("2025-07-29", "2025-07-30"), + license="cc-by-sa-4.0", + dialect=[], + annotations_creators="human-annotated", + domains=["Legal"], + task_subtypes=["Question answering"], + sample_creation="found", + bibtex_citation=r""" +@article{10.1145/3732938, + address = {New York, NY, USA}, + author = {Le, Huong and Luu, Ngoc and Nguyen, Thanh and Dao, Tuan and Dinh, Sang}, + doi = {10.1145/3732938}, + issn = {2375-4699}, + journal = {ACM Trans. Asian Low-Resour. Lang. Inf. Process.}, + publisher = {Association for Computing Machinery}, + title = {Optimizing Answer Generator in Vietnamese Legal Question Answering Systems Using Language Models}, + url = {https://doi.org/10.1145/3732938}, + year = {2025}, +} +""", + ) diff --git a/mteb/tasks/retrieval/vie/zac_legal_text_retrieval.py b/mteb/tasks/retrieval/vie/zac_legal_text_retrieval.py index 8b5aba46cd..81aa0a2e4c 100644 --- a/mteb/tasks/retrieval/vie/zac_legal_text_retrieval.py +++ b/mteb/tasks/retrieval/vie/zac_legal_text_retrieval.py @@ -24,5 +24,19 @@ class ZacLegalTextRetrieval(AbsTaskRetrieval): annotations_creators="human-annotated", dialect=[], sample_creation="found", - bibtex_citation="", # TODO: Add bibtex citation when the paper is published + bibtex_citation=r""" +@inproceedings{10.1007/978-981-95-1746-6_17, + address = {Singapore}, + author = {Pham, Bao Loc +and Hoang, Quoc Viet +and Luu, Quy Tung +and Vo, Trong Thu}, + booktitle = {Proceedings of the Fifth International Conference on Intelligent Systems and Networks}, + isbn = {978-981-95-1746-6}, + 
pages = {153--163}, + publisher = {Springer Nature Singapore}, + title = {GN-TRVN: A Benchmark for Vietnamese Table Markdown Retrieval Task}, + year = {2026}, +} +""", )