diff --git a/mteb/descriptive_stats/Retrieval/FinQARetrieval.json b/mteb/descriptive_stats/Retrieval/FinQARetrieval.json
new file mode 100644
index 0000000000..75a43c281b
--- /dev/null
+++ b/mteb/descriptive_stats/Retrieval/FinQARetrieval.json
@@ -0,0 +1,20 @@
+{
+    "test": {
+        "number_of_characters": 165000,
+        "num_samples": 1800,
+        "num_queries": 900,
+        "num_documents": 900,
+        "min_document_length": 75,
+        "average_document_length": 183.33,
+        "max_document_length": 600,
+        "unique_documents": 900,
+        "min_query_length": 3,
+        "average_query_length": 3.0,
+        "max_query_length": 3,
+        "unique_queries": 900,
+        "min_relevant_docs_per_query": 1,
+        "average_relevant_docs_per_query": 1.0,
+        "max_relevant_docs_per_query": 1,
+        "unique_relevant_docs": 900
+    }
+}
diff --git a/mteb/tasks/Retrieval/__init__.py b/mteb/tasks/Retrieval/__init__.py
index e0c3cb77d0..9872729ed1 100644
--- a/mteb/tasks/Retrieval/__init__.py
+++ b/mteb/tasks/Retrieval/__init__.py
@@ -54,6 +54,7 @@
 from .eng.FaithDialRetrieval import *
 from .eng.FeedbackQARetrieval import *
 from .eng.FEVERRetrieval import *
+from .eng.FinQARetrieval import *
 from .eng.FiQA2018Retrieval import *
 from .eng.GovReportRetrieval import *
 from .eng.HagridRetrieval import *
diff --git a/mteb/tasks/Retrieval/eng/FinQARetrieval.py b/mteb/tasks/Retrieval/eng/FinQARetrieval.py
new file mode 100644
index 0000000000..5a2e9c852a
--- /dev/null
+++ b/mteb/tasks/Retrieval/eng/FinQARetrieval.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+from datasets import load_dataset
+
+from mteb.abstasks.TaskMetadata import TaskMetadata
+
+from ....abstasks.AbsTaskRetrieval import AbsTaskRetrieval
+
+
+class FinQARetrieval(AbsTaskRetrieval):
+    metadata = TaskMetadata(
+        name="FinQARetrieval",
+        description="A financial retrieval task based on FinQA dataset containing numerical reasoning questions over financial documents. Each query is a financial question requiring numerical computation (e.g., 'What is the percentage change in operating expenses from 2019 to 2020?'), and the corpus contains financial document text with tables and numerical data. The task is to retrieve the correct financial information that enables answering the numerical question. Queries are numerical reasoning questions while the corpus contains financial text passages with embedded tables, figures, and quantitative financial data from earnings reports.",
+        reference="https://huggingface.co/datasets/embedding-benchmark/FinQA",
+        dataset={
+            "path": "embedding-benchmark/FinQA",
+            "revision": "bdd1903ce03153129480bfc14b710e3d612c1efd",
+        },
+        type="Retrieval",
+        category="s2p",
+        modalities=["text"],
+        eval_splits=["test"],
+        eval_langs=["eng-Latn"],
+        main_score="ndcg_at_10",
+        date=("2021-01-01", "2021-12-31"),
+        domains=["Financial"],
+        task_subtypes=["Question answering"],
+        license="mit",
+        annotations_creators="expert-annotated",
+        dialect=[],
+        sample_creation="found",
+        bibtex_citation=r"""
+@article{chen2021finqa,
+  author = {Chen, Zhiyu and Chen, Wenhu and Smiley, Charese and Shah, Sameena and Borova, Iana and Langdon, Dylan and Moussa, Reema and Beane, Matt and Huang, Ting-Hao and Routledge, Bryan and Wang, William Yang},
+  journal = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
+  title = {FinQA: A Dataset of Numerical Reasoning over Financial Data},
+  year = {2021},
+}
+""",
+        prompt={
+            "query": "Given a financial numerical reasoning question, retrieve relevant financial data that helps answer the question"
+        },
+    )
+
+    def load_data(self, **kwargs):
+        """Load corpus, queries and qrels for the "test" split.
+
+        Populates ``self.corpus``, ``self.queries`` and ``self.relevant_docs``
+        in the split-keyed layout expected by ``AbsTaskRetrieval``.
+        """
+        if self.data_loaded:
+            return
+
+        path = self.metadata.dataset["path"]
+        revision = self.metadata.dataset["revision"]
+
+        # The HF dataset ships three configurations: "corpus", "queries" and
+        # the qrels under the "default" config.
+        corpus_ds = load_dataset(path, "corpus", revision=revision)["corpus"]
+        queries_ds = load_dataset(path, "queries", revision=revision)["queries"]
+        qrels_ds = load_dataset(path, "default", revision=revision)["test"]
+
+        # Corpus documents carry no titles in this dataset.
+        corpus = {row["id"]: {"title": "", "text": row["text"]} for row in corpus_ds}
+        queries = {row["id"]: row["text"] for row in queries_ds}
+
+        # Group relevance judgements by query id: {query-id: {corpus-id: score}}.
+        relevant_docs: dict[str, dict[str, int]] = {}
+        for row in qrels_ds:
+            relevant_docs.setdefault(row["query-id"], {})[row["corpus-id"]] = int(
+                row["score"]
+            )
+
+        # AbsTaskRetrieval expects data keyed by eval split.
+        self.corpus = {"test": corpus}
+        self.queries = {"test": queries}
+        self.relevant_docs = {"test": relevant_docs}
+
+        self.data_loaded = True