From f11cdcf15c94b07d19ea2d83948039c295591bb3 Mon Sep 17 00:00:00 2001
From: Dylan Bouchard
Date: Wed, 15 Jan 2025 11:55:55 -0500
Subject: [PATCH 1/2] update version

---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index e1832f4..d44a206 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "langfair"
-version = "0.3.1"
+version = "0.3.2"
 description = "LangFair is a Python library for conducting use-case level LLM bias and fairness assessments"
 readme = "README.md"
 authors = ["Dylan Bouchard ",

From d0dad1a0c5b79187b961fe551f080156c7cf0baf Mon Sep 17 00:00:00 2001
From: Dylan Bouchard
Date: Wed, 15 Jan 2025 12:04:56 -0500
Subject: [PATCH 2/2] minor rephrase

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 5d01c5a..43d8725 100644
--- a/README.md
+++ b/README.md
@@ -199,7 +199,7 @@ Bias and fairness metrics offered by LangFair are grouped into several categorie
 
 ## 📖 Associated Research
 
-A technical description and a practitioner's guide for selecting evaluation metrics is contained in **[this paper](https://arxiv.org/abs/2407.10853)**. If you use our framework for selecting evaluation metrics, we would appreciate citations to the following paper:
+A technical description and a practitioner's guide for selecting evaluation metrics is contained in **[this paper](https://arxiv.org/abs/2407.10853)**. If you use our evaluation approach, we would appreciate citations to the following paper:
 
 ```bibtex
 @misc{bouchard2024actionableframeworkassessingbias,