@inproceedings{di-bonaventura-etal-2025-wanted,
title = "Wanted: Personalised Bias Warnings for Gender Bias in Language Models",
author = "Di Bonaventura, Chiara and
Nwachukwu, Michelle and
Stoica, Maria",
editor = "Fale{\'n}ska, Agnieszka and
Basta, Christine and
Costa-juss{\`a}, Marta and
Sta{\'n}czak, Karolina and
Nozza, Debora",
booktitle = "Proceedings of the 6th Workshop on Gender Bias in Natural Language Processing (GeBNLP)",
month = aug,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.gebnlp-1.13/",
pages = "124--136",
ISBN = "979-8-89176-277-0",
    abstract = "The widespread use of language models, especially Large Language Models, paired with their inherent biases can propagate and amplify societal inequalities. While research has extensively explored methods for bias mitigation and measurement, limited attention has been paid to how such biases are communicated to users, which instead can have a positive impact on increasing user trust and understanding of these models. Our study addresses this gap by investigating user preferences for gender bias mitigation, measurement and communication in language models. To this end, we conducted a user study targeting female AI practitioners with eighteen female and one male participant. Our findings reveal that user preferences for bias mitigation and measurement show strong consensus, whereas they vary widely for bias communication, underscoring the importance of tailoring warnings to individual needs. Building on these findings, we propose a framework for user-centred bias reporting, which leverages runtime monitoring techniques to assess and visualise bias in real time and in a customizable fashion."
}