@inproceedings{quiroga-etal-2025-adapting,
title = "Adapting Bias Evaluation to Domain Contexts using Generative Models",
author = "Quiroga, Tamara and
Bravo-Marquez, Felipe and
Barriere, Valentin",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1424/",
pages = "28043--28054",
ISBN = "979-8-89176-332-6",
abstract = "Numerous datasets have been proposed to evaluate social bias in Natural Language Processing (NLP) systems. However, assessing bias within specific application domains remains challenging, as existing approaches often face limitations in scalability and fidelity across domains. In this work, we introduce a domain-adaptive framework that utilizes prompting with Large Language Models (LLMs) to automatically transform template-based bias datasets into domain-specific variants. We apply our method to two widely used benchmarks{---}\textit{Equity Evaluation Corpus} (EEC) and \textit{Identity Phrase Templates Test Set} (IPTTS){---}adapting them to the Twitter and Wikipedia Talk data. Our results show that the adapted datasets yield bias estimates more closely aligned with real-world data. These findings highlight the potential of LLM-based prompting to enhance the realism and contextual relevance of bias evaluation in NLP systems."
}