@inproceedings{wei-etal-2025-mitigating,
title = "Mitigating Gender Bias via Fostering Exploratory Thinking in {LLM}s",
author = "Wei, Kangda and
Abdullah, Hasnat Md and
Huang, Ruihong",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.364/",
doi = "10.18653/v1/2025.findings-emnlp.364",
pages = "6895--6917",
ISBN = "979-8-89176-335-7",
abstract = "Large Language Models (LLMs) often exhibit gender bias, resulting in unequal treatment of male and female subjects across different contexts. To address this issue, we propose a novel data generation framework that fosters exploratory thinking in LLMs. Our approach prompts models to generate story pairs featuring male and female protagonists in structurally identical, morally ambiguous scenarios, then elicits and compares their moral judgments. When inconsistencies arise, the model is guided to produce balanced, gender-neutral judgments. These story-judgment pairs are used to fine-tune or optimize the models via Direct Preference Optimization (DPO). Experimental results show that our method significantly reduces gender bias while preserving or even enhancing general model capabilities. We will release the code and generated data."
}