@inproceedings{wang-demberg-2024-parameter,
title = "A Parameter-Efficient Multi-Objective Approach to Mitigate Stereotypical Bias in Language Models",
author = "Wang, Yifan and
Demberg, Vera",
editor = "Fale{\'n}ska, Agnieszka and
Basta, Christine and
Costa-juss{\`a}, Marta and
Goldfarb-Tarrant, Seraphina and
Nozza, Debora",
booktitle = "Proceedings of the 5th Workshop on Gender Bias in Natural Language Processing (GeBNLP)",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.gebnlp-1.1",
doi = "10.18653/v1/2024.gebnlp-1.1",
pages = "1--19",
abstract = "Pre-trained language models have shown impressive abilities of understanding and generating natural languages. However, they typically inherit undesired human-like bias and stereotypes from training data, which raises concerns about putting these models into use in real-world scenarios. Although prior research has proposed to reduce bias using different fairness objectives, they usually fail to capture different representations of bias and, therefore, struggle with fully debiasing models. In this work, we introduce a multi-objective probability alignment approach to overcome current challenges by incorporating multiple debiasing losses to locate and penalize bias in different forms. Compared to existing methods, our proposed method can more effectively and comprehensively reduce stereotypical bias, and maintains the language ability of pre-trained models at the same time. Besides, we adopt prefix-tuning to optimize fairness objectives, and results show that it can achieve better bias removal than full fine-tuning while requiring much fewer computational resources. Our code and data are available at https://github.com/Ewanwong/debias{\_}NLG.",
}
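The two ingredients the abstract names, several debiasing losses combined into one objective and prefix-tuning as the parameter-efficient optimization vehicle, can be illustrated with a short sketch. This is not the authors' implementation (see the linked GitHub repository for that): the counterfactual sentence pair, the symmetrised-KL alignment loss, the KL language-preservation term, and the 0.5 loss weight below are all illustrative assumptions, and the sketch uses the Hugging Face transformers and peft libraries for prefix-tuning.

```python
# Illustrative sketch (NOT the paper's implementation) of a multi-objective
# debiasing setup optimized via prefix-tuning: only the prefix parameters are
# trained, against a weighted sum of (i) a probability-alignment loss over a
# counterfactual sentence pair and (ii) a KL term to the frozen base model
# that preserves language ability.
import torch
import torch.nn.functional as F
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PrefixTuningConfig, TaskType, get_peft_model

tokenizer = AutoTokenizer.from_pretrained("gpt2")

# Frozen reference model for the language-preservation term.
base = AutoModelForCausalLM.from_pretrained("gpt2").eval()
for p in base.parameters():
    p.requires_grad_(False)

# Trainable model: only the 20 virtual prefix tokens receive gradients.
model = AutoModelForCausalLM.from_pretrained("gpt2")
model = get_peft_model(
    model, PrefixTuningConfig(task_type=TaskType.CAUSAL_LM, num_virtual_tokens=20)
)

def token_logprobs(m, text):
    """Per-position next-token log-probabilities for a sentence."""
    ids = tokenizer(text, return_tensors="pt").input_ids
    logits = m(input_ids=ids).logits[:, :-1]  # predict tokens 1..n from 0..n-1
    return F.log_softmax(logits, dim=-1)

# Hypothetical counterfactual pair differing only in a demographic term.
pair = ("The nurse said that he was busy.", "The nurse said that she was busy.")
optim = torch.optim.AdamW(
    [p for p in model.parameters() if p.requires_grad], lr=1e-4
)

for step in range(3):  # toy loop; real training iterates over a counterfactual corpus
    lp_a = token_logprobs(model, pair[0])
    lp_b = token_logprobs(model, pair[1])
    # (i) Alignment loss: push the two predictive distributions together
    # (symmetrised KL over the shared-length span of both sequences).
    n = min(lp_a.size(1), lp_b.size(1))
    align = 0.5 * (
        F.kl_div(lp_a[:, :n], lp_b[:, :n], log_target=True, reduction="batchmean")
        + F.kl_div(lp_b[:, :n], lp_a[:, :n], log_target=True, reduction="batchmean")
    )
    # (ii) Language-preservation loss: stay close to the frozen base model.
    with torch.no_grad():
        lp_ref = token_logprobs(base, pair[0])
    preserve = F.kl_div(lp_a, lp_ref, log_target=True, reduction="batchmean")
    loss = align + 0.5 * preserve  # 0.5 is an arbitrary illustrative weight
    optim.zero_grad()
    loss.backward()
    optim.step()
```

Because get_peft_model freezes the backbone, the optimizer only ever updates the prefix parameters, which is what makes this setup parameter-efficient relative to full fine-tuning.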