@inproceedings{fatemi-etal-2023-improving,
title = "Improving Gender Fairness of Pre-Trained Language Models without Catastrophic Forgetting",
author = "Fatemi, Zahra and
Xing, Chen and
Liu, Wenhao and
Xiong, Caiming",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.acl-short.108/",
doi = "10.18653/v1/2023.acl-short.108",
pages = "1249--1262",
abstract = "Existing studies addressing gender bias of pre-trained language models usually build a small gender-neutral data set and conduct second-phase pre-training on the model with such data. However, given the limited size and concentrated focus of the gender-neutral data, catastrophic forgetting can occur during second-phase pre-training. Forgetting information in the original training data may damage the model's downstream performance by a large margin. In this work, we empirically show that catastrophic forgetting occurs in such methods by evaluating them on general NLP tasks in GLUE. We then propose a new method, GEnder Equality Prompt (GEEP), to improve gender fairness of pre-trained models with less forgetting. GEEP freezes the pre-trained model and learns gender-related prompts with gender-neutral data. Empirical results show that GEEP not only achieves SOTA performance on gender fairness tasks, but also forgets less and performs better on GLUE by a large margin."
}