@inproceedings{panda-etal-2022-dont,
title = "Don{'}t Just Clean It, Proxy Clean It: Mitigating Bias by Proxy in Pre-Trained Models",
author = "Panda, Swetasudha and
Kobren, Ari and
Wick, Michael and
Shen, Qinlan",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.findings-emnlp.372/",
doi = "10.18653/v1/2022.findings-emnlp.372",
pages = "5073--5085",
abstract = "Transformer-based pre-trained models are known to encode societal biases not only in their contextual representations, but also in downstream predictions when fine-tuned on task-specific data.We present D-Bias, an approach that selectively eliminates stereotypical associations (e.g, co-occurrence statistics) at fine-tuning, such that the model doesn{'}t learn to excessively rely on those signals.D-Bias attenuates biases from both identity words and frequently co-occurring proxies, which we select using pointwise mutual information.We apply D-Bias to a) occupation classification, and b) toxicity classification and find that our approach substantially reduces downstream biases (e.g. by {\ensuremath{>}} 60{\%} in toxicity classification, for identities that are most frequently flagged as toxic on online platforms).In addition, we show that D-Bias dramatically improves upon scrubbing, i.e., removing only the identity words in question.We also demonstrate that D-Bias easily extends to multiple identities, and achieves competitive performance with two recently proposed debiasing approaches: R-LACE and INLP."
}
Markdown (Informal)
[Don’t Just Clean It, Proxy Clean It: Mitigating Bias by Proxy in Pre-Trained Models](https://aclanthology.org/2022.findings-emnlp.372/) (Panda et al., Findings 2022)