@inproceedings{wang-etal-2021-dynamically,
    title     = {Dynamically Disentangling Social Bias from Task-Oriented Representations with Adversarial Attack},
    author    = {Wang, Liwen and
                 Yan, Yuanmeng and
                 He, Keqing and
                 Wu, Yanan and
                 Xu, Weiran},
    editor    = {Toutanova, Kristina and
                 Rumshisky, Anna and
                 Zettlemoyer, Luke and
                 Hakkani-Tur, Dilek and
                 Beltagy, Iz and
                 Bethard, Steven and
                 Cotterell, Ryan and
                 Chakraborty, Tanmoy and
                 Zhou, Yichao},
    booktitle = {Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
    month     = jun,
    year      = {2021},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2021.naacl-main.293/},
    doi       = {10.18653/v1/2021.naacl-main.293},
    pages     = {3740--3750},
    abstract  = {Representation learning is widely used in NLP for a vast range of tasks. However, representations derived from text corpora often reflect social biases. This phenomenon is pervasive and consistent across different neural models, causing serious concern. Previous methods mostly rely on a pre-specified, user-provided direction or suffer from unstable training. In this paper, we propose an adversarial disentangled debiasing model to dynamically decouple social bias attributes from the intermediate representations trained on the main task. We aim to denoise bias information while training on the downstream task, rather than completely remove social bias and pursue static unbiased representations. Experiments show the effectiveness of our method, both on the effect of debiasing and the main task performance.},
}
@comment{
Markdown (Informal):
[Dynamically Disentangling Social Bias from Task-Oriented Representations with Adversarial Attack](https://aclanthology.org/2021.naacl-main.293/) (Wang et al., NAACL 2021)
ACL
}