@inproceedings{xie-etal-2024-addressing,
title = "Addressing Healthcare-related Racial and {LGBTQ}+ Biases in Pretrained Language Models",
author = "Xie, Sean and
Hassanpour, Saeed and
Vosoughi, Soroush",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.findings-naacl.278/",
doi = "10.18653/v1/2024.findings-naacl.278",
pages = "4451--4464",
abstract = "Recent studies have highlighted the issue of Pretrained Language Models (PLMs) inadvertently propagating social stigmas and stereotypes, a critical concern given their widespread use. This is particularly problematic in sensitive areas like healthcare, where such biases could lead to detrimental outcomes. Our research addresses this by adapting two intrinsic bias benchmarks to quantify racial and LGBTQ+ biases in prevalent PLMs. We also empirically evaluate the effectiveness of various debiasing methods in mitigating these biases. Furthermore, we assess the impact of debiasing on both Natural Language Understanding and specific biomedical applications. Our findings reveal that while PLMs commonly exhibit healthcare-related racial and LGBTQ+ biases, the applied debiasing techniques successfully reduce these biases without compromising the models' performance in downstream tasks."
}