@inproceedings{sun-etal-2025-aligned,
title = "Aligned but Blind: Alignment Increases Implicit Bias by Reducing Awareness of Race",
author = "Sun, Lihao and
Mao, Chengzhi and
Hofmann, Valentin and
Bai, Xuechunzi",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.1078/",
pages = "22167--22184",
ISBN = "979-8-89176-251-0",
abstract = "Although value-aligned language models (LMs) appear unbiased in explicit bias evaluations, they often exhibit stereotypes in implicit word association tasks, raising concerns about their fair usage. We investigate the mechanisms behind this discrepancy and find that alignment surprisingly amplifies implicit bias in model outputs. Specifically, we show that aligned LMs, unlike their unaligned counterparts, overlook racial concepts in early internal representations when the context is ambiguous. Not representing race likely fails to activate safety guardrails, leading to unintended biases. Inspired by this insight, we propose a new bias mitigation strategy that works by incentivizing the representation of racial concepts in the early model layers. In contrast to conventional mitigation methods of machine unlearning, our interventions find that steering the model to be more aware of racial concepts effectively mitigates implicit bias. Similar to race blindness in humans, ignoring racial nuances can inadvertently perpetuate subtle biases in LMs."
}