@inproceedings{pan-etal-2025-whats,
title = "What{'}s Not Said Still Hurts: A Description-Based Evaluation Framework for Measuring Social Bias in {LLM}s",
author = "Pan, Jinhao and
Raj, Chahat and
Yao, Ziyu and
Zhu, Ziwei",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.76/",
doi = "10.18653/v1/2025.findings-emnlp.76",
pages = "1438--1459",
ISBN = "979-8-89176-335-7",
abstract = "Large Language Models (LLMs) often exhibit social biases inherited from their training data. While existing benchmarks evaluate bias by term-based mode through direct term associations between demographic terms and bias terms, LLMs have become increasingly adept at avoiding biased responses, leading to seemingly low levels of bias. However, biases persist in subtler, contextually hidden forms that traditional benchmarks fail to capture. We introduce the Description-based Bias Benchmark (DBB), a novel dataset designed to assess bias at the semantic level that bias concepts are hidden within naturalistic, subtly framed contexts in real-world scenarios rather than superficial terms. We analyze six state-of-the-art LLMs, revealing that while models reduce bias in response at the term level, they continue to reinforce biases in nuanced settings. Data, code, and results are available at \url{https://github.com/JP-25/Description-based-Bias-Benchmark}."
}