@inproceedings{luo-etal-2025-large-language-models,
title = "Large Language Models as Reader for Bias Detection",
author = "Luo, Xuan and
Li, Jing and
Wenzhong, Zhong and
Tu, Geng and
Xu, Ruifeng",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.976/",
doi = "10.18653/v1/2025.findings-emnlp.976",
pages = "17957--17967",
ISBN = "979-8-89176-335-7",
abstract = "Detecting bias in media content is crucial for maintaining information integrity and promoting inclusivity. Traditional methods analyze text from the writer{'}s perspective, which analyzes textual features directly from the writer{'}s intent, leaving the reader{'}s perspective underexplored. This paper investigates whether Large Language Models (LLMs) can be leveraged as readers for bias detection by generating reader-perspective comments. Experiments are conducted on the BASIL (news bias) and BeyondGender (gender bias) datasets with LLMs Gemma-7B, Phi-3-3.8B, Llama3.1-8B, Llama3.1-70B, and GPT4. The results demonstrate the effectiveness of reader-perspective comments for open-source LLMs, achieving performance comparable to GPT4{'}s. The findings highlight the significance of emotion-related comments, which are generally more beneficial than value-related ones in bias detection. In addition, experiments on Llamas show that comment selection ensures consistent performance regardless of model sizes and comment combinations. This study is particularly beneficial for small-size open-source LLMs."
}

Markdown (Informal)
[Large Language Models as Reader for Bias Detection](https://aclanthology.org/2025.findings-emnlp.976/) (Luo et al., Findings 2025)
ACL
Xuan Luo, Jing Li, Zhong Wenzhong, Geng Tu, and Ruifeng Xu. 2025. Large Language Models as Reader for Bias Detection. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 17957–17967, Suzhou, China. Association for Computational Linguistics.
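
For readers who want a concrete picture of the pipeline the abstract describes, the sketch below is an illustrative guess only, not the authors' released code or prompts: a placeholder generate() wrapper stands in for whichever LLM is used (e.g. Llama3.1 or GPT4), the prompt wording is invented, and the emotion-focused comment simply mirrors the abstract's finding that emotion-related comments tend to help more than value-related ones.

```python
# Hypothetical sketch of "LLM as reader" bias detection.
# Not the paper's implementation; prompts and generate() are assumptions.

def generate(prompt: str) -> str:
    """Hypothetical wrapper around a chat LLM (e.g. Llama3.1-8B or GPT4)."""
    raise NotImplementedError("plug in your own LLM client here")

def reader_comment(sentence: str, aspect: str = "emotion") -> str:
    # Step 1: ask the model to react to the text as a reader.
    prompt = (
        "You are a reader of the following sentence.\n"
        f"Sentence: {sentence}\n"
        f"Write a short comment describing the {aspect} it evokes in you."
    )
    return generate(prompt)

def detect_bias(sentence: str) -> str:
    # Step 2: judge bias with the reader-perspective comment as extra context.
    comment = reader_comment(sentence, aspect="emotion")
    prompt = (
        "Decide whether the sentence below contains bias. Answer Yes or No.\n"
        f"Sentence: {sentence}\n"
        f"A reader commented: {comment}\n"
        "Answer:"
    )
    return generate(prompt).strip()
```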