@inproceedings{yoo-shin-2025-fair,
title = "Fair or Framed? Political Bias in News Articles Generated by {LLM}s",
author = "Yoo, Junho and
Shin, Youhyun",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-luhme/2025.emnlp-main.856/",
doi = "10.18653/v1/2025.emnlp-main.856",
pages = "16915--16941",
ISBN = "979-8-89176-332-6",
    abstract = "Although biases in Large Language Models (LLMs) have been widely researched, political bias in news article generation tasks remains underexplored. This study evaluates political bias across seven LLMs by leveraging our PublicViews dataset, extracted from the TwinViews-13K corpus, which comprises 31 topics and 31,692 statements. We analyze 10,850 articles, finding that left-leaning bias persists in generation tasks, with neutral content remaining rare even under balanced opinion settings. Models exhibit asymmetric behavior in minority opinion scenarios, amplifying preferred viewpoints when in the minority while conforming to majority opinions otherwise. Notably, all models employ ``stance-flipping quotations'' (altering supporters' statements to express opposite viewpoints) in 33--38{\%} of quotations despite explicit instructions against distortion. Consistent with prior research, increased model size failed to enhance neutrality. This research measures political bias in LLM-generated news, analyzes its mechanisms, and reveals how opinion distribution and explicitness affect bias expression. Our results highlight how LLMs can introduce unintended political bias in generative contexts. We publicly release our PublicViews corpus and code at https://anonymous.4open.science/r/Fair-or-Framed-46F1."
}