@inproceedings{lim-rottger-2026-bias,
title = "Bias in the East, Bias in the West: A Bilingual Analysis of {LLM} Political Bias on {U}.{S}.- and {C}hina-Related Issues",
author = {Lim, Ying Ying and
R{\"o}ttger, Paul},
editor = "Demberg, Vera and
Inui, Kentaro and
Marquez, Llu{\'i}s",
booktitle = "Findings of the {A}ssociation for {C}omputational {L}inguistics: {EACL} 2026",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-eacl/2026.findings-eacl.122/",
pages = "2301--2326",
ISBN = "979-8-89176-386-9",
    abstract = "Large language models (LLMs) can exhibit political biases, which creates a risk of undue influence on LLM users and public opinion. Yet despite LLMs being used across the world, there is little evidence on how political biases vary across languages. And despite a growing number of frontier LLMs (e.g., DeepSeek) released by non-U.S. organizations, there is limited understanding of how political biases vary across LLMs developed in different political contexts. To address these gaps, we measure LLM bias on U.S.- and China-related issues, and how bias varies by 1) prompt language (English vs. Chinese) and 2) model origin (U.S. vs. Chinese). For this purpose, we create a new parallel dataset of 36k realistic test prompts asking models to write about a balanced set of 60 political issues sourced from national U.S. and Chinese news outlets. Using this dataset, we show that both model origin and prompt language systematically influence bias. Language effects dominate on China-related issues, particularly those involving sovereignty and human rights, while model origin better predicts variation in bias on U.S.-related governance and foreign policy topics. Overall, our results highlight a need for language- and context-specific measurement of LLM political bias."
}