@inproceedings{ozbagriacik-dubossarsky-2026-language,
    title     = {Language Matters: Target-Language Supervision for Political Bias Detection in {Turkish} News},
    author    = {Ozbagriacik, Umut and
                 Dubossarsky, Haim},
    editor    = {Oflazer, Kemal and
                 K{\"o}ksal, Abdullatif and
                 Varol, Onur},
    booktitle = {Proceedings of the Second Workshop on Natural Language Processing for {Turkic} Languages ({SIGTURK} 2026)},
    month     = mar,
    year      = {2026},
    address   = {Rabat, Morocco},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2026.sigturk-1.7/},
    pages     = {72--81},
    isbn      = {979-8-89176-370-8},
    abstract  = {We present, to our knowledge, the first systematic transformer-based outlet-ideology classification study for Turkish news. Using a topic-balanced corpus of Turkish political articles drawn from six outlets commonly perceived as left-, centre-, or right-leaning, we formulate a three-way outlet-ideology classification task. On this dataset, we evaluate a monolingual encoder (BERTurk), two multilingual encoders (mBERT, XLM-R), and a LoRA-adapted decoder model (Mistral). BERTurk achieves the best performance among individual models (70{\%} accuracy, 71{\%} macro-F1), reaching levels comparable to English-language studies despite operating in a lower-resource setting. Error analyses show that all encoders reliably distinguish centrist from partisan articles, but frequently confuse left- and right-leaning articles with each other. Moreover, BERTurk is relatively stronger on right-leaning content, whereas the multilingual models favour left-leaning content, suggesting an ``ideological fingerprint'' of their pre-training data. Crucially, models fine-tuned on an English political-bias task fail to transfer to Turkish, collapsing to near-chance performance. Taken together, these results demonstrate that effective political bias detection requires target-language supervision and cannot be achieved through na{\"i}ve cross-lingual transfer. Our work establishes a first baseline for Turkish political bias detection and underscores the need for open, carefully designed Turkish (and broader Turkic) bias benchmarks to support robust and fair media analysis.}
}
Markdown (Informal)
[Language Matters: Target-Language Supervision for Political Bias Detection in Turkish News](https://aclanthology.org/2026.sigturk-1.7/) (Ozbagriacik & Dubossarsky, SIGTURK 2026)
ACL