@inproceedings{ai-etal-2025-knowledge,
title = "Are Knowledge and Reference in Multilingual Language Models Cross-Lingually Consistent?",
author = "Ai, Xi and
Ihsani, Mahardika Krisna and
Kan, Min-Yen",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.267/",
doi = "10.18653/v1/2025.findings-emnlp.267",
pages = "4975--5011",
ISBN = "979-8-89176-335-7",
abstract = "Cross-lingual consistency should be considered to assess cross-lingual transferability, maintain the factuality of the model knowledge across languages, and preserve the parity of language model performance. We are thus interested in analyzing, evaluating, and interpreting cross-lingual consistency for factual knowledge.To facilitate our study, we examine multiple pretrained models and tuned models with code-mixed coreferential statements that convey identical knowledge across languages. Interpretability approaches are leveraged to analyze the behavior of a model in cross-lingual contexts, showing different levels of consistency in multilingual models, subject to language families, linguistic factors, scripts, and a bottleneck in cross-lingual consistency on a particular layer. Code-switching training and cross-lingual word alignment objectives show the most promising results, emphasizing the worthiness of cross-lingual alignment supervision and code-switching strategies for both multilingual performance and cross-lingual consistency enhancement. In addition, experimental results suggest promising result for calibrating consistency on test time via activation patching."
}