@inproceedings{wang-etal-2025-calm,
title = "{CALM}: Unleashing the Cross-Lingual Self-Aligning Ability of Language Model Question Answering",
author = "Wang, Yumeng and
Fan, Zhiyuan and
Wang, Qingyun and
Fung, Yi R. and
Ji, Heng",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.findings-naacl.152/",
pages = "2809--2817",
ISBN = "979-8-89176-195-7",
abstract = "Large Language Models (LLMs) are pretrained on extensive multilingual corpora to acquire both language-specific cultural knowledge and general knowledge. Ideally, while LLMs should provide consistent responses to culture-independent questions across languages, we observe significant performance disparities. To address this, we explore the **C**ross-Lingual Self-**A**ligning ability of **L**anguage **M**odels (**CALM**) to align knowledge across languages. Specifically, for a given question, we sample multiple responses across different languages and select the most self-consistent response as the target, leaving the remaining responses as negative examples. We then employ direct preference optimization (DPO) to align the model{'}s knowledge across different languages. Evaluations on the MEDQA and X-CSQA datasets demonstrate CALM{'}s effectiveness in enhancing cross-lingual knowledge question answering, both in zero-shot and retrieval-augmented settings. We also found that increasing the number of languages involved in CALM training leads to higher accuracy and consistency. We offer a qualitative analysis of how cross-lingual consistency can enhance knowledge alignment and explore the method{'}s generalizability."
}
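
The pair-construction step the abstract describes (sample responses across languages, keep the most self-consistent one as the DPO target, treat the disagreeing responses as negatives) can be illustrated with a short Python sketch. This is only an illustration under stated assumptions: `generate` is a hypothetical stand-in for a real LLM call, one answer per language replaces the paper's multi-sample procedure, and a simple majority vote serves as a proxy for the self-consistency selection; none of this is the authors' actual implementation.

```python
# Minimal sketch of CALM-style DPO pair construction, assuming answers are
# short comparable strings (e.g. multiple-choice option letters).
from collections import Counter

def generate(question: str, language: str) -> str:
    """Hypothetical stand-in for an LLM call: returns an answer in `language`."""
    canned = {"en": "B", "es": "B", "zh": "C", "de": "B"}  # toy responses
    return canned[language]

def build_dpo_pairs(question: str, languages: list[str]):
    """Sample one answer per language; keep the majority answer as the DPO
    'chosen' target and each disagreeing answer as a 'rejected' negative."""
    answers = {lang: generate(question, lang) for lang in languages}
    # Majority vote across languages is used here as a proxy for the paper's
    # self-consistency selection.
    target, _ = Counter(answers.values()).most_common(1)[0]
    pairs = [
        {"prompt": question, "chosen": target, "rejected": ans, "language": lang}
        for lang, ans in answers.items()
        if ans != target
    ]
    return target, pairs

if __name__ == "__main__":
    target, pairs = build_dpo_pairs(
        "Which vitamin deficiency causes scurvy?", ["en", "es", "zh", "de"]
    )
    print(target)  # "B"
    print(pairs)   # one rejected pair, for the disagreeing "zh" answer
```

The prompt/chosen/rejected records produced here match the shape commonly expected by off-the-shelf DPO trainers (e.g. TRL's DPOTrainer), though the paper's exact data format is not specified in the abstract.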