@inproceedings{roh-etal-2025-xlqa,
title = "{XLQA}: A Benchmark for Locale-Aware Multilingual Open-Domain Question Answering",
author = "Roh, Keonwoo and
Ju, Yeong-Joon and
Lee, Seong-Whan",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1466/",
pages = "28797--28809",
ISBN = "979-8-89176-332-6",
abstract = "Large Language Models (LLMs) have shown significant progress in Open-domain question answering (ODQA), yet most evaluations focus on English and assume locale-invariant answers across languages. This assumption neglects the cultural and regional variations that affect question understanding and answer, leading to biased evaluation in multilingual benchmarks. To address these limitations, we introduce XLQA, a novel benchmark explicitly designed for locale-sensitive multilingual ODQA. XLQA contains 3,000 English seed questions expanded to eight languages, with careful filtering for semantic consistency and human-verified annotations distinguishing locale-invariant and locale-sensitive cases. Our evaluation of five state-of-the-art multilingual LLMs reveals notable failures on locale-sensitive questions, exposing gaps between English and other languages due to a lack of locale-grounding knowledge. We provide a systematic framework and scalable methodology for assessing multilingual QA under diverse cultural contexts, offering a critical resource to advance real-world applicability of multilingual ODQA systems. Our findings suggest that disparities in training data distribution contribute to differences in both linguistic competence and locale-awareness across models."
}