@inproceedings{semenov-sennrich-2025-measuring,
title = "Measuring the Effect of Disfluency in Multilingual Knowledge Probing Benchmarks",
author = "Semenov, Kirill and
Sennrich, Rico",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.539/",
pages = "10665--10683",
ISBN = "979-8-89176-332-6",
    abstract = "For multilingual factual knowledge assessment of LLMs, benchmarks such as MLAMA use template translations that do not take into account the grammatical and semantic information of the named entities inserted in the sentence. This leads to numerous instances of ungrammaticality or wrong wording in the final prompts, which complicates the interpretation of scores, especially for languages with a rich morphological inventory. In this work, we sample 4 Slavic languages from the MLAMA dataset and compare the knowledge retrieval scores between the initial (templated) MLAMA dataset and its sentence-level translations produced by Google Translate and ChatGPT. We observe a significant increase in knowledge retrieval scores and provide a qualitative analysis of possible reasons behind it. We also conduct an additional analysis of 5 more languages from different families and observe similar patterns. Therefore, we encourage the community to control for the grammaticality of highly multilingual datasets to obtain higher and more interpretable results, which is well approximated by whole-sentence translation with neural MT or LLM systems."
}