@inproceedings{man-etal-2025-dmdteval,
title = "{DMDTE}val: An Evaluation and Analysis of {LLM}s on Disambiguation in Multi-domain Translation",
author = "Man, Zhibo and
Chen, Yuanmeng and
Zhang, Yujie and
Xu, Jinan",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.309/",
pages = "6065--6082",
ISBN = "979-8-89176-332-6",
abstract = "Currently, Large Language Models (LLMs) have achieved remarkable results in machine translation. However, their performance in multi-domain translation (MDT) is less satisfactory, the meanings of words can vary across different domains, highlighting the significant ambiguity inherent in MDT. Therefore, evaluating the disambiguation ability of LLMs in MDT, remains an open problem. To this end, we present an evaluation and analysis of LLMs on disambiguation in multi-domain translation (DMDTEval), our systematic evaluation framework consisting of three aspects: (1) we construct a translation test set with multi-domain ambiguous word annotation, (2) we curate a diverse set of disambiguation prompt strategies, and (3) we design precise disambiguation metrics, and study the efficacy of various prompt strategies on multiple state-of-the-art LLMs. We conduct comprehensive experiments across 4 language pairs and 13 domains, our extensive experiments reveal a number of crucial findings that we believe will pave the way and also facilitate further research in the critical area of improving the disambiguation of LLMs."
}