@inproceedings{tang-etal-2025-mitigating,
title = "Mitigating Hallucinated Translations in Large Language Models with Hallucination-focused Preference Optimization",
author = "Tang, Zilu and
Chatterjee, Rajen and
Garg, Sarthak",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Ingest-2025-COMPUTEL/2025.naacl-long.175/",
pages = "3410--3433",
ISBN = "979-8-89176-189-6",
abstract = "Machine Translation (MT) is undergoing a paradigm shift, with systems based on fine-tuned large language models (LLM) becoming increasingly competitive with traditional encoder-decoder models trained specifically for translation tasks. However, LLM-based systems are at a higher risk of generating hallucinations, which can severely undermine user`s trust and safety. Most prior research on hallucination mitigation focuses on traditional MT models, with solutions that involve *post-hoc* mitigation - detecting hallucinated translations and re-translating them. While effective, this approach introduces additional complexity in deploying extra tools in production and also increases latency.To address these limitations, we propose a method that intrinsically learns to mitigate hallucinations during the model training phase. Specifically, we introduce a data creation framework to generate hallucination focused preference datasets. Fine-tuning LLMs on these preference datasets reduces the hallucination rate by an average of 96{\%} across five language pairs, while preserving overall translation quality. In a zero-shot setting our approach reduces hallucinations by 89{\%} on an average across three unseen target languages."
}