@inproceedings{vajda-etal-2025-improving,
title = "Improving {LLM}s for Machine Translation Using Synthetic Preference Data",
author = "Vajda, Dario and
Vre{\v{s}}, Domen and
{\v{S}}ikonja, Marko Robnik",
editor = "Cardoso, Henrique Lopes and
Sousa-Silva, Rui and
Koponen, Maarit and
Pareja-Lora, Antonio",
booktitle = "Proceedings of the 2nd LUHME Workshop",
month = oct,
year = "2025",
address = "Bologna, Italy",
publisher = "LUHME",
url = "https://preview.aclanthology.org/ingest-luhme/2025.luhme-1.7/",
pages = "67--73",
    abstract = "Large language models have emerged as effective machine translation systems. In this paper, we explore how a general instruction-tuned large language model can be improved for machine translation using relatively few, easily produced data resources. Using Slovene as a use case, we improve the GaMS-9B-Instruct model using Direct Preference Optimization (DPO) training on a programmatically curated and enhanced subset of a public dataset. As DPO requires pairs of quality-ranked instances, we generated its training dataset by translating English Wikipedia articles using two LLMs, GaMS-9B-Instruct and EuroLLM-9B-Instruct. We ranked the resulting translations based on heuristics coupled with automatic evaluation metrics such as COMET. The evaluation shows that our fine-tuned model outperforms both models involved in the dataset generation. In comparison to the baseline models, the fine-tuned model achieved COMET score gains of around 0.04 and 0.02, respectively, on translating Wikipedia articles. It also more consistently avoids language and formatting errors."
}