@inproceedings{stap-monz-2025-effect,
title = "The Effect of Language Diversity When Fine-Tuning Large Language Models for Translation",
author = "Stap, David and
Monz, Christof",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.224/",
doi = "10.18653/v1/2025.findings-emnlp.224",
pages = "4199--4211",
ISBN = "979-8-89176-335-7",
abstract = "Prior research diverges on language diversity in LLM fine-tuning: Some studies report benefits while others find no advantages. Through controlled fine-tuning experiments across 132 translation directions, we systematically resolve these disparities. We find that expanding language diversity during fine-tuning improves translation quality for both unsupervised and{---}surprisingly{---}supervised pairs, despite less diverse models being fine-tuned exclusively on these supervised pairs. However, benefits plateau or decrease beyond a certain diversity threshold. We show that increased language diversity creates more language-agnostic representations. These representational adaptations help explain the improved performance in models fine-tuned with greater diversity."
}