@inproceedings{allemann-etal-2025-optimizing,
title = "Optimizing the Training Schedule of Multilingual {NMT} using Reinforcement Learning",
author = "Allemann, Alexis and
Atrio, {\`A}lex R. and
Popescu-Belis, Andrei",
editor = "Bouillon, Pierrette and
Gerlach, Johanna and
Girletti, Sabrina and
Volkart, Lise and
Rubino, Raphael and
Sennrich, Rico and
Farinha, Ana C. and
Gaido, Marco and
Daems, Joke and
Kenny, Dorothy and
Moniz, Helena and
Szoc, Sara",
booktitle = "Proceedings of Machine Translation Summit XX: Volume 1",
month = jun,
year = "2025",
address = "Geneva, Switzerland",
publisher = "European Association for Machine Translation",
url = "https://preview.aclanthology.org/mtsummit-25-ingestion/2025.mtsummit-1.6/",
pages = "65--80",
ISBN = "978-2-9701897-0-1",
abstract = "Multilingual NMT is a viable solution for translating low-resource languages (LRLs) when data from high-resource languages (HRLs) of the same language family is available. However, the training schedule, i.e., the order of presentation of languages, has an impact on the quality of such systems. Here, in a many-to-one translation setting, we propose to apply two algorithms that use reinforcement learning to optimize the training schedule of NMT: (1) Teacher-Student Curriculum Learning and (2) Deep Q Network. The former uses an exponentially smoothed estimate of the returns of each action based on the loss on monolingual or multilingual development subsets, while the latter estimates rewards using an additional neural network trained from the history of actions selected in different states of the system, together with the rewards received. On an 8-to-1 translation dataset with LRLs and HRLs, our second method improves BLEU and COMET scores with respect to both random selection of monolingual batches and shuffled multilingual batches, by adjusting the number of presentations of LRL vs. HRL batches."
}
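
To make the abstract's description of the first scheduler concrete, below is a minimal, self-contained Python sketch of an exponentially smoothed return estimate of the kind the abstract attributes to Teacher-Student Curriculum Learning: each action is the choice of which language's batch to present next, and its return is updated from the decrease in development-set loss. The epsilon-greedy selection rule, the constants, and the toy languages and losses are illustrative assumptions, not details taken from the paper.

import random

ALPHA = 0.1    # smoothing rate (assumed, not from the paper)
EPSILON = 0.1  # exploration rate for epsilon-greedy selection (assumed)

def update_return(q_prev, reward, alpha=ALPHA):
    # Exponentially smoothed return estimate: Q <- alpha*r + (1-alpha)*Q
    return alpha * reward + (1.0 - alpha) * q_prev

def pick_language(q, epsilon=EPSILON):
    # With probability epsilon, explore a random language; otherwise pick
    # the language with the largest absolute smoothed return, i.e. the
    # one whose dev loss is currently changing the most (learning progress).
    if random.random() < epsilon:
        return random.choice(list(q))
    return max(q, key=lambda lang: abs(q[lang]))

# Toy usage with hypothetical HRL/LRL labels and fake dev losses.
languages = ["hrl_1", "hrl_2", "lrl_1"]
q = {lang: 0.0 for lang in languages}
prev_loss = {lang: 2.0 for lang in languages}

for step in range(5):
    lang = pick_language(q)
    # In the real system, a batch of `lang` data would be trained on here,
    # and new_loss would be measured on a monolingual dev subset.
    new_loss = prev_loss[lang] - random.uniform(0.0, 0.1)  # fake progress
    reward = prev_loss[lang] - new_loss  # loss decrease = learning progress
    q[lang] = update_return(q[lang], reward)
    prev_loss[lang] = new_loss
    print(step, lang, round(q[lang], 4))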