@inproceedings{signoroni-etal-2025-efficient,
title = "Efficient Architectures For Low-Resource Machine Translation",
author = "Signoroni, Edoardo and
Rychl{\'y}, Pavel and
Signoroni, Ruggero",
editor = "Estevanell-Valladares, Ernesto Luis and
Picazo-Izquierdo, Alicia and
Ranasinghe, Tharindu and
Mikaberidze, Besik and
Ostermann, Simon and
Gurgurov, Daniil and
Mueller, Philipp and
Borg, Claudia and
{\v{S}}imko, Mari{\'a}n",
booktitle = "Proceedings of the First Workshop on Advancing NLP for Low-Resource Languages",
month = sep,
year = "2025",
address = "Varna, Bulgaria",
publisher = "INCOMA Ltd., Shoumen, Bulgaria",
url = "https://preview.aclanthology.org/corrections-2026-01/2025.lowresnlp-1.6/",
pages = "39--64",
abstract = "Low-resource Neural Machine Translation is highly sensitive to hyperparameters and needs careful tuning to achieve the best results with small amounts of training data. We explore the impact of changes in the Transformer architecture on downstream translation quality and propose a metric to score the computational efficiency of such changes. Experimenting on English-Akkadian, German-Lower Sorbian, English-Italian, and English-Manipuri, we confirm previous findings in low-resource machine translation optimization and show that smaller, more parameter-efficient models can achieve the same translation quality as larger and unwieldier ones at a fraction of the computational cost. Optimized models have around 95{\%} fewer parameters while dropping at most 14.8{\%} ChrF. We compile a list of optimal ranges for each hyperparameter."
}