@inproceedings{brahma-etal-2024-nlip,
title = "{NLIP}-Lab-{IITH} Multilingual {MT} System for {WAT}24 {MT} Shared Task",
author = "Brahma, Maharaj and
Sahoo, Pramit and
Desarkar, Maunendra Sankar",
editor = "Haddow, Barry and
Kocmi, Tom and
Koehn, Philipp and
Monz, Christof",
booktitle = "Proceedings of the Ninth Conference on Machine Translation",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2024.wmt-1.74/",
doi = "10.18653/v1/2024.wmt-1.74",
pages = "804--809",
abstract = "This paper describes NLIP Lab's multilingual machine translation system for the WAT24 shared task on multilingual Indic MT, covering 22 scheduled languages belonging to 4 language families. We explore pre-training for Indic languages using alignment agreement objectives, utilizing bilingual dictionaries to substitute words from source sentences. Furthermore, we fine-tuned language direction-specific multilingual translation models using small, high-quality seed data. Our primary submission is a 243M-parameter multilingual translation model covering 22 Indic languages. On the IN22-Gen benchmark, we achieved an average chrF++ score of 46.80 and a BLEU score of 18.19 in the En-Indic direction; in the Indic-En direction, we achieved an average chrF++ score of 56.34 and a BLEU score of 30.82. On the IN22-Conv benchmark, we achieved an average chrF++ score of 43.43 and a BLEU score of 16.58 in the En-Indic direction, and averages of 52.44 chrF++ and 29.77 BLEU in the Indic-En direction. Our model is competitive with IndicTransv1 (a 474M-parameter model)."
}
Markdown (Informal)
[NLIP-Lab-IITH Multilingual MT System for WAT24 MT Shared Task](https://aclanthology.org/2024.wmt-1.74/) (Brahma et al., WMT 2024)