@inproceedings{richburg-carpuat-2022-data,
    title     = {Data Cartography for Low-Resource Neural Machine Translation},
    author    = {Richburg, Aquia and
                 Carpuat, Marine},
    editor    = {Goldberg, Yoav and
                 Kozareva, Zornitsa and
                 Zhang, Yue},
    booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2022},
    month     = dec,
    year      = {2022},
    address   = {Abu Dhabi, United Arab Emirates},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2022.findings-emnlp.410/},
    doi       = {10.18653/v1/2022.findings-emnlp.410},
    pages     = {5594--5607},
    abstract  = {While collecting or generating more parallel data is necessary to improve machine translation (MT) in low-resource settings, we lack an understanding of how the limited amounts of existing data are actually used to help guide the collection of further resources. In this paper, we apply data cartography techniques (Swayamdipta et al., 2020) to characterize the contribution of training samples in two low-resource MT tasks (Swahili-English and Turkish-English) throughout the training of standard neural MT models. Our empirical study shows that, unlike in prior work for classification tasks, most samples contribute to model training in low-resource MT, albeit not uniformly throughout the training process. Furthermore, uni-dimensional characterizations of samples {--} e.g., based on dual cross-entropy or word frequency {--} do not suffice to characterize to what degree they are hard or easy to learn. Taken together, our results suggest that data augmentation strategies for low-resource MT would benefit from model-in-the-loop strategies to maximize improvements.},
}
@comment{
Markdown (Informal)
[Data Cartography for Low-Resource Neural Machine Translation](https://aclanthology.org/2022.findings-emnlp.410/) (Richburg & Carpuat, Findings 2022)
ACL
}