@inproceedings{li-etal-2024-improving-context,
  title     = {Improving In-context Learning of Multilingual Generative Language Models with Cross-lingual Alignment},
  author    = {Li, Chong and
               Wang, Shaonan and
               Zhang, Jiajun and
               Zong, Chengqing},
  editor    = {Duh, Kevin and
               Gomez, Helena and
               Bethard, Steven},
  booktitle = {Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)},
  month     = jun,
  year      = {2024},
  address   = {Mexico City, Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.naacl-long.445/},
  doi       = {10.18653/v1/2024.naacl-long.445},
  pages     = {8058--8076},
  abstract  = {Multilingual generative models obtain remarkable cross-lingual in-context learning capabilities through pre-training on large-scale corpora. However, they still exhibit a performance bias toward high-resource languages and learn isolated distributions of multilingual sentence representations, which may hinder knowledge transfer across languages. To bridge this gap, we propose a simple yet effective cross-lingual alignment framework exploiting pairs of translation sentences. It aligns the internal sentence representations across different languages via multilingual contrastive learning and aligns outputs by following cross-lingual instructions in the target language. Experimental results show that even with less than 0.1{\textperthousand} of pre-training tokens, our alignment framework significantly boosts the cross-lingual abilities of generative language models and mitigates the performance gap. Further analyses reveal that it results in a better internal multilingual representation distribution of multilingual models.},
}
Markdown (Informal)
[Improving In-context Learning of Multilingual Generative Language Models with Cross-lingual Alignment](https://aclanthology.org/2024.naacl-long.445/) (Li et al., NAACL 2024)
ACL