@inproceedings{fazili-jyothi-2022-aligning,
title = "Aligning Multilingual Embeddings for Improved Code-switched Natural Language Understanding",
author = "Fazili, Barah and
Jyothi, Preethi",
editor = "Calzolari, Nicoletta and
Huang, Chu-Ren and
Kim, Hansaem and
Pustejovsky, James and
Wanner, Leo and
Choi, Key-Sun and
Ryu, Pum-Mo and
Chen, Hsin-Hsi and
Donatelli, Lucia and
Ji, Heng and
Kurohashi, Sadao and
Paggio, Patrizia and
Xue, Nianwen and
Kim, Seokhwan and
Hahm, Younggyun and
He, Zhong and
Lee, Tony Kyungil and
Santus, Enrico and
Bond, Francis and
Na, Seung-Hoon",
booktitle = "Proceedings of the 29th International Conference on Computational Linguistics",
month = oct,
year = "2022",
address = "Gyeongju, Republic of Korea",
publisher = "International Committee on Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.coling-1.375/",
pages = "4268--4273",
abstract = "Multilingual pretrained models, while effective on monolingual data, need additional training to work well with code-switched text. In this work, we present a novel idea of training multilingual models with alignment objectives using parallel text so as to explicitly align word representations with the same underlying semantics across languages. Such an explicit alignment step has a positive downstream effect and improves performance on multiple code-switched NLP tasks. We explore two alignment strategies and report improvements of up to 7.32{\%}, 0.76{\%} and 1.9{\%} on Hindi-English Sentiment Analysis, Named Entity Recognition and Question Answering tasks compared to a competitive baseline model."
}
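
For intuition, here is a minimal sketch of the kind of alignment objective the abstract describes: a contrastive loss that pulls together embeddings of translation-equivalent words extracted from parallel text. This is an illustrative assumption, not the paper's exact formulation (the paper explores two alignment strategies that the abstract does not spell out); the InfoNCE-style loss, the temperature value, and the random stand-in embeddings are all hypothetical.

import torch
import torch.nn.functional as F

def alignment_loss(src_vecs, tgt_vecs, temperature=0.1):
    # src_vecs, tgt_vecs: (n_pairs, dim) embeddings of words that are
    # translations of each other in a parallel sentence pair. Row i of
    # src_vecs is pulled toward row i of tgt_vecs and pushed away from
    # all other rows, which serve as in-batch negatives.
    src = F.normalize(src_vecs, dim=-1)
    tgt = F.normalize(tgt_vecs, dim=-1)
    logits = src @ tgt.T / temperature       # (n_pairs, n_pairs) cosine similarities
    targets = torch.arange(src.size(0))      # positives lie on the diagonal
    return F.cross_entropy(logits, targets)

# Toy usage: 8 aligned word pairs with 768-dim embeddings. In practice these
# would be contextual representations from a multilingual encoder such as
# mBERT or XLM-R, and the loss would be added to the fine-tuning objective.
src = torch.randn(8, 768)
tgt = torch.randn(8, 768)
print(alignment_loss(src, tgt).item())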