@inproceedings{faisal-anastasopoulos-2021-investigating,
    % Braces (not quotes) delimit values: they nest and survive embedded quotes.
    title     = {Investigating Post-pretraining Representation Alignment for Cross-Lingual Question Answering},
    author    = {Faisal, Fahim and
                 Anastasopoulos, Antonios},
    editor    = {Fisch, Adam and
                 Talmor, Alon and
                 Chen, Danqi and
                 Choi, Eunsol and
                 Seo, Minjoon and
                 Lewis, Patrick and
                 Jia, Robin and
                 Min, Sewon},
    booktitle = {Proceedings of the 3rd Workshop on Machine Reading for Question Answering},
    month     = nov,
    year      = {2021},
    % ACL Anthology convention: conference location goes in address.
    address   = {Punta Cana, Dominican Republic},
    publisher = {Association for Computational Linguistics},
    % Fixed: original URL pointed at a temporary preview deployment
    % (preview.aclanthology.org/Add-Cong-Liu-...-author-id/), which will rot.
    % This is the canonical, stable Anthology URL for 2021.mrqa-1.14.
    url       = {https://aclanthology.org/2021.mrqa-1.14/},
    doi       = {10.18653/v1/2021.mrqa-1.14},
    pages     = {133--148},
    abstract  = {Human knowledge is collectively encoded in the roughly 6500 languages spoken around the world, but it is not distributed equally across languages. Hence, for information-seeking question answering (QA) systems to adequately serve speakers of all languages, they need to operate cross-lingually. In this work we investigate the capabilities of multilingually pretrained language models on cross-lingual QA. We find that explicitly aligning the representations across languages with a post-hoc finetuning step generally leads to improved performance. We additionally investigate the effect of data size as well as the language choice in this fine-tuning step, also releasing a dataset for evaluating cross-lingual QA systems.},
}
Markdown (Informal)
[Investigating Post-pretraining Representation Alignment for Cross-Lingual Question Answering](https://aclanthology.org/2021.mrqa-1.14/) (Faisal & Anastasopoulos, MRQA 2021)
ACL