@inproceedings{muller-etal-2021-first,
    title     = {First Align, then Predict: Understanding the Cross-Lingual Ability of Multilingual {BERT}},
    author    = {Muller, Benjamin and
                 Elazar, Yanai and
                 Sagot, Beno{\^i}t and
                 Seddah, Djam{\'e}},
    editor    = {Merlo, Paola and
                 Tiedemann, J{\"o}rg and
                 Tsarfaty, Reut},
    booktitle = {Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume},
    month     = apr,
    year      = {2021},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2021.eacl-main.189/},
    doi       = {10.18653/v1/2021.eacl-main.189},
    pages     = {2214--2231},
    abstract  = {Multilingual pretrained language models have demonstrated remarkable zero-shot cross-lingual transfer capabilities. Such transfer emerges by fine-tuning on a task of interest in one language and evaluating on a distinct language, not seen during the fine-tuning. Despite promising results, we still lack a proper understanding of the source of this transfer. Using a novel layer ablation technique and analyses of the model{'}s internal representations, we show that multilingual BERT, a popular multilingual language model, can be viewed as the stacking of two sub-networks: a multilingual encoder followed by a task-specific language-agnostic predictor. While the encoder is crucial for cross-lingual transfer and remains mostly unchanged during fine-tuning, the task predictor has little importance on the transfer and can be reinitialized during fine-tuning. We present extensive experiments with three distinct tasks, seventeen typologically diverse languages and multiple domains to support our hypothesis.},
}
Markdown (Informal)
[First Align, then Predict: Understanding the Cross-Lingual Ability of Multilingual BERT](https://aclanthology.org/2021.eacl-main.189/) (Muller et al., EACL 2021)
ACL