@inproceedings{choenni-etal-2024-examining,
title = "Examining Modularity in Multilingual {LM}s via Language-Specialized Subnetworks",
author = "Choenni, Rochelle and
Shutova, Ekaterina and
Garrette, Dan",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-naacl.21/",
doi = "10.18653/v1/2024.findings-naacl.21",
pages = "287--301",
    abstract = "Recent work has proposed explicitly inducing language-wise modularity in multilingual LMs via sparse fine-tuning (SFT) on per-language subnetworks as a means of better guiding cross-lingual sharing. In this paper, we investigate (1) the degree to which language-wise modularity \textit{naturally} arises within models with no special modularity interventions, and (2) how cross-lingual sharing and interference differ between such models and those with explicit SFT-guided subnetwork modularity. In order to do so, we use XLM-R as our multilingual LM. Moreover, to quantify language specialization and cross-lingual interaction, we use a Training Data Attribution method that estimates the degree to which a model's predictions are influenced by in-language or cross-language training examples. Our results show that language-specialized subnetworks do naturally arise, and that SFT, rather than always increasing modularity, can decrease language specialization of subnetworks in favor of more cross-lingual sharing."
}