@inproceedings{wilie-etal-2025-high,
title = "High-Dimensional Interlingual Representations of Large Language Models",
author = "Wilie, Bryan and
Cahyawijaya, Samuel and
He, Junxian and
Fung, Pascale",
editor = "Hahn, Michael and
Rani, Priya and
Kumar, Ritesh and
Shcherbakov, Andreas and
Sorokin, Alexey and
Serikov, Oleg and
Cotterell, Ryan and
Vylomova, Ekaterina",
booktitle = "Proceedings of the 7th Workshop on Research in Computational Linguistic Typology and Multilingual NLP",
month = aug,
year = "2025",
address = "Vinenna. Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.sigtyp-1.14/",
pages = "122--155",
ISBN = "979-8-89176-281-7",
abstract = "Large language models (LLMs) trained on massive multilingual datasets hint at the formation of interlingual constructs{--}a shared region in the representation space. However, evidence regarding this phenomenon is mixed, leaving it unclear whether these models truly develop unified interlingual representations, or present a partially aligned constructs. We explore 31 diverse languages varying on their resource-levels, typologies, and geographical regions; and find that multilingual LLMs exhibit inconsistent cross-lingual alignments. To address this, we propose an interlingual representation framework identifying both the shared interlingual semantic region and fragmented components, existed due to representational limitations. We introduce Interlingual Local Overlap (ILO) score to quantify interlingual alignment by comparing the local neighborhood structures of high-dimensional representations. We utilize ILO to investigate the impact of single-language fine-tuning on the interlingual alignment in multilingual LLMs. Our results indicate that training exclusively on a single language disrupts the alignment in early layers, while freezing these layers preserves the alignment of interlingual representations, leading to improved cross-lingual generalization. These results validate our framework and metric for evaluating interlingual representation, and further underscore that interlingual alignment is crucial for scalable multilingual learning."
}
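The abstract describes the ILO score as a comparison of local neighborhood structures between languages' high-dimensional representations. Below is a minimal, hypothetical sketch of a k-nearest-neighbor overlap measure in that spirit, assuming row-aligned hidden states for parallel sentences in two languages; the function names, neighborhood size, and distance choice are illustrative assumptions, not the authors' implementation.

```python
# Hypothetical sketch of a local-neighborhood-overlap score inspired by the
# described ILO metric; details are assumptions, not the paper's actual code.
import numpy as np

def knn_indices(X: np.ndarray, k: int) -> np.ndarray:
    """Indices of the k nearest neighbors (excluding self) for each row of X."""
    # Pairwise Euclidean distances between all representations.
    d = np.linalg.norm(X[:, None, :] - X[None, :, :], axis=-1)
    np.fill_diagonal(d, np.inf)          # exclude each point from its own neighborhood
    return np.argsort(d, axis=1)[:, :k]  # k closest indices per row

def local_overlap(X_a: np.ndarray, X_b: np.ndarray, k: int = 10) -> float:
    """
    Average overlap between the k-NN neighborhoods of aligned (parallel) sentences
    represented in two languages: 1.0 = identical local structure, 0.0 = disjoint.
    """
    assert X_a.shape[0] == X_b.shape[0], "representations must be aligned row-wise"
    nn_a, nn_b = knn_indices(X_a, k), knn_indices(X_b, k)
    overlaps = [len(set(a) & set(b)) / k for a, b in zip(nn_a, nn_b)]
    return float(np.mean(overlaps))

# Toy usage with random stand-ins for hidden states of parallel sentences.
rng = np.random.default_rng(0)
X_en = rng.normal(size=(100, 768))
X_de = X_en + 0.05 * rng.normal(size=(100, 768))  # nearly aligned representations
print(local_overlap(X_en, X_de, k=10))            # close to 1.0 for well-aligned spaces
```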