@inproceedings{tosolini-blevins-2025-analyzing,
title = "Analyzing the Linguistic Priors of Language Models with Synthetic Languages",
author = "Tosolini, Alessio and
Blevins, Terra",
editor = "Hahn, Michael and
Rani, Priya and
Kumar, Ritesh and
Shcherbakov, Andreas and
Sorokin, Alexey and
Serikov, Oleg and
Cotterell, Ryan and
Vylomova, Ekaterina",
booktitle = "Proceedings of the 7th Workshop on Research in Computational Linguistic Typology and Multilingual NLP",
month = aug,
year = "2025",
address = "Vinenna. Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.sigtyp-1.2/",
pages = "7--15",
ISBN = "979-8-89176-281-7",
abstract = "While modern language model architectures are often assumed to be language-agnostic, there is limited evidence as to whether these models actually process the wide diversity of natural languages equally well. We investigate this question by analyzing how well LMs learn carefully constructed artificial languages containing a variety of verbal complexity, ranging from simple paradigms to covering far more verb classes than occur in natural languages. Rather than learning all languages equally efficiently, models trained on these languages show strict preferences for processing simpler languages. Furthermore, while some observed behaviors mimic human linguistic priors, we find that they indicate the model memorizes its training data rather than generalizes from it."
}