@inproceedings{yang-etal-2025-anything,
title = "Anything Goes? A Crosslinguistic Study of (Im)possible Language Learning in {LM}s",
author = "Yang, Xiulin and
Aoyama, Tatsuya and
Yao, Yuekun and
Wilcox, Ethan",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.1264/",
pages = "26058--26077",
ISBN = "979-8-89176-251-0",
    abstract = "Do language models (LMs) offer insights into human language learning? A common argument against this idea is that because their architecture and training paradigm are so vastly different from humans, LMs can learn arbitrary inputs as easily as natural languages. We test this claim by training LMs to model impossible and typologically unattested languages. Unlike previous work, which has focused exclusively on English, we conduct experiments on 12 languages from 4 language families with two newly constructed parallel corpora. Our results show that while GPT-2 small can largely distinguish attested languages from their impossible counterparts, it does not achieve perfect separation between all the attested languages and all the impossible ones. We further test whether GPT-2 small distinguishes typologically attested from unattested languages with different NP orders by manipulating word order based on Greenberg{'}s Universal 20. We find that the model{'}s perplexity scores do not distinguish attested vs. unattested word orders, while its performance on the generalization test does. These findings suggest that LMs exhibit some human-like inductive biases, though these biases are weaker than those found in human learners."
}