@inproceedings{askari-etal-2025-babylms,
  title         = {Are {BabyLMs} Deaf to {Gricean} Maxims? A Pragmatic Evaluation of Sample-efficient Language Models},
  author        = {Askari, Raha and
                   Zarrie{\ss}, Sina and
                   Alacam, {\"O}zge and
                   Sieker, Judith},
  editor        = {Charpentier, Lucas and
                   Choshen, Leshem and
                   Cotterell, Ryan and
                   Gul, Mustafa Omer and
                   Hu, Michael Y. and
                   Liu, Jing and
                   Jumelet, Jaap and
                   Linzen, Tal and
                   Mueller, Aaron and
                   Ross, Candace and
                   Shah, Raj Sanjay and
                   Warstadt, Alex and
                   Wilcox, Ethan Gotlieb and
                   Williams, Adina},
  booktitle     = {Proceedings of the First {BabyLM} Workshop},
  month         = nov,
  year          = {2025},
  address       = {Suzhou, China},
  publisher     = {Association for Computational Linguistics},
  url           = {https://preview.aclanthology.org/ingest-emnlp/2025.babylm-main.4/},
  pages         = {52--65},
  abstract      = {Implicit meanings are integral to human communication, making it essential for language models to be capable of identifying and interpreting them. Grice (1975) proposed a set of conversational maxims that guide cooperative dialogue, noting that speakers may deliberately violate these principles to express meanings beyond literal words, and that listeners, in turn, recognize such violations to draw pragmatic inferences. Building on Surian et al. (1996){'}s study of children{'}s sensitivity to violations of Gricean maxims, we introduce a novel benchmark to test whether language models pretrained on {\ensuremath{<}}10M and {\ensuremath{<}}100M tokens can distinguish maxim-adhering from maxim-violating utterances. We compare these BabyLMs across five maxims and situate their performance relative to children and a Large Language Model (LLM) pretrained on 3T tokens. We find that overall, models trained on {\ensuremath{<}}100M tokens outperform those trained on {\ensuremath{<}}10M, yet fall short of child-level and LLM competence. Our results suggest that modest data increases improve some aspects of pragmatic behavior, leading to finer-grained differentiation between pragmatic dimensions.},
  internal-note = {NOTE(review): isbn was "TODO" in the export and is omitted until known; url points at the ingest preview host -- replace with the canonical aclanthology.org link once published},
}
Markdown (Informal)
[Are BabyLMs Deaf to Gricean Maxims? A Pragmatic Evaluation of Sample-efficient Language Models](https://preview.aclanthology.org/ingest-emnlp/2025.babylm-main.4/) (Askari et al., BabyLM 2025)
ACL