@inproceedings{haller-etal-2025-sample,
title = "Sample-Efficient Language Modeling with Linear Attention and Lightweight Enhancements",
author = "Haller, Patrick and
Golde, Jonas and
Akbik, Alan",
editor = "Charpentier, Lucas and
Choshen, Leshem and
Cotterell, Ryan and
Gul, Mustafa Omer and
Hu, Michael Y. and
Liu, Jing and
Jumelet, Jaap and
Linzen, Tal and
Mueller, Aaron and
Ross, Candace and
Shah, Raj Sanjay and
Warstadt, Alex and
Wilcox, Ethan Gotlieb and
Williams, Adina",
booktitle = "Proceedings of the First BabyLM Workshop",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.babylm-main.14/",
pages = "175--191",
ISBN = "TODO",
abstract = "We study architectural and optimization techniques for sample-efficient language modeling under the constraints of the BabyLM 2025 shared task. Our model, \textbf{BLaLM}, replaces self-attention with a linear-time mLSTM token mixer and explores lightweight enhancements, including short convolutions, sliding window attention with dynamic modulation, and Hedgehog feature maps. To support training in low-resource settings, we curate a high-quality corpus emphasizing readability and pedagogical structure. Experiments across both strict and strict-small tracks show that (1) linear attention combined with sliding window attention consistently improves zero-shot performance, and (2) the Muon optimizer stabilizes convergence and reduces perplexity over AdamW. These results highlight effective strategies for efficient language modeling without relying on scale."
}