@inproceedings{lv-etal-2025-language,
title = "Language Models ``Grok'' to Copy",
author = "Lv, Ang and
Xie, Ruobing and
Sun, Xingwu and
Kang, Zhanhui and
Yan, Rui",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.naacl-short.61/",
pages = "735--741",
ISBN = "979-8-89176-190-2",
abstract = "We examine the pre-training dynamics of language models, focusing on their ability to copy text from preceding context{---}a fundamental skill for various LLM applications, including in-context learning (ICL) and retrieval-augmented generation (RAG). We propose a novel perspective that Transformer-based language models develop copying abilities similarly to grokking, which refers to sudden generalization on test set long after the model fit to the training set. Our experiments yield three arguments: (1) The pre-training loss decreases rapidly, while the context copying ability of models initially lags and then abruptly saturates. (2) The speed of developing copying ability is independent of the number of tokens trained, similarly to how grokking speed is unaffected by dataset size as long as the data distribution is preserved. (3) Induction heads, the attention heads responsible for copying, form from shallow to deep layers during training, mirroring the development of circuits in deeper layers during grokking. We contend that the connection between grokking and context copying can provide valuable insights for more effective language model training, ultimately improving in-context performance. For example, we demonstrated that techniques that enhance grokking, such as regularization, either accelerate or enhance the development of context copying."
}
Markdown (Informal):
[Language Models “Grok” to Copy](https://preview.aclanthology.org/fix-sig-urls/2025.naacl-short.61/) (Lv et al., NAACL 2025)
ACL:
- Ang Lv, Ruobing Xie, Xingwu Sun, Zhanhui Kang, and Rui Yan. 2025. Language Models “Grok” to Copy. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pages 735–741, Albuquerque, New Mexico. Association for Computational Linguistics.