@inproceedings{hiraoka-inui-2025-repetition,
title = "Repetition Neurons: How Do Language Models Produce Repetitions?",
author = "Hiraoka, Tatsuya and
Inui, Kentaro",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.naacl-short.41/",
pages = "483--495",
ISBN = "979-8-89176-190-2",
abstract = "This paper introduces repetition neurons, which can be regarded as ``skill neurons'' responsible for the repetition problem in text generation tasks. These neurons are progressively activated more strongly as repetition continues, indicating that they perceive repetition as a task to copy the previous context repeatedly, similar to in-context learning. We identify these repetition neurons by comparing activation values before and after the onset of repetition in texts generated by recent pre-trained language models. We analyze the repetition neurons in three English and one Japanese pre-trained language models and observe similar patterns across them."
}
Markdown (Informal)
[Repetition Neurons: How Do Language Models Produce Repetitions?](https://aclanthology.org/2025.naacl-short.41/) (Hiraoka & Inui, NAACL 2025)
ACL
Tatsuya Hiraoka and Kentaro Inui. 2025. Repetition Neurons: How Do Language Models Produce Repetitions?. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 2: Short Papers), pages 483–495, Albuquerque, New Mexico. Association for Computational Linguistics.