@inproceedings{eyal-etal-2025-layer,
title = "Layer Duplication in {LLM}s",
author = "Eyal, Neo and
Dershowitz, Nachum and
Bar, Kfir",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.967/",
doi = "10.18653/v1/2025.findings-emnlp.967",
pages = "17797--17807",
ISBN = "979-8-89176-335-7",
abstract = "We investigate the effect of duplicating multihead self-attention layers in large language models (LLMs) across a range of language tasks, with and without fine-tuning. The results demonstrate that duplicating the initial layers once or twice often yields a significant performance boost. Attention analysis uncovered the underlying mechanisms driving the improvement when performing layer duplication. This method enhances LLM capabilities with or without additional training or labeled data."
}

Markdown (Informal)
[Layer Duplication in LLMs](https://aclanthology.org/2025.findings-emnlp.967/) (Eyal et al., Findings 2025)
ACL
Neo Eyal, Nachum Dershowitz, and Kfir Bar. 2025. Layer Duplication in LLMs. In Findings of the Association for Computational Linguistics: EMNLP 2025, pages 17797–17807, Suzhou, China. Association for Computational Linguistics.