@inproceedings{lei-2021-attention,
title = "When Attention Meets Fast Recurrence: Training Language Models with Reduced Compute",
author = "Lei, Tao",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2021.emnlp-main.602/",
doi = "10.18653/v1/2021.emnlp-main.602",
pages = "7633--7648",
abstract = "Large language models have become increasingly difficult to train because of the growing computation time and cost. In this work, we present SRU++, a highly-efficient architecture that combines fast recurrence and attention for sequence modeling. SRU++ exhibits strong modeling capacity and training efficiency. On standard language modeling tasks such as Enwik8, Wiki-103 and Billion Word datasets, our model obtains better bits-per-character and perplexity while using 3x-10x less training cost compared to top-performing Transformer models. For instance, our model achieves a state-of-the-art result on the Enwik8 dataset using 1.6 days of training on an 8-GPU machine. We further demonstrate that SRU++ requires minimal attention for near state-of-the-art performance. Our results suggest jointly leveraging fast recurrence with little attention as a promising direction for accelerating model training and inference."
}