@inproceedings{irie-etal-2023-practical,
  title     = {Practical Computational Power of Linear {Transformers} and Their Recurrent and Self-Referential Extensions},
  author    = {Irie, Kazuki and
               Csord{\'a}s, R{\'o}bert and
               Schmidhuber, J{\"u}rgen},
  editor    = {Bouamor, Houda and
               Pino, Juan and
               Bali, Kalika},
  booktitle = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.emnlp-main.588/},
  doi       = {10.18653/v1/2023.emnlp-main.588},
  pages     = {9455--9465},
  abstract  = {Recent studies of the computational power of recurrent neural networks (RNNs) reveal a hierarchy of RNN architectures, given real-time and finite-precision assumptions. Here we study auto-regressive Transformers with linearised attention, a.k.a. linear Transformers (LTs) or Fast Weight Programmers (FWPs). LTs are special in the sense that they are equivalent to RNN-like sequence processors with a fixed-size state, while they can also be expressed as the now-popular self-attention networks. We show that many well-known results for the standard Transformer directly transfer to LTs/FWPs. Our formal language recognition experiments demonstrate how recently proposed FWP extensions such as recurrent FWPs and self-referential weight matrices successfully overcome certain limitations of the LT, e.g., allowing for generalisation on the parity problem. Our code is public.},
}
Markdown (Informal)
[Practical Computational Power of Linear Transformers and Their Recurrent and Self-Referential Extensions](https://aclanthology.org/2023.emnlp-main.588/) (Irie et al., EMNLP 2023)
ACL