@inproceedings{li-etal-2024-transformer,
title = "A Transformer with Stack Attention",
author = "Li, Jiaoda and
White, Jennifer and
Sachan, Mrinmaya and
Cotterell, Ryan",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.findings-naacl.269/",
doi = "10.18653/v1/2024.findings-naacl.269",
pages = "4318--4335",
abstract = "Natural languages are believed to be (mildly) context-sensitive. Despite underpinning remarkably capable large language models, transformers are unable to model many context-free language tasks. In an attempt to address this limitation in the modeling power of transformer-based language models, we propose augmenting them with a differentiable, stack-based attention mechanism. Our stack-basedattention mechanism can be incorporated into any transformer-based language model and adds a level of interpretability to the model. We show that the addition of our stack-based attention mechanism enables the transformer to model some, but not all, deterministic context-freelanguages."
}
Markdown (Informal)
[A Transformer with Stack Attention](https://aclanthology.org/2024.findings-naacl.269/) (Li et al., Findings 2024)
ACL
- Jiaoda Li, Jennifer White, Mrinmaya Sachan, and Ryan Cotterell. 2024. A Transformer with Stack Attention. In Findings of the Association for Computational Linguistics: NAACL 2024, pages 4318–4335, Mexico City, Mexico. Association for Computational Linguistics.
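
The abstract above proposes augmenting a transformer with a differentiable, stack-based attention mechanism. As a rough illustration of the general idea of a differentiable stack (not the paper's exact formulation; the function names, stack depth, and the push/pop/no-op action mix are assumptions for this sketch), a soft stack update can be written as a convex combination of the hard push, pop, and no-op outcomes:

```python
# Illustrative sketch only: a minimal differentiable ("soft") stack in NumPy.
# This is NOT the formulation from Li et al. (2024); it only shows the common
# trick of mixing the push/pop/no-op stack states with soft action weights.
import numpy as np

def softmax(x):
    e = np.exp(x - x.max())
    return e / e.sum()

def soft_stack_step(stack, action_logits, new_elem):
    """One soft stack update.

    stack:         (depth, d) array; row 0 is the top of the stack.
    action_logits: length-3 vector scoring (push, pop, no-op).
    new_elem:      (d,) vector that a hard push would place on top.
    """
    p_push, p_pop, p_noop = softmax(action_logits)

    # Hard outcomes: push shifts everything down and writes the new top;
    # pop shifts everything up and pads the bottom with zeros.
    pushed = np.vstack([new_elem, stack[:-1]])
    popped = np.vstack([stack[1:], np.zeros_like(stack[:1])])

    # The next stack is a convex combination of the three hard outcomes,
    # so the update stays differentiable in the action logits.
    return p_push * pushed + p_pop * popped + p_noop * stack

if __name__ == "__main__":
    d, depth = 4, 8
    stack = np.zeros((depth, d))
    stack = soft_stack_step(stack, np.array([2.0, -1.0, 0.0]), np.ones(d))
    print(stack[0])  # top row is dominated by the pushed vector
```

In a transformer, such a soft stack state could be read by an attention head at each position; see the paper itself for the actual mechanism and its analysis of which deterministic context-free languages it can model.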