@inproceedings{xue-etal-2025-deconstructing,
title = "Deconstructing Attention: Investigating Design Principles for Effective Language Modeling",
author = "Xue, Huiyin and
Moosavi, Nafise Sadat and
Aletras, Nikolaos",
editor = "Inui, Kentaro and
Sakti, Sakriani and
Wang, Haofen and
Wong, Derek F. and
Bhattacharyya, Pushpak and
Banerjee, Biplab and
Ekbal, Asif and
Chakraborty, Tanmoy and
Singh, Dhirendra Pratap",
booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-long.40/",
pages = "708--727",
ISBN = "979-8-89176-298-5",
abstract = "The success of Transformer language models is widely credited to their dot-product attention mechanism, which interweaves a set of key design principles: mixing information across positions (enabling multi-token interactions), sequence-dependent activations (where attention weights adapt to each input), a specific mathematical form (dot-product similarities plus softmax weighting), and coupling of queries and keys to evolving hidden states (grounding attention in the current layer). However, the necessity of each of these principles remains largely untested. In this work, we systematically deconstruct attention by designing controlled variants that selectively relax these principles, applied both uniformly across all layers and in hybrid architectures where only some layers retain standard attention. Our empirical analysis reveals that mechanisms for mixing tokens are indispensable, as their absence collapses models to near-random behavior, while the exact mathematical form and sequence dependency can be substantially relaxed, especially when preserved in just a subset of layers. Surprisingly, even variants that fail in isolation can achieve robust performance when interleaved with standard attention, highlighting a cooperative effect. These findings deepen our understanding of what truly underpins attention{'}s effectiveness and open new avenues for simplifying language models without sacrificing performance."
}