@inproceedings{katz-etal-2025-segment,
title = "Segment-Based Attention Masking for {GPT}s",
author = "Katz, Shahar and
Ringel, Liran and
Romano, Yaniv and
Wolf, Lior",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.947/",
pages = "19308--19322",
ISBN = "979-8-89176-251-0",
abstract = "Causal masking is a fundamental component in Generative Pre-Trained Transformer (GPT) models, playing a crucial role during training. Although GPTs can process the entire user prompt at once, the causal masking is applied to all input tokens step-by-step, mimicking the generation process. This imposes an unnecessary constraint during the initial ``prefill'' phase when the model processes the input prompt and generates the internal representations before producing any output tokens. In this work, attention is masked based on the known block structure at the prefill phase, followed by the conventional token-by-token autoregressive process after that. For example, in a typical chat prompt, the system prompt is treated as one block, and the user prompt as the next one. Each of these is treated as a unit for the purpose of masking, such that the first tokens in each block can access the subsequent tokens in a non-causal manner. Then, the model answer is generated in the conventional causal manner. The Segment-by-Segment scheme entails no additional computational overhead. When integrated using a lightweight fine-tuning into already trained models such as Llama and Qwen, MAS quickly increases models' performances."
}
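
As a reading aid, here is a minimal PyTorch sketch of the masking scheme the abstract describes: bidirectional attention inside each prompt segment during prefill, with causal attention across segments and conventional causal decoding afterwards. The helper name `segment_prefill_mask` and its signature are illustrative assumptions, not the paper's released code.

```python
import torch

def segment_prefill_mask(segment_lengths):
    """Prefill attention mask: True where attention is allowed.

    Tokens attend causally across segments, but bidirectionally
    (non-causally) within their own segment, e.g.
    segment_lengths = [len(system_prompt), len(user_prompt)].
    """
    T = sum(segment_lengths)
    # Start from the standard causal (lower-triangular) mask.
    mask = torch.tril(torch.ones(T, T)).bool()
    # Un-mask the upper triangle inside each segment block, so the
    # first tokens of a block can also see that block's later tokens.
    start = 0
    for length in segment_lengths:
        end = start + length
        mask[start:end, start:end] = True
        start = end
    return mask

# Example: a 3-token system prompt followed by a 4-token user prompt.
# Decoding after the prefill proceeds with the usual causal mask.
print(segment_prefill_mask([3, 4]).int())
```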