@inproceedings{li-etal-2025-hierarchical-attention,
title = "Hierarchical Attention Adapter for Abstractive Dialogue Summarization",
author = "Li, Raymond and
Li, Chuyuan and
Murray, Gabriel and
Carenini, Giuseppe",
editor = "Dong, Yue and
Xiao, Wen and
Zhang, Haopeng and
Zhang, Rui and
Ernst, Ori and
Wang, Lu and
Liu, Fei",
booktitle = "Proceedings of The 5th New Frontiers in Summarization Workshop",
month = nov,
year = "2025",
address = "Hybrid",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.newsum-main.2/",
pages = "17--30",
ISBN = "979-8-89176-337-1",
abstract = "Dialogue summarization is still a very challenging task even for large language models (LLMs). On the one hand, some previous approaches have pre-trained language models specifically for dialogue understanding and summarization, but they have been limited to relatively small models. On the other hand, other works have tried to directly exploit the dialogue semantics and discourse structures in their modeling effort, but by construction, they require access to those structures, which is in itself a largely unsolved problem. In this paper, we synergistically combine these two ideas in an approach that can be seamlessly integrated into the decoder-only architecture adopted by the most state-of-the-art LLMs. In particular, our novel solution leverages the parameter-efficient fine-tuning (PEFT) paradigm to model the hierarchical structure of dialogues, where input sequences are naturally segmented into dialogue turns, and then fine-tune the model for abstractive summarization. From experiments on two datasets, we find that Hierarchical Attention Adapter outperforms all baseline adapter methods on SummScreen, where our approach can also be combined with LoRA to achieve the best performance on SamSum."
}
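
The abstract describes a parameter-efficient adapter that models the turn-level structure of dialogues and can be combined with LoRA. Below is a minimal, hypothetical PyTorch sketch of that general idea: a bottleneck adapter that pools token states into per-turn representations, attends over the turns, and adds the result back to the token stream. The module name, pooling scheme, and dimensions are illustrative assumptions, not the authors' implementation.

```python
# A minimal, hypothetical sketch of the idea in the abstract: a bottleneck
# adapter that models dialogue structure by pooling token states into per-turn
# representations, attending over the turns, and adding the result back to the
# token stream. Names, dimensions, and the pooling scheme are assumptions for
# illustration only, not the authors' implementation.
import torch
import torch.nn as nn


class HierarchicalAttentionAdapter(nn.Module):
    """Bottleneck adapter with an extra attention step over dialogue turns."""

    def __init__(self, d_model: int, bottleneck: int = 64, n_heads: int = 4):
        super().__init__()
        self.down = nn.Linear(d_model, bottleneck)  # standard adapter down-projection
        self.turn_attn = nn.MultiheadAttention(bottleneck, n_heads, batch_first=True)
        self.up = nn.Linear(bottleneck, d_model)    # up-projection back to model width
        self.act = nn.GELU()

    def forward(self, hidden: torch.Tensor, turn_ids: torch.Tensor) -> torch.Tensor:
        # hidden:   (B, T, d_model) token states from a frozen decoder layer
        # turn_ids: (B, T) integer id of the dialogue turn each token belongs to
        x = self.act(self.down(hidden))                                 # (B, T, b)
        n_turns = int(turn_ids.max().item()) + 1
        one_hot = nn.functional.one_hot(turn_ids, n_turns).to(x.dtype)  # (B, T, N)

        # Mean-pool token states into one vector per dialogue turn.
        counts = one_hot.sum(dim=1).clamp(min=1).unsqueeze(-1)          # (B, N, 1)
        turns = torch.einsum("btn,btd->bnd", one_hot, x) / counts       # (B, N, b)

        # Attend over turn representations to capture cross-turn structure.
        turn_ctx, _ = self.turn_attn(turns, turns, turns)               # (B, N, b)

        # Broadcast each turn's context back to its tokens and apply the residual.
        token_ctx = torch.einsum("btn,bnd->btd", one_hot, turn_ctx)     # (B, T, b)
        return hidden + self.up(x + token_ctx)                          # (B, T, d_model)


if __name__ == "__main__":
    adapter = HierarchicalAttentionAdapter(d_model=768)
    hidden = torch.randn(2, 10, 768)
    turn_ids = torch.tensor([[0, 0, 0, 1, 1, 2, 2, 2, 3, 3],
                             [0, 0, 1, 1, 1, 1, 2, 2, 2, 2]])
    print(adapter(hidden, turn_ids).shape)  # torch.Size([2, 10, 768])
```

In this sketch only the adapter parameters would be trained while the backbone LLM stays frozen, consistent with the PEFT setting the abstract describes.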