@inproceedings{ji-etal-2023-improving,
title = "Improving Span Representation by Efficient Span-Level Attention",
author = "Ji, Pengyu and
Yang, Songlin and
Tu, Kewei",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.findings-emnlp.747/",
doi = "10.18653/v1/2023.findings-emnlp.747",
pages = "11184--11192",
abstract = "High-quality span representations are crucial to natural language processing tasks involving span prediction and classification. Most existing methods derive a span representation by aggregation of token representations within the span. In contrast, we aim to improve span representations by considering span-span interactions as well as more comprehensive span-token interactions. Specifically, we introduce layers of span-level attention on top of a normal token-level transformer encoder. Given that attention between all span pairs results in $O(n^4)$ complexity ($n$ being the sentence length) and not all span interactions are intuitively meaningful, we restrict the range of spans that a given span could attend to, thereby reducing overall complexity to $O(n^3)$. We conduct experiments on various span-related tasks and show superior performance of our model surpassing baseline models. Our code is publicly available at \url{https://github.com/jipy0222/Span-Level-Attention}."
}
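A note on the complexity figures in the abstract (a reading of the abstract, not additional detail from the paper): a sentence of length $n$ has $n(n+1)/2 = O(n^2)$ spans, so attention between all span pairs involves $O(n^2) \cdot O(n^2) = O(n^4)$ span pairs, whereas restricting each span to an $O(n)$-sized candidate set of spans gives $O(n^2) \cdot O(n) = O(n^3)$.

The following is a minimal, illustrative Python sketch of that idea, not the authors' implementation (their actual code is at the repository linked in the abstract). The restriction used here, letting a span attend only to spans that share one of its endpoints, is an assumption made for illustration; the abstract does not specify the paper's exact choice of attendable spans.

import torch
import torch.nn.functional as F

def restricted_span_attention(token_reprs: torch.Tensor) -> torch.Tensor:
    """token_reprs: (n, d) outputs of a token-level encoder; returns (num_spans, d)."""
    n, d = token_reprs.shape
    spans = [(i, j) for i in range(n) for j in range(i, n)]          # O(n^2) spans
    index = {s: k for k, s in enumerate(spans)}
    # Span representation by aggregation (mean pooling) of the tokens inside each span.
    span_reprs = torch.stack([token_reprs[i:j + 1].mean(dim=0) for (i, j) in spans])

    q_proj, k_proj, v_proj = (torch.nn.Linear(d, d) for _ in range(3))
    q, k, v = q_proj(span_reprs), k_proj(span_reprs), v_proj(span_reprs)

    outputs = []
    for qi, (i, j) in enumerate(spans):
        # Restricted candidate set (assumed): spans sharing an endpoint with (i, j),
        # at most 2n - 1 of them, instead of all O(n^2) spans.
        cand = sorted({index[(i, b)] for b in range(i, n)} | {index[(a, j)] for a in range(j + 1)})
        cand = torch.tensor(cand)
        scores = (k[cand] @ q[qi]) / d ** 0.5
        outputs.append(F.softmax(scores, dim=0) @ v[cand])
    return torch.stack(outputs)

# Example: 6 tokens with 16-dim representations -> 21 spans, each attending to at most 11 others.
print(restricted_span_attention(torch.randn(6, 16)).shape)

With such a restriction each of the $O(n^2)$ spans attends to only $O(n)$ candidates, which is where the $O(n^3)$ overall complexity stated in the abstract comes from.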