@inproceedings{yu-gildea-2022-sequence,
title = "Sequence-to-sequence {AMR} Parsing with Ancestor Information",
author = "Yu, Chen and
Gildea, Daniel",
editor = "Muresan, Smaranda and
Nakov, Preslav and
Villavicencio, Aline",
booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2022.acl-short.63/",
doi = "10.18653/v1/2022.acl-short.63",
pages = "571--577",
abstract = "AMR parsing is the task that maps a sentence to an AMR semantic graph automatically. The difficulty comes from generating the complex graph structure. The previous state-of-the-art method translates the AMR graph into a sequence, then directly fine-tunes a pretrained sequence-to-sequence Transformer model (BART). However, purely treating the graph as a sequence does not take advantage of structural information about the graph. In this paper, we design several strategies to add the important \textit{ancestor information} into the Transformer Decoder. Our experiments show that we can improve the performance for both AMR 2.0 and AMR 3.0 dataset and achieve new state-of-the-art results."
}