@inproceedings{fernandez-astudillo-etal-2020-transition,
    title = "Transition-based Parsing with Stack-Transformers",
    author = "Fernandez Astudillo, Ram{\'o}n and
      Ballesteros, Miguel and
      Naseem, Tahira and
      Blodgett, Austin and
      Florian, Radu",
    booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2020",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.findings-emnlp.89",
    doi = "10.18653/v1/2020.findings-emnlp.89",
    pages = "1001--1007",
    abstract = "Modeling the parser state is key to good performance in transition-based parsing. Recurrent Neural Networks considerably improved the performance of transition-based systems by modelling the global state, e.g. stack-LSTM parsers, or local state modeling of contextualized features, e.g. Bi-LSTM parsers. Given the success of Transformer architectures in recent parsing systems, this work explores modifications of the sequence-to-sequence Transformer architecture to model either global or local parser states in transition-based parsing. We show that modifications of the cross attention mechanism of the Transformer considerably strengthen performance both on dependency and Abstract Meaning Representation (AMR) parsing tasks, particularly for smaller models or limited training data.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="fernandez-astudillo-etal-2020-transition">
    <titleInfo>
      <title>Transition-based Parsing with Stack-Transformers</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Ramón</namePart>
      <namePart type="family">Fernandez Astudillo</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Miguel</namePart>
      <namePart type="family">Ballesteros</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Tahira</namePart>
      <namePart type="family">Naseem</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Austin</namePart>
      <namePart type="family">Blodgett</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Radu</namePart>
      <namePart type="family">Florian</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2020-nov</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Findings of the Association for Computational Linguistics: EMNLP 2020</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Modeling the parser state is key to good performance in transition-based parsing. Recurrent Neural Networks considerably improved the performance of transition-based systems by modelling the global state, e.g. stack-LSTM parsers, or local state modeling of contextualized features, e.g. Bi-LSTM parsers. Given the success of Transformer architectures in recent parsing systems, this work explores modifications of the sequence-to-sequence Transformer architecture to model either global or local parser states in transition-based parsing. We show that modifications of the cross attention mechanism of the Transformer considerably strengthen performance both on dependency and Abstract Meaning Representation (AMR) parsing tasks, particularly for smaller models or limited training data.</abstract>
    <identifier type="citekey">fernandez-astudillo-etal-2020-transition</identifier>
    <identifier type="doi">10.18653/v1/2020.findings-emnlp.89</identifier>
    <location>
      <url>https://aclanthology.org/2020.findings-emnlp.89</url>
    </location>
    <part>
      <date>2020-nov</date>
      <extent unit="page">
        <start>1001</start>
        <end>1007</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T Transition-based Parsing with Stack-Transformers
%A Fernandez Astudillo, Ramón
%A Ballesteros, Miguel
%A Naseem, Tahira
%A Blodgett, Austin
%A Florian, Radu
%S Findings of the Association for Computational Linguistics: EMNLP 2020
%D 2020
%8 nov
%I Association for Computational Linguistics
%C Online
%F fernandez-astudillo-etal-2020-transition
%X Modeling the parser state is key to good performance in transition-based parsing. Recurrent Neural Networks considerably improved the performance of transition-based systems by modelling the global state, e.g. stack-LSTM parsers, or local state modeling of contextualized features, e.g. Bi-LSTM parsers. Given the success of Transformer architectures in recent parsing systems, this work explores modifications of the sequence-to-sequence Transformer architecture to model either global or local parser states in transition-based parsing. We show that modifications of the cross attention mechanism of the Transformer considerably strengthen performance both on dependency and Abstract Meaning Representation (AMR) parsing tasks, particularly for smaller models or limited training data.
%R 10.18653/v1/2020.findings-emnlp.89
%U https://aclanthology.org/2020.findings-emnlp.89
%U https://doi.org/10.18653/v1/2020.findings-emnlp.89
%P 1001-1007
Markdown (Informal)
[Transition-based Parsing with Stack-Transformers](https://aclanthology.org/2020.findings-emnlp.89) (Fernandez Astudillo et al., Findings 2020)
ACL
Ramón Fernandez Astudillo, Miguel Ballesteros, Tahira Naseem, Austin Blodgett, and Radu Florian. 2020. Transition-based Parsing with Stack-Transformers. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1001–1007, Online. Association for Computational Linguistics.
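
For readers who only see this metadata page: the "modifications of the cross attention mechanism" named in the abstract amount to specializing some decoder cross-attention heads so that, at each decoding step, they attend only to source tokens currently on the parser's stack or only to tokens still in the buffer. The PyTorch sketch below is a minimal illustration of that idea, not the authors' released implementation; the function name, head assignment, and mask shapes are assumptions made for this example.

```python
# Illustrative sketch only (assumptions, not the paper's code): mask two
# cross-attention heads so one sees only stack positions and one only buffer
# positions, leaving the remaining heads unconstrained.
import torch
import torch.nn.functional as F

def stack_buffer_cross_attention(query, memory, stack_mask, buffer_mask, num_heads=4):
    """query: (tgt_len, d_model) decoder states for one sentence.
    memory: (src_len, d_model) encoder states.
    stack_mask / buffer_mask: (tgt_len, src_len) booleans, True where the source
    token is on the stack (resp. in the buffer) at that decoding step."""
    tgt_len, d_model = query.shape
    src_len = memory.shape[0]
    d_head = d_model // num_heads

    q = query.view(tgt_len, num_heads, d_head).transpose(0, 1)   # (H, tgt, d_head)
    k = memory.view(src_len, num_heads, d_head).transpose(0, 1)  # (H, src, d_head)
    v = k  # keep the sketch simple: shared key/value projections

    scores = q @ k.transpose(-2, -1) / d_head ** 0.5             # (H, tgt, src)

    # Head 0 sees only stack tokens, head 1 only buffer tokens; the rest are free.
    neg_inf = torch.finfo(scores.dtype).min
    scores[0] = scores[0].masked_fill(~stack_mask, neg_inf)
    scores[1] = scores[1].masked_fill(~buffer_mask, neg_inf)

    attn = F.softmax(scores, dim=-1)
    out = attn @ v                                                # (H, tgt, d_head)
    return out.transpose(0, 1).reshape(tgt_len, d_model)
```

Masking the attention scores before the softmax (rather than zeroing weights afterwards) keeps each specialized head a proper probability distribution over its allowed positions.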