@article{liu-zhang-2017-shift,
  author    = {Liu, Jiangming and
               Zhang, Yue},
  editor    = {Lee, Lillian and
               Johnson, Mark and
               Toutanova, Kristina},
  title     = {Shift-Reduce Constituent Parsing with Neural Lookahead Features},
  journal   = {Transactions of the Association for Computational Linguistics},
  volume    = {5},
  pages     = {45--58},
  year      = {2017},
  address   = {Cambridge, MA},
  publisher = {MIT Press},
  url       = {https://aclanthology.org/Q17-1004/},
  doi       = {10.1162/tacl_a_00045},
  abstract  = {Transition-based models can be fast and accurate for constituent parsing. Compared with chart-based models, they leverage richer features by extracting history information from a parser stack, which consists of a sequence of non-local constituents. On the other hand, during incremental parsing, constituent information on the right hand side of the current word is not utilized, which is a relative weakness of shift-reduce parsing. To address this limitation, we leverage a fast neural model to extract lookahead features. In particular, we build a bidirectional LSTM model, which leverages full sentence information to predict the hierarchy of constituents that each word starts and ends. The results are then passed to a strong transition-based constituent parser as lookahead features. The resulting parser gives 1.3{\%} absolute improvement in WSJ and 2.3{\%} in CTB compared to the baseline, giving the highest reported accuracies for fully-supervised parsing.},
}
Markdown (Informal)
[Shift-Reduce Constituent Parsing with Neural Lookahead Features](https://aclanthology.org/Q17-1004/) (Liu & Zhang, TACL 2017)
ACL