@inproceedings{yoshida-etal-2021-modeling,
title = "Modeling Human Sentence Processing with Left-Corner Recurrent Neural Network Grammars",
author = "Yoshida, Ryo and
Noji, Hiroshi and
Oseki, Yohei",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2021.emnlp-main.235/",
doi = "10.18653/v1/2021.emnlp-main.235",
pages = "2964--2973",
abstract = "In computational linguistics, it has been shown that hierarchical structures make language models (LMs) more human-like. However, the previous literature has been agnostic about a parsing strategy of the hierarchical models. In this paper, we investigated whether hierarchical structures make LMs more human-like, and if so, which parsing strategy is most cognitively plausible. In order to address this question, we evaluated three LMs against human reading times in Japanese with head-final left-branching structures: Long Short-Term Memory (LSTM) as a sequential model and Recurrent Neural Network Grammars (RNNGs) with top-down and left-corner parsing strategies as hierarchical models. Our computational modeling demonstrated that left-corner RNNGs outperformed top-down RNNGs and LSTM, suggesting that hierarchical and left-corner architectures are more cognitively plausible than top-down or sequential architectures. In addition, the relationships between the cognitive plausibility and (i) perplexity, (ii) parsing, and (iii) beam size will also be discussed."
}
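The abstract presumes the standard surprisal paradigm for evaluating LMs against reading times: each model assigns a per-word surprisal, -log2 p(word | context), which is then regressed against human reading times (for RNNGs, surprisal is typically estimated with word-synchronous beam search, hence the abstract's mention of beam size). Below is a minimal, self-contained sketch of that paradigm, not the authors' code: a toy bigram model stands in for the LSTM/RNNG, the reading times are hypothetical illustrative values, and a plain Pearson correlation stands in for the mixed-effects regression used in this literature.

```python
# Sketch of the surprisal paradigm (illustrative, not the paper's code).
import math
from collections import Counter

class BigramLM:
    """Tiny add-one-smoothed bigram LM standing in for an LSTM or RNNG."""
    def __init__(self, corpus):
        self.vocab = {w for sent in corpus for w in sent} | {"<s>"}
        self.bigrams, self.contexts = Counter(), Counter()
        for sent in corpus:
            prev = "<s>"
            for w in sent:
                self.bigrams[(prev, w)] += 1
                self.contexts[prev] += 1
                prev = w

    def logprob(self, prev, w):
        # Add-one smoothing so unseen bigrams get nonzero probability.
        num = self.bigrams[(prev, w)] + 1
        den = self.contexts[prev] + len(self.vocab)
        return math.log2(num / den)

def surprisals(lm, sentence):
    """Per-word surprisal: -log2 p(w_i | context)."""
    out, prev = [], "<s>"
    for w in sentence:
        out.append(-lm.logprob(prev, w))
        prev = w
    return out

def pearson(xs, ys):
    n = len(xs)
    mx, my = sum(xs) / n, sum(ys) / n
    cov = sum((x - mx) * (y - my) for x, y in zip(xs, ys))
    sx = math.sqrt(sum((x - mx) ** 2 for x in xs))
    sy = math.sqrt(sum((y - my) ** 2 for y in ys))
    return cov / (sx * sy)

corpus = [["the", "dog", "barked"], ["the", "cat", "sat"]]
lm = BigramLM(corpus)
sent = ["the", "dog", "sat"]
s = surprisals(lm, sent)
# Hypothetical per-word reading times (ms); a real study would use an
# eye-tracking or self-paced-reading corpus instead.
rts = [310.0, 350.0, 420.0]
print("surprisal:", [round(v, 2) for v in s])
print("correlation with RTs:", round(pearson(s, rts), 3))
```

A model whose surprisal estimates track reading times more closely (here, a higher correlation; in the paper, better regression fit) is taken to be more cognitively plausible, which is the sense in which the abstract compares the LSTM and the two RNNG variants.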