@inproceedings{yao-etal-2022-modal,
title = "Modal Dependency Parsing via Language Model Priming",
author = "Yao, Jiarui and
Xue, Nianwen and
Min, Bonan",
editor = "Carpuat, Marine and
de Marneffe, Marie-Catherine and
Meza Ruiz, Ivan Vladimir",
booktitle = "Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.naacl-main.211/",
doi = "10.18653/v1/2022.naacl-main.211",
pages = "2913--2919",
abstract = "The task of modal dependency parsing aims to parse a text into its modal dependency structure, which is a representation for the factuality of events in the text. We design a modal dependency parser that is based on priming pre-trained language models, and evaluate the parser on two data sets. Compared to baselines, we show an improvement of 2.6{\%} in F-score for English and 4.6{\%} for Chinese. To the best of our knowledge, this is also the first work on Chinese modal dependency parsing."
}
Markdown (Informal):
[Modal Dependency Parsing via Language Model Priming](https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.naacl-main.211/) (Yao et al., NAACL 2022)

ACL:
Jiarui Yao, Nianwen Xue, and Bonan Min. 2022. Modal Dependency Parsing via Language Model Priming. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2913–2919, Seattle, United States. Association for Computational Linguistics.