@inproceedings{chun-xue-2025-modal,
title = "Modal Dependency Parsing via Biaffine Attention with Self-Loop",
author = "Chun, Jayeol and
Xue, Nianwen",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.findings-acl.1093/",
pages = "21226--21238",
ISBN = "979-8-89176-256-5",
abstract = "A modal dependency structure represents a web of connections between events and sources of information in a document that allows for tracing of who-said-what with what levels of certainty, thereby establishing factuality in an event-centric approach. Obtaining such graphs defines the task of modal dependency parsing, which involves event and source identification along with the modal relations between them. In this paper, we propose a simple yet effective solution based on biaffine attention that specifically optimizes against the domain-specific challenges of modal dependency parsing by integrating self-loop. We show that our approach, when coupled with data augmentation by leveraging the Large Language Models to translate annotations from one language to another, outperforms the previous state-of-the-art on English and Chinese datasets by 2{\%} and 4{\%} respectively."
}
Markdown (Informal)
[Modal Dependency Parsing via Biaffine Attention with Self-Loop](https://preview.aclanthology.org/landing_page/2025.findings-acl.1093/) (Chun & Xue, Findings 2025)
ACL
Jayeol Chun and Nianwen Xue. 2025. Modal Dependency Parsing via Biaffine Attention with Self-Loop. In Findings of the Association for Computational Linguistics: ACL 2025, pages 21226–21238, Vienna, Austria. Association for Computational Linguistics.
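
For readers who want a concrete picture of the mechanism named in the abstract, below is a minimal, illustrative sketch (not the authors' code) of a biaffine arc scorer in PyTorch in which the diagonal of the score matrix is left unmasked, so a token may select itself as its head. The class name BiaffineArcScorer, the arc_dim parameter, and all other identifiers are assumptions introduced for this example.

```python
# Illustrative sketch only, not the authors' implementation: a biaffine
# arc scorer in PyTorch where self-loops (a token selecting itself as
# its head) are kept as valid candidate arcs instead of being masked.
import torch
import torch.nn as nn


class BiaffineArcScorer(nn.Module):
    """Scores every (dependent, head) pair, including i == i self-loops."""

    def __init__(self, hidden_dim: int, arc_dim: int = 256):
        super().__init__()
        self.head_mlp = nn.Sequential(nn.Linear(hidden_dim, arc_dim), nn.ReLU())
        self.dep_mlp = nn.Sequential(nn.Linear(hidden_dim, arc_dim), nn.ReLU())
        # Biaffine weight; the extra +1 column on the dependent side acts
        # as a per-head bias term.
        self.weight = nn.Parameter(torch.empty(arc_dim + 1, arc_dim))
        nn.init.xavier_uniform_(self.weight)

    def forward(self, hidden: torch.Tensor) -> torch.Tensor:
        # hidden: (batch, seq_len, hidden_dim) contextual encoder states
        head = self.head_mlp(hidden)                    # (B, N, d)
        dep = self.dep_mlp(hidden)                      # (B, N, d)
        ones = dep.new_ones(dep.shape[:-1] + (1,))
        dep = torch.cat([dep, ones], dim=-1)            # (B, N, d+1)
        # scores[b, i, j] = score of token j being the head of token i.
        scores = dep @ self.weight @ head.transpose(1, 2)  # (B, N, N)
        # The diagonal (i == j) is deliberately left unmasked so that a
        # token may attach to itself, i.e. the self-loop option.
        return scores


# Toy usage: 2 sequences of 5 tokens with 768-dim encoder states.
if __name__ == "__main__":
    scorer = BiaffineArcScorer(hidden_dim=768)
    states = torch.randn(2, 5, 768)
    arc_scores = scorer(states)
    print(arc_scores.shape)  # torch.Size([2, 5, 5])
```

A syntactic dependency parser would normally mask the diagonal; leaving it available is one plausible reading of the paper's "self-loop", letting tokens that attach to no other source or event be modeled without a separate class.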