@inproceedings{betz-richardson-2022-deepa2,
    title     = {{DeepA2}: A Modular Framework for Deep Argument Analysis with Pretrained Neural {Text2Text} Language Models},
    author    = {Betz, Gregor and
                 Richardson, Kyle},
    booktitle = {Proceedings of the 11th Joint Conference on Lexical and Computational Semantics},
    month     = jul,
    year      = {2022},
    address   = {Seattle, Washington},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2022.starsem-1.2},
    doi       = {10.18653/v1/2022.starsem-1.2},
    pages     = {12--27},
    abstract  = {In this paper, we present and implement a multi-dimensional, modular framework for performing deep argument analysis (DeepA2) using current pre-trained language models (PTLMs). ArgumentAnalyst {--} a T5 model [Raffel et al. 2020] set up and trained within DeepA2 {--} reconstructs argumentative texts, which advance an informal argumentation, as valid arguments: It inserts, e.g., missing premises and conclusions, formalizes inferences, and coherently links the logical reconstruction to the source text. We create a synthetic corpus for deep argument analysis, and evaluate ArgumentAnalyst on this new dataset as well as on existing data, specifically EntailmentBank [Dalvi et al. 2021]. Our empirical findings vindicate the overall framework and highlight the advantages of a modular design, in particular its ability to emulate established heuristics (such as hermeneutic cycles), to explore the model{'}s uncertainty, to cope with the plurality of correct solutions (underdetermination), and to exploit higher-order evidence.},
}
@comment{
Markdown (Informal)
[DeepA2: A Modular Framework for Deep Argument Analysis with Pretrained Neural Text2Text Language Models](https://aclanthology.org/2022.starsem-1.2) (Betz & Richardson, *SEM 2022)
ACL
}