@inproceedings{zhu-etal-2020-examining,
title = "Examining the rhetorical capacities of neural language models",
author = "Zhu, Zining and
Pan, Chuer and
Abdalla, Mohamed and
Rudzicz, Frank",
editor = "Alishahi, Afra and
Belinkov, Yonatan and
Chrupa{\l}a, Grzegorz and
Hupkes, Dieuwke and
Pinter, Yuval and
Sajjad, Hassan",
booktitle = "Proceedings of the Third BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2020.blackboxnlp-1.3/",
doi = "10.18653/v1/2020.blackboxnlp-1.3",
pages = "16--32",
abstract = "Recently, neural language models (LMs) have demonstrated impressive abilities in generating high-quality discourse. While many recent papers have analyzed the syntactic aspects encoded in LMs, there has been no analysis to date of the inter-sentential, rhetorical knowledge. In this paper, we propose a method that quantitatively evaluates the rhetorical capacities of neural LMs. We examine the capacities of neural LMs understanding the rhetoric of discourse by evaluating their abilities to encode a set of linguistic features derived from Rhetorical Structure Theory (RST). Our experiments show that BERT-based LMs outperform other Transformer LMs, revealing the richer discourse knowledge in their intermediate layer representations. In addition, GPT-2 and XLNet apparently encode less rhetorical knowledge, and we suggest an explanation drawing from linguistic philosophy. Our method shows an avenue towards quantifying the rhetorical capacities of neural LMs."
}