@inproceedings{liang-surdeanu-2020-transformers,
title = "Do Transformers Dream of Inference, or Can Pretrained Generative Models Learn Implicit Inferential Rules?",
author = "Liang, Zhengzhong and
Surdeanu, Mihai",
editor = "Rogers, Anna and
Sedoc, Jo{\~a}o and
Rumshisky, Anna",
booktitle = "Proceedings of the First Workshop on Insights from Negative Results in NLP",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.insights-1.12/",
doi = "10.18653/v1/2020.insights-1.12",
pages = "76--81",
    abstract = "Large pretrained language models (LMs) have been used successfully for multi-hop question answering. However, most of these approaches are not interpretable, as they do not explicitly make the inference hops necessary to explain a candidate answer. In this work, we investigate the capability of a state-of-the-art transformer LM to generate explicit inference hops, i.e., to infer a new statement necessary to answer a question given some premise input statements. Our analysis shows that such LMs can generate new statements for some simple inference types, but performance remains poor for complex, real-world inference types such as those that require monotonicity, composition, and commonsense knowledge."
}