@inproceedings{strout-etal-2019-human,
title = "Do Human Rationales Improve Machine Explanations?",
author = "Strout, Julia and
Zhang, Ye and
Mooney, Raymond",
editor = "Linzen, Tal and
Chrupa{\l}a, Grzegorz and
Belinkov, Yonatan and
Hupkes, Dieuwke",
booktitle = "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP",
month = aug,
year = "2019",
address = "Florence, Italy",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/W19-4807/",
doi = "10.18653/v1/W19-4807",
pages = "56--62",
    abstract = "Work on {\textquotedblleft}learning with rationales{\textquotedblright} shows that humans providing explanations to a machine learning system can improve the system's predictive accuracy. However, this work has not been connected to work in {\textquotedblleft}explainable AI{\textquotedblright} which concerns machines explaining their reasoning to humans. In this work, we show that learning with rationales can also improve the quality of the machine's explanations as evaluated by human judges. Specifically, we present experiments showing that, for CNN-based text classification, explanations generated using {\textquotedblleft}supervised attention{\textquotedblright} are judged superior to explanations generated using normal unsupervised attention."
}