@inproceedings{amini-kosseim-2022-un,
title = "How (Un)Faithful is Attention?",
author = "Amini, Hessam and
Kosseim, Leila",
editor = "Bastings, Jasmijn and
Belinkov, Yonatan and
Elazar, Yanai and
Hupkes, Dieuwke and
Saphra, Naomi and
Wiegreffe, Sarah",
booktitle = "Proceedings of the Fifth BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates (Hybrid)",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.blackboxnlp-1.10/",
doi = "10.18653/v1/2022.blackboxnlp-1.10",
pages = "119--130",
abstract = "Although attention weights have been commonly used as a means to provide explanations for deep learning models, the approach has been widely criticized due to its lack of faithfulness. In this work, we present a simple approach to compute the newly proposed metric AtteFa, which can quantitatively represent the degree of faithfulness of the attention weights. Using this metric, we further validate the effect of the frequency of informative input elements and the use of contextual vs. non-contextual encoders on the faithfulness of the attention mechanism. Finally, we apply the approach on several real-life binary classification datasets to measure the faithfulness of attention weights in real-life settings."
}
Markdown (Informal)
[How (Un)Faithful is Attention?](https://aclanthology.org/2022.blackboxnlp-1.10/) (Amini & Kosseim, BlackboxNLP 2022)
ACL
Hessam Amini and Leila Kosseim. 2022. How (Un)Faithful is Attention?. In Proceedings of the Fifth BlackboxNLP Workshop on Analyzing and Interpreting Neural Networks for NLP, pages 119–130, Abu Dhabi, United Arab Emirates (Hybrid). Association for Computational Linguistics.