@inproceedings{cheng-etal-2024-learning,
  title     = {Learning Intrinsic Dimension via Information Bottleneck for Explainable Aspect-based Sentiment Analysis},
  author    = {Cheng, Zhenxiao and
               Zhou, Jie and
               Wu, Wen and
               Chen, Qin and
               He, Liang},
  editor    = {Calzolari, Nicoletta and
               Kan, Min-Yen and
               Hoste, Veronique and
               Lenci, Alessandro and
               Sakti, Sakriani and
               Xue, Nianwen},
  booktitle = {Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation ({LREC-COLING} 2024)},
  month     = may,
  year      = {2024},
  address   = {Torino, Italia},
  publisher = {ELRA and ICCL},
  url       = {https://preview.aclanthology.org/fix-sig-urls/2024.lrec-main.897/},
  pages     = {10274--10285},
  abstract  = {Gradient-based explanation methods are increasingly used to interpret neural models in natural language processing (NLP) due to their high fidelity. Such methods determine word-level importance using dimension-level gradient values through a norm function, often presuming equal significance for all gradient dimensions. However, in the context of Aspect-based Sentiment Analysis (ABSA), our preliminary research suggests that only specific dimensions are pertinent. To address this, we propose the Information Bottleneck-based Gradient (IBG) explanation framework for ABSA. This framework leverages an information bottleneck to refine word embeddings into a concise intrinsic dimension, maintaining essential features and omitting unrelated information. Comprehensive tests show that our IBG approach considerably improves both the models' performance and the explanations' clarity by identifying sentiment-aware features.},
}
@comment{Residue from the ACL Anthology page export ("Markdown (Informal)" citation widget), kept for reference:
Markdown (Informal)
[Learning Intrinsic Dimension via Information Bottleneck for Explainable Aspect-based Sentiment Analysis](https://preview.aclanthology.org/fix-sig-urls/2024.lrec-main.897/) (Cheng et al., LREC-COLING 2024)
ACL
}