@inproceedings{xiao-etal-2024-vanessa,
title = "Vanessa: Visual Connotation and Aesthetic Attributes Understanding Network for Multimodal Aspect-based Sentiment Analysis",
author = "Xiao, Luwei and
Mao, Rui and
Zhang, Xulang and
He, Liang and
Cambria, Erik",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2024.findings-emnlp.671/",
doi = "10.18653/v1/2024.findings-emnlp.671",
pages = "11486--11500",
abstract = "Prevailing research concentrates on superficial features or descriptions of images, revealing a significant gap in the systematic exploration of their connotative and aesthetic attributes. Furthermore, the use of cross-modal relation detection modules to eliminate noise from comprehensive image representations leads to the omission of subtle contextual information. In this paper, we present a Visual Connotation and Aesthetic Attributes Understanding Network (Vanessa) for Multimodal Aspect-based Sentiment Analysis. Concretely, Vanessa incorporates a Multi-Aesthetic Attributes Aggregation (MA3) module that models intra- and inter-dependencies among bi-modal representations as well as emotion-laden aesthetic attributes. Moreover, we devise a self-supervised contrastive learning framework to explore the pairwise relevance between images and text via the Gaussian distribution of their CLIP scores. By dynamically clustering and merging multi-modal tokens, Vanessa effectively captures both implicit and explicit sentimental cues. Extensive experiments on widely adopted two benchmarks verify Vanessa`s effectiveness."
}