@inproceedings{du-etal-2025-sentiment,
  title     = {Sentiment Analysis on Video Transcripts: Comparing the Value of Textual and Multimodal Annotations},
  author    = {Du, Quanqi and
               De Langhe, Loic and
               Lefever, Els and
               Hoste, Veronique},
  editor    = {Bak, JinYeong and
               van der Goot, Rob and
               Jang, Hyeju and
               Buaphet, Weerayut and
               Ramponi, Alan and
               Xu, Wei and
               Ritter, Alan},
  booktitle = {Proceedings of the Tenth Workshop on Noisy and User-generated Text},
  month     = may,
  year      = {2025},
  address   = {Albuquerque, New Mexico, USA},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.wnut-1.2/},
  pages     = {10--15},
  isbn      = {979-8-89176-232-9},
  abstract  = {This study explores the differences between textual and multimodal sentiment annotations on videos and their impact on transcript-based sentiment modelling. Using the {UniC} and {CH-SIMS} datasets which are annotated at both the unimodal and multimodal level, we conducted a statistical analysis and sentiment modelling experiments. Results reveal significant differences between the two annotation types, with textual annotations yielding better performance in sentiment modelling and demonstrating superior generalization ability. These findings highlight the challenges of cross-modality generalization and provide insights for advancing sentiment analysis.},
}
@comment{
Markdown (Informal):
[Sentiment Analysis on Video Transcripts: Comparing the Value of Textual and Multimodal Annotations](https://aclanthology.org/2025.wnut-1.2/) (Du et al., WNUT 2025)
ACL
}