@inproceedings{qiu-etal-2023-topic,
title = "Topic and Style-aware Transformer for Multimodal Emotion Recognition",
author = "Qiu, Shuwen and
Sekhar, Nitesh and
Singhal, Prateek",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-acl.130/",
doi = "10.18653/v1/2023.findings-acl.130",
pages = "2074--2082",
    abstract = "Understanding emotional expressions in multimodal signals is key to machines' understanding of human communication. While the language, visual, and acoustic modalities each provide clues from different perspectives, the visual modality has been shown to contribute minimally to performance in emotion recognition due to its high dimensionality. We therefore first leverage the strong multimodal backbone VATT to project the visual signal into a common space with the language and acoustic signals. On top of it, we propose content-oriented features, Topic and Speaking Style, to address subjectivity issues. Experiments conducted on the benchmark dataset MOSEI show that our model outperforms SOTA results, effectively incorporates visual signals, and handles subjectivity issues by serving as content {\textquotedblleft}normalization{\textquotedblright}."
}