@inproceedings{li-etal-2022-emocaps,
  title     = {{E}mo{C}aps: Emotion Capsule based Model for Conversational Emotion Recognition},
  author    = {Li, Zaijing and
               Tang, Fengxiao and
               Zhao, Ming and
               Zhu, Yusen},
  editor    = {Muresan, Smaranda and
               Nakov, Preslav and
               Villavicencio, Aline},
  booktitle = {Findings of the Association for Computational Linguistics: ACL 2022},
  month     = may,
  year      = {2022},
  address   = {Dublin, Ireland},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.findings-acl.126/},
  doi       = {10.18653/v1/2022.findings-acl.126},
  pages     = {1610--1618},
  abstract  = {Emotion recognition in conversation (ERC) aims to analyze the speaker's state and identify their emotion in the conversation. Recent works in ERC focus on context modeling but ignore the representation of contextual emotional tendency. In order to extract multi-modal information and the emotional tendency of the utterance effectively, we propose a new structure named Emoformer to extract multi-modal emotion vectors from different modalities and fuse them with sentence vector to be an emotion capsule. Furthermore, we design an end-to-end ERC model called EmoCaps, which extracts emotion vectors through the Emoformer structure and obtain the emotion classification results from a context analysis model. Through the experiments with two benchmark datasets, our model shows better performance than the existing state-of-the-art models.},
}
Markdown (Informal)
[EmoCaps: Emotion Capsule based Model for Conversational Emotion Recognition](https://aclanthology.org/2022.findings-acl.126/) (Li et al., Findings of ACL 2022)
ACL