@inproceedings{hu-etal-2023-supervised,
title = "Supervised Adversarial Contrastive Learning for Emotion Recognition in Conversations",
author = "Hu, Dou and
Bao, Yinan and
Wei, Lingwei and
Zhou, Wei and
Hu, Songlin",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.acl-long.606/",
doi = "10.18653/v1/2023.acl-long.606",
pages = "10835--10852",
    abstract = "Extracting generalized and robust representations is a major challenge in emotion recognition in conversations (ERC). To address this, we propose a supervised adversarial contrastive learning (SACL) framework for learning class-spread structured representations in a supervised manner. SACL applies contrast-aware adversarial training to generate worst-case samples and uses joint class-spread contrastive learning to extract structured representations. It can effectively utilize label-level feature consistency and retain fine-grained intra-class features. To avoid the negative impact of adversarial perturbations on context-dependent data, we design a contextual adversarial training (CAT) strategy to learn more diverse features from context and enhance the model's context robustness. Under the framework with CAT, we develop a sequence-based SACL-LSTM to learn label-consistent and context-robust features for ERC. Experiments on three datasets show that SACL-LSTM achieves state-of-the-art performance on ERC. Extended experiments prove the effectiveness of SACL and CAT."
}
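The abstract's core recipe, supervised contrastive learning trained jointly on adversarially generated worst-case samples, can be illustrated compactly. The sketch below is an assumption-laden illustration, not the authors' released SACL-LSTM code: the function names `sup_contrastive_loss` and `contrast_aware_adv_loss`, the `encoder` interface, and `eps` are hypothetical; the attack is a single FGSM-style step on embedding-space inputs; and the paper's joint class-spread objective and contextual adversarial training (CAT) strategy are not reproduced here.

```python
# Minimal sketch of two ingredients named in the abstract: a supervised
# contrastive loss over utterance representations, and "contrast-aware"
# adversarial training that perturbs inputs along the gradient of that loss.
# Names, shapes, and the single-step FGSM attack are assumptions.
import torch
import torch.nn.functional as F


def sup_contrastive_loss(feats, labels, temperature=0.1):
    """Supervised contrastive loss: same-label pairs are positives."""
    feats = F.normalize(feats, dim=1)                      # (N, D), unit norm
    sim = feats @ feats.t() / temperature                  # (N, N) similarities
    n = feats.size(0)
    self_mask = torch.eye(n, dtype=torch.bool, device=feats.device)
    pos_mask = labels.unsqueeze(0).eq(labels.unsqueeze(1)) & ~self_mask
    sim = sim.masked_fill(self_mask, -1e9)                 # exclude self-pairs
    log_prob = sim - torch.logsumexp(sim, dim=1, keepdim=True)
    pos_counts = pos_mask.sum(1)
    valid = pos_counts > 0                                 # anchors with >=1 positive
    loss = -(log_prob * pos_mask.float()).sum(1)[valid] / pos_counts[valid]
    return loss.mean()


def contrast_aware_adv_loss(encoder, x, labels, eps=1e-2):
    """Clean contrastive loss plus loss on a one-step worst-case view of x.

    `encoder` maps embedding-space inputs x (N, ...) to features (N, D);
    the perturbation ascends the contrastive loss (FGSM-style).
    """
    x_adv = x.clone().detach().requires_grad_(True)
    adv_obj = sup_contrastive_loss(encoder(x_adv), labels)
    grad, = torch.autograd.grad(adv_obj, x_adv)
    x_adv = (x_adv + eps * grad.sign()).detach()           # worst-case sample
    return (sup_contrastive_loss(encoder(x), labels)
            + sup_contrastive_loss(encoder(x_adv), labels))
```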