@inproceedings{xiao-etal-2026-emotion,
title = "Emotion Recognition in Multi-Speaker Conversations through Speaker Identification, Knowledge Distillation, and Hierarchical Fusion",
author = "Xiao, Li and
Funakoshi, Kotaro and
Okumura, Manabu",
editor = "Demberg, Vera and
Inui, Kentaro and
  M{\`a}rquez, Llu{\'i}s",
booktitle = "Findings of the {A}ssociation for {C}omputational {L}inguistics: {EACL} 2026",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-eacl/2026.findings-eacl.212/",
pages = "4091--4106",
ISBN = "979-8-89176-386-9",
abstract = "Emotion recognition in multi-speaker conversations faces significant challenges due to speaker ambiguity and severe class imbalance. We propose a novel framework that addresses these issues through three key innovations: (1) a speaker identification module that leverages audio-visual synchronization to accurately identify the active speaker, (2) a knowledge distillation strategy that transfers superior textual emotion understanding to audio and visual modalities, and (3) hierarchical attention fusion with composite loss functions to handle class imbalance. Comprehensive evaluations on MELD and IEMOCAP datasets demonstrate superior performance, achieving 67.75{\%} and 72.44{\%} weighted F1 scores respectively, with particularly notable improvements on minority emotion classes."
}