@inproceedings{jiang-2024-towards,
  title     = {Towards a Real-Time Multimodal Emotion Estimation Model for Dialogue Systems},
  author    = {Jiang, Jingjing},
  editor    = {Inoue, Koji and
               Fu, Yahui and
               Axelsson, Agnes and
               Ohashi, Atsumoto and
               Madureira, Brielen and
               Zenimoto, Yuki and
               Mohapatra, Biswesh and
               Stricker, Armand and
               Khosla, Sopan},
  booktitle = {Proceedings of the 20th Workshop of Young Researchers' Roundtable on Spoken Dialogue Systems},
  month     = sep,
  year      = {2024},
  address   = {Kyoto, Japan},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2024.yrrsds-1.22/},
  pages     = {60--61},
  abstract  = {This position paper presents my research interest in establishing human-like chat-oriented dialogue systems. To this end, my work focuses on two main areas: the construction and utilization of multimodal datasets and real-time multimodal affective computing. I discuss the limitations of current multimodal dialogue corpora and multimodal affective computing models. As a solution, I have constructed a human-human dialogue dataset containing various synchronized multimodal information, and I have conducted preliminary analyses on it. In future work, I will further analyze the collected data and build a real-time multimodal emotion estimation model for dialogue systems.},
}
@comment{
Markdown (Informal)
[Towards a Real-Time Multimodal Emotion Estimation Model for Dialogue Systems](https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.yrrsds-1.22/) (Jiang, YRRSDS 2024)
ACL
}