@inproceedings{miah-etal-2023-hierarchical,
title = "Hierarchical Fusion for Online Multimodal Dialog Act Classification",
author = "Miah, Md Messal Monem and
Pyarelal, Adarsh and
Huang, Ruihong",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2023.findings-emnlp.505/",
doi = "10.18653/v1/2023.findings-emnlp.505",
pages = "7532--7545",
abstract = "We propose a framework for online multimodal dialog act (DA) classification based on raw audio and ASR-generated transcriptions of current and past utterances. Existing multimodal DA classification approaches are limited by ineffective audio modeling and late-stage fusion. We showcase significant improvements in multimodal DA classification by integrating modalities at a more granular level and incorporating recent advancements in large language and audio models for audio feature extraction. We further investigate the effectiveness of self-attention and cross-attention mechanisms in modeling utterances and dialogs for DA classification. We achieve a substantial increase of 3 percentage points in the F1 score relative to current state-of-the-art models on two prominent DA classification datasets, MRDA and EMOTyDA."
}
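
The abstract above describes fusing audio and text at a more granular level and using cross-attention to combine modalities before dialog act classification. As a rough illustration only (not the paper's architecture; all module names, dimensions, and the single-layer design are assumptions), a minimal cross-attention fusion of per-utterance text and audio features might look like the following PyTorch sketch:

```python
# Hypothetical sketch: token-level cross-attention fusion of ASR-transcript
# embeddings and frame-level audio features for dialog act classification.
# Dimensions and hyperparameters are illustrative assumptions.
import torch
import torch.nn as nn

class CrossAttentionFusion(nn.Module):
    def __init__(self, text_dim=768, audio_dim=768, hidden_dim=256, num_classes=12):
        super().__init__()
        self.text_proj = nn.Linear(text_dim, hidden_dim)
        self.audio_proj = nn.Linear(audio_dim, hidden_dim)
        # Text tokens attend to audio frames (queries = text, keys/values = audio).
        self.cross_attn = nn.MultiheadAttention(hidden_dim, num_heads=4, batch_first=True)
        self.classifier = nn.Linear(hidden_dim, num_classes)

    def forward(self, text_feats, audio_feats):
        # text_feats:  (batch, text_len, text_dim)  -- ASR-transcript token embeddings
        # audio_feats: (batch, audio_len, audio_dim) -- frame-level audio-model features
        q = self.text_proj(text_feats)
        kv = self.audio_proj(audio_feats)
        fused, _ = self.cross_attn(q, kv, kv)   # granular (token-level) fusion
        utterance = fused.mean(dim=1)           # pool to a single utterance vector
        return self.classifier(utterance)       # dialog act logits

# Usage with random tensors standing in for real features:
model = CrossAttentionFusion()
text = torch.randn(2, 20, 768)    # 2 utterances, 20 tokens each
audio = torch.randn(2, 50, 768)   # 2 utterances, 50 audio frames each
logits = model(text, audio)       # shape: (2, 12)
```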
Markdown (Informal)
[Hierarchical Fusion for Online Multimodal Dialog Act Classification](https://aclanthology.org/2023.findings-emnlp.505/) (Miah et al., Findings 2023)