@inproceedings{akhtar-etal-2026-divine,
title = "{DIVINE} : Coordinating Multimodal Disentangled Representations for {O}ro-Facial Neurological Disorder Assessment",
author = "Akhtar, Mohd Mujtaba and
Girish and
Singh, Muskaan",
editor = "Demberg, Vera and
Inui, Kentaro and
Marquez, Llu{\'i}s",
booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 1: Long Papers)",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.248/",
pages = "5379--5392",
ISBN = "979-8-89176-380-7",
abstract = "In this study, we present a multimodal framework for predicting neuro-facial disorders by capturing both vocal and facial cues. We hypothesize that explicitly disentangling shared and modality-specific representations within multimodal foundation model embeddings can enhance clinical interpretability and generalization. To validate this hypothesis, we propose DIVINE a fully disentangled multimodal framework that operates on representations extracted from state-of-the-art (SOTA) audio and video foundation models, incorporating hierarchical variational bottlenecks, sparse gated fusion, and learnable symptom tokens. DIVINE operates in a multitask learning setup to jointly predict diagnostic categories (Healthy Control,ALS, Stroke) and severity levels (Mild, Moderate, Severe). The model is trained using synchronized audio and video inputs and evaluated on the Toronto NeuroFace dataset under full (audio-video) as well as single-modality (audio-only and video-only) test conditions. Our proposed approach, DIVINE achieves SOTA result, with the DeepSeek-VL2 and TRILLssoncombination reaching 98.26{\%} accuracy and 97.51{\%} F1-score. Under modality-constrained scenarios, the framework performs well, show-ing strong generalization when tested with video-only or audio-only inputs. It consistently yields superior performance compared to uni-modal models and baseline fusion techniques. To the best of our knowledge, DIVINE is the first fully disentangled multimodal frameworkto jointly perform categorical diagnosis and severity estimation for oro-facial neurological disorders using synchronized speech and facialvideo."
}
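For readers who want a concrete picture of the architecture the abstract describes, the snippet below is a minimal, illustrative sketch only; it is not the authors' released code. It assumes precomputed audio and video foundation-model embeddings (the 1024-dimensional sizes, latent dimension, PyTorch usage, and all module names are assumptions made for illustration) and shows the general pattern of shared plus modality-specific variational bottlenecks, a gated fusion of the latents, and joint diagnosis/severity heads.

```python
# Illustrative sketch of disentangled multimodal fusion with multitask heads.
# All dimensions, names, and design choices here are hypothetical, chosen only
# to mirror the components named in the abstract.
import torch
import torch.nn as nn


class VariationalBottleneck(nn.Module):
    """Map an embedding to a Gaussian latent (mu, logvar) and sample via reparameterization."""

    def __init__(self, in_dim: int, latent_dim: int):
        super().__init__()
        self.mu = nn.Linear(in_dim, latent_dim)
        self.logvar = nn.Linear(in_dim, latent_dim)

    def forward(self, x):
        mu, logvar = self.mu(x), self.logvar(x)
        z = mu + torch.randn_like(mu) * torch.exp(0.5 * logvar)
        return z, mu, logvar


class DisentangledFusionSketch(nn.Module):
    """Shared + modality-specific latents, gated fusion, and two task heads."""

    def __init__(self, audio_dim=1024, video_dim=1024, latent_dim=128,
                 n_diagnosis=3, n_severity=3):
        super().__init__()
        # Modality-specific and shared bottlenecks (hypothetical sizes).
        self.audio_specific = VariationalBottleneck(audio_dim, latent_dim)
        self.video_specific = VariationalBottleneck(video_dim, latent_dim)
        self.audio_shared = VariationalBottleneck(audio_dim, latent_dim)
        self.video_shared = VariationalBottleneck(video_dim, latent_dim)
        # Gate producing mixture weights over the three latent parts.
        self.gate = nn.Sequential(nn.Linear(3 * latent_dim, 3), nn.Softmax(dim=-1))
        # Multitask heads: diagnosis (HC / ALS / Stroke) and severity (Mild / Moderate / Severe).
        self.diagnosis_head = nn.Linear(latent_dim, n_diagnosis)
        self.severity_head = nn.Linear(latent_dim, n_severity)

    def forward(self, audio_emb, video_emb):
        za, *_ = self.audio_specific(audio_emb)
        zv, *_ = self.video_specific(video_emb)
        # Shared latent: average of the two modalities' shared projections.
        zs_a, *_ = self.audio_shared(audio_emb)
        zs_v, *_ = self.video_shared(video_emb)
        zs = 0.5 * (zs_a + zs_v)
        # Gated combination of specific and shared latents.
        w = self.gate(torch.cat([za, zv, zs], dim=-1))        # (batch, 3)
        fused = w[:, 0:1] * za + w[:, 1:2] * zv + w[:, 2:3] * zs
        return self.diagnosis_head(fused), self.severity_head(fused)


if __name__ == "__main__":
    model = DisentangledFusionSketch()
    audio = torch.randn(4, 1024)   # placeholder foundation-model audio embeddings
    video = torch.randn(4, 1024)   # placeholder foundation-model video embeddings
    diag_logits, sev_logits = model(audio, video)
    print(diag_logits.shape, sev_logits.shape)  # torch.Size([4, 3]) torch.Size([4, 3])
```

The paper's actual training objective, sparsity mechanism, symptom tokens, and severity supervision are not reproduced here; see the full paper at the URL above for the authors' formulation.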