@inproceedings{li-etal-2025-midas,
title = "{MIDAS}: Multi-level Intent, Domain, And Slot Knowledge Distillation for Multi-turn {NLU}",
author = "Li, Yan and
Kim, So-Eon and
Park, Seong-Bae and
Han, Caren",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.findings-naacl.445/",
pages = "7989--8012",
ISBN = "979-8-89176-195-7",
abstract = "Although Large Language Models (LLMs) can generate coherent text, they often struggle to recognise user intent behind queries. In contrast, Natural Language Understanding (NLU) models interpret the purpose and key information of user input for responsive interactions. Existing NLU models typically map utterances to a dual-level semantic frame, involving sentence-level intent (SI) and word-level slot (WS) labels. However, real-life conversations primarily consist of multi-turn dialogues, requiring the interpretation of complex and extended exchanges. Researchers encounter challenges in addressing all facets of multi-turn dialogue using a unified NLU model. This paper introduces MIDAS, a novel approach leveraging multi-level intent, domain, and slot knowledge distillation for multi-turn NLU. We construct distinct teachers for SI detection, WS filling, and conversation-level domain (CD) classification, each fine-tuned for specific knowledge. A multi-teacher loss is proposed to facilitate the integration of these teachers, guiding a student model in multi-turn dialogue tasks. Results demonstrate the efficacy of our model in improving multi-turn conversation understanding, showcasing the potential for advancements in NLU through multi-level dialogue knowledge distillation. Our implementation is open-sourced on GitHub (https://github.com/adlnlp/Midas)."
}
Markdown (Informal)
[MIDAS: Multi-level Intent, Domain, And Slot Knowledge Distillation for Multi-turn NLU](https://preview.aclanthology.org/fix-sig-urls/2025.findings-naacl.445/) (Li et al., Findings 2025)
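The abstract describes training task-specific teachers (sentence-level intent, word-level slot, conversation-level domain) and combining them through a multi-teacher loss that guides a single student. As a rough illustration of that general idea only, and not the authors' implementation (see https://github.com/adlnlp/Midas for that), below is a minimal PyTorch sketch of a weighted multi-teacher distillation loss; the task names, weights, and temperature are assumptions for illustration.

```python
# Minimal sketch of a multi-teacher distillation loss (illustrative only;
# task names, weights, and temperature are assumptions, not MIDAS's exact setup).
import torch
import torch.nn.functional as F

def multi_teacher_kd_loss(student_logits, teacher_logits, weights, T=2.0):
    """Weighted KL divergence between the student's and each teacher's
    softened output distribution, with one teacher per task
    (e.g. "intent", "slot", "domain")."""
    loss = torch.tensor(0.0)
    for task, w in weights.items():
        log_p_student = F.log_softmax(student_logits[task] / T, dim=-1)
        p_teacher = F.softmax(teacher_logits[task] / T, dim=-1)
        # Standard distillation scaling by T^2 (Hinton et al., 2015).
        loss = loss + w * F.kl_div(log_p_student, p_teacher,
                                   reduction="batchmean") * T * T
    return loss

# Toy usage with random logits for three hypothetical task heads:
weights = {"intent": 1.0, "slot": 1.0, "domain": 1.0}
student = {k: torch.randn(4, 8) for k in weights}
teachers = {k: torch.randn(4, 8) for k in weights}
print(multi_teacher_kd_loss(student, teachers, weights))
```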