@inproceedings{xing-etal-2024-dc,
    title = "{DC}-Instruct: An Effective Framework for Generative Multi-intent Spoken Language Understanding",
    author = "Xing, Bowen and
      Liao, Lizi and
      Huang, Minlie and
      Tsang, Ivor",
    editor = "Al-Onaizan, Yaser and
      Bansal, Mohit and
      Chen, Yun-Nung",
    booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2024",
    address = "Miami, Florida, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.emnlp-main.804/",
    doi = "10.18653/v1/2024.emnlp-main.804",
    pages = "14520--14534",
    abstract = "In the realm of multi-intent spoken language understanding, recent advancements have leveraged the potential of prompt learning frameworks. However, critical gaps exist in these frameworks: the lack of explicit modeling of dual-task dependencies and the oversight of task-specific semantic differences among utterances. To address these shortcomings, we propose DC-Instruct, a novel generative framework based on Dual-task Inter-dependent Instructions (DII) and Supervised Contrastive Instructions (SCI). Specifically, DII guides large language models (LLMs) to generate labels for one task based on the other task's labels, thereby explicitly capturing dual-task inter-dependencies. Moreover, SCI leverages utterance semantics differences by guiding LLMs to determine whether a pair of utterances share the same or similar labels. This can improve LLMs on extracting and discriminating task-specific semantics, thus enhancing their SLU reasoning abilities. Extensive experiments on public benchmark datasets show that DC-Instruct markedly outperforms current generative models and state-of-the-art methods, demonstrating its effectiveness in enhancing dialogue language understanding and reasoning."
}
Markdown (Informal)
[DC-Instruct: An Effective Framework for Generative Multi-intent Spoken Language Understanding](https://aclanthology.org/2024.emnlp-main.804/) (Xing et al., EMNLP 2024)
ACL