@inproceedings{cheng-etal-2023-mrrl,
title = "{MRRL}: Modifying the Reference via Reinforcement Learning for Non-Autoregressive Joint Multiple Intent Detection and Slot Filling",
author = "Cheng, Xuxin and
Zhu, Zhihong and
Cao, Bowen and
Ye, Qichen and
Zou, Yuexian",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.findings-emnlp.704/",
doi = "10.18653/v1/2023.findings-emnlp.704",
pages = "10495--10505",
abstract = "With the rise of non-autoregressive approach, some non-autoregressive models for joint multiple intent detection and slot filling have obtained the promising inference speed. However, most existing SLU models (1) suffer from the multi-modality problem that leads to reference intents and slots may not be suitable for training; (2) lack of alignment between the correct predictions of the two tasks, which extremely limits the overall accuracy. Therefore, in this paper, we propose $\textbf{M}$odifying the $\textbf{R}$eference via $\textbf{R}$einforcement $\textbf{L}$earning (MRRL), a novel method for multiple intent detection and slot filling, which introduces a modifier and employs reinforcement learning. Specifically, we try to provide the better training target for the non-autoregressive SLU model via modifying the reference based on the output of the non-autoregressive SLU model, and propose a suitability reward to ensure that the output of the modifier module could fit well with the output of the non-autoregressive SLU model and does not deviate too far from the reference. In addition, we also propose a compromise reward to realize a flexible trade-off between the two subtasks. Experiments on two multi-intent datasets and non-autoregressive baselines demonstrate that our MRRL could consistently improve the performance of baselines. More encouragingly, our best variant achieves new state-of-the-art results, outperforming the previous best approach by 3.6 overall accuracy on MixATIS dataset."
}
Markdown (Informal)
[MRRL: Modifying the Reference via Reinforcement Learning for Non-Autoregressive Joint Multiple Intent Detection and Slot Filling](https://aclanthology.org/2023.findings-emnlp.704/) (Cheng et al., Findings 2023)