@inproceedings{hudecek-dusek-2023-large,
  title     = {Are Large Language Models All You Need for Task-Oriented Dialogue?},
  author    = {Hude{\v{c}}ek, Vojt{\v{e}}ch and
               Dusek, Ondrej},
  editor    = {Stoyanchev, Svetlana and
               Joty, Shafiq and
               Schlangen, David and
               Dusek, Ondrej and
               Kennington, Casey and
               Alikhani, Malihe},
  booktitle = {Proceedings of the 24th Annual Meeting of the Special Interest Group on Discourse and Dialogue},
  month     = sep,
  year      = {2023},
  address   = {Prague, Czechia},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.sigdial-1.21/},
  doi       = {10.18653/v1/2023.sigdial-1.21},
  pages     = {216--228},
  abstract  = {Instruction-finetuned large language models (LLMs) gained a huge popularity recently, thanks to their ability to interact with users through conversation. In this work, we aim to evaluate their ability to complete multi-turn tasks and interact with external databases in the context of established task-oriented dialogue benchmarks. We show that in explicit belief state tracking, LLMs underperform compared to specialized task-specific models. Nevertheless, they show some ability to guide the dialogue to a successful ending through their generated responses if they are provided with correct slot values. Furthermore, this ability improves with few-shot in-domain examples.},
}
Markdown (Informal)
[Are Large Language Models All You Need for Task-Oriented Dialogue?](https://aclanthology.org/2023.sigdial-1.21/) (Hudeček & Dusek, SIGDIAL 2023)
ACL