@inproceedings{valvoda-etal-2022-prompting,
title = "Prompting for a conversation: How to control a dialog model?",
author = "Valvoda, Josef and
Fang, Yimai and
Vandyke, David",
editor = "Wu, Xianchao and
Ruan, Peiying and
Li, Sheng and
Dong, Yi",
booktitle = "Proceedings of the Second Workshop on When Creative AI Meets Conversational AI",
month = oct,
year = "2022",
address = "Gyeongju, Republic of Korea",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.cai-1.1",
pages = "1--8",
abstract = "Dialog modelling faces a difficult trade-off. Models are trained on a large amount of text, yet their responses need to be limited to a desired scope and style of a dialog agent. Because the datasets used to achieve the former contain language that is not compatible with the latter, pre-trained dialog models are fine-tuned on smaller curated datasets. However, the fine-tuning process robs them of the ability to produce diverse responses, eventually reducing them to dull conversation partners. In this paper we investigate if prompting can help with mitigating the above trade-off. Specifically, we experiment with conditioning the prompt on the query, rather than training a single prompt for all queries. By following the intuition that freezing the pre-trained language model will conserve its expressivity, we find that compared to fine-tuning, prompting can achieve a higher BLEU score and substantially improve the diversity and novelty of the responses.",
}
Markdown (Informal)
[Prompting for a conversation: How to control a dialog model?](https://aclanthology.org/2022.cai-1.1) (Valvoda et al., CAI 2022)
ACL
Josef Valvoda, Yimai Fang, and David Vandyke. 2022. Prompting for a conversation: How to control a dialog model?. In Proceedings of the Second Workshop on When Creative AI Meets Conversational AI, pages 1–8, Gyeongju, Republic of Korea. Association for Computational Linguistics.
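
The abstract describes conditioning the prompt on the query while keeping the pre-trained language model frozen. Below is a minimal sketch of that idea, not the authors' implementation: it assumes a GPT-2 backbone from Hugging Face transformers, and the prompt length, mean-pooling scheme, and two-layer projection network (QueryConditionedPrompt, prompter) are all illustrative choices.

import torch
import torch.nn as nn
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # assumed backbone; the paper's exact model may differ
tok = AutoTokenizer.from_pretrained(model_name)
lm = AutoModelForCausalLM.from_pretrained(model_name)
for p in lm.parameters():
    p.requires_grad = False  # freeze the pre-trained LM to conserve its expressivity

emb = lm.get_input_embeddings()   # the frozen token-embedding table
d = emb.embedding_dim
prompt_len = 10                   # hypothetical soft-prompt length

class QueryConditionedPrompt(nn.Module):
    """Maps a pooled query representation to prompt_len soft prompt vectors."""
    def __init__(self, d, prompt_len):
        super().__init__()
        self.prompt_len = prompt_len
        self.net = nn.Sequential(nn.Linear(d, d), nn.Tanh(),
                                 nn.Linear(d, prompt_len * d))

    def forward(self, query_embeds, attention_mask):
        # Mean-pool the query token embeddings (one illustrative choice).
        mask = attention_mask.unsqueeze(-1).float()
        pooled = (query_embeds * mask).sum(1) / mask.sum(1).clamp(min=1.0)
        return self.net(pooled).view(-1, self.prompt_len, query_embeds.size(-1))

prompter = QueryConditionedPrompt(d, prompt_len)  # the only trainable module

batch = tok(["How was your day?"], return_tensors="pt")
query_embeds = emb(batch["input_ids"])
soft_prompt = prompter(query_embeds, batch["attention_mask"])

# Prepend the query-specific soft prompt and run the frozen LM on embeddings.
inputs_embeds = torch.cat([soft_prompt, query_embeds], dim=1)
attention_mask = torch.cat(
    [torch.ones(soft_prompt.shape[:2], dtype=torch.long),
     batch["attention_mask"]], dim=1)
out = lm(inputs_embeds=inputs_embeds, attention_mask=attention_mask)
# Training would backpropagate a response language-modelling loss into
# `prompter` alone, leaving the backbone untouched.

Since only prompter receives gradients, the backbone's distribution over responses is preserved, which is the mechanism the abstract credits for the improved diversity and novelty relative to fine-tuning.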