@inproceedings{zeng-etal-2024-boottod,
title = "{B}oot{TOD}: Bootstrap Task-oriented Dialogue Representations by Aligning Diverse Responses",
author = "Zeng, Weihao and
He, Keqing and
Wang, Yejie and
Fu, Dayuan and
Xu, Weiran",
editor = "Calzolari, Nicoletta and
Kan, Min-Yen and
Hoste, Veronique and
Lenci, Alessandro and
Sakti, Sakriani and
Xue, Nianwen",
booktitle = "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)",
month = may,
year = "2024",
address = "Torino, Italia",
publisher = "ELRA and ICCL",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.lrec-main.221/",
pages = "2458--2467",
abstract = "Pre-trained language models have been successful in many scenarios. However, their usefulness in task-oriented dialogues is limited due to the intrinsic linguistic differences between general text and task-oriented dialogues. Current task-oriented dialogue pre-training methods rely on a contrastive framework, which faces challenges such as selecting true positives and hard negatives, as well as lacking diversity. In this paper, we propose a novel dialogue pre-training model called BootTOD. It learns task-oriented dialogue representations via a self-bootstrapping framework. Unlike contrastive counterparts, BootTOD aligns context and context+response representations and dismisses the requirements of contrastive pairs. BootTOD also uses multiple appropriate response targets to model the intrinsic one-to-many diversity of human conversations. Experimental results show that BootTOD outperforms strong TOD baselines on diverse downstream dialogue tasks."
}
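The abstract describes the core objective: pull a context representation toward representations of the context concatenated with several plausible responses, with no contrastive negatives. The following is a minimal PyTorch sketch of such a self-bootstrapping alignment loss, not the authors' released implementation; the function name, tensor shapes, cosine-based objective, and stop-gradient on the targets are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def boottod_style_alignment_loss(context_repr, context_response_reprs):
    """
    Hypothetical sketch of a self-bootstrapping alignment objective:
    align the context representation with each context+response
    representation (stop-gradient on the targets), with no negatives.

    context_repr:           (batch, dim)    online encoding of the context
    context_response_reprs: (batch, k, dim) target encodings of the context
                            concatenated with k diverse responses
    """
    # Normalize so alignment reduces to cosine similarity.
    z = F.normalize(context_repr, dim=-1).unsqueeze(1)              # (batch, 1, dim)
    targets = F.normalize(context_response_reprs, dim=-1).detach()  # stop-gradient

    # One-to-many alignment: average the loss over the k response targets.
    cosine = (z * targets).sum(dim=-1)                              # (batch, k)
    return (1.0 - cosine).mean()

# Toy usage with random features standing in for real encoder outputs.
if __name__ == "__main__":
    batch, k, dim = 4, 3, 128
    ctx = torch.randn(batch, dim, requires_grad=True)
    ctx_resp = torch.randn(batch, k, dim)
    loss = boottod_style_alignment_loss(ctx, ctx_resp)
    loss.backward()
    print(f"alignment loss: {loss.item():.4f}")
```

Averaging over the k response targets is one simple way to realize the one-to-many alignment the abstract describes; the paper itself may weight or select the targets differently.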