Canonical ACL Anthology record; DOI 10.18653/v1/2021.emnlp-main.400 is authoritative.
@inproceedings{chi-rudnicky-2021-zero,
    title     = {Zero-Shot Dialogue Disentanglement by Self-Supervised Entangled Response Selection},
    author    = {Chi, Ta-Chung and
                 Rudnicky, Alexander},
    editor    = {Moens, Marie-Francine and
                 Huang, Xuanjing and
                 Specia, Lucia and
                 Yih, Scott Wen-tau},
    booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
    month     = nov,
    year      = {2021},
    address   = {Online and Punta Cana, Dominican Republic},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2021.emnlp-main.400/},
    doi       = {10.18653/v1/2021.emnlp-main.400},
    pages     = {4897--4902},
    abstract  = {Dialogue disentanglement aims to group utterances in a long and multi-participant dialogue into threads. This is useful for discourse analysis and downstream applications such as dialogue response selection, where it can be the first step to construct a clean context/response set. Unfortunately, labeling all \textit{reply-to} links takes quadratic effort w.r.t the number of utterances: an annotator must check all preceding utterances to identify the one to which the current utterance is a reply. In this paper, we are the first to propose a \textbf{zero-shot} dialogue disentanglement solution. Firstly, we train a model on a multi-participant response selection dataset harvested from the web which is not annotated; we then apply the trained model to perform zero-shot dialogue disentanglement. Without any labeled data, our model can achieve a cluster F1 score of 25. We also fine-tune the model using various amounts of labeled data. Experiments show that with only 10{\%} of the data, we achieve nearly the same performance of using the full dataset.},
}
Markdown (Informal)
[Zero-Shot Dialogue Disentanglement by Self-Supervised Entangled Response Selection](https://aclanthology.org/2021.emnlp-main.400/) (Chi & Rudnicky, EMNLP 2021)
ACL