@inproceedings{li-zhao-2023-em,
title = "{EM} Pre-training for Multi-party Dialogue Response Generation",
author = "Li, Yiyang and
Zhao, Hai",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.acl-long.7/",
doi = "10.18653/v1/2023.acl-long.7",
pages = "92--103",
    abstract = "Dialogue response generation requires an agent to generate a response according to the current dialogue history. This task has been well studied for two-party dialogues, but a large gap remains for multi-party dialogues. Unlike two-party dialogues, where each response is a direct reply to the previous utterance, in the multi-party scenario the addressee of a response must be specified before the response is generated. Thanks to the huge amount of two-party conversational data, various pre-trained language models for two-party dialogue response generation have been proposed. However, because multi-party dialogue datasets lack annotated addressee labels, it is hard to use them to pre-train a response generation model for multi-party dialogues. To tackle this obstacle, we propose an Expectation-Maximization (EM) approach that iteratively performs expectation steps to generate addressee labels and maximization steps to optimize a response generation model. Theoretical analyses and extensive experiments justify the feasibility and effectiveness of the proposed method. The official implementation of this paper is available at \url{https://github.com/EricLee8/MPDRG}."
}
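The abstract describes the EM procedure only at a high level: an expectation step that infers the unobserved addressee labels with the current response-generation model, and a maximization step that updates that model on the resulting soft-labeled data. The snippet below is a minimal Python sketch of such a loop, included purely for orientation; the toy scoring function, variable names, and miniature corpus are illustrative assumptions and are not taken from the paper or the official repository linked above, where a pre-trained generator is trained on real multi-party dialogue data.

```python
import math

def log_likelihood(dialogue, addressee, theta):
    # Stand-in for log p(response | history, addressee; theta).
    # A real system would compute this with a pre-trained generator.
    return theta.get((dialogue, addressee), 0.0)

def e_step(dialogue, addressees, theta):
    # E-step: posterior over the unobserved addressee label, obtained by
    # normalizing the generator's log-likelihoods with a softmax.
    logits = [log_likelihood(dialogue, a, theta) for a in addressees]
    peak = max(logits)
    weights = [math.exp(l - peak) for l in logits]
    total = sum(weights)
    return dict(zip(addressees, (w / total for w in weights)))

def m_step(corpus, posteriors, theta, lr=0.1):
    # M-step: update the generator to increase the expected log-likelihood,
    # weighting each candidate addressee by its posterior probability.
    for dialogue, addressees in corpus:
        for a in addressees:
            key = (dialogue, a)
            theta[key] = theta.get(key, 0.0) + lr * posteriors[dialogue][a]
    return theta

# Toy "corpus": dialogue ids paired with their candidate addressees.
corpus = [("dlg1", ["A", "B"]), ("dlg2", ["A", "B", "C"])]
theta = {("dlg1", "A"): 0.3}  # weak initial preference, e.g. from a heuristic

for _ in range(10):  # alternate E- and M-steps for a fixed number of rounds
    posteriors = {d: e_step(d, addressees, theta) for d, addressees in corpus}
    theta = m_step(corpus, posteriors, theta)

# Hard addressee labels implied by the final posteriors.
print({d: max(p, key=p.get) for d, p in posteriors.items()})
```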