@inproceedings{dou-etal-2022-improving,
    title     = {Improving Large-scale Paraphrase Acquisition and Generation},
    author    = {Dou, Yao and
                 Jiang, Chao and
                 Xu, Wei},
    editor    = {Goldberg, Yoav and
                 Kozareva, Zornitsa and
                 Zhang, Yue},
    booktitle = {Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing},
    month     = dec,
    year      = {2022},
    address   = {Abu Dhabi, United Arab Emirates},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2022.emnlp-main.631/},
    doi       = {10.18653/v1/2022.emnlp-main.631},
    pages     = {9301--9323},
    abstract  = {This paper addresses the quality issues in existing Twitter-based paraphrase datasets, and discusses the necessity of using two separate definitions of paraphrase for identification and generation tasks. We present a new Multi-Topic Paraphrase in Twitter (MultiPIT) corpus that consists of a total of 130k sentence pairs with crowdsourcing (MultiPIT{\_}crowd) and expert (MultiPIT{\_}expert) annotations using two different paraphrase definitions for paraphrase identification, in addition to a multi-reference test set (MultiPIT{\_}NMR) and a large automatically constructed training set (MultiPIT{\_}Auto) for paraphrase generation. With improved data annotation quality and task-specific paraphrase definition, the best pre-trained language model fine-tuned on our dataset achieves the state-of-the-art performance of 84.2 F1 for automatic paraphrase identification. Furthermore, our empirical results also demonstrate that the paraphrase generation models trained on MultiPIT{\_}Auto generate more diverse and high-quality paraphrases compared to their counterparts fine-tuned on other corpora such as Quora, MSCOCO, and ParaNMT.}
}
Markdown (Informal)
[Improving Large-scale Paraphrase Acquisition and Generation](https://aclanthology.org/2022.emnlp-main.631/) (Dou et al., EMNLP 2022)
ACL