@inproceedings{weston-etal-2022-generative,
title = "Generative Pretraining for Paraphrase Evaluation",
author = "Weston, Jack and
Lenain, Raphael and
Meepegama, Udeepa and
Fristed, Emil",
editor = "Muresan, Smaranda and
Nakov, Preslav and
Villavicencio, Aline",
booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.acl-long.280/",
doi = "10.18653/v1/2022.acl-long.280",
pages = "4052--4073",
abstract = "We introduce ParaBLEU, a paraphrase representation learning model and evaluation metric for text generation. Unlike previous approaches, ParaBLEU learns to understand paraphrasis using generative conditioning as a pretraining objective. ParaBLEU correlates more strongly with human judgements than existing metrics, obtaining new state-of-the-art results on the 2017 WMT Metrics Shared Task. We show that our model is robust to data scarcity, exceeding previous state-of-the-art performance using only 50{\%} of the available training data and surpassing BLEU, ROUGE and METEOR with only 40 labelled examples. Finally, we demonstrate that ParaBLEU can be used to conditionally generate novel paraphrases from a single demonstration, which we use to confirm our hypothesis that it learns abstract, generalized paraphrase representations."
}
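
For context, the evaluation protocol described in the abstract (correlating metric scores with human judgements, as in the WMT Metrics Shared Task) can be sketched in a few lines. The snippet below is illustrative only, not the authors' code: `metric_score` is a hypothetical token-overlap stand-in for the actual ParaBLEU model, and the inputs are toy examples.

```python
# Sketch of metric meta-evaluation: score each (candidate, reference) pair
# with the metric under test, then correlate those scores with human
# judgements. ParaBLEU's claim is that its scores correlate more strongly
# than BLEU/ROUGE/METEOR; this harness shows what "correlate" means here.

from collections import Counter
from math import sqrt
from statistics import mean


def metric_score(candidate: str, reference: str) -> float:
    """Hypothetical stand-in metric: token-overlap F1 (NOT ParaBLEU itself)."""
    c, r = Counter(candidate.lower().split()), Counter(reference.lower().split())
    overlap = sum((c & r).values())
    if overlap == 0:
        return 0.0
    precision = overlap / sum(c.values())
    recall = overlap / sum(r.values())
    return 2 * precision * recall / (precision + recall)


def pearson(xs: list[float], ys: list[float]) -> float:
    """Pearson's r between metric scores and human judgements."""
    mx, my = mean(xs), mean(ys)
    cov = sum((x - mx) * (y - my) for x, y in zip(xs, ys))
    var_x = sum((x - mx) ** 2 for x in xs)
    var_y = sum((y - my) ** 2 for y in ys)
    return cov / sqrt(var_x * var_y)


# Toy data: system outputs, references, and human quality judgements.
candidates = ["the cat sat on the mat", "a dog barked loudly", "hello world"]
references = ["the cat is sitting on the mat", "the dog was barking", "greetings earth"]
human_scores = [0.9, 0.7, 0.1]

metric_scores = [metric_score(c, r) for c, r in zip(candidates, references)]
print(f"Pearson r vs. human judgements: {pearson(metric_scores, human_scores):.3f}")
```

The same harness applies to any candidate metric, learned or rule-based: swap in its scoring function and compare the resulting correlations.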