@inproceedings{wu-zhao-2022-sentence,
title = "Sentence Representation Learning with Generative Objective rather than Contrastive Objective",
author = "Wu, Bohong and
Zhao, Hai",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2022.emnlp-main.221/",
doi = "10.18653/v1/2022.emnlp-main.221",
pages = "3356--3368",
abstract = "Though offering amazing contextualized token-level representations, current pre-trained language models take less attention on accurately acquiring sentence-level representation during their self-supervised pre-training. However, contrastive objectives which dominate the current sentence representation learning bring little linguistic interpretability and no performance guarantee on downstream semantic tasks. We instead propose a novel generative self-supervised learning objective based on phrase reconstruction. To overcome the drawbacks of previous generative methods, we carefully model intra-sentence structure by breaking down one sentence into pieces of important phrases. Empirical studies show that our generative learning achieves powerful enough performance improvement and outperforms the current state-of-the-art contrastive methods not only on the STS benchmarks, but also on downstream semantic retrieval and reranking tasks. Our code is available at https://github.com/chengzhipanpan/PaSeR."
}
Markdown (Informal)
[Sentence Representation Learning with Generative Objective rather than Contrastive Objective](https://aclanthology.org/2022.emnlp-main.221/) (Wu & Zhao, EMNLP 2022)
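
The abstract describes the phrase-reconstruction objective only at a high level. As a rough illustration of the idea, here is a minimal, hypothetical PyTorch sketch: one contiguous token span (standing in for an "important phrase") is masked, the sentence is pooled into a single vector, and the masked phrase is reconstructed from that vector alone, so the reconstruction loss can only be lowered by improving the sentence embedding. All names, model sizes, and the span-selection heuristic below are assumptions for illustration, not the authors' PaSeR implementation (see the linked repository for that).

```python
import random
import torch
import torch.nn as nn

VOCAB_SIZE, HIDDEN, SPAN, MASK_ID = 1000, 64, 3, 0  # toy hyperparameters (assumptions)

class TinyPhraseReconstructor(nn.Module):
    """Pool a masked sentence into one vector, then reconstruct the masked phrase from it."""
    def __init__(self):
        super().__init__()
        self.embed = nn.Embedding(VOCAB_SIZE, HIDDEN)
        layer = nn.TransformerEncoderLayer(HIDDEN, nhead=4, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=2)
        # The pooled sentence vector is the decoder's only input, so reconstruction
        # quality depends entirely on what the sentence embedding captures.
        self.decoder = nn.Linear(HIDDEN, SPAN * VOCAB_SIZE)

    def forward(self, token_ids):
        hidden = self.encoder(self.embed(token_ids))    # (B, L, H) contextual states
        sentence_vec = hidden.mean(dim=1)               # (B, H) mean-pooled sentence embedding
        return self.decoder(sentence_vec).view(-1, SPAN, VOCAB_SIZE)

def mask_phrase(token_ids):
    """Mask one contiguous span per batch, standing in for an 'important phrase'."""
    ids = token_ids.clone()
    start = random.randint(0, ids.size(1) - SPAN)
    target = ids[:, start:start + SPAN].clone()
    ids[:, start:start + SPAN] = MASK_ID
    return ids, target

model = TinyPhraseReconstructor()
tokens = torch.randint(1, VOCAB_SIZE, (8, 16))          # toy batch: 8 sentences, 16 tokens each
masked, target = mask_phrase(tokens)
logits = model(masked)
loss = nn.functional.cross_entropy(logits.reshape(-1, VOCAB_SIZE), target.reshape(-1))
loss.backward()                                         # the reconstruction loss trains the encoder
print(f"phrase-reconstruction loss: {loss.item():.3f}")
```

The paper additionally selects which phrases are important and models intra-sentence structure; this sketch substitutes a random span purely to keep the example self-contained.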