@inproceedings{xu-etal-2022-laprador,
title = "{L}a{P}ra{D}o{R}: Unsupervised Pretrained Dense Retriever for Zero-Shot Text Retrieval",
author = "Xu, Canwen and
Guo, Daya and
Duan, Nan and
McAuley, Julian",
editor = "Muresan, Smaranda and
Nakov, Preslav and
Villavicencio, Aline",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2022",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-acl.281/",
doi = "10.18653/v1/2022.findings-acl.281",
pages = "3557--3569",
abstract = "In this paper, we propose LaPraDoR, a pretrained dual-tower dense retriever that does not require any supervised data for training. Specifically, we first present Iterative Contrastive Learning (ICoL) that iteratively trains the query and document encoders with a cache mechanism. ICoL not only enlarges the number of negative instances but also keeps representations of cached examples in the same hidden space. We then propose Lexicon-Enhanced Dense Retrieval (LEDR) as a simple yet effective way to enhance dense retrieval with lexical matching. We evaluate LaPraDoR on the recently proposed BEIR benchmark, including 18 datasets of 9 zero-shot text retrieval tasks. Experimental results show that LaPraDoR achieves state-of-the-art performance compared with supervised dense retrieval models, and further analysis reveals the effectiveness of our training strategy and objectives. Compared to re-ranking, our lexicon-enhanced approach can be run in milliseconds (22.5x faster) while achieving superior performance."
}
Markdown (Informal)
[LaPraDoR: Unsupervised Pretrained Dense Retriever for Zero-Shot Text Retrieval](https://aclanthology.org/2022.findings-acl.281/) (Xu et al., Findings 2022)
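As a rough illustration of the lexicon-enhanced dense retrieval idea mentioned in the abstract (fusing a lexical matching score such as BM25 with a dense retriever's similarity score instead of re-ranking), here is a minimal sketch. The function name, the toy scores, and the simple product fusion are assumptions for illustration, not LaPraDoR's released implementation.

```python
# Hypothetical sketch of lexicon-enhanced dense retrieval score fusion.
# The product combination and all names/values below are illustrative
# assumptions, not code from the LaPraDoR paper or repository.
import numpy as np

def hybrid_scores(lexical_scores: np.ndarray, dense_scores: np.ndarray) -> np.ndarray:
    """Fuse lexical and dense scores for one query over the same candidate set.

    Each array holds one score per candidate document; multiplying them means
    a document must score well on both signals to rank highly.
    """
    return lexical_scores * dense_scores

# Toy usage: four candidate documents for a single query.
bm25 = np.array([12.3, 0.0, 5.1, 8.7])       # lexical matching scores (e.g., BM25)
dense = np.array([0.62, 0.71, 0.55, 0.60])   # e.g., dot products of query/document embeddings
ranking = np.argsort(-hybrid_scores(bm25, dense))
print(ranking)  # candidate indices ordered from best to worst
```

Because the fusion is a single elementwise operation over precomputed scores, it adds essentially no latency on top of the dense retrieval step, which is consistent with the abstract's contrast against slower re-ranking.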