@inproceedings{zhao-etal-2021-distantly,
title = "Distantly-Supervised Dense Retrieval Enables Open-Domain Question Answering without Evidence Annotation",
author = "Zhao, Chen and
Xiong, Chenyan and
Boyd-Graber, Jordan and
Daum{\'e} III, Hal",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2021.emnlp-main.756/",
doi = "10.18653/v1/2021.emnlp-main.756",
pages = "9612--9622",
abstract = "Open-domain question answering answers a question based on evidence retrieved from a large corpus. State-of-the-art neural approaches require intermediate evidence annotations for training. However, such intermediate annotations are expensive, and methods that rely on them cannot transfer to the more common setting, where only question{--}answer pairs are available. This paper investigates whether models can learn to find evidence from a large corpus, with only distant supervision from answer labels for model training, thereby generating no additional annotation cost. We introduce a novel approach (DistDR) that iteratively improves over a weak retriever by alternately finding evidence from the up-to-date model and encouraging the model to learn the most likely evidence. Without using any evidence labels, DistDR is on par with fully-supervised state-of-the-art methods on both multi-hop and single-hop QA benchmarks. Our analysis confirms that DistDR finds more accurate evidence over iterations, which leads to model improvements. The code is available at \url{https://github.com/henryzhao5852/DistDR}."
}
Markdown (Informal)
[Distantly-Supervised Dense Retrieval Enables Open-Domain Question Answering without Evidence Annotation](https://aclanthology.org/2021.emnlp-main.756/) (Zhao et al., EMNLP 2021)