@inproceedings{hu-etal-2021-relation-guided,
title = "Relation-Guided Pre-Training for Open-Domain Question Answering",
author = "Hu, Ziniu and
Sun, Yizhou and
Chang, Kai-Wei",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2021",
month = nov,
year = "2021",
address = "Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2021.findings-emnlp.292/",
doi = "10.18653/v1/2021.findings-emnlp.292",
pages = "3431--3448",
abstract = "Answering complex open-domain questions requires understanding the latent relations between involving entities. However, we found that the existing QA datasets are extremely imbalanced in some types of relations, which hurts the generalization performance over questions with long-tail relations. To remedy this problem, in this paper, we propose a Relation-Guided Pre-Training (RGPT-QA) framework. We first generate a relational QA dataset covering a wide range of relations from both the Wikidata triplets and Wikipedia hyperlinks. We then pre-train a QA model to infer the latent relations from the question, and then conduct extractive QA to get the target answer entity. We demonstrate that by pre-training with propoed RGPT-QA techique, the popular open-domain QA model, Dense Passage Retriever (DPR), achieves 2.2{\%}, 2.4{\%}, and 6.3{\%} absolute improvement in Exact Match accuracy on Natural Questions, TriviaQA, and WebQuestions. Particularly, we show that RGPT-QA improves significantly on questions with long-tail relations."
}
Markdown (Informal)
[Relation-Guided Pre-Training for Open-Domain Question Answering](https://preview.aclanthology.org/fix-sig-urls/2021.findings-emnlp.292/) (Hu et al., Findings 2021)