@inproceedings{yu-etal-2023-augmentation,
title = "Augmentation-Adapted Retriever Improves Generalization of Language Models as Generic Plug-In",
author = "Yu, Zichun and
Xiong, Chenyan and
Yu, Shi and
Liu, Zhiyuan",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.acl-long.136/",
doi = "10.18653/v1/2023.acl-long.136",
pages = "2421--2436",
abstract = "Retrieval augmentation can aid language models (LMs) in knowledge-intensive tasks by supplying them with external information. Prior works on retrieval augmentation usually jointly fine-tune the retriever and the LM, making them closely coupled. In this paper, we explore the scheme of generic retrieval plug-in: the retriever is to assist target LMs that may not be known beforehand or are unable to be fine-tuned together. To retrieve useful documents for unseen target LMs, we propose augmentation-adapted retriever (AAR), which learns LM{'}s preferences obtained from a known source LM. Experiments on the MMLU and PopQA datasets demonstrate that our AAR trained with a small source LM is able to significantly improve the zero-shot generalization of larger target LMs ranging from 250M Flan-T5 to 175B InstructGPT. Further analysis indicates that the preferences of different LMs overlap, enabling AAR trained with a single source LM to serve as a generic plug-in for various target LMs. Our code is open-sourced at \url{https://github.com/OpenMatch/Augmentation-Adapted-Retriever}."
}
Markdown (Informal)
[Augmentation-Adapted Retriever Improves Generalization of Language Models as Generic Plug-In](https://aclanthology.org/2023.acl-long.136/) (Yu et al., ACL 2023)