@inproceedings{jin-etal-2024-bider,
    title     = "{BIDER}: Bridging Knowledge Inconsistency for Efficient Retrieval-Augmented {LLM}s via Key Supporting Evidence",
    author    = "Jin, Jiajie and
      Zhu, Yutao and
      Zhou, Yujia and
      Dou, Zhicheng",
    editor    = "Ku, Lun-Wei and
      Martins, Andre and
      Srikumar, Vivek",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
    month     = aug,
    year      = "2024",
    address   = "Bangkok, Thailand",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2024.findings-acl.42/",
    doi       = "10.18653/v1/2024.findings-acl.42",
    pages     = "750--761",
    abstract  = "Retrieval-augmented large language models (LLMs) have demonstrated efficacy in knowledge-intensive tasks such as open-domain QA, addressing inherent challenges in knowledge update and factual inadequacy. However, inconsistencies between retrieval knowledge and the necessary knowledge for LLMs, leading to a decline in LLM{'}s answer quality. This paper introduces BIDER, an approach that refines retrieval documents into Key Supporting Evidence (KSE) through knowledge synthesis, supervised fine-tuning (SFT), and preference alignment. We train BIDER by learning from crafting KSE, while maximizing its output to align with LLM{'}s information acquisition preferences through reinforcement learning. Evaluations across five datasets show BIDER boosts LLMs' answer quality by 7{\%} while reducing input content length in retrieval documents by 80{\%}, outperforming existing methods. The proposed KSE simulation effectively equips LLMs with essential information for accurate question answering."
}
Markdown (Informal)
[BIDER: Bridging Knowledge Inconsistency for Efficient Retrieval-Augmented LLMs via Key Supporting Evidence](https://aclanthology.org/2024.findings-acl.42/) (Jin et al., Findings of ACL 2024)
ACL