@inproceedings{jiang-etal-2025-gainrag,
    title     = "{GainRAG}: Preference Alignment in Retrieval-Augmented Generation through Gain Signal Synthesis",
    author    = "Jiang, Yi and
      Zhao, Sendong and
      Li, Jianbo and
      Wang, Haochun and
      Qin, Bing",
    editor    = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month     = jul,
    year      = "2025",
    address   = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.acl-long.527/",
    pages     = "10746--10757",
    isbn      = "979-8-89176-251-0",
    abstract  = "The Retrieval-Augmented Generation (RAG) framework introduces a retrieval module to dynamically inject retrieved information into the input context of large language models (LLMs), and has demonstrated significant success in various NLP tasks. However, the current study points out that there is a preference gap between retrievers and LLMs in the RAG framework, which limits the further improvement of system performance. Some highly relevant passages may interfere with LLM reasoning because they contain complex or contradictory information; while some indirectly related or even inaccurate content may help LLM generate more accurate answers by providing suggestive information or logical clues. To solve this, we propose {GainRAG}, a novel approach that aligns the retriever{'}s and LLM{'}s preferences by defining a new metric, ``gain'', which measures how well an input passage contributes to correct outputs. We then propose a method to estimate these gain signals and train a middleware that aligns the preferences of the retriever and the LLM using only limited data. In addition, we introduce a pseudo-passage strategy to mitigate degradation. The experimental results on 6 datasets verify the effectiveness of GainRAG."
}
@comment{Markdown (Informal):
[GainRAG: Preference Alignment in Retrieval-Augmented Generation through Gain Signal Synthesis](https://aclanthology.org/2025.acl-long.527/) (Jiang et al., ACL 2025)
}