@inproceedings{jung-etal-2025-familiarity,
title = "Familiarity-Aware Evidence Compression for Retrieval-Augmented Generation",
author = "Jung, Dongwon and
Liu, Qin and
Huang, Tenghao and
Zhou, Ben and
Chen, Muhao",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-luhme/2025.findings-emnlp.878/",
doi = "10.18653/v1/2025.findings-emnlp.878",
pages = "16181--16196",
ISBN = "979-8-89176-335-7",
abstract = "Retrieval-augmented generation (RAG) improves large language models (LMs) by incorporating non-parametric knowledge through evidence retrieved from external sources. However, it often struggles to cope with inconsistent and irrelevant information that can distract the LM from its tasks, especially when multiple evidence pieces are required. While compressing the retrieved evidence with a compression model aims to address this issue, the compressed evidence may still be unfamiliar to the target model used for downstream tasks, potentially failing to utilize the evidence effectively. We propose FaviComp (Familarity-Aware Evidence Compression), a novel training-free evidence compression technique that makes retrieved evidence more familiar to the target model, while seamlessly integrating parametric knowledge from the model. Experimental results show that FaviComp consistently outperforms the most recent evidence compression baselines across multiple open-domain QA datasets, improving accuracy by up to 28.1{\%} while achieving high compression rates. Additionally, we demonstrate the effective integration of both parametric and non-parametric knowledge during evidence compression."
}