@inproceedings{yoon-etal-2025-hypothetical,
title = "Hypothetical Documents or Knowledge Leakage? Rethinking {LLM}-based Query Expansion",
author = "Yoon, Yejun and
Jung, Jaeyoon and
Yoon, Seunghyun and
Park, Kunwoo",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.findings-acl.980/",
pages = "19170--19187",
ISBN = "979-8-89176-256-5",
abstract = "Query expansion methods powered by large language models (LLMs) have demonstrated effectiveness in zero-shot retrieval tasks. These methods assume that LLMs can generate hypothetical documents that, when incorporated into a query vector, enhance the retrieval of real evidence. However, we challenge this assumption by investigating whether knowledge leakage in benchmarks contributes to the observed performance gains. Using fact verification as a testbed, we analyze whether the generated documents contain information entailed by ground-truth evidence and assess their impact on performance. Our findings indicate that, on average, performance improvements consistently occurred for claims whose generated documents included sentences entailed by gold evidence. This suggests that knowledge leakage may be present in fact-verification benchmarks, potentially inflating the perceived performance of LLM-based query expansion methods."
}
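
For context, the query-expansion setup the abstract describes is HyDE-style: an LLM writes hypothetical documents for a claim, and their embeddings are folded into the query vector before dense retrieval. Below is a minimal sketch of that idea, assuming a sentence-transformers bi-encoder and placeholder hypothetical documents; the model name and helper are illustrative, not taken from the paper.

```python
# Minimal sketch of hypothetical-document query expansion (HyDE-style):
# embed the claim together with LLM-generated hypothetical documents and
# average the vectors into a single dense query. The documents below are
# placeholders standing in for actual LLM outputs.
import numpy as np
from sentence_transformers import SentenceTransformer

encoder = SentenceTransformer("all-MiniLM-L6-v2")  # assumed bi-encoder choice

def expand_query(claim: str, hypothetical_docs: list[str]) -> np.ndarray:
    """Return one query vector combining the claim and generated documents."""
    texts = [claim] + hypothetical_docs
    vectors = encoder.encode(texts, normalize_embeddings=True)
    return vectors.mean(axis=0)  # averaged vector used for dense retrieval

# Usage: the "hypothetical documents" would normally be sampled from an LLM.
query_vec = expand_query(
    "The Eiffel Tower was completed in 1889.",
    ["The Eiffel Tower, finished in 1889 for the World's Fair, stands in Paris."],
)
```

The paper's leakage analysis then asks whether such generated documents contain sentences entailed by the gold evidence, which is what would inflate retrieval gains on the benchmark.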