@inproceedings{bhat-etal-2025-ur2n,
title = "{UR}2{N}: Unified Retriever and {R}era{N}ker",
author = "Bhat, Riyaz Ahmad and
Sen, Jaydeep and
Murthy, Rudra and
P, Vignesh",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Eugenio, Barbara Di and
Schockaert, Steven and
Darwish, Kareem and
Agarwal, Apoorv",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics: Industry Track",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.coling-industry.51/",
pages = "595--602",
    abstract = "The two-stage retrieval paradigm has gained popularity, where a neural model serves as a reranker atop a non-neural first-stage retriever. We argue that this approach, involving two disparate models without interaction, represents a suboptimal choice. To address this, we propose a unified encoder-decoder architecture with a novel training regimen that enables the encoder representation to be used for retrieval and the decoder for reranking within a single unified model, facilitating end-to-end retrieval. We incorporate XTR-style retrieval on top of the trained MonoT5 reranker specifically to address practical constraints and create a lightweight model. Results on the BEIR benchmark demonstrate the effectiveness of our unified architecture, featuring a highly optimized index and parameters. It outperforms ColBERT and XTR, and even serves as a superior reranker compared to the standalone MonoT5 reranker. The performance gains of our proposed system in reranking become increasingly evident as model capacity grows, particularly when compared to rerankers operating over traditional first-stage retrievers like BM25. This is encouraging, as it suggests that we can integrate more advanced retrievers to further enhance final reranking performance. In contrast, BM25's static nature limits its potential for such improvements."
}
Markdown (Informal)
[UR2N: Unified Retriever and ReraNker](https://aclanthology.org/2025.coling-industry.51/) (Bhat et al., COLING 2025)
ACL
Riyaz Ahmad Bhat, Jaydeep Sen, Rudra Murthy, and Vignesh P. 2025. UR2N: Unified Retriever and ReraNker. In Proceedings of the 31st International Conference on Computational Linguistics: Industry Track, pages 595–602, Abu Dhabi, UAE. Association for Computational Linguistics.