@inproceedings{wu-etal-2025-efficient-opamp,
title = "Efficient {O}p{A}mp Adaptation for Zoom Attention to Golden Contexts",
author = "Wu, Haoyuan and
Ming, Rui and
Zheng, Haisheng and
He, Zhuolun and
Yu, Bei",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.653/",
pages = "13319--13331",
ISBN = "979-8-89176-251-0",
abstract = "Large language models (LLMs) have shown significant promise in question-answering (QA) tasks, particularly in retrieval-augmented generation (RAG) scenarios and long-context applications. However, their performance is hindered by noisy reference documents, which often distract from essential information. Despite fine-tuning efforts, Transformer-based architectures struggle to prioritize relevant content. This is evidenced by their tendency to allocate disproportionate attention to irrelevant or later-positioned documents. Recent work proposes the differential attention mechanism to address this issue, but this mechanism is limited by an unsuitable common-mode rejection ratio (CMRR) and high computational costs. Inspired by the operational amplifier (OpAmp), we propose the OpAmp adaptation to address these challenges, which is implemented with adapters efficiently. By integrating the adapter into pre-trained Transformer blocks, our approach enhances focus on the golden context without costly training from scratch. Empirical evaluations on noisy-context benchmarks reveal that our Qwen2.5-OpAmp-72B model, trained with our OpAmp adaptation, surpasses the performance of state-of-the-art LLMs, including DeepSeek-V3 and GPT-4o.Our code is available at https://github.com/wuhy68/OpampAdapter."
}
Markdown (Informal)
[Efficient OpAmp Adaptation for Zoom Attention to Golden Contexts](https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.653/) (Wu et al., ACL 2025)
ACL
Haoyuan Wu, Rui Ming, Haisheng Zheng, Zhuolun He, and Bei Yu. 2025. Efficient OpAmp Adaptation for Zoom Attention to Golden Contexts. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 13319–13331, Vienna, Austria. Association for Computational Linguistics.
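
The differential attention mechanism that the abstract references (and which the OpAmp adaptation refines) takes the difference of two softmax attention maps so that shared, common-mode noise from distracting documents tends to cancel. The sketch below is a minimal, generic PyTorch illustration of that prior mechanism under assumed toy tensor shapes; the function name `differential_attention` and the scale `lam` are placeholders, and this is not the paper's OpAmp adapter implementation (see the linked repository for that).

```python
import torch
import torch.nn.functional as F

def differential_attention(q1, k1, q2, k2, v, lam=0.5):
    """Difference of two softmax attention maps; lam acts as a
    common-mode rejection knob (illustrative only)."""
    d = q1.size(-1)
    a1 = F.softmax(q1 @ k1.transpose(-2, -1) / d**0.5, dim=-1)
    a2 = F.softmax(q2 @ k2.transpose(-2, -1) / d**0.5, dim=-1)
    # Subtracting the second map suppresses attention mass shared by both,
    # i.e. the "common-mode" component attributed to noisy context.
    return (a1 - lam * a2) @ v

# Toy usage: batch of 2, 8 tokens, head dimension 16.
q1, k1, q2, k2, v = (torch.randn(2, 8, 16) for _ in range(5))
out = differential_attention(q1, k1, q2, k2, v)
print(out.shape)  # torch.Size([2, 8, 16])
```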