@inproceedings{zhang-etal-2024-instruct,
title = "Can We Instruct {LLM}s to Compensate for Position Bias?",
author = "Zhang, Meiru and
Meng, Zaiqiao and
Collier, Nigel",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.findings-emnlp.732/",
doi = "10.18653/v1/2024.findings-emnlp.732",
pages = "12545--12556",
abstract = "Position bias in large language models (LLMs) leads to difficulty in accessing information retrieved from the retriever, thus downgrading the effectiveness of Retrieval-Augmented Generation (RAG) approaches in open-question answering. Recent studies reveal that this bias is related to disproportional attention across the context. In this work, we examine how to direct LLMs to allocate more attention towards a selected segment of the context through prompting, aiming to compensate for the shortage of attention. We find that language models do not have relative position awareness of the context but can be directed by promoting instruction with an exact document index. Our analysis contributes to a deeper understanding of position bias in LLMs and provides a pathway to mitigate this bias by instruction, thus benefiting LLMs in locating and utilizing relevant information from retrieved documents in RAG applications. The code and data in our study have been made publicly available."
}