@inproceedings{saito-etal-2025-answer,
title = "Where is the answer? An empirical study of positional bias for parametric knowledge extraction in language model",
author = "Saito, Kuniaki and
Lee, Chen-Yu and
Sohn, Kihyuk and
Ushiku, Yoshitaka",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.naacl-long.58/",
pages = "1252--1269",
ISBN = "979-8-89176-189-6",
abstract = "Language model (LM) stores diverse factual knowledge in their parameters, which is learned during self-supervised training on unlabeled documents and is made extractable by instruction-tuning. For knowledge-intensive tasks, it is essential to memorize information in a way that makes it extractable from LM{'}s parameters with diverse queries. However, LMs suffer from a phenomenon called ``perplexity curse''; despite minimizing document perplexity during training, LMs struggle to extract information via a question prompt. In this paper, we study the problem by fine-tuning LMs for new data and find a very intriguing fact that all studied LMs suffer from positional bias in the training document, i.e., they struggle to answer questions about the information described in the middle or at the end of the training document. Our study indicates that this problem stems from the auto-regressive training, ie., predicting the next token given all previous tokens, thus adding regularization mitigates the issue. Our discoveries supported by extensive analysis will be an important key to extracting knowledge from the parameters of LMs. We will publish our code and dataset upon acceptance."
}