@inproceedings{schoch-ji-2025-context,
title = "In-Context Learning (and Unlearning) of Length Biases",
author = "Schoch, Stephanie and
Ji, Yangfeng",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Ingest-2025-COMPUTEL/2025.naacl-long.390/",
pages = "7633--7671",
ISBN = "979-8-89176-189-6",
abstract = "Large language models have demonstrated strong capabilities to learn in-context, where exemplar input-output pairings are appended to the prompt for demonstration. However, existing work has demonstrated the ability of models to learn lexical and label biases in-context, which negatively impacts both performance and robustness of models. The impact of other statistical data biases remains under-explored, which this work aims to address. We specifically investigate the impact of length biases on in-context learning. We demonstrate that models do learn length biases in the context window for their predictions, and further empirically analyze the factors that modulate the level of bias exhibited by the model. In addition, we show that learning length information in-context can be used to counter the length bias that has been encoded in models (e.g., via fine-tuning). This reveals the power of in-context learning in debiasing model prediction behaviors without the need for costly parameter updates."
}
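The in-context learning setup the abstract refers to, where exemplar input-output pairs are appended to the prompt for demonstration, can be sketched in a few lines. The following is an illustrative sketch only, not code from the paper: the sentiment task, the prompt template, and the length-balancing heuristic are all assumptions chosen for demonstration.

```python
# Illustrative sketch of in-context learning (ICL) prompt construction and a
# hypothetical length-debiasing step. The task, template, and exemplars are
# assumptions for demonstration, not taken from Schoch & Ji (2025).


def build_icl_prompt(exemplars, query):
    """Append exemplar input-output pairs to the prompt, the standard ICL setup."""
    blocks = [f"Review: {text}\nLabel: {label}" for text, label in exemplars]
    blocks.append(f"Review: {query}\nLabel:")
    return "\n\n".join(blocks)


def balance_lengths_by_label(exemplars):
    """Hypothetical debiasing heuristic: interleave exemplars so that input
    length does not correlate with label, countering the kind of length bias
    the paper shows models pick up from the context window."""
    by_label = {}
    for text, label in exemplars:
        by_label.setdefault(label, []).append((text, label))
    # Sort each label's exemplars by word count, then interleave so every
    # label contributes exemplars across the same range of lengths.
    for group in by_label.values():
        group.sort(key=lambda pair: len(pair[0].split()))
    interleaved = []
    for row in zip(*by_label.values()):
        interleaved.extend(row)
    return interleaved


if __name__ == "__main__":
    demos = [
        ("Great film.", "positive"),
        ("A long, meandering plot that never quite lands its ideas.", "negative"),
        ("Dull.", "negative"),
        ("An inventive, moving story told with real craft and heart.", "positive"),
    ]
    print(build_icl_prompt(balance_lengths_by_label(demos), "Loved every minute."))
```

In this sketch the positive and negative exemplars span similar lengths, so the prompt avoids teaching a spurious length-label correlation; the abstract's point is that the reverse setup, with length correlated with label, measurably biases model predictions.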
Markdown (Informal)
[In-Context Learning (and Unlearning) of Length Biases](https://aclanthology.org/2025.naacl-long.390/) (Schoch & Ji, NAACL 2025)
ACL
Stephanie Schoch and Yangfeng Ji. 2025. In-Context Learning (and Unlearning) of Length Biases. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 7633–7671, Albuquerque, New Mexico. Association for Computational Linguistics.