@inproceedings{thai-iyyer-2025-literary,
title = "Literary Evidence Retrieval via Long-Context Language Models",
author = "Thai, Katherine and
Iyyer, Mohit",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-short.29/",
pages = "369--380",
ISBN = "979-8-89176-252-7",
abstract = "How well do modern long-context language models understand literary fiction? We explore this question via the task of literary evidence retrieval, repurposing the RELiC dataset of Thai et al. (2022) to construct a benchmark where the entire text of a primary source (e.g., The Great Gatsby) is provided to an LLM alongside literary criticism with a missing quotation from that work. This setting, in which the model must generate the missing quotation, mirrors the human process of literary analysis by requiring models to perform both global narrative reasoning and close textual examination. We curate a high-quality subset of 292 examples through extensive filtering and human verification. Our experiments show that recent reasoning models, such as Gemini 2.5 Pro can exceed human expert performance (62.5{\%} vs. 50{\%} accuracy). In contrast, the best open-weight model achieves only 29.1{\%} accuracy, highlighting a wide gap in interpretive reasoning between open and closed-weight models. Despite their speed and apparent accuracy, even the strongest models struggle with nuanced literary signals and overgeneration, signaling open challenges for applying LLMs to literary analysis. We release our dataset and evaluation code to encourage future work in this direction."
}