@inproceedings{sileo-2026-logic,
    title     = "Logic Haystacks: Probing {LLM}s' Long-Context Logical Reasoning (Without Easily Identifiable Unrelated Padding)",
    author    = "Sileo, Damien",
    editor    = "Demberg, Vera and
                 Inui, Kentaro and
                 Marquez, Llu{\'i}s",
    booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 2: Short Papers)",
    month     = mar,
    year      = "2026",
    address   = "Rabat, Morocco",
    publisher = "Association for Computational Linguistics",
    url       = "https://preview.aclanthology.org/ingest-eacl/2026.eacl-short.3/",
    pages     = "66--75",
    isbn      = "979-8-89176-381-4",
    abstract  = "Large language models demonstrate promising long-context processing capabilities, with recent models touting context windows close to one million tokens. However, the evaluations supporting these claims often involve simple retrieval tasks or synthetic tasks padded with irrelevant text, which models may easily detect and discard. In this work, we generate lengthy, simplified English text with first-order logic representations spanning up to 2048 sentences ({\textasciitilde}25k GPT-4 tokens). We formulate an evaluation task with evidence retrieval for contradiction detection. The long, homogeneous text is filled with distractors that are both hard to distinguish from relevant evidence and provably non-interfering. Our evaluation of evidence retrieval reveals that the effective context window is much smaller with such realistic distractors, already crumbling at 128 sentences."
}
Markdown (Informal)
[Logic Haystacks: Probing LLMs’ Long-Context Logical Reasoning (Without Easily Identifiable Unrelated Padding)](https://preview.aclanthology.org/ingest-eacl/2026.eacl-short.3/) (Sileo, EACL 2026)
ACL