@inproceedings{mi-etal-2025-input,
title = "From Input Perception to Predictive Insight: Modeling Model Blind Spots Before They Become Errors",
author = "Mi, Maggie and
Villavicencio, Aline and
Moosavi, Nafise Sadat",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.1740/",
pages = "34316--34329",
ISBN = "979-8-89176-332-6",
abstract = "Language models often struggle with idiomatic, figurative, or context-sensitive inputs, not because they produce flawed outputs, but because they misinterpret the input from the outset. We propose an input-only method for anticipating such failures using token-level likelihood features inspired by surprisal and the Uniform Information Density hypothesis. These features capture localized uncertainty in input comprehension and outperform standard baselines across five linguistically challenging datasets. We show that span-localized features improve error detection for larger models, while smaller models benefit from global patterns. Our method requires no access to outputs or hidden activations, offering a lightweight and generalizable approach to pre-generation error prediction."
}
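
The abstract describes input-only, token-level likelihood features in the spirit of surprisal and the Uniform Information Density (UID) hypothesis. As a rough illustration only, not the authors' implementation, the sketch below computes per-token surprisal under an off-the-shelf causal LM and derives a few global and span-localized summary statistics; the model choice (`gpt2`), the window size, and the exact feature set are all assumptions made for demonstration.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Illustrative assumption: any causal LM works; gpt2 is used here for size.
MODEL_NAME = "gpt2"
tok = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model.eval()


def surprisal_features(text: str, window: int = 3) -> dict:
    """UID-inspired summary features of per-token surprisal (a sketch,
    not the paper's exact feature set)."""
    ids = tok(text, return_tensors="pt").input_ids
    with torch.no_grad():
        logits = model(ids).logits
    # Surprisal of token t is -log p(token_t | tokens_<t); the logits at
    # position t-1 score the token at position t, hence the shift below.
    log_probs = torch.log_softmax(logits[0, :-1], dim=-1)
    targets = ids[0, 1:]
    surprisal = -log_probs[torch.arange(targets.size(0)), targets]
    # Global features: overall difficulty and UID-style unevenness.
    feats = {
        "mean": surprisal.mean().item(),
        "var": surprisal.var(unbiased=False).item(),
        "max": surprisal.max().item(),
    }
    # Span-localized feature: peak mean surprisal over a sliding window,
    # intended to flag a locally hard region (e.g. an idiom) in the input.
    if surprisal.size(0) >= window:
        windows = surprisal.unfold(0, window, 1).mean(dim=1)
        feats["max_window_mean"] = windows.max().item()
    return feats


print(surprisal_features("He kicked the bucket last night."))
```

Such features could then feed a lightweight classifier trained to predict model errors before generation; how the paper combines them is described in the full text.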