@inproceedings{zhang-etal-2025-recognizing,
title = "Recognizing Limits: Investigating Infeasibility in Large Language Models",
author = "Zhang, Wenbo and
Xu, Zihang and
Cai, Hengrui",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.535/",
doi = "10.18653/v1/2025.findings-emnlp.535",
pages = "10092--10112",
ISBN = "979-8-89176-335-7",
abstract = "Large language models (LLMs) have shown remarkable performance in various tasks but often fail to handle queries that exceed their knowledge and capabilities, leading to incorrect or fabricated responses. This paper addresses the need for LLMs to recognize and refuse infeasible tasks due to the requests surpassing their capabilities. We conceptualize four main categories of infeasible tasks for LLMs, which cover a broad spectrum of hallucination-related challenges identified in prior literature. We develop and benchmark a new dataset comprising diverse infeasible and feasible tasks to evaluate multiple LLMs' abilities to decline infeasible tasks. Furthermore, we explore the potential of increasing LLMs' refusal capabilities with fine-tuning. Experiments validate the effectiveness of our trained models, offering promising directions for refining the operational boundaries of LLMs in real applications."
}