@inproceedings{bao-etal-2025-permitted,
    title     = {Permitted Knowledge Boundary: Evaluating the {Knowledge-Constrained Responsiveness} of Large Language Models},
    author    = {Bao, Wenrui and
                 Wang, Kai and
                 Luo, Siqiang and
                 Li, Xiang},
    editor    = {Christodoulopoulos, Christos and
                 Chakraborty, Tanmoy and
                 Rose, Carolyn and
                 Peng, Violet},
    booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2025},
    month     = nov,
    year      = {2025},
    address   = {Suzhou, China},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.findings-emnlp.722/},
    doi       = {10.18653/v1/2025.findings-emnlp.722},
    pages     = {13390--13405},
    isbn      = {979-8-89176-335-7},
    abstract  = {With the advancement of large language models (LLMs), recent research has raised concerns about their controllability. In this paper, we argue for the importance of Knowledge-Constrained Responsiveness (KCR), ensuring that LLMs comply with human-defined constraints. However, KCR is an implicit and unobservable capability of LLMs, functioning as a black box that currently eludes quantitative assessment. To address this issue, we first introduce the definition of ``permitted boundary'' and define the ``boundary bias'' to depict KCR. We propose six metrics to quantify the boundary bias of LLMs and subsequently assess the KCR. Furthermore, we establish a benchmark with two new datasets, KCR-SimpleQA and KCR-WebNLG, to evaluate the performance of LLMs. Our extensive experiments show that several tested LLMs still struggle to varying degrees when adhering to constraints, especially without the corresponding knowledge.},
}
Markdown (Informal)
[Permitted Knowledge Boundary: Evaluating the Knowledge-Constrained Responsiveness of Large Language Models](https://aclanthology.org/2025.findings-emnlp.722/) (Bao et al., Findings 2025)
ACL