@inproceedings{ye-etal-2025-input,
title = "Can Input Attributions Explain Inductive Reasoning in In-Context Learning?",
author = "Ye, Mengyu and
Kuribayashi, Tatsuki and
Kobayashi, Goro and
Suzuki, Jun",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.findings-acl.1092/",
pages = "21199--21225",
ISBN = "979-8-89176-256-5",
abstract = "Interpreting the internal process of neural models has long been a challenge. This challenge remains relevant in the era of large language models (LLMs) and in-context learning (ICL); for example, ICL poses a new issue of interpreting which example in the few-shot examples contributed to identifying/solving the task. To this end, in this paper, we design synthetic diagnostic tasks of inductive reasoning, inspired by the generalization tests in linguistics; here, most in-context examples are ambiguous w.r.t. their underlying rule, and one critical example disambiguates the task demonstrated. The question is whether conventional input attribution (IA) methods can track such a reasoning process, i.e., identify the influential example, in ICL. Our experiments provide several practical findings; for example, a certain simple IA method works the best, and the larger the model, the generally harder it is to interpret the ICL with gradient-based IA methods."
}
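
For context, a minimal sketch (not from the paper or its released code) of what a gradient-based input attribution over in-context examples can look like: gradient×input saliency is aggregated per few-shot example to score how much each example influenced the prediction. The model name `gpt2`, the toy prompt, and the character-offset aggregation below are illustrative assumptions, not the paper's diagnostic tasks or method.

```python
# Hypothetical gradient x input attribution over in-context examples.
# Assumes the HuggingFace transformers API; "gpt2" is a placeholder model.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"  # placeholder; the paper studies larger LLMs
tok = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()

examples = ["walk -> walked", "jump -> jumped", "go -> went"]  # toy ICL prompt
query = "sing ->"
prompt = "\n".join(examples + [query])

enc = tok(prompt, return_tensors="pt")

# Embed tokens manually so gradients can be taken w.r.t. the embeddings.
embeds = model.get_input_embeddings()(enc["input_ids"]).detach().requires_grad_(True)
out = model(inputs_embeds=embeds, attention_mask=enc["attention_mask"])

# Backpropagate the score of the top predicted token at the final position.
logits = out.logits[0, -1]
logits[logits.argmax()].backward()

# Gradient x input saliency per token, then aggregated per in-context example
# via character offsets (crude: tokens spanning separators are approximated).
token_saliency = (embeds.grad[0] * embeds[0]).abs().sum(dim=-1)
offsets = tok(prompt, return_offsets_mapping=True)["offset_mapping"]

scores, start = [], 0
for ex in examples:
    end = start + len(ex)
    score = sum(s.item() for s, (a, b) in zip(token_saliency, offsets)
                if a >= start and b <= end + 1)
    scores.append(score)
    start = end + 1  # skip the newline separator
print(dict(zip(examples, scores)))
```

In a setup like the paper's, the example receiving the highest score would ideally be the single disambiguating example; whether gradient-based scores actually behave that way is exactly what the study evaluates.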
Markdown (Informal)
[Can Input Attributions Explain Inductive Reasoning in In-Context Learning?](https://preview.aclanthology.org/ingestion-acl-25/2025.findings-acl.1092/) (Ye et al., Findings 2025)