@inproceedings{chang-etal-2025-jopa,
title = "{J}o{PA}: Explaining Large Language Model{'}s Generation via Joint Prompt Attribution",
author = "Chang, Yurui and
Cao, Bochuan and
Wang, Yujia and
Chen, Jinghui and
Lin, Lu",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.1074/",
pages = "22106--22122",
ISBN = "979-8-89176-251-0",
abstract = "Large Language Models (LLMs) have demonstrated impressive performances in complex text generation tasks. However, the contribution of the input prompt to the generated content still remains obscure to humans, underscoring the necessity of understanding the causality between input and output pairs. Existing works for providing prompt-specific explanation often confine model output to be classification or next-word prediction. Few initial attempts aiming to explain the entire language generation often treat input prompt texts independently, ignoring their combinatorial effects on the follow-up generation. In this study, we introduce a counterfactual explanation framework based on joint prompt attribution, JoPA, which aims to explain how a few prompt texts collaboratively influences the LLM{'}s complete generation. Particularly, we formulate the task of prompt attribution for generation interpretation as a combinatorial optimization problem, and introduce a probabilistic algorithm to search for the casual input combination in the discrete space. We define and utilize multiple metrics to evaluate the produced explanations, demonstrating both the faithfulness and efficiency of our framework."
}
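The abstract frames prompt attribution as a combinatorial search for a causal subset of prompt texts, driven by a probabilistic algorithm over a discrete space. The sketch below is a minimal, hypothetical illustration of that general idea (a stochastic local search over subsets of prompt segments), not the JoPA implementation described in the paper; the `segments` split, the `generation_score` callable, and the acceptance probability are all illustrative assumptions.

```python
# Hypothetical sketch: probabilistic search for a small subset of prompt
# segments whose removal most changes a generation score. This is NOT the
# JoPA algorithm from the paper; the scoring function is a placeholder.
import random
from typing import Callable, List, Tuple


def search_causal_subset(
    segments: List[str],
    generation_score: Callable[[List[str]], float],
    k: int = 3,
    n_iters: int = 200,
    seed: int = 0,
) -> Tuple[List[int], float]:
    """Stochastic local search for a size-k subset of prompt segments whose
    removal maximally reduces the score of the original generation."""
    rng = random.Random(seed)
    full_score = generation_score(segments)

    def attribution(mask: List[int]) -> float:
        # Drop in score when the masked segments are removed from the prompt.
        kept = [s for i, s in enumerate(segments) if i not in mask]
        return full_score - generation_score(kept)

    # Start from a random subset of k segment indices.
    current = rng.sample(range(len(segments)), k)
    best, best_gain = list(current), attribution(current)

    for _ in range(n_iters):
        # Propose swapping one selected segment for an unselected one.
        candidate = list(current)
        out_idx = rng.randrange(k)
        in_choices = [i for i in range(len(segments)) if i not in candidate]
        candidate[out_idx] = rng.choice(in_choices)
        gain = attribution(candidate)
        # Accept probabilistically: always if no worse, occasionally otherwise.
        if gain >= best_gain or rng.random() < 0.1:
            current = candidate
        if gain > best_gain:
            best, best_gain = list(candidate), gain

    return sorted(best), best_gain


# Toy usage with a fake scorer that rewards keeping segments containing "dog".
segments = ["The", "quick", "brown", "dog", "jumps", "today"]
score = lambda kept: sum(1.0 for s in kept if "dog" in s)
print(search_causal_subset(segments, score, k=2, n_iters=50))
```

In an actual application, `generation_score` would query the LLM (e.g., the likelihood it assigns to the original output given the reduced prompt), so that the returned subset identifies prompt texts that jointly matter for the complete generation.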