@comment{Cleaned: lowercased ISBN field name, replaced temporary preview/corrections
  URL with the canonical ACL Anthology URL (matches the DOI suffix), removed the
  math-mode wrapping around text-mode \textit in the abstract, and fixed the
  abstract typo "Casual-native" -> "Causal-native" (pairs with "Causal-multi").}
@inproceedings{sun-etal-2025-causalabstain,
    title = "{C}ausal{A}bstain: Enhancing Multilingual {LLM}s with Causal Reasoning for Trustworthy Abstention",
    author = "Sun, Yuxi and
      Zuo, Aoqi and
      Gao, Wei and
      Ma, Jing",
    editor = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.723/",
    doi = "10.18653/v1/2025.findings-acl.723",
    pages = "14060--14076",
    isbn = "979-8-89176-256-5",
    abstract = "Large Language Models (LLMs) often exhibit knowledge disparities across languages. Encouraging LLMs to \textit{abstain} when faced with knowledge gaps is a promising strategy to reduce hallucinations in multilingual settings. Current abstention strategies for multilingual scenarios primarily rely on generating feedback in various languages using LLMs and performing self-reflection. However, these methods can be adversely impacted by inaccuracies and biases in the generated feedback. To address this, from a causal perspective, we introduce \textit{CausalAbstain}, a method that helps LLMs determine whether to utilize multiple generated feedback responses and how to identify the most useful ones. Extensive experiments demonstrate that \textit{CausalAbstain} effectively selects helpful feedback and enhances abstention decisions with interpretability in both native language (\textit{Causal-native}) and multilingual (\textit{Causal-multi}) settings, outperforming strong baselines on two benchmark datasets covering encyclopedic and commonsense knowledge QA tasks."
}
Markdown (Informal)
[CausalAbstain: Enhancing Multilingual LLMs with Causal Reasoning for Trustworthy Abstention](https://aclanthology.org/2025.findings-acl.723/) (Sun et al., Findings 2025)
ACL