@inproceedings{liu-etal-2024-explicit,
title = "Explicit Inductive Inference using Large Language Models",
author = "Liu, Tianyang and
Li, Tianyi and
Cheng, Liang and
Steedman, Mark",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2024.findings-emnlp.926/",
doi = "10.18653/v1/2024.findings-emnlp.926",
pages = "15779--15786",
abstract = "Large Language Models (LLMs) are reported to hold undesirable attestation bias on inference tasks: when asked to predict if a premise $P$ entails a hypothesis $H$, instead of considering $H${\textquoteleft}s conditional truthfulness entailed by $P$, LLMs tend to use the out-of-context truth label of $H$ as a fragile proxy. In this paper, we propose a pipeline that exploits this bias to do explicit inductive inference. Our pipeline uses an LLM to transform a premise into a set of attested alternatives, and then aggregate answers of the derived new entailment inquiries to support the original inference prediction. On a directional predicate entailment benchmark, we demonstrate that by applying this simple pipeline, we can improve the overall performance of LLMs on inference and substantially alleviate the impact of their attestation bias."
}
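
The abstract describes a three-step pipeline: rewrite the premise into attested alternatives, pose the entailment question for each alternative, and aggregate the answers into a final prediction. Below is a minimal sketch of that flow, assuming a hypothetical `llm_complete(prompt) -> str` wrapper around any LLM API; the prompt wording, the `derive_alternatives`/`entails` helpers, and the majority-vote aggregation are illustrative choices, not the authors' implementation.

```python
# Sketch of the explicit inductive inference pipeline from the abstract:
# (1) transform the premise into attested alternatives,
# (2) run an entailment inquiry per alternative,
# (3) aggregate the answers to support the original prediction.
# `llm_complete` is a hypothetical stand-in for a chat/completion API call.

from collections import Counter
from typing import Callable, List

def derive_alternatives(llm_complete: Callable[[str], str],
                        premise: str, n: int = 5) -> List[str]:
    """Ask the LLM for n attested alternative statements of the premise."""
    prompt = (f"Rewrite the following statement into {n} concrete, "
              f"well-attested alternative statements, one per line:\n{premise}")
    lines = llm_complete(prompt).strip().splitlines()
    return [line.strip() for line in lines if line.strip()][:n]

def entails(llm_complete: Callable[[str], str],
            premise: str, hypothesis: str) -> bool:
    """A single yes/no entailment inquiry."""
    prompt = (f"Premise: {premise}\nHypothesis: {hypothesis}\n"
              "Does the premise entail the hypothesis? Answer yes or no.")
    return llm_complete(prompt).strip().lower().startswith("yes")

def inductive_inference(llm_complete: Callable[[str], str],
                        premise: str, hypothesis: str) -> bool:
    """Aggregate entailment answers over the attested alternatives."""
    votes = [entails(llm_complete, alt, hypothesis)
             for alt in derive_alternatives(llm_complete, premise)]
    if not votes:  # no alternatives returned: fall back to the direct inquiry
        return entails(llm_complete, premise, hypothesis)
    return Counter(votes).most_common(1)[0][0]
```

Majority voting is only one possible aggregation rule; the point of the sketch is that the final prediction is supported by inquiries over attested variants rather than by the single, bias-prone original question.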