@inproceedings{jeong-etal-2025-large,
title = "Large Language Models Are Better Logical Fallacy Reasoners with Counterargument, Explanation, and Goal-Aware Prompt Formulation",
author = "Jeong, Jiwon and
Jang, Hyeju and
Park, Hogun",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2025",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.findings-naacl.384/",
pages = "6918--6937",
ISBN = "979-8-89176-195-7",
abstract = "The advancement of Large Language Models (LLMs) has greatly improved our ability to process complex language. However, accurately detecting logical fallacies remains a significant challenge. This study presents a novel and effective prompt formulation approach for logical fallacy detection, applicable in both supervised (fine-tuned) and unsupervised (zero-shot) settings. Our method enriches input text by incorporating implicit contextual information{---}counterarguments, explanations, and goals{---}which we query for validity within the argument{'}s context. We then rank these queries based on confidence scores to inform classification. We evaluate our approach across multiple datasets from 5 domains, covering 29 distinct fallacy types, using models from GPT and LLaMA series. The results show substantial improvements over state-of-the-art models: up to a 0.57 increase in F1-score in zero-shot settings and up to 0.45 in fine-tuned models. Extensive analyses further illustrate why and how our method excels."
}