@inproceedings{alhindi-etal-2024-large,
title = "Large Language Models are Few-Shot Training Example Generators: A Case Study in Fallacy Recognition",
author = "Alhindi, Tariq and
Muresan, Smaranda and
Nakov, Preslav",
editor = "Ku, Lun-Wei and
Martins, Andre and
Srikumar, Vivek",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2024",
month = aug,
year = "2024",
address = "Bangkok, Thailand",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.findings-acl.732/",
doi = "10.18653/v1/2024.findings-acl.732",
pages = "12323--12334",
    abstract = "Recognizing fallacies is crucial for ensuring the quality and validity of arguments across various domains. However, computational fallacy recognition faces challenges due to the diverse genres, domains, and types of fallacies found in datasets. This leads to a highly multi-class, and even multi-label, setup with substantial class imbalance. In this study, we aim to enhance existing models for fallacy recognition by incorporating additional context and by leveraging large language models to generate synthetic data, thus increasing the representation of the infrequent classes. We experiment with GPT-3.5 to generate synthetic examples and we examine the impact of prompt settings on this. Moreover, we explore zero-shot and few-shot scenarios to evaluate the effectiveness of using the generated examples for training smaller models within a unified fallacy recognition framework. Furthermore, we analyze the overlap between the synthetic data and existing fallacy datasets. Finally, we investigate the usefulness of providing supplementary context for detecting fallacy types that need such context, e.g., diversion fallacies. Our evaluation results demonstrate consistent improvements across fallacy types, datasets, and generators. The code and the synthetic datasets are all publicly available."
}
Markdown (Informal)
[Large Language Models are Few-Shot Training Example Generators: A Case Study in Fallacy Recognition](https://aclanthology.org/2024.findings-acl.732/) (Alhindi et al., Findings 2024)