@inproceedings{ajjour-wachsmuth-2025-exploring,
title = "Exploring {LLM} Priming Strategies for Few-Shot Stance Classification",
author = "Ajjour, Yamen and
Wachsmuth, Henning",
editor = "Chistova, Elena and
Cimiano, Philipp and
Haddadan, Shohreh and
Lapesa, Gabriella and
Ruiz-Dolz, Ramon",
    booktitle = "Proceedings of the 12th Argument Mining Workshop",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.argmining-1.2/",
doi = "10.18653/v1/2025.argmining-1.2",
pages = "11--23",
ISBN = "979-8-89176-258-9",
    abstract = "Large language models (LLMs) are effective at predicting the labels of unseen target instances if they are instructed about the task and given training instances via the prompt. LLMs generate text with higher probability if the prompt contains text with similar characteristics, a phenomenon called priming that especially affects argumentation. An open question in NLP is how to systematically exploit priming to choose a set of instances suitable for a given task. For stance classification, LLMs may be primed with few-shot instances before identifying whether a given argument is pro or con a topic. In this paper, we explore two priming strategies for few-shot stance classification: one selects the instances that are most semantically similar to the target, and the other those that are most stance-similar. Experiments on three common stance datasets suggest that priming an LLM with stance-similar instances is particularly effective for few-shot stance classification compared to baseline strategies, and behaves largely consistently across different LLM variants."
}
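The abstract describes selecting few-shot instances by semantic similarity before prompting the model. The following is a minimal sketch of that general idea, not the authors' implementation: it assumes a sentence-transformers encoder and invented helper names (`select_fewshot`, `build_prompt`) and toy data, retrieving the k pool instances closest to the target argument and assembling them into a stance-classification prompt.

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")  # any sentence encoder could be used

def select_fewshot(pool, target_text, k=4):
    """Return the k pool instances most semantically similar to the target argument."""
    pool_emb = model.encode([p["text"] for p in pool], convert_to_tensor=True)
    target_emb = model.encode(target_text, convert_to_tensor=True)
    scores = util.cos_sim(target_emb, pool_emb)[0]        # cosine similarity to each pool instance
    top = scores.argsort(descending=True)[:k]
    return [pool[i] for i in top.tolist()]

def build_prompt(shots, target_text, topic):
    """Assemble a simple few-shot prompt from the selected instances (hypothetical format)."""
    lines = ["Decide whether the argument is pro or con the topic."]
    for s in shots:
        lines.append(f"Topic: {s['topic']}\nArgument: {s['text']}\nStance: {s['stance']}")
    lines.append(f"Topic: {topic}\nArgument: {target_text}\nStance:")
    return "\n\n".join(lines)

# Toy usage with invented data
pool = [
    {"topic": "nuclear energy", "text": "Nuclear power emits little CO2.", "stance": "pro"},
    {"topic": "nuclear energy", "text": "Reactor accidents have devastating consequences.", "stance": "con"},
    {"topic": "school uniforms", "text": "Uniforms reduce peer pressure.", "stance": "pro"},
]
target = "Waste storage remains an unsolved problem."
shots = select_fewshot(pool, target, k=2)
print(build_prompt(shots, target, "nuclear energy"))
```

The stance-similar strategy studied in the paper would differ only in the scoring step, ranking pool instances by stance-related rather than purely semantic similarity.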