@inproceedings{schaefer-2025-integrating,
    title     = "On Integrating {LLM}s Into an Argument Annotation Workflow",
    author    = "Schaefer, Robin",
    editor    = "Chistova, Elena and
      Cimiano, Philipp and
      Haddadan, Shohreh and
      Lapesa, Gabriella and
      Ruiz-Dolz, Ramon",
    booktitle = "Proceedings of the 12th Argument Mining Workshop",
    month     = jul,
    year      = "2025",
    address   = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.argmining-1.8/",
    pages     = "87--99",
    isbn      = "979-8-89176-258-9",
    abstract  = "Given the recent success of LLMs across different NLP tasks, their usability for data annotation has become a promising area of research. In this work, we investigate to what extent LLMs can be used as annotators for argument components and their semantic types in German tweets through a series of experiments combining different models and prompt configurations. Each prompt is constructed from modular components, such as class definitions or contextual information. Our results suggest that LLMs can indeed perform argument annotation, particularly of semantic argument types, if provided with precise class definitions. However, a fine-tuned BERT baseline remains a strong contender, often matching or exceeding LLM performance. These findings highlight the importance of considering not only model performance, but also ecological and financial costs when defining an annotation workflow."
}
Markdown (Informal)
[On Integrating LLMs Into an Argument Annotation Workflow](https://aclanthology.org/2025.argmining-1.8/) (Schaefer, ArgMining 2025)
ACL