@inproceedings{pichler-etal-2025-evaluating,
title = "Evaluating {LLM}-Prompting for Sequence Labeling Tasks in Computational Literary Studies",
author = "Pichler, Axel and
Pagel, Janis and
Reiter, Nils",
editor = "Kazantseva, Anna and
Szpakowicz, Stan and
Degaetano-Ortlieb, Stefania and
Bizzoni, Yuri and
Pagel, Janis",
booktitle = "Proceedings of the 9th Joint SIGHUM Workshop on Computational Linguistics for Cultural Heritage, Social Sciences, Humanities and Literature (LaTeCH-CLfL 2025)",
month = may,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.latechclfl-1.5/",
pages = "32--46",
ISBN = "979-8-89176-241-1",
abstract = "Prompt engineering holds the promise for the computational literary studies (CLS) to obtain high quality markup for literary research questions by simply prompting large language models with natural language strings. We test prompt engineering{'}s validity for two CLS sequence labeling tasks under the following aspects: (i) how generalizable are the results of identical prompts on different dataset splits?, (ii) how robust are performance results when re-formulating the prompts?, and (iii) how generalizable are certain fixed phrases added to the prompts that are generally considered to increase performance. We find that results are sensitive to data splits and prompt formulation, while the addition of fixed phrases does not change performance in most cases, depending on the chosen model."
}