@inproceedings{giovanni-moller-maria-aiello-2025-prompt,
    title     = {Prompt Refinement or Fine-tuning? Best Practices for using {LLM}s in Computational Social Science Tasks},
    author    = {M{\o}ller, Anders Giovanni and
      Aiello, Luca Maria},
    editor    = {Hale, James and
      Kwon, Brian Deuksin and
      Dutt, Ritam},
    booktitle = {Proceedings of the Third Workshop on Social Influence in Conversations (SICon 2025)},
    month     = jul,
    year      = {2025},
    address   = {Vienna, Austria},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.sicon-1.2/},
    doi       = {10.18653/v1/2025.sicon-1.2},
    pages     = {27--49},
    isbn      = {979-8-89176-266-4},
    abstract  = {Large Language Models are expressive tools that enable complex tasks of text understanding within Computational Social Science. Their versatility, while beneficial, poses a barrier for establishing standardized best practices within the field. To bring clarity on the values of different strategies, we present an overview of the performance of modern LLM-based classification methods on a benchmark of 23 social knowledge tasks. Our results point to three best practices: prioritize models with larger vocabulary and pre-training corpora; avoid simple zero-shot in favor of AI-enhanced prompting; fine-tune on task-specific data, and consider more complex forms instruction-tuning on multiple datasets only when only training data is more abundant.},
}

Markdown (Informal)
[Prompt Refinement or Fine-tuning? Best Practices for using LLMs in Computational Social Science Tasks](https://aclanthology.org/2025.sicon-1.2/) (Møller & Aiello, SICon 2025)
ACL