@inproceedings{bell-2025-less,
title = "Less Can be More: An Empirical Evaluation of Small and Large Language Models for Sentence-level Claim Detection",
author = "Bell, Andrew",
editor = "Akhtar, Mubashara and
Aly, Rami and
Christodoulopoulos, Christos and
Cocarascu, Oana and
Guo, Zhijiang and
Mittal, Arpit and
Schlichtkrull, Michael and
Thorne, James and
Vlachos, Andreas",
booktitle = "Proceedings of the Eighth Fact Extraction and VERification Workshop (FEVER)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.fever-1.6/",
pages = "85--90",
ISBN = "978-1-959429-53-1",
abstract = "Sentence-level claim detection is a critical first step in the fact-checking process. While Large Language Models (LLMs) seem well-suited for claim detection, their computational cost poses challenges for real-world deployment. This paper investigates the effectiveness of both small and large pretrained Language Models for the task of claim detection. We conduct a comprehensive empirical evaluation using BERT, ModernBERT, RoBERTa, Llama, and ChatGPT-based models. Our results reveal that smaller models, when finetuned appropriately, can achieve competitive performance with significantly lower computational overhead on in-domain tasks. Notably, we also find that BERT-based models transfer poorly on sentence-level claim detection in out-of-domain tasks. We discuss the implications of these findings for practitioners and highlight directions for future research."
}
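To make the setup the abstract describes concrete, here is a minimal, hypothetical sketch of sentence-level claim detection with a small encoder, assuming a HuggingFace-style pipeline. The model name, label scheme, and example sentences are illustrative assumptions, not the paper's actual code or data; in practice the classification head would first be fine-tuned on a labeled claim-detection corpus before its predictions are meaningful.

```python
# Hypothetical sketch: sentence-level claim detection with a small encoder.
# Not the paper's code; model name and labels are illustrative assumptions.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_NAME = "bert-base-uncased"  # RoBERTa or ModernBERT would plug in the same way

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(
    MODEL_NAME,
    num_labels=2,  # assumed label scheme: 0 = not a claim, 1 = check-worthy claim
)

# Toy sentences standing in for a labeled claim-detection corpus.
sentences = [
    "The unemployment rate fell to 3.5% last year.",  # factual claim
    "What a wonderful day it is!",                    # not a claim
]

# Tokenize the batch and run a forward pass (no fine-tuning shown here,
# so these logits come from an untrained classification head).
inputs = tokenizer(sentences, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
predictions = logits.argmax(dim=-1)  # per-sentence claim / non-claim decision
print(predictions.tolist())
```

A small model like this runs inference on commodity hardware, which is the computational-overhead contrast with LLM-based detectors that the paper evaluates.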