@inproceedings{bakke-heggelund-2025-fact,
    title = "(Fact) Check Your Bias",
    author = "Bakke, Eivind Morris and
      Heggelund, Nora Winger",
    editor = "Akhtar, Mubashara and
      Aly, Rami and
      Christodoulopoulos, Christos and
      Cocarascu, Oana and
      Guo, Zhijiang and
      Mittal, Arpit and
      Schlichtkrull, Michael and
      Thorne, James and
      Vlachos, Andreas",
    booktitle = "Proceedings of the Eighth Fact Extraction and VERification Workshop (FEVER)",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.fever-1.12/",
    pages = "162--178",
    isbn = "978-1-959429-53-1",
    abstract = "Automatic fact verification systems increasingly rely on large language models (LLMs). We investigate how parametric knowledge biases in these models affect fact-checking outcomes of the HerO system (baseline for FEVER-25). We examine how the system is affected by: (1) potential bias in Llama 3.1{'}s parametric knowledge and (2) intentionally injected bias. When prompted directly to perform fact-verification, Llama 3.1 labels nearly half the claims as ``Not Enough Evidence''. Using only its parametric knowledge it is able to reach a verdict on the remaining half of the claims. In the second experiment, we prompt the model to generate supporting, refuting, or neutral fact-checking documents. These prompts significantly influence retrieval outcomes, with approximately 50{\%} of retrieved evidence being unique to each perspective. Notably, the model sometimes refuses to generate supporting documents for claims it believes to be false, creating an inherent negative bias. Despite differences in retrieved evidence, final verdict predictions show stability across prompting strategies. The code is available at: \url{https://github.com/eibakke/FEVER-8-Shared-Task}"
}
@comment{
Markdown (Informal)
[(Fact) Check Your Bias](https://aclanthology.org/2025.fever-1.12/) (Bakke & Heggelund, FEVER 2025)
ACL
- Eivind Morris Bakke and Nora Winger Heggelund. 2025. (Fact) Check Your Bias. In Proceedings of the Eighth Fact Extraction and VERification Workshop (FEVER), pages 162–178, Vienna, Austria. Association for Computational Linguistics.
}