@inproceedings{carla-uban-2025-scibert,
title = "{S}ci{BERT} Meets Contrastive Learning: A Solution for Scientific Hallucination Detection",
author = "Carla, Crivoi and
Uban, Ana Sabina",
editor = "Ghosal, Tirthankar and
Mayr, Philipp and
Singh, Amanpreet and
Naik, Aakanksha and
Rehm, Georg and
Freitag, Dayne and
Li, Dan and
Schimmler, Sonja and
De Waard, Anita",
booktitle = "Proceedings of the Fifth Workshop on Scholarly Document Processing (SDP 2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/landing_page/2025.sdp-1.32/",
doi = "10.18653/v1/2025.sdp-1.32",
pages = "336--343",
ISBN = "979-8-89176-265-7",
abstract = "As AI systems become more involved in scientific research, there is growing concern about the accuracy of their outputs. Tools powered by large language models can generate summaries and answers that appear well-formed, but sometimes include claims that are not actually supported by the cited references. In this paper, we focus on identifying these hallucinated claims. We propose a system built on SciBERT and contrastive learning to detect whether a scientific claim can be inferred from the referenced content. Our method was evaluated in the SciHal 2025 shared task, which includes both coarse and fine-grained hallucination labels. The results show that our model performs well on supported and clearly unsupported claims, but struggles with ambiguous or low-resource categories. These findings highlight both the promise and the limitations of current models in improving the trustworthiness of AI-generated scientific content."
}