@inproceedings{savelli-etal-2025-malto,
title = "{MALTO} at {S}em{E}val-2025 Task 3: Detecting Hallucinations in {LLM}s via Uncertainty Quantification and Larger Model Validation",
author = "Savelli, Claudio and
Koudounas, Alkis and
Giobergia, Flavio",
editor = "Rosenthal, Sara and
Ros{\'a}, Aiala and
Ghosh, Debanjan and
Zampieri, Marcos",
booktitle = "Proceedings of the 19th International Workshop on Semantic Evaluation (SemEval-2025)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/corrections-2025-08/2025.semeval-1.175/",
pages = "1318--1324",
ISBN = "979-8-89176-273-2",
abstract = "Large language models (LLMs) often produce \textit{hallucinations}{---}factually incorrect statements that appear highly persuasive. These errors pose risks in fields like healthcare, law, and journalism. This paper presents our approach to the Mu-SHROOM shared task at SemEval 2025, which challenges researchers to detect hallucination spans in LLM outputs. We introduce a new method that combines probability-based analysis with Natural Language Inference to evaluate hallucinations at the word level. Our technique aims to better align with human judgments while working independently of the underlying model. Our experimental results demonstrate the effectiveness of this method compared to existing baselines."
}