@inproceedings{joo-cho-2025-cleanse,
title = "Cleanse: Uncertainty Estimation Approach Using Clustering-based Semantic Consistency in {LLM}s",
author = "Joo, Minsuh and
Cho, Hyunsoo",
editor = "Dhole, Kaustubh and
Clinciu, Miruna",
booktitle = "Proceedings of the Fourth Workshop on Generation, Evaluation and Metrics (GEM{\texttwosuperior})",
month = jul,
year = "2025",
address = "Vienna, Austria and virtual meeting",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/transition-to-people-yaml/2025.gem-1.25/",
pages = "291--301",
ISBN = "979-8-89176-261-9",
abstract = "Despite the outstanding performance of large language models (LLMs) across various NLP tasks, hallucinations in LLMs{--}where LLMs generate inaccurate responses{--}remains as a critical problem as it can be directly connected to a crisis of building safe and reliable LLMs. Uncertainty estimation is primarily used to measure hallucination levels in LLM responses so that correct and incorrect answers can be distinguished clearly. This study proposes an effective uncertainty estimation approach, Clustering-based semantic consistency (Cleanse). Cleanse quantifies the uncertainty with the proportion of the intra-cluster consistency in the total consistency between LLM hidden embeddings which contain adequate semantic information of generations, by employing clustering. The effectiveness of Cleanse for detecting hallucination is validated using four off-the-shelf models, LLaMA-7B, LLaMA-13B, LLaMA2-7B and Mistral-7B and two question-answering benchmarks, SQuAD and CoQA."
}
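
The abstract describes the score as the proportion of intra-cluster consistency within the total consistency between hidden embeddings of sampled generations. Below is a minimal sketch of that ratio, assuming cosine similarity as the consistency measure and agglomerative clustering with a fixed number of clusters; both are illustrative assumptions, not necessarily the paper's exact choices.

```python
import numpy as np
from sklearn.cluster import AgglomerativeClustering


def cleanse_style_score(embeddings: np.ndarray, n_clusters: int = 2) -> float:
    """Share of total pairwise cosine consistency that is intra-cluster.

    Higher values mean the sampled generations agree semantically,
    i.e. lower estimated uncertainty. Sketch only: the clustering
    method and similarity measure are assumptions, not the paper's
    confirmed recipe.
    """
    # Unit-normalize so dot products are cosine similarities.
    normed = embeddings / np.linalg.norm(embeddings, axis=1, keepdims=True)
    sim = normed @ normed.T

    # Group semantically similar generations.
    labels = AgglomerativeClustering(n_clusters=n_clusters).fit_predict(normed)

    off_diag = ~np.eye(len(labels), dtype=bool)        # ignore self-similarity
    same_cluster = labels[:, None] == labels[None, :]  # intra-cluster pairs

    total = sim[off_diag].sum()
    intra = sim[off_diag & same_cluster].sum()
    # Cosine similarity can be negative, so this ratio is illustrative;
    # guard against a degenerate zero denominator.
    return float(intra / total) if total != 0 else 1.0


# Example: ten sampled generations embedded into 4096-dim hidden states.
score = cleanse_style_score(np.random.randn(10, 4096))
```

The key design point the abstract implies is that consistency is measured in hidden-embedding space rather than over surface text, so paraphrases that agree semantically land in the same cluster and raise the intra-cluster share.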
Markdown (Informal)
[Cleanse: Uncertainty Estimation Approach Using Clustering-based Semantic Consistency in LLMs](https://aclanthology.org/2025.gem-1.25/) (Joo & Cho, GEM 2025)