@inproceedings{sheng-etal-2025-regularized,
    title = {Regularized Contrastive Decoding with Hard Negative Samples for {LLM} Hallucination Mitigation},
    author = {Sheng, Haonan and
      Hu, Dou and
      Wei, Lingwei and
      Zhou, Wei and
      Hu, Songlin},
    editor = {Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet},
    booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2025},
    month = nov,
    year = {2025},
    address = {Suzhou, China},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/2025.findings-emnlp.322/},
    doi = {10.18653/v1/2025.findings-emnlp.322},
    pages = {6061--6073},
    isbn = {979-8-89176-335-7},
    abstract = {Large language models are prone to generate hallucinations, which can undermine their reliability in high-stakes applications. Some works on LLM hallucination mitigation use the model{'}s internal signals to contrast different output during inference stage. However, these works often focus on simple forms of hallucinations, and struggle to effectively mitigate hallucinations. To address the issue, this paper exploits hard negative samples to construct a factually weaker model for improving contrastive decoding. We propose a new inference-time method, Regularized Contrastive Decoding (RCD), to capture correct hallucination signals for mitigating hallucinations in LLMs. RCD learns more diverse hallucination patterns via adversarial-aware fine-tuning and mitigates hallucinations via contrastive decoding. Experiments on four hallucination benchmarks demonstrate that our method achieves better LLM hallucination mitigation performance. Further analysis shows RCD generalizes well across different model sizes, task formats, perturbation methods and training data sizes.}
}
Markdown (Informal)
[Regularized Contrastive Decoding with Hard Negative Samples for LLM Hallucination Mitigation](https://aclanthology.org/2025.findings-emnlp.322/) (Sheng et al., Findings 2025)
ACL