@inproceedings{das-etal-2022-diving,
title = "Diving Deep into Modes of Fact Hallucinations in Dialogue Systems",
author = "Das, Souvik and
Saha, Sougata and
Srihari, Rohini",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.findings-emnlp.48/",
doi = "10.18653/v1/2022.findings-emnlp.48",
pages = "684--699",
    abstract = "Knowledge Graph (KG)-grounded conversations often use large pre-trained models and usually suffer from fact hallucination. Frequently, entities with no references in knowledge sources and conversation history are introduced into responses, thus hindering the flow of the conversation. Existing work attempts to overcome this issue by tweaking the training procedure or using a multi-step refining method. However, minimal effort is put into constructing an entity-level hallucination detection system, which would provide fine-grained signals that control fallacious content while generating responses. As a first step to address this issue, we dive deep to identify various modes of hallucination in KG-grounded chatbots through human feedback analysis. Secondly, we propose a series of perturbation strategies to create a synthetic dataset named FADE (FActual Dialogue Hallucination DEtection Dataset). Finally, we conduct comprehensive data analyses and create multiple baseline models for hallucination detection to compare against human-verified data and already established benchmarks."
}