@inproceedings{zhao-etal-2023-hallucination,
title = "Hallucination Detection for Grounded Instruction Generation",
author = "Zhao, Lingjun and
Nguyen, Khanh and
Daum{\'e} III, Hal",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.findings-emnlp.266/",
doi = "10.18653/v1/2023.findings-emnlp.266",
pages = "4044--4053",
abstract = "We investigate the problem of generating instructions to guide humans to navigate in simulated residential environments. A major issue with current models is hallucination: they generate references to actions or objects that are inconsistent with what a human follower would perform or encounter along the described path. We develop a model that detects these hallucinated references by adopting a model pre-trained on a large corpus of image-text pairs, and fine-tuning it with a contrastive loss that separates correct instructions from instructions containing synthesized hallucinations. Our final model outperforms several baselines, including using word probability estimated by the instruction-generation model, and supervised models based on LSTM and Transformer."
}