@inproceedings{naszadi-etal-2023-aligning,
title = "Aligning Predictive Uncertainty with Clarification Questions in Grounded Dialog",
author = "Naszadi, Kata and
Manggala, Putra and
Monz, Christof",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2023.findings-emnlp.999/",
doi = "10.18653/v1/2023.findings-emnlp.999",
pages = "14988--14998",
abstract = "Asking for clarification is fundamental to effective collaboration. An interactive artificial agent must know when to ask a human instructor for more information in order to ascertain their goals. Previous work bases the timing of questions on supervised models learned from interactions between humans. Instead of a supervised classification task, we wish to ground the need for questions in the acting agent`s predictive uncertainty. In this work, we investigate if ambiguous linguistic instructions can be aligned with uncertainty in neural models. We train an agent using the T5 encoder-decoder architecture to solve the Minecraft Collaborative Building Task and identify uncertainty metrics that achieve better distributional separation between clear and ambiguous instructions. We further show that well-calibrated prediction probabilities benefit the detection of ambiguous instructions. Lastly, we provide a novel empirical analysis on the relationship between uncertainty and dialog history length and highlight an important property that poses a difficulty for detection."
}
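
The abstract refers to sequence-level uncertainty metrics computed from a T5 encoder-decoder. The sketch below is a minimal, hypothetical illustration of two such metrics (mean per-token entropy and length-normalized log-likelihood); it is not the paper's released code, and the `t5-small` checkpoint, the example instruction, and the target action string are placeholder assumptions. The paper's finding is that suitable metrics of this kind separate clear from ambiguous instructions.

```python
# Hypothetical sketch of sequence-level uncertainty metrics from a T5 model.
# Checkpoint, instruction, and target strings are illustrative placeholders,
# not the metrics or data used in the paper.
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

tokenizer = T5Tokenizer.from_pretrained("t5-small")
model = T5ForConditionalGeneration.from_pretrained("t5-small")
model.eval()

instruction = "place a red block next to the tower"  # placeholder instruction
target = "build red 1 0 2"                           # placeholder action sequence

inputs = tokenizer(instruction, return_tensors="pt")
labels = tokenizer(target, return_tensors="pt").input_ids

with torch.no_grad():
    # Teacher-forced logits over the target sequence: (1, target_len, vocab).
    logits = model(**inputs, labels=labels).logits

log_probs = torch.log_softmax(logits, dim=-1)

# Mean per-token entropy of the predictive distribution; higher values
# would be expected for more ambiguous instructions.
entropy = -(log_probs.exp() * log_probs).sum(dim=-1).mean().item()

# Length-normalized log-likelihood of the target action sequence.
token_ll = log_probs.gather(-1, labels.unsqueeze(-1)).squeeze(-1)
norm_ll = token_ll.mean().item()

print(f"mean token entropy: {entropy:.3f}")
print(f"normalized log-likelihood: {norm_ll:.3f}")
```

To use such scores for deciding when to ask a clarification question, one would threshold them on held-out data; as the abstract notes, calibrating the prediction probabilities improves this detection.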