@inproceedings{louis-etal-2020-id,
    title     = {{\textquotedblleft}{I}'d rather just go to bed{\textquotedblright}: Understanding Indirect Answers},
    author    = {Louis, Annie and
      Roth, Dan and
      Radlinski, Filip},
    editor    = {Webber, Bonnie and
      Cohn, Trevor and
      He, Yulan and
      Liu, Yang},
    booktitle = {Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)},
    month     = nov,
    year      = {2020},
    address   = {Online},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2020.emnlp-main.601/},
    doi       = {10.18653/v1/2020.emnlp-main.601},
    pages     = {7411--7425},
    abstract  = {We revisit a pragmatic inference problem in dialog: Understanding indirect responses to questions. Humans can interpret {\textquoteleft}I'm starving.' in response to {\textquoteleft}Hungry?', even without direct cue words such as {\textquoteleft}yes' and {\textquoteleft}no'. In dialog systems, allowing natural responses rather than closed vocabularies would be similarly beneficial. However, today's systems are only as sensitive to these pragmatic moves as their language model allows. We create and release the first large-scale English language corpus {\textquoteleft}Circa' with 34,268 (polar question, indirect answer) pairs to enable progress on this task. The data was collected via elaborate crowdsourcing, and contains utterances with yes/no meaning, as well as uncertain, middle-ground, and conditional responses. We also present BERT-based neural models to predict such categories for a question-answer pair. We find that while transfer learning from entailment works reasonably, performance is not yet sufficient for robust dialog. Our models reach 82-88{\%} accuracy for a 4-class distinction, and 74-85{\%} for 6 classes.},
}
Markdown (Informal)
[“I’d rather just go to bed”: Understanding Indirect Answers](https://aclanthology.org/2020.emnlp-main.601/) (Louis et al., EMNLP 2020)
ACL