@inproceedings{zhu-etal-2023-chain,
  title     = {{Chain-of-Questions} Training with Latent Answers for Robust Multistep Question Answering},
  author    = {Zhu, Wang and
               Thomason, Jesse and
               Jia, Robin},
  editor    = {Bouamor, Houda and
               Pino, Juan and
               Bali, Kalika},
  booktitle = {Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing},
  month     = dec,
  year      = {2023},
  address   = {Singapore},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2023.emnlp-main.547/},
  doi       = {10.18653/v1/2023.emnlp-main.547},
  pages     = {8845--8860},
  abstract  = {We propose Chain-of-Questions, a framework that trains a model to robustly answer multistep questions by generating and answering sub-questions. We obtain supervision for sub-questions from human-annotated question decomposition meaning representation (QDMR), but QDMR does not include annotated answers to sub-questions. To overcome this technical challenge, we treat sub-answers as latent variables and infer them with a novel dynamic mixture of Hard-EM and MAPO. Chain-of-Questions is effective and robust, greatly outperforming strong neuro-symbolic methods by 9.0 F1 on a DROP contrast set and GPT-3.5 by 24.3 F1 on a HotpotQA adversarial set.},
}
@comment{Markdown (Informal):
[Chain-of-Questions Training with Latent Answers for Robust Multistep Question Answering](https://aclanthology.org/2023.emnlp-main.547/) (Zhu et al., EMNLP 2023)
ACL
}