@inproceedings{zhu-etal-2024-fanoutqa,
    title     = {{FanOutQA}: A Multi-Hop, Multi-Document Question Answering Benchmark for Large Language Models},
    author    = {Zhu, Andrew and
                 Hwang, Alyssa and
                 Dugan, Liam and
                 Callison-Burch, Chris},
    editor    = {Ku, Lun-Wei and
                 Martins, Andre and
                 Srikumar, Vivek},
    booktitle = {Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
    month     = aug,
    year      = {2024},
    address   = {Bangkok, Thailand},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2024.acl-short.2},
    doi       = {10.18653/v1/2024.acl-short.2},
    pages     = {18--37},
    abstract  = {One type of question that is commonly found in day-to-day scenarios is {``}fan-out{''} questions, complex multi-hop, multi-document reasoning questions that require finding information about a large number of entities. However, there exist few resources to evaluate this type of question-answering capability among large language models. To evaluate complex reasoning in LLMs more fully, we present FanOutQA, a high-quality dataset of fan-out question-answer pairs and human-annotated decompositions with English Wikipedia as the knowledge base. We formulate three benchmark settings across our dataset and benchmark 7 LLMs, including GPT-4, LLaMA 2, Claude-2.1, and Mixtral-8x7B, finding that contemporary models still have room to improve reasoning over inter-document dependencies in a long context. We provide our dataset, along with open-source tools to run models to encourage evaluation.},
}
@comment{
Markdown (Informal)
[FanOutQA: A Multi-Hop, Multi-Document Question Answering Benchmark for Large Language Models](https://aclanthology.org/2024.acl-short.2) (Zhu et al., ACL 2024)
ACL
}