@inproceedings{jhamtani-etal-2025-llm,
title = "{LLM} Agents for Coordinating Multi-User Information Gathering",
author = "Jhamtani, Harsh and
Andreas, Jacob and
Van Durme, Benjamin",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.findings-acl.916/",
pages = "17800--17826",
ISBN = "979-8-89176-256-5",
abstract = "This paper introduces PeopleJoin, a benchmark for evaluating LM-mediated collaborative problem solving. Given a user request, PeopleJoin agents must identify teammates who might be able to assist, converse with these teammates to gather information, and finally compile a useful answer or summary for the original user. PeopleJoin comprises two evaluation domains: PeopleJoin-QA, focused on questions about tabular data, and PeopleJoin-DocCreation, focused on document creation tasks. The two domains are adapted from existing NLP benchmarks for database question answering and multi-document summarization; here, however, the information needed to complete these tasks is distributed across synthetic ``organizations'' of 2{--}20 users, simulating natural multi-user collaboration scenarios. We implemented several popular LM agent architectures, evaluating their accuracy and efficiency at completing tasks, and highlight new research questions that can be studied using PeopleJoin."
}