@inproceedings{kim-etal-2024-qube,
title = "{Q}u{BE}: Question-based Belief Enhancement for Agentic {LLM} Reasoning",
author = "Kim, Minsoo and
Kim, Jongyoon and
Kim, Jihyuk and
Hwang, Seung-won",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.emnlp-main.1193/",
doi = "10.18653/v1/2024.emnlp-main.1193",
pages = "21403--21423",
abstract = "Despite advancements in Large Language Models (LLMs), many complex tasks are not easily solved in a single inference step, requiring the use of agentic LLMs in interactive environments. However, agentic LLMs suffer from a phenomenon known as reasoning derailment, due to the indiscriminate incorporation of observations from partially observable environments. We introduce QuBE, a method that enhances agents' focus on task-relevant contexts, by constructing a belief state via question answering. We validate QuBE through experiments in two agentic LLM scenarios with partial observability: 1) a canonical interactive decision-making scenario using text-based game engines, and 2) an interactive retrieval-augmented generation (RAG) scenario using search engines. In the AlfWorld text-based game, QuBE outperforms established baselines by substantial margins, and in the search engine scenario, it achieves marked improvements on the BeIR zero-shot retrieval benchmark. The results demonstrate that QuBE significantly mitigates reasoning derailment, refining the decision-making process of LLM agents in partially observed environments."
}
Markdown (Informal)
[QuBE: Question-based Belief Enhancement for Agentic LLM Reasoning](https://aclanthology.org/2024.emnlp-main.1193/) (Kim et al., EMNLP 2024)
ACL
Minsoo Kim, Jongyoon Kim, Jihyuk Kim, and Seung-won Hwang. 2024. [QuBE: Question-based Belief Enhancement for Agentic LLM Reasoning](https://aclanthology.org/2024.emnlp-main.1193/). In *Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing*, pages 21403–21423, Miami, Florida, USA. Association for Computational Linguistics.
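The abstract above describes QuBE only at a high level (maintaining a belief state built via question answering so the agent conditions on task-relevant context rather than raw observations); the paper itself specifies the actual procedure. As a loose, hedged illustration of that idea only, the Python sketch below shows one way such a QA-distilled belief state could sit inside an agent loop. Every name in it (`Belief`, `update_belief`, `act`, the `llm` callable) is a hypothetical stand-in, not the authors' implementation.

```python
# Illustrative sketch only: a hypothetical rendering of a QA-constructed belief
# state in an agent loop, inspired by the abstract above. It is NOT the paper's
# algorithm; all names and prompts here are assumptions.
from dataclasses import dataclass, field
from typing import Callable, Dict


@dataclass
class Belief:
    """Task-relevant facts distilled from raw observations via question answering."""
    qa_pairs: Dict[str, str] = field(default_factory=dict)

    def as_context(self) -> str:
        return "\n".join(f"Q: {q}\nA: {a}" for q, a in self.qa_pairs.items())


def update_belief(belief: Belief, observation: str, task: str,
                  llm: Callable[[str], str], num_questions: int = 3) -> Belief:
    """Ask task-focused questions about a new observation and store only the answers,
    instead of appending the raw (possibly distracting) observation to the history."""
    questions = llm(
        f"Task: {task}\nObservation: {observation}\n"
        f"Write {num_questions} questions whose answers matter for the task, one per line."
    ).splitlines()
    for q in filter(None, (q.strip() for q in questions)):
        belief.qa_pairs[q] = llm(f"Observation: {observation}\nAnswer briefly: {q}")
    return belief


def act(task: str, belief: Belief, llm: Callable[[str], str]) -> str:
    """Pick the next action conditioned on the distilled belief state, not raw history."""
    return llm(f"Task: {task}\nBelief state:\n{belief.as_context()}\nNext action:")


if __name__ == "__main__":
    # Stand-in for a real LLM call, just to show the control flow end to end.
    echo = lambda prompt: "go to desk 1"
    belief = update_belief(Belief(), "You see a desk and a shelf.", "find the mug", echo)
    print(act("find the mug", belief, echo))
```

Any text-completion function with the signature `str -> str` can be plugged in as `llm`; the point of the sketch is only the control flow in which answers to task-focused questions, rather than raw observations, feed the next action choice.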