@inproceedings{liu-etal-2024-uncertainty,
  title     = {Uncertainty Calibration for Tool-Using Language Agents},
  author    = {Liu, Hao and
               Dou, Zi-Yi and
               Wang, Yixin and
               Peng, Nanyun and
               Yue, Yisong},
  editor    = {Al-Onaizan, Yaser and
               Bansal, Mohit and
               Chen, Yun-Nung},
  % EMNLP braced so sentence-casing styles cannot downcase the acronym.
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2024},
  month     = nov,
  year      = {2024},
  % ACL Anthology convention: conference venue in `address`.
  address   = {Miami, Florida, USA},
  publisher = {Association for Computational Linguistics},
  % Canonical permanent Anthology URL (original pointed at a temporary
  % preview.aclanthology.org staging build, which will rot).
  url       = {https://aclanthology.org/2024.findings-emnlp.978/},
  doi       = {10.18653/v1/2024.findings-emnlp.978},
  pages     = {16781--16805},
  abstract  = {There is increasing interest in equipping language models with the ability to leverage external tools for complex, goal-oriented tasks. However, interacting with external tools introduces inherent uncertainties due to imperfections and misalignments between the tools' outputs and the agents' internal models, often leading to suboptimal outcomes. We thus study the problem of tool-use calibration in language agents, and identify prompt design and execution trace selection as two primary areas that suffer from miscalibration. We then propose ProbeCal, which recalibrates the internal probabilities of tool-using language agents to better reflect the actual effectiveness of tool, and enables a more appropriate selection of prompts and execution paths. We empirically show that ProbeCal can significantly and consistently improve off-the-shelf language models in tool-using applications.},
}
Markdown (Informal)
[Uncertainty Calibration for Tool-Using Language Agents](https://aclanthology.org/2024.findings-emnlp.978/) (Liu et al., Findings 2024)
ACL
- Hao Liu, Zi-Yi Dou, Yixin Wang, Nanyun Peng, and Yisong Yue. 2024. Uncertainty Calibration for Tool-Using Language Agents. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 16781–16805, Miami, Florida, USA. Association for Computational Linguistics.