@inproceedings{chen-etal-2025-theory,
    title = "{Theory of Mind} in {Large Language Models}: Assessment and Enhancement",
    author = "Chen, Ruirui and
      Jiang, Weifeng and
      Qin, Chengwei and
      Tan, Cheston",
    editor = "Che, Wanxiang and
      Nabende, Joyce and
      Shutova, Ekaterina and
      Pilehvar, Mohammad Taher",
    booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = jul,
    year = "2025",
    address = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.acl-long.1522/",
    pages = "31539--31558",
    isbn = "979-8-89176-251-0",
    abstract = "Theory of Mind (ToM){---}the ability to reason about the mental states of oneself and others{---}is a cornerstone of human social intelligence. As Large Language Models (LLMs) become increasingly integrated into daily life, understanding their ability to interpret and respond to human mental states is crucial for enabling effective interactions. In this paper, we review LLMs' ToM capabilities by analyzing both evaluation benchmarks and enhancement strategies. For evaluation, we focus on recently proposed and widely used story-based benchmarks. For enhancement, we provide an in-depth analysis of recent methods aimed at improving LLMs' ToM abilities. Furthermore, we outline promising directions for future research to further advance these capabilities and better adapt LLMs to more realistic and diverse scenarios. Our survey serves as a valuable resource for researchers interested in evaluating and advancing LLMs' ToM capabilities."
}
@comment{
Markdown (Informal)
[Theory of Mind in Large Language Models: Assessment and Enhancement](https://aclanthology.org/2025.acl-long.1522/) (Chen et al., ACL 2025)
ACL
}