@inproceedings{kim-etal-2025-persona,
title = "Persona is a Double-Edged Sword: Rethinking the Impact of Role-play Prompts in Zero-shot Reasoning Tasks",
author = "Kim, Junseok and
Yang, Nakyeong and
Jung, Kyomin",
editor = "Inui, Kentaro and
Sakti, Sakriani and
Wang, Haofen and
Wong, Derek F. and
Bhattacharyya, Pushpak and
Banerjee, Biplab and
Ekbal, Asif and
Chakraborty, Tanmoy and
Singh, Dhirendra Pratap",
booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.findings-ijcnlp.51/",
pages = "848--862",
ISBN = "979-8-89176-303-6",
abstract = "Recent studies have shown that prompting large language models (LLMs) with role-playing personas can enhance their reasoning capabilities. While the benefits of role-playing personas in reasoning tasks are widely recognized, it remains uncertain whether a persona aligned with the given dataset can consistently achieve these improvements. In this work, we empirically investigate the potential drawbacks of using dataset-aligned personas (referred to as **coarsely aligned personas**) and introduce Jekyll {\&} Hyde, a novel framework that enhances reasoning robustness by ensembling solutions from both role-playing and neutral (non-persona) prompts.Jekyll {\&} Hyde first predicts an instance-specific persona tailored to each query using an LLM, then generates answers with both persona and neutral prompts, and finally selects the superior output through an LLM-based evaluator.Experimental results claim that across twelve widely used natural language reasoning datasets and three backbone large language models, Jekyll {\&} Hyde consistently outperforms single-perspective LLMs, achieving an average accuracy gain of **9.98{\%}** on GPT{-}4.We further demonstrate that using instance{-}aligned personas yields more accurate and stable performance than using dataset-aligned personas."
}