@inproceedings{xu-etal-2024-adaption,
title = "Adaption-of-Thought: Learning Question Difficulty Improves Large Language Models for Reasoning",
author = "Xu, Mayi and
Li, Yongqi and
Sun, Ke and
Qian, Tieyun",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.emnlp-main.313/",
doi = "10.18653/v1/2024.emnlp-main.313",
pages = "5468--5495",
abstract = "Large language models (LLMs) have shown excellent capability for solving reasoning problems. Existing approaches do not differentiate the question difficulty when designing prompting methods for them. Clearly, a simple method cannot elicit sufficient knowledge from LLMs to answer a hard question. Meanwhile, a sophisticated one will force the LLM to generate redundant or even inaccurate intermediate steps toward a simple question. Consequently, the performance of existing methods fluctuates among various questions.In this work, we propose Adaption-of-Thought (AdoT), an adaptive method to improve LLMs for the reasoning problem, which first measures the question difficulty and then tailors demonstration set construction and difficulty-adapted retrieval strategies for the adaptive demonstration construction. Experimental results on three reasoning tasks prove the superiority of our proposed method, showing an absolute improvement of up to 5.5{\%} on arithmetic reasoning, 7.4{\%} on symbolic reasoning, and 2.3{\%} on commonsense reasoning. Our codes and implementation details are available at: https://github.com/NLPGM/AdoT"
}