@inproceedings{xu-ma-2025-llm,
    title = "{LLM} The Genius Paradox: A Linguistic and Math Expert{'}s Struggle with Simple Word-based Counting Problems",
    author = "Xu, Nan and
      Ma, Xuezhe",
    editor = "Chiruzzo, Luis and
      Ritter, Alan and
      Wang, Lu",
    booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
    month = apr,
    year = "2025",
    address = "Albuquerque, New Mexico",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.naacl-long.172/",
    pages = "3344--3370",
    isbn = "979-8-89176-189-6",
    abstract = "Interestingly, LLMs yet struggle with some basic tasks that humans find trivial to handle, e.g., counting the number of character r{'}s in the word {\textquotedblleft}strawberry{\textquotedblright}. There are several popular conjectures (e.g., tokenization, architecture and training data) regarding the reason for deficiency of LLMs in simple word-based counting problems, sharing the similar belief that such failure stems from model pretraining hence probably inevitable during deployment. In this paper, we carefully design multiple evaluation settings to investigate validity of prevalent conjectures. Meanwhile, we measure transferability of advanced mathematical and coding reasoning capabilities from specialized LLMs to simple counting tasks. Although specialized LLMs suffer from counting problems as well, we find conjectures about inherent deficiency of LLMs invalid and further seek opportunities to elicit knowledge and capabilities from LLMs which are beneficial to counting tasks. Compared with strategies such as finetuning and in-context learning that are commonly adopted to enhance performance on new or challenging tasks, we show that engaging reasoning is the most robust and efficient way to help LLMs better perceive tasks with more accurate responses. We hope our conjecture validation design could provide insights to study future critical failure modes of LLMs. Based on challenges in transferring advanced capabilities to much simpler tasks, we call for more attention to model capability acquisition and evaluation. We also highlight the importance of cultivating consciousness of {\textquotedblleft}reasoning before responding{\textquotedblright} during model pretraining."
}
Markdown (Informal)
[LLM The Genius Paradox: A Linguistic and Math Expert’s Struggle with Simple Word-based Counting Problems](https://aclanthology.org/2025.naacl-long.172/) (Xu & Ma, NAACL 2025)
ACL