@inproceedings{jang-etal-2025-p,
title = "{P}-{C}o{T}: A Pedagogically-motivated Participatory Chain-of-Thought Prompting for Phonological Reasoning in {LLM}s",
author = "Jang, Dongjun and
Ahn, Youngchae and
Shin, Hyopil",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/mtsummit-25-ingestion/2025.findings-acl.1132/",
doi = "10.18653/v1/2025.findings-acl.1132",
pages = "21958--21979",
ISBN = "979-8-89176-256-5",
    abstract = "This study explores the potential of phonological reasoning within text-based large language models (LLMs). Utilizing the PhonologyBench benchmark, we assess tasks such as rhyme word generation, grapheme-to-phoneme (G2P) conversion, and syllable counting. Our evaluations across 12 LLMs reveal that while few-shot learning offers inconsistent gains, the introduction of a novel Pedagogically-motivated Participatory Chain-of-Thought (P-CoT) prompt, which is anchored in educational theories such as scaffolding and discovery learning, consistently enhances performance. This method leverages structured guidance to activate latent phonological abilities, achieving up to 52{\%} improvement and even surpassing human baselines on certain tasks. Future work could aim to optimize P-CoT prompts for specific models or explore their application across different linguistic domains."
}