@inproceedings{leang-etal-2025-comat,
    title = "{CoMAT}: Chain of Mathematically Annotated Thought Improves Mathematical Reasoning",
    author = "Leang, Joshua Ong Jun and
      Gema, Aryo Pradipta and
      Cohen, Shay B",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.1024/",
    pages = "20256--20285",
    isbn = "979-8-89176-332-6",
    abstract = "Mathematical reasoning remains a significant challenge for large language models (LLMs), despite progress in prompting techniques such as Chain-of-Thought (CoT). We present Chain of Mathematically Annotated Thought (CoMAT), which enhances reasoning through two stages: Symbolic Conversion (converting natural language queries into symbolic form) and Reasoning Execution (deriving answers from symbolic representations). CoMAT operates entirely with a single LLM and without external solvers. Across four LLMs, CoMAT outperforms traditional CoT on six out of seven benchmarks, achieving gains of 4.48{\%} on MMLU-Redux (MATH) and 4.58{\%} on GaoKao MCQ. In addition to improved performance, CoMAT ensures faithfulness and verifiability, offering a transparent reasoning process for complex mathematical tasks."
}
Markdown (Informal)
[CoMAT: Chain of Mathematically Annotated Thought Improves Mathematical Reasoning](https://aclanthology.org/2025.emnlp-main.1024/) (Leang et al., EMNLP 2025)
ACL