@inproceedings{ko-etal-2025-understand,
title = "Understand, Solve and Translate: Bridging the Multilingual Mathematical Reasoning Gap",
author = "Ko, Hyunwoo and
Son, Guijin and
Choi, Dasol",
editor = "Adelani, David Ifeoluwa and
Arnett, Catherine and
Ataman, Duygu and
Chang, Tyler A. and
Gonen, Hila and
Raja, Rahul and
Schmidt, Fabian and
Stap, David and
Wang, Jiayi",
booktitle = "Proceedings of the 5th Workshop on Multilingual Representation Learning (MRL 2025)",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2025.mrl-main.6/",
doi = "10.18653/v1/2025.mrl-main.6",
pages = "78--95",
ISBN = "979-8-89176-345-6",
abstract = "Large language models (LLMs) demonstrate exceptional performance on complex reasoning tasks. However, despite their strong reasoning capabilities in high-resource languages (e.g., English and Chinese), a significant performance gap persists in other languages. To investigate this gap in Korean, we introduce HRM8K, a benchmark comprising 8,011 English-Korean parallel bilingual math problems. Through systematic analysis of model behaviors, we identify a key finding: these performance disparities stem primarily from difficulties in comprehending non-English inputs, rather than from limitations in reasoning capabilities. Based on these findings, we propose UST (Understand, Solve, and Translate), a method that strategically uses English as an anchor for reasoning and solution generation. By fine-tuning the model on 130k synthetically generated data points, UST achieves a 10.91{\%} improvement on the HRM8K benchmark and reduces the multilingual performance gap from 11.6{\%} to 0.7{\%}. Additionally, we show that improvements from UST generalize effectively to different Korean domains, demonstrating that capabilities acquired from machine-verifiable content can be generalized to other areas. We publicly release the benchmark, training dataset, and models."
}