@inproceedings{sun-etal-2024-mm,
title = "{MM}-{MATH}: Advancing Multimodal Math Evaluation with Process Evaluation and Fine-grained Classification",
author = "Sun, Kai and
Bai, Yushi and
Qi, Ji and
Hou, Lei and
Li, Juanzi",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2024",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/Author-page-Marten-During-lu/2024.findings-emnlp.73/",
doi = "10.18653/v1/2024.findings-emnlp.73",
pages = "1358--1375",
abstract = "To advance the evaluation of multimodal math reasoning in large multimodal models (LMMs), this paper introduces a novel benchmark, MM-MATH. MM-MATH consists of 5,929 open-ended middle school math problems with visual contexts, with fine-grained classification across difficulty, grade level, and knowledge points. Unlike existing benchmarks relying on binary answer comparison, MM-MATH incorporates both outcome and process evaluations. Process evaluation employs LMM-as-a-judge to automatically analyze solution steps, identifying and categorizing errors into specific error types. Extensive evaluation of ten models on MM-MATH reveals significant challenges for existing LMMs, highlighting their limited utilization of visual information and struggles with higher-difficulty problems. The best-performing model achieves only 31{\%} accuracy on MM-MATH, compared to 82{\%} for humans. This highlights the challenging nature of our benchmark for existing models and the significant gap between the multimodal reasoning capabilities of current models and humans. Our process evaluation reveals that diagram misinterpretation is the most common error, accounting for more than half of the total error cases, underscoring the need for improved image comprehension in multimodal reasoning."
}