Note (outside-entry text is ignored by BibTeX): URL normalised from the
temporary "mtsummit-25-ingestion" preview mirror to the canonical ACL
Anthology address; the DOI resolves to the same page.

@inproceedings{zhang-etal-2025-milic,
  title     = {{MiLiC-Eval}: Benchmarking Multilingual {LLMs} for {China}'s Minority Languages},
  author    = {Zhang, Chen and
               Tao, Mingxu and
               Liao, Zhiyuan and
               Feng, Yansong},
  editor    = {Che, Wanxiang and
               Nabende, Joyce and
               Shutova, Ekaterina and
               Pilehvar, Mohammad Taher},
  booktitle = {Findings of the Association for Computational Linguistics: ACL 2025},
  month     = jul,
  year      = {2025},
  address   = {Vienna, Austria},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.findings-acl.578/},
  doi       = {10.18653/v1/2025.findings-acl.578},
  pages     = {11086--11102},
  isbn      = {979-8-89176-256-5},
  abstract  = {Large language models (LLMs) excel in high-resource languages but struggle with low-resource languages (LRLs), particularly those spoken by minority communities in China, such as Tibetan, Uyghur, Kazakh, and Mongolian. To systematically track the progress in these languages, we introduce MiLiC-Eval, a benchmark designed for minority languages in China, featuring 24K instances across 9 tasks. MiLiC-Eval focuses on underrepresented writing systems. Its parallelism between tasks and languages can provide a faithful and fine-grained assessment of linguistic and problem-solving skills. Our evaluation reveals that open-source LLMs perform poorly on syntax-intensive tasks and multi-script languages. We further demonstrate how MiLiC-Eval can help advance LRL research in handling diverse writing systems and understanding the process of language adaptation.},
}
Markdown (Informal)
[MiLiC-Eval: Benchmarking Multilingual LLMs for China’s Minority Languages](https://aclanthology.org/2025.findings-acl.578/) (Zhang et al., Findings 2025)
ACL