@inproceedings{jiang-etal-2025-jmedbench,
title = "{JM}ed{B}ench: A Benchmark for Evaluating {J}apanese Biomedical Large Language Models",
author = "Jiang, Junfeng and
Huang, Jiahao and
Aizawa, Akiko",
editor = "Rambow, Owen and
Wanner, Leo and
Apidianaki, Marianna and
Al-Khalifa, Hend and
Di Eugenio, Barbara and
Schockaert, Steven",
booktitle = "Proceedings of the 31st International Conference on Computational Linguistics",
month = jan,
year = "2025",
address = "Abu Dhabi, UAE",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2025.coling-main.395/",
pages = "5918--5935",
abstract = "Recent developments in Japanese large language models (LLMs) primarily focus on general domains, with fewer advancements in Japanese biomedical LLMs. One obstacle is the absence of a comprehensive, large-scale benchmark for comparison. Furthermore, the resources for evaluating Japanese biomedical LLMs are insufficient. To advance this field, we propose a new benchmark including eight LLMs across four categories and 20 Japanese biomedical datasets across five tasks. Experimental results indicate that: (1) LLMs with a better understanding of Japanese and richer biomedical knowledge achieve better performance in Japanese biomedical tasks, (2) LLMs that are not mainly designed for Japanese biomedical domains can still perform unexpectedly well, and (3) there is still much room for improving the existing LLMs in certain Japanese biomedical tasks. Moreover, we offer insights that could further enhance development in this field. Our evaluation tools tailored to our benchmark as well as the datasets are publicly available to facilitate future research."
}