@inproceedings{lee-etal-2025-controlmed,
title = "{C}ontrol{M}ed: Adding Reasoning Control to Medical Language Model",
author = "Lee, Sung-Min and
Lee, Siyoon and
Kim, Juyeon and
Roh, Kyoungmin",
editor = "Inui, Kentaro and
Sakti, Sakriani and
Wang, Haofen and
Wong, Derek F. and
Bhattacharyya, Pushpak and
Banerjee, Biplab and
Ekbal, Asif and
Chakraborty, Tanmoy and
Singh, Dhirendra Pratap",
booktitle = "Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics",
month = dec,
year = "2025",
address = "Mumbai, India",
publisher = "The Asian Federation of Natural Language Processing and The Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-long.149/",
pages = "2786--2799",
ISBN = "979-8-89176-298-5",
abstract = "Reasoning Large Language Models (LLMs) with enhanced accuracy and explainability are increasingly being adopted in the medical domain, as the life-critical nature of clinical decision-making demands reliable support. Despite these advancements, existing reasoning LLMs often generate unnecessarily lengthy reasoning processes, leading to significant computational overhead and response latency. These limitations hinder their practical deployment in real-world clinical environments. To address these challenges, we introduce \textbf{ControlMed}, a medical language model that enables users to actively control the length of the reasoning process at inference time through fine-grained control markers. ControlMed is trained through a three-stage pipeline: 1) pre-training on a large-scale synthetic medical instruction dataset covering both \textit{direct} and \textit{reasoning responses}; 2) supervised fine-tuning with multi-length reasoning data and explicit length-control markers; and 3) reinforcement learning with model-based reward signals to enhance factual accuracy and response quality. Experimental results on a variety of English and Korean medical benchmarks demonstrate that our model achieves similar or better performance compared to state-of-the-art models. Furthermore, users can flexibly balance reasoning accuracy and computational efficiency by controlling the reasoning length as needed. These findings demonstrate that ControlMed is a practical and adaptable solution for clinical question answering and medical information analysis."
}

Markdown (Informal)
[ControlMed: Adding Reasoning Control to Medical Language Model](https://preview.aclanthology.org/ingest-ijcnlp-aacl/2025.ijcnlp-long.149/) (Lee et al., IJCNLP-AACL 2025)

ACL
Sung-Min Lee, Siyoon Lee, Juyeon Kim, and Kyoungmin Roh. 2025. ControlMed: Adding Reasoning Control to Medical Language Model. In Proceedings of the 14th International Joint Conference on Natural Language Processing and the 4th Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics, pages 2786–2799, Mumbai, India. The Asian Federation of Natural Language Processing and The Association for Computational Linguistics.