@inproceedings{guo-etal-2025-med,
title = "{M}ed-{VRA}gent: A Framework for Medical Visual Reasoning-Enhanced Agents",
author = "Guo, Guangfu and
Lu, Xiaoqian and
Feng, Yue",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Ros{\'e}, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.emnlp-main.939/",
doi = "10.18653/v1/2025.emnlp-main.939",
pages = "18613--18627",
ISBN = "979-8-89176-332-6",
abstract = "Vision-language models (VLMs) achieve promising results in medical reasoning but struggle with hallucinations, vague descriptions, inconsistent logic, and poor localization. To address this, we propose an agent framework named Medical Visual Reasoning Agent (\textbf{Med-VRAgent}). The approach is based on the Visual Guidance and Self-Reward paradigms and Monte Carlo Tree Search (MCTS). By combining Visual Guidance with tree search, Med-VRAgent improves the medical visual reasoning capabilities of VLMs. We use the trajectories collected by Med-VRAgent as feedback to further improve performance by fine-tuning the VLMs with the proximal policy optimization (PPO) objective. Experiments on multiple medical VQA benchmarks demonstrate that our method outperforms existing approaches."
}

Markdown (Informal)
[Med-VRAgent: A Framework for Medical Visual Reasoning-Enhanced Agents](https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.emnlp-main.939/) (Guo et al., EMNLP 2025)