@inproceedings{sun-etal-2024-self,
title = "Self-Training Large Language and Vision Assistant for Medical Question Answering",
author = "Sun, Guohao and
Qin, Can and
Fu, Huazhu and
Wang, Linwei and
Tao, Zhiqiang",
editor = "Al-Onaizan, Yaser and
Bansal, Mohit and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2024",
address = "Miami, Florida, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.emnlp-main.1119/",
doi = "10.18653/v1/2024.emnlp-main.1119",
pages = "20052--20060",
abstract = "Large Vision-Language Models (LVLMs) have shown significant potential in assisting medical diagnosis by leveraging extensive biomedical datasets. However, the advancement of medical image understanding and reasoning critically depends on building high-quality visual instruction data, which is costly and labor-intensive to obtain, particularly in the medical domain. To mitigate this data-starving issue, we introduce Self-Training Large Language and Vision Assistant for Medical (STLLaVA-Med). The proposed method is designed to train a policy model (an LVLM) capable of auto-generating medical visual instruction data to improve data efficiency, guided through Direct Preference Optimization (DPO). Specifically, a more powerful and larger LVLM (e.g., GPT-4o) is involved as a biomedical expert to oversee the DPO fine-tuning process on the auto-generated data, encouraging the policy model to align efficiently with human preferences. We validate the efficacy and data efficiency of STLLaVA-Med across three major medical Visual Question Answering (VQA) benchmarks, demonstrating competitive zero-shot performance with the utilization of only 9{\%} of the medical data."
}
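
The abstract above centers on Direct Preference Optimization (DPO) fine-tuning of the policy LVLM on self-generated preference pairs judged by a larger model. For orientation only, the sketch below shows the standard DPO loss (Rafailov et al., 2023) computed from sequence log-probabilities; it is a minimal illustration of that general objective under assumed tensor inputs, not the paper's implementation, and the function name and `beta` default are illustrative.

```python
import torch
import torch.nn.functional as F

def dpo_loss(policy_chosen_logps: torch.Tensor,
             policy_rejected_logps: torch.Tensor,
             ref_chosen_logps: torch.Tensor,
             ref_rejected_logps: torch.Tensor,
             beta: float = 0.1) -> torch.Tensor:
    """Standard DPO objective: increase the margin by which the policy
    prefers the chosen response over the rejected one, measured relative
    to a frozen reference model (e.g., the pre-DPO policy)."""
    policy_logratios = policy_chosen_logps - policy_rejected_logps
    ref_logratios = ref_chosen_logps - ref_rejected_logps
    # -log sigmoid(beta * (policy margin - reference margin)), averaged over the batch
    return -F.logsigmoid(beta * (policy_logratios - ref_logratios)).mean()
```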