@inproceedings{lin-etal-2025-guiding,
title = "Guiding Large Language Models for Biomedical Entity Linking via Restrictive and Contrastive Decoding",
author = "Lin, Zhenxi and
Zhang, Ziheng and
Wu, Jian and
Zheng, Yefeng and
Wu, Xian",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rosé, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.findings-emnlp.1292/",
doi = "10.18653/v1/2025.findings-emnlp.1292",
pages = "23745--23759",
ISBN = "979-8-89176-335-7",
abstract = "Biomedical entity linking (BioEL) aims at mapping biomedical mentions to pre-defined entities. While extensive research efforts have been devoted to BioEL, applying large language models (LLMs) for BioEL has not been fully explored. Previous attempts have revealed difficulties when directly applying LLMs to the task of BioEL. Possible errors include generating non-entity sentences, invalid entities, or incorrect answers. To this end, we introduce LLM4BioEL, a concise yet effective framework that enables LLMs to adapt well to the BioEL task. LLM4BioEL employs restrictive decoding to ensure the generation of valid entities and utilizes entropy-based contrastive decoding to incorporate additional biomedical knowledge without requiring further tuning. Besides, we implement few-shot prompting to maximize the in-context learning capabilities of LLM. Extensive experiments demonstrate the effectiveness and applicability of LLM4BioEL across different BioEL tasks and with different LLM backbones, and the best-performing LLM4BioEL variant outperforms the traditional and LLM-based BioEL baselines."
}
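The abstract describes the two decoding-time techniques only at a high level. The following is a minimal, self-contained Python sketch of how they could fit together; it is not the authors' implementation. It assumes that restrictive decoding masks next-token choices with a prefix trie over valid entity token sequences, and that entropy-based contrastive decoding blends logits from a general LLM and a biomedical "expert" model only when the general model's next-token distribution is uncertain. All function names, the trie representation, and the `tau`/`alpha` knobs are hypothetical illustrations.

```python
# Illustrative sketch (not the paper's code) of trie-constrained "restrictive"
# decoding combined with an entropy-gated contrastive logit blend.
import math

def build_trie(entities):
    """Store each valid entity (a token-ID sequence) in a prefix trie."""
    trie = {}
    for seq in entities:
        node = trie
        for tok in seq:
            node = node.setdefault(tok, {})
    return trie

def allowed_next_tokens(trie, prefix):
    """Tokens that keep `prefix` a prefix of some valid entity
    (end-of-entity handling omitted for brevity)."""
    node = trie
    for tok in prefix:
        node = node.get(tok)
        if node is None:
            return set()
    return set(node.keys())

def softmax(logits):
    m = max(logits)
    exps = [math.exp(x - m) for x in logits]
    z = sum(exps)
    return [e / z for e in exps]

def entropy(probs):
    return -sum(p * math.log(p) for p in probs if p > 0.0)

def restrictive_contrastive_step(general_logits, expert_logits, trie, prefix,
                                 tau=1.0, alpha=0.5):
    """Pick the next token: mask invalid continuations, then contrast logits.

    If the general model is uncertain (entropy above tau), mix in the
    biomedical expert's logits with weight alpha; otherwise trust the
    general model alone. tau and alpha are hypothetical knobs.
    """
    probs = softmax(general_logits)
    if entropy(probs) > tau:
        logits = [(1 - alpha) * g + alpha * e
                  for g, e in zip(general_logits, expert_logits)]
    else:
        logits = general_logits
    valid = allowed_next_tokens(trie, prefix)
    best, best_score = None, -math.inf
    for tok in valid:                  # argmax restricted to valid tokens
        if logits[tok] > best_score:
            best, best_score = tok, logits[tok]
    return best

# Toy demo: vocabulary of 5 token IDs, two valid entities.
entities = [[0, 1], [0, 2, 3]]
trie = build_trie(entities)
tok = restrictive_contrastive_step(
    general_logits=[0.1, 0.2, 0.9, 0.0, 0.0],
    expert_logits=[0.0, 1.2, 0.1, 0.0, 0.0],
    trie=trie, prefix=[0])
print(tok)  # one of {1, 2}, whichever scores higher after blending (1 here)
```

In this reading, the trie mask guarantees that every decoded string is exactly one of the pre-defined entities, which targets the "invalid entities" failure mode the abstract mentions, while the entropy gate injects biomedical knowledge only where the general model is unsure, with no further tuning required.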