@inproceedings{naguib-etal-2024-shot,
    title     = {Few-shot clinical entity recognition in {English}, {French} and {Spanish}: masked language models outperform generative model prompting},
    author    = {Naguib, Marco and
                 Tannier, Xavier and
                 N{\'e}v{\'e}ol, Aur{\'e}lie},
    editor    = {Al-Onaizan, Yaser and
                 Bansal, Mohit and
                 Chen, Yun-Nung},
    booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2024},
    month     = nov,
    year      = {2024},
    address   = {Miami, Florida, USA},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2024.findings-emnlp.400/},
    doi       = {10.18653/v1/2024.findings-emnlp.400},
    pages     = {6829--6852},
    abstract  = {Large language models (LLMs) have become the preferred solution for many natural language processing tasks. In low-resource environments such as specialized domains, their few-shot capabilities are expected to deliver high performance. Named Entity Recognition (NER) is a critical task in information extraction that is not covered in recent LLM benchmarks. There is a need for better understanding the performance of LLMs for NER in a variety of settings including languages other than English. This study aims to evaluate generative LLMs, employed through prompt engineering, for few-shot clinical NER. We compare 13 auto-regressive models using prompting and 16 masked models using fine-tuning on 14 NER datasets covering English, French and Spanish. While prompt-based auto-regressive models achieve competitive F1 for general NER, they are outperformed within the clinical domain by lighter biLSTM-CRF taggers based on masked models. Additionally, masked models exhibit lower environmental impact compared to auto-regressive models. Findings are consistent across the three languages studied, which suggests that LLM prompting is not yet suited for NER production in the clinical domain.},
}
Markdown (Informal)
[Few-shot clinical entity recognition in English, French and Spanish: masked language models outperform generative model prompting](https://aclanthology.org/2024.findings-emnlp.400/) (Naguib et al., Findings 2024)
ACL