@inproceedings{xu-etal-2025-llms-weakness,
title = "{LLM}{'}s Weakness in {NER} Doesn{'}t Stop It from Enhancing a Stronger {SLM}",
author = "Xu, Weilu and
Dang, Renfei and
Huang, Shujian",
editor = "Anderson, Adam and
Gordin, Shai and
Li, Bin and
Liu, Yudong and
Passarotti, Marco C. and
Sprugnoli, Rachele",
booktitle = "Proceedings of the Second Workshop on Ancient Language Processing",
month = may,
year = "2025",
address = "The Albuquerque Convention Center, Laguna",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.alp-1.21/",
pages = "170--175",
ISBN = "979-8-89176-235-0",
abstract = "Large Language Models (LLMs) demonstrate strong semantic understanding ability and extensive knowledge, but struggle with Named Entity Recognition (NER) due to hallucination and high training costs. Meanwhile, supervised Small Language Models (SLMs) efficiently provide structured predictions but lack adaptability to unseen entities and complex contexts. In this study, we investigate how a relatively weaker LLM can effectively support a supervised model in NER tasks. We first improve the LLM using LoRA-based fine-tuning and similarity-based prompting, achieving performance comparable to a SLM baseline. To further improve results, we propose a fusion strategy that integrates both models: prioritising SLM{'}s predictions while using LLM guidance in low confidence cases. Our hybrid approach outperforms both baselines on three classic Chinese NER datasets."
}
Markdown (Informal)
[LLM’s Weakness in NER Doesn’t Stop It from Enhancing a Stronger SLM](https://aclanthology.org/2025.alp-1.21/) (Xu et al., ALP 2025)