@inproceedings{mansour-etal-2025-well,
  title         = {How Well Do Large Language Models Extract Keywords? A Systematic Evaluation on Scientific Corpora},
  author        = {Mansour, Nacef Ben and
                   Rahimi, Hamed and
                   Alrahabi, Motasem},
  editor        = {Jansen, Peter and
                   Dalvi Mishra, Bhavana and
                   Trivedi, Harsh and
                   Prasad Majumder, Bodhisattwa and
                   Hope, Tom and
                   Khot, Tushar and
                   Downey, Doug and
                   Horvitz, Eric},
  booktitle     = {Proceedings of the 1st Workshop on {AI} and Scientific Discovery: Directions and Opportunities},
  month         = may,
  year          = {2025},
  address       = {Albuquerque, New Mexico, USA},
  publisher     = {Association for Computational Linguistics},
  url           = {https://aclanthology.org/2025.aisd-main.2/},
  pages         = {13--21},
  isbn          = {979-8-89176-224-4},
  internal-note = {NOTE(review): surname may be compound ("Ben Mansour") -- confirm against the paper's author list before changing the name field},
  abstract      = {Automatic keyword extraction from scientific articles is pivotal for organizing scholarly archives, powering semantic search engines, and mapping interdisciplinary research trends. However, existing methods{---}including statistical and graph-based approaches{---}struggle to handle domain-specific challenges such as technical terminology, cross-disciplinary ambiguity, and dynamic scientific jargon. This paper presents an empirical comparison of traditional keyword extraction methods (e.g. TextRank and YAKE) with approaches based on Large Language Model. We introduce a novel evaluation framework that combines fuzzy semantic matching based on Levenshtein Distance with exact-match metrics (F1, precision, recall) to address inconsistencies in keyword normalization across scientific corpora. Through an extensive ablation study across nine different LLMs, we analyze their performance and associated costs. Our findings reveal that LLM-based methods consistently achieve superior precision and relevance compared to traditional approaches. This performance advantage suggests significant potential for improving scientific search systems and information retrieval in academic contexts.},
}
Markdown (Informal)
[How Well Do Large Language Models Extract Keywords? A Systematic Evaluation on Scientific Corpora](https://aclanthology.org/2025.aisd-main.2/) (Mansour et al., AISD 2025)
ACL