@inproceedings{bannour-etal-2024-team,
    title     = "Team {NLPeers} at Chemotimelines 2024: Evaluation of two timeline extraction methods, can generative {LLM} do it all or is smaller model fine-tuning still relevant ?",
    author    = "Bannour, Nesrine and
      Andrew, Judith Jeyafreeda and
      Vincent, Marc",
    editor    = "Naumann, Tristan and
      Ben Abacha, Asma and
      Bethard, Steven and
      Roberts, Kirk and
      Bitterman, Danielle",
    booktitle = "Proceedings of the 6th Clinical Natural Language Processing Workshop",
    month     = jun,
    year      = "2024",
    address   = "Mexico City, Mexico",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2024.clinicalnlp-1.39/",
    doi       = "10.18653/v1/2024.clinicalnlp-1.39",
    pages     = "406--416",
    abstract  = "This paper presents our two deep learning-based approaches to participate in subtask 1 of the Chemotimelines 2024 Shared task. The first uses a fine-tuning strategy on a relatively small general domain Masked Language Model (MLM) model, with additional normalization steps obtained using a simple Large Language Model (LLM) prompting technique. The second is an LLM-based approach combining advanced automated prompt search with few-shot in-context learning using the DSPy framework. Our results confirm the continued relevance of the smaller MLM fine-tuned model. It also suggests that the automated few-shot LLM approach can perform close to the fine-tuning-based method without extra LLM normalization and be advantageous under scarce data access conditions. We finally hint at the possibility to choose between lower training examples or lower computing resources requirements when considering both methods."
}
@comment{Markdown (Informal)
[Team NLPeers at Chemotimelines 2024: Evaluation of two timeline extraction methods, can generative LLM do it all or is smaller model fine-tuning still relevant ?](https://aclanthology.org/2024.clinicalnlp-1.39/) (Bannour et al., ClinicalNLP 2024)
ACL}