@inproceedings{hachmeier-jaschke-2024-information,
title = "Information Extraction of Music Entities in Conversational Music Queries",
author = {Hachmeier, Simon and
J{\"a}schke, Robert},
editor = "Kruspe, Anna and
Oramas, Sergio and
Epure, Elena V. and
Sordo, Mohamed and
Weck, Benno and
Doh, SeungHeon and
Won, Minz and
Manco, Ilaria and
Meseguer-Brocal, Gabriel",
booktitle = "Proceedings of the 3rd Workshop on NLP for Music and Audio (NLP4MusA)",
month = nov,
year = "2024",
address = "Oakland, USA",
    publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2024.nlp4musa-1.7/",
pages = "37--42",
    abstract = "The detection of music entities such as songs or performing artists in natural language queries is an important task when designing conversational music recommendation agents. Previous research has observed the applicability of named entity recognition approaches for this task based on pre-trained encoders like BERT. In recent years, large language models (LLMs) have surpassed these encoders in a variety of downstream tasks. In this paper, we validate the use of LLMs for information extraction of music entities in conversational queries by few-shot prompting. We test different numbers of examples and compare two sampling methods to obtain few-shot examples. Our results indicate that LLMs can achieve state-of-the-art performance in this task."
}
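The abstract describes few-shot prompting of an LLM to extract music entities (songs, performing artists) from conversational queries. The sketch below illustrates that general setup; the chat API, model name, few-shot examples, and JSON output format are assumptions for illustration, not the authors' actual configuration.

```python
# Minimal sketch of few-shot prompting for music entity extraction,
# assuming an OpenAI-compatible chat API; the model name, the few-shot
# examples, and the output schema are illustrative, not the paper's setup.
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Hypothetical few-shot examples: (query, extracted entities) pairs.
FEW_SHOT = [
    ("play something by daft punk from their first album",
     '{"artist": ["Daft Punk"], "song": []}'),
    ("i want to hear bohemian rhapsody by queen",
     '{"artist": ["Queen"], "song": ["Bohemian Rhapsody"]}'),
]

def extract_entities(query: str, model: str = "gpt-4o-mini") -> str:
    """Ask the LLM to tag song and artist mentions in a conversational query."""
    messages = [{"role": "system",
                 "content": "Extract song and artist names from the user's "
                            "music request. Answer with JSON only."}]
    # Each few-shot example becomes a user/assistant turn pair before the query.
    for example_query, example_answer in FEW_SHOT:
        messages.append({"role": "user", "content": example_query})
        messages.append({"role": "assistant", "content": example_answer})
    messages.append({"role": "user", "content": query})
    response = client.chat.completions.create(model=model, messages=messages)
    return response.choices[0].message.content

if __name__ == "__main__":
    print(extract_entities("can you put on some old taylor swift songs"))
```

Varying the length of `FEW_SHOT` and how its examples are sampled corresponds to the two factors the paper reports on (number of examples and sampling method).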