@inproceedings{zang-zhang-2024-interpretation,
    title     = {The Interpretation Gap in Text-to-Music Generation Models},
    author    = {Zang, Yongyi and
                 Zhang, Yixiao},
    editor    = {Kruspe, Anna and
                 Oramas, Sergio and
                 Epure, Elena V. and
                 Sordo, Mohamed and
                 Weck, Benno and
                 Doh, SeungHeon and
                 Won, Minz and
                 Manco, Ilaria and
                 Meseguer-Brocal, Gabriel},
    booktitle = {Proceedings of the 3rd Workshop on {NLP} for Music and Audio ({NLP4MusA})},
    month     = nov,
    year      = {2024},
    address   = {Oakland, USA},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2024.nlp4musa-1.18/},
    pages     = {112--118},
    abstract  = {Large-scale text-to-music generation models have significantly enhanced music creation capabilities, offering unprecedented creative freedom. However, their ability to collaborate effectively with human musicians remains limited. In this paper, we propose a framework to describe the musical interaction process, which includes expression, interpretation, and execution of controls. Following this framework, we argue that the primary gap between existing text-to-music models and musicians lies in the interpretation stage, where models lack the ability to interpret controls from musicians. We also propose two strategies to address this gap and call on the music information retrieval community to tackle the interpretation challenge to improve human-AI musical collaboration.},
}
Markdown (Informal)
[The Interpretation Gap in Text-to-Music Generation Models](https://aclanthology.org/2024.nlp4musa-1.18/) (Zang & Zhang, NLP4MusA 2024)
ACL