@inproceedings{au-2026-midi,
  title     = {{MIDI}-{PHOR}: Multi-View Distillation for Music Understanding and Captioning},
  author    = {Au, Steven},
  editor    = {Epure, Elena V. and
               Oramas, Sergio and
               Doh, SeungHeon and
               Ramoneda, Pedro and
               Kruspe, Anna and
               Sordo, Mohamed},
  booktitle = {Proceedings of the 4th Workshop on {NLP} for Music and Audio ({NLP4MusA} 2026)},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/manual-author-scripts/2026.nlp4musa-1.6/},
  pages     = {33--43},
  isbn      = {979-8-89176-369-2},
  abstract  = {Text-only training is a promising new method for training multimodal machine learning models without data from every modality. However, few studies have explored its use as an approximation of missing data for supervised learning in data-scarce environments. In this work, we examine techniques to acquire text-based training data, address the modality gap, and present a case study on classifying subjective audio timbre descriptions based on three kinds of text-only training data and six augmentation methods on eight audio-timbre datasets. We find text-only training successfully trains supervised audio classifiers without audio that are able to compete with a zero-shot baseline and training on real audio.},
  internal-note = {NOTE(review): url is a preview-build link (preview.aclanthology.org/manual-author-scripts); replace with the canonical aclanthology.org URL once published. Also verify the abstract -- it describes a text-only-training timbre-classification study, which may not match the MIDI-PHOR title.},
}
Markdown (Informal)
[MIDI-PHOR: Multi-View Distillation for Music Understanding and Captioning](https://preview.aclanthology.org/manual-author-scripts/2026.nlp4musa-1.6/) (Au, NLP4MusA 2026)
ACL