@inproceedings{huang-etal-2025-applying,
title = "Applying Whisper Fine-tuning and Branchformer to {H}akka Speech Recognition",
author = "Huang, Yu-Sheng and
Hong, Wei-Cheng and
Chen, Xin-Yu and
Lin, Szu-Yin",
editor = "Chang, Kai-Wei and
Lu, Ke-Han and
Yang, Chih-Kai and
Tam, Zhi-Rui and
Chang, Wen-Yu and
Wang, Chung-Che",
booktitle = "Proceedings of the 37th Conference on Computational Linguistics and Speech Processing (ROCLING 2025)",
month = nov,
year = "2025",
address = "National Taiwan University, Taipei City, Taiwan",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/dashboard/2025.rocling-main.50/",
pages = "441--445",
ISBN = "979-8-89176-379-1",
abstract = "This study addresses the FSR 2025 Hakka speech recognition task by comparing two strategies: fine-tuning large pre-trained models and training from scratch. For character (Hanzi) recognition, we fine-tuned five different scales of the Whisper model, with large-v3-turbo achieving a 7.55{\%} CER on the test set. For Pinyin recognition, a Branchformer model was compared against a LoRA fine-tuned Whisper-small, yielding WERs of 4.7{\%} and 6.5{\%} on the test set, respectively. Speed perturbation was the primary method used for data augmentation in our pre-processing pipeline."
}