@inproceedings{zheng-etal-2025-dncasr,
  title     = "{DNCASR}: End-to-End Training for Speaker-Attributed {ASR}",
  author    = "Zheng, Xianrui and
               Zhang, Chao and
               Woodland, Phil",
  editor    = "Che, Wanxiang and
               Nabende, Joyce and
               Shutova, Ekaterina and
               Pilehvar, Mohammad Taher",
  booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
  month     = jul,
  year      = "2025",
  address   = "Vienna, Austria",
  publisher = "Association for Computational Linguistics",
  url       = "https://aclanthology.org/2025.acl-long.899/",
  pages     = "18369--18383",
  isbn      = "979-8-89176-251-0",
  abstract  = "This paper introduces DNCASR, a novel end-to-end trainable system designed for joint neural speaker clustering and automatic speech recognition (ASR), enabling speaker-attributed transcription of long multi-party meetings. DNCASR uses two separate encoders to independently encode global speaker characteristics and local waveform information, along with two linked decoders to generate speaker-attributed transcriptions. The use of linked decoders allows the entire system to be jointly trained under a unified loss function. By employing a serialised training approach, DNCASR effectively addresses overlapping speech in real-world meetings, where the link improves the prediction of speaker indices in overlapping segments. Experiments on the AMI-MDM meeting corpus demonstrate that the jointly trained DNCASR outperforms a parallel system that does not have links between the speaker and ASR decoders. Using cpWER to measure the speaker-attributed word error rate, DNCASR achieves a 9.0{\%} relative reduction on the AMI-MDM Eval set."
}
Markdown (Informal)
[DNCASR: End-to-End Training for Speaker-Attributed ASR](https://aclanthology.org/2025.acl-long.899/) (Zheng et al., ACL 2025)
ACL
- Xianrui Zheng, Chao Zhang, and Phil Woodland. 2025. DNCASR: End-to-End Training for Speaker-Attributed ASR. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 18369–18383, Vienna, Austria. Association for Computational Linguistics.