@inproceedings{tan-etal-2025-multilingual,
title = "Multilingual Gloss-free Sign Language Translation: Towards Building a Sign Language Foundation Model",
author = "Tan, Sihan and
Miyazaki, Taro and
Nakadai, Kazuhiro",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-short.43/",
pages = "553--561",
ISBN = "979-8-89176-252-7",
abstract = "Sign Language Translation (SLT) aims to convert sign language (SL) videos into spoken language text, thereby bridging the communication gap between the sign and the spoken community. While most existing works focus on translating a single SL into a single spoken language (one-to-one SLT), leveraging multilingual resources could mitigate low-resource issues and enhance accessibility. However, multilingual SLT (MLSLT) remains unexplored due to language conflicts and alignment difficulties across SLs and spoken languages. To address these challenges, we propose a multilingual gloss-free model with dual CTC objectives for token-level SL identification and spoken text generation. Our model supports 10 SLs and handles one-to-one, many-to-one, and many-to-many SLT tasks, achieving competitive performance compared to state-of-the-art methods on three widely adopted benchmarks: multilingual SP-10, PHOENIX14T, and CSL-Daily."
}
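
The abstract above mentions training a gloss-free encoder with dual CTC objectives, one for token-level sign language identification and one for spoken-text generation. The snippet below is a minimal, hypothetical PyTorch sketch of that general idea, not the authors' implementation: the encoder choice, feature dimensions, vocabulary sizes, and dummy targets are all assumptions made for illustration.

```python
# Hypothetical sketch (not the authors' code) of a dual-CTC objective:
# one CTC head for token-level sign language identification, one for
# spoken-text generation, on top of a shared visual encoder.
import torch
import torch.nn as nn

B, T, D = 2, 64, 256              # batch size, video frames, feature dim (assumed)
NUM_SLS = 10 + 1                  # 10 sign languages + CTC blank at index 0
VOCAB = 1000 + 1                  # assumed spoken-text vocabulary + CTC blank

# Stand-in visual encoder over pre-extracted frame features.
encoder = nn.TransformerEncoder(
    nn.TransformerEncoderLayer(d_model=D, nhead=4, batch_first=True),
    num_layers=2,
)
slid_head = nn.Linear(D, NUM_SLS)   # CTC head 1: sign language identification
text_head = nn.Linear(D, VOCAB)     # CTC head 2: spoken-text generation
ctc = nn.CTCLoss(blank=0, zero_infinity=True)

frames = torch.randn(B, T, D)                                  # assumed frame features
feats = encoder(frames)                                        # (B, T, D)
slid_logp = slid_head(feats).log_softmax(-1).transpose(0, 1)   # (T, B, NUM_SLS)
text_logp = text_head(feats).log_softmax(-1).transpose(0, 1)   # (T, B, VOCAB)

# Dummy targets purely for illustration.
slid_tgt, slid_len = torch.randint(1, NUM_SLS, (B, 8)), torch.full((B,), 8)
text_tgt, text_len = torch.randint(1, VOCAB, (B, 12)), torch.full((B,), 12)
in_len = torch.full((B,), T)

# Dual CTC loss; a full model would add the usual translation (decoder) loss.
loss = ctc(slid_logp, slid_tgt, in_len, slid_len) + ctc(text_logp, text_tgt, in_len, text_len)
loss.backward()
```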
Markdown (Informal)
[Multilingual Gloss-free Sign Language Translation: Towards Building a Sign Language Foundation Model](https://aclanthology.org/2025.acl-short.43/) (Tan et al., ACL 2025)
ACL
Sihan Tan, Taro Miyazaki, and Kazuhiro Nakadai. 2025. Multilingual Gloss-free Sign Language Translation: Towards Building a Sign Language Foundation Model. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 553–561, Vienna, Austria. Association for Computational Linguistics.