@inproceedings{do-gaspers-2019-cross,
title = "Cross-lingual Transfer Learning with Data Selection for Large-Scale Spoken Language Understanding",
author = "Do, Quynh and
Gaspers, Judith",
editor = "Inui, Kentaro and
Jiang, Jing and
Ng, Vincent and
Wan, Xiaojun",
booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/D19-1153/",
doi = "10.18653/v1/D19-1153",
pages = "1455--1460",
abstract = "A typical cross-lingual transfer learning approach boosting model performance on a language is to pre-train the model on all available supervised data from another language. However, in large-scale systems this leads to high training times and computational requirements. In addition, characteristic differences between the source and target languages raise a natural question of whether source data selection can improve the knowledge transfer. In this paper, we address this question and propose a simple but effective language model based source-language data selection method for cross-lingual transfer learning in large-scale spoken language understanding. The experimental results show that with data selection i) source data and hence training speed is reduced significantly and ii) model performance is improved."
}
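The abstract only names a language-model-based selection criterion without spelling it out, so the following is a minimal illustrative sketch, not the paper's method: it assumes a Moore-Lewis-style cross-entropy difference scored with toy unigram LMs, keeping the source sentences that look most like the in-domain data. All names here (`train_unigram_lm`, `select_source_data`, `keep_ratio`) are hypothetical.

```python
import math
from collections import Counter

def train_unigram_lm(sentences):
    """Fit an add-one-smoothed unigram LM over whitespace-tokenized sentences."""
    counts = Counter(tok for s in sentences for tok in s.split())
    total = sum(counts.values())
    vocab = len(counts) + 1  # +1 reserves mass for unseen tokens
    return lambda tok: (counts[tok] + 1) / (total + vocab)

def cross_entropy(lm, sentence):
    """Per-token cross-entropy (bits) of a sentence under the LM."""
    toks = sentence.split()
    if not toks:
        return float("inf")
    return -sum(math.log2(lm(t)) for t in toks) / len(toks)

def select_source_data(source_sents, in_domain_sents, general_sents, keep_ratio=0.5):
    """Rank source sentences by H_in_domain - H_general (Moore-Lewis style);
    lower scores look more like the target task's data, so keep those."""
    lm_in = train_unigram_lm(in_domain_sents)
    lm_gen = train_unigram_lm(general_sents)
    scored = sorted(source_sents,
                    key=lambda s: cross_entropy(lm_in, s) - cross_entropy(lm_gen, s))
    return scored[:int(len(scored) * keep_ratio)]

if __name__ == "__main__":
    source = ["play some music", "what is the weather", "book a flight to paris"]
    in_domain = ["play a song", "play music by queen"]
    general = ["what is the weather", "set an alarm", "book a table"]
    print(select_source_data(source, in_domain, general, keep_ratio=0.34))
```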
Markdown (Informal)
[Cross-lingual Transfer Learning with Data Selection for Large-Scale Spoken Language Understanding](https://aclanthology.org/D19-1153/) (Do & Gaspers, EMNLP-IJCNLP 2019)