@inproceedings{moghe-etal-2021-cross,
title = "Cross-lingual Intermediate Fine-tuning improves Dialogue State Tracking",
author = "Moghe, Nikita and
Steedman, Mark and
Birch, Alexandra",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2021.emnlp-main.87/",
doi = "10.18653/v1/2021.emnlp-main.87",
pages = "1137--1150",
abstract = "Recent progress in task-oriented neural dialogue systems is largely focused on a handful of languages, as annotation of training data is tedious and expensive. Machine translation has been used to make systems multilingual, but this can introduce a pipeline of errors. Another promising solution is using cross-lingual transfer learning through pretrained multilingual models. Existing methods train multilingual models with additional code-mixed task data or refine the cross-lingual representations through parallel ontologies. In this work, we enhance the transfer learning process by intermediate fine-tuning of pretrained multilingual models, where the multilingual models are fine-tuned with different but related data and/or tasks. Specifically, we use parallel and conversational movie subtitles datasets to design cross-lingual intermediate tasks suitable for downstream dialogue tasks. We use only 200K lines of parallel data for intermediate fine-tuning which is already available for 1782 language pairs. We test our approach on the cross-lingual dialogue state tracking task for the parallel MultiWoZ (English -{\ensuremath{>}} Chinese, Chinese -{\ensuremath{>}} English) and Multilingual WoZ (English -{\ensuremath{>}} German, English -{\ensuremath{>}} Italian) datasets. We achieve impressive improvements ({\ensuremath{>}} 20{\%} on joint goal accuracy) on the parallel MultiWoZ dataset and the Multilingual WoZ dataset over the vanilla baseline with only 10{\%} of the target language task data and zero-shot setup respectively."
}
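
For readers who want a concrete starting point, below is a minimal sketch of the intermediate fine-tuning recipe the abstract describes, assuming a HuggingFace mBERT checkpoint and a plain masked-language-modelling objective over concatenated parallel sentence pairs. The paper's actual intermediate tasks on subtitle data are more elaborate; the model name, toy sentence pairs, and output path here are illustrative assumptions, not the authors' code.

```python
# Minimal sketch (not the authors' code): intermediate fine-tuning of a
# multilingual encoder with MLM over parallel sentence pairs, done before
# the downstream dialogue state tracking fine-tuning.
from datasets import Dataset
from transformers import (
    AutoModelForMaskedLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

# Toy parallel pairs; the paper uses ~200K lines of subtitle data per pair.
parallel_pairs = [
    ("i need a cheap hotel in the centre .", "我需要市中心的一家便宜旅馆。"),
    ("what time does the train leave ?", "火车几点出发？"),
]

tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")
model = AutoModelForMaskedLM.from_pretrained("bert-base-multilingual-cased")

# Encode each pair as one sequence so a masked token can be predicted from
# context in either language (one simple cross-lingual intermediate task).
def encode(batch):
    return tokenizer(batch["src"], batch["tgt"], truncation=True, max_length=128)

dataset = Dataset.from_dict(
    {"src": [s for s, _ in parallel_pairs],
     "tgt": [t for _, t in parallel_pairs]}
).map(encode, batched=True, remove_columns=["src", "tgt"])

collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer, mlm=True, mlm_probability=0.15
)

trainer = Trainer(
    model=model,
    args=TrainingArguments(
        output_dir="mbert-intermediate",  # illustrative path
        num_train_epochs=1,
        per_device_train_batch_size=8,
    ),
    train_dataset=dataset,
    data_collator=collator,
)
trainer.train()
trainer.save_model("mbert-intermediate")
```

The saved checkpoint would then be loaded in place of the vanilla pretrained model when training the dialogue state tracker on MultiWoZ or Multilingual WoZ, which is where the reported gains over the vanilla baseline are measured.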