@inproceedings{wu-etal-2023-extrapolating,
title = "Extrapolating Multilingual Understanding Models as Multilingual Generators",
author = "Wu, Bohong and
Yuan, Fei and
Zhao, Hai and
Li, Lei and
Xu, Jingjing",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2023",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2023.findings-emnlp.1031/",
doi = "10.18653/v1/2023.findings-emnlp.1031",
pages = "15432--15444",
abstract = "Multilingual understanding models (or encoder-based), pre-trained via masked language modeling, have achieved promising results on many language understanding tasks (e.g., mBERT). However, these models are not capable of generating high-quality text compared with decoder-based causal language models. Can we transform a pre-trained language understanding model into an effective language generation model? We propose a Semantic-Guided Alignment-then-Denoising (SGA) approach to adapt a multilingual encoder to a multilingual generator with a small number of additional parameters. Experiments show that the proposed approach is an effective adaption method, outperforming widely-used initialization-based methods with gains of 9.4 BLEU on machine translation, 8.1 Rouge-L on question generation, and 5.5 METEOR on story generation on XLM-R$_{large}$. On the other hand, we observe that XLM-R is still inferior to mBART in supervised settings despite better results on zero-shot settings, indicating that more exploration is required to make understanding models strong generators. Our code is available at https://github.com/chengzhipanpan/XLMR4MT."
}