@inproceedings{zhou-etal-2023-enhancing,
title = "Enhancing Cross-lingual Prompting with Dual Prompt Augmentation",
author = "Zhou, Meng and
Li, Xin and
Jiang, Yue and
Bing, Lidong",
editor = "Rogers, Anna and
Boyd-Graber, Jordan and
Okazaki, Naoaki",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2023",
month = jul,
year = "2023",
address = "Toronto, Canada",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.findings-acl.700/",
doi = "10.18653/v1/2023.findings-acl.700",
pages = "11008--11020",
abstract = {Prompting shows promising results in few-shot scenarios. However, its strength for multilingual/cross-lingual problems has not been fully exploited. Zhao and Sch{\"u}tze (2021) made initial explorations in this direction by presenting that cross-lingual prompting outperforms cross-lingual finetuning. In this paper, we conduct an empirical exploration on the effect of each component in cross-lingual prompting and derive Universal Prompting, which helps alleviate the discrepancies between source-language training and target-language inference. Based on this, we propose DPA, a dual prompt augmentation framework, aiming at relieving the data scarcity issue in few-shot cross-lingual prompting. Notably, for XNLI, our method achieves 46.54{\%} with only 16 English training examples per class, significantly better than 34.99{\%} of fine-tuning. Our code is available at \url{https://github.com/DAMO-NLP-SG/DPA}.}
}