@inproceedings{jia-zhang-2022-prompt,
title = "Prompt-based Distribution Alignment for Domain Generalization in Text Classification",
author = "Jia, Chen and
Zhang, Yue",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Proceedings of the 2022 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2022.emnlp-main.690/",
doi = "10.18653/v1/2022.emnlp-main.690",
pages = "10147--10157",
abstract = "Prompt-based learning (a.k.a. prompting) achieves high performance by bridging the gap between the objectives of language modeling and downstream tasks. Domain generalization ability can be improved by prompting since classification across different domains can be unified into the prediction of the same set of label words. The remaining challenge for domain generalization by prompting comes from discrepancies between the data distribution of different domains. To improve domain generalization with prompting, we learn distributional invariance across source domains via two alignment regularization loss functions. The first is vocabulary distribution alignment, which uses a Kullback-Leibler divergence regularization on source-domain vocabulary distributions. The second is feature distribution alignment, which uses a novel adversarial training strategy to learn domain invariant representation across source domains. Experiments on sentiment analysis and natural language inference show the effectiveness of our method and achieve state-of-the-art results on six datasets."
}
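The abstract describes two alignment regularizers but the entry contains no implementation. Purely as an illustrative sketch, the PyTorch code below shows one plausible shape for such losses: a symmetric KL term between per-domain label-word distributions, and an adversarial domain classifier trained through a gradient-reversal layer. Every name here (`vocab_alignment_loss`, `GradReverse`, `lam`, the loss weights) is an assumption for illustration, and gradient reversal is a standard stand-in, not necessarily the paper's "novel adversarial training strategy".

```python
import torch
import torch.nn.functional as F

def vocab_alignment_loss(mask_logits_a, mask_logits_b, eps=1e-8):
    """Symmetric KL between the mean label-word (vocabulary) distributions
    predicted at the prompt's [MASK] position for two source domains.
    Hypothetical sketch; the paper's exact formulation may differ."""
    p = F.softmax(mask_logits_a, dim=-1).mean(dim=0).clamp_min(eps)  # [vocab]
    q = F.softmax(mask_logits_b, dim=-1).mean(dim=0).clamp_min(eps)  # [vocab]
    return 0.5 * ((p * (p / q).log()).sum() + (q * (q / p).log()).sum())

class GradReverse(torch.autograd.Function):
    """Gradient-reversal layer: identity in the forward pass, flips (and
    scales) gradients in the backward pass so the encoder is trained to
    fool the domain classifier."""
    @staticmethod
    def forward(ctx, x, lam):
        ctx.lam = lam
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return -ctx.lam * grad_output, None

def adversarial_feature_loss(features, domain_labels, domain_clf, lam=1.0):
    """Domain-classification loss computed through gradient reversal;
    minimizing it pushes `features` toward domain invariance."""
    return F.cross_entropy(domain_clf(GradReverse.apply(features, lam)),
                           domain_labels)

# Hypothetical combined objective: task loss plus the two regularizers.
# task_loss, the per-domain mask logits, the pooled features, and the
# weights w1/w2 are all placeholders, not values from the paper.
# loss = task_loss \
#      + w1 * vocab_alignment_loss(mask_logits_a, mask_logits_b) \
#      + w2 * adversarial_feature_loss(features, domain_labels, domain_clf)
```

Under this sketch, the KL term aligns which label words the model predicts across source domains, while the gradient-reversal term aligns the underlying feature space.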