@inproceedings{wu-shi-2022-adversarial,
title = "Adversarial Soft Prompt Tuning for Cross-Domain Sentiment Analysis",
author = "Wu, Hui and
Shi, Xiaodong",
editor = "Muresan, Smaranda and
Nakov, Preslav and
Villavicencio, Aline",
booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = may,
year = "2022",
address = "Dublin, Ireland",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.acl-long.174/",
doi = "10.18653/v1/2022.acl-long.174",
pages = "2438--2447",
abstract = "Cross-domain sentiment analysis has achieved promising results with the help of pre-trained language models. As GPT-3 appears, prompt tuning has been widely explored to enable better semantic modeling in many natural language processing tasks. However, directly using a fixed predefined template for cross-domain research cannot model different distributions of the [MASK] token in different domains, thus making underuse of the prompt tuning technique. In this paper, we propose a novel \textbf{Ad}versarial \textbf{S}oft \textbf{P}rompt \textbf{T}uning method (AdSPT) to better model cross-domain sentiment analysis. On the one hand, AdSPT adopts separate soft prompts instead of hard templates to learn different vectors for different domains, thus alleviating the domain discrepancy of the [MASK] token in the masked language modeling task. On the other hand, AdSPT uses a novel domain adversarial training strategy to learn domain-invariant representations between each source domain and the target domain. Experiments on a publicly available sentiment analysis dataset show that our model achieves the new state-of-the-art results for both single-source domain adaptation and multi-source domain adaptation."
}
Markdown (Informal)
[Adversarial Soft Prompt Tuning for Cross-Domain Sentiment Analysis](https://aclanthology.org/2022.acl-long.174/) (Wu & Shi, ACL 2022)
ACL
Hui Wu and Xiaodong Shi. 2022. Adversarial Soft Prompt Tuning for Cross-Domain Sentiment Analysis. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 2438–2447, Dublin, Ireland. Association for Computational Linguistics.
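
The abstract describes two mechanisms: a separate learnable soft prompt per domain in place of a fixed hard template, and domain adversarial training that pushes the [MASK] representation toward domain invariance. Below is a minimal PyTorch sketch of that combination. It is an illustration of the general technique rather than the authors' released code; the stand-in encoder, the class names, and parameters such as `prompt_len` and the reversal strength `lambd` are assumptions.

```python
# Illustrative sketch only: per-domain soft prompts plus a gradient-reversal
# domain discriminator, in the spirit of AdSPT. All names are hypothetical.
import torch
import torch.nn as nn
import torch.nn.functional as F

class GradReverse(torch.autograd.Function):
    """Identity in the forward pass; scaled, sign-flipped gradient backward."""
    @staticmethod
    def forward(ctx, x, lambd):
        ctx.lambd = lambd
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return -ctx.lambd * grad_output, None

class AdversarialSoftPrompt(nn.Module):
    def __init__(self, encoder, hidden, n_domains, prompt_len=4, n_labels=2):
        super().__init__()
        self.encoder = encoder  # frozen pre-trained encoder (stand-in here)
        # One learnable soft prompt per domain instead of a shared hard template.
        self.prompts = nn.Parameter(0.02 * torch.randn(n_domains, prompt_len, hidden))
        self.cls_head = nn.Linear(hidden, n_labels)   # sentiment read off [MASK]
        self.dom_head = nn.Linear(hidden, n_domains)  # domain discriminator

    def forward(self, input_embeds, domain_id, mask_pos, lambd=1.0):
        # Prepend the domain-specific soft prompt to the token embeddings.
        batch = input_embeds.size(0)
        prompt = self.prompts[domain_id].unsqueeze(0).expand(batch, -1, -1)
        h = self.encoder(torch.cat([prompt, input_embeds], dim=1))
        mask_h = h[:, prompt.size(1) + mask_pos]      # hidden state at [MASK]
        sent_logits = self.cls_head(mask_h)
        # Gradient reversal trains the prompt to fool the domain head,
        # encouraging a domain-invariant [MASK] representation.
        dom_logits = self.dom_head(GradReverse.apply(mask_h, lambd))
        return sent_logits, dom_logits

# Toy usage with a frozen stand-in for a pre-trained masked LM body.
layer = nn.TransformerEncoderLayer(d_model=32, nhead=4, batch_first=True)
encoder = nn.TransformerEncoder(layer, num_layers=2)
for p in encoder.parameters():
    p.requires_grad = False

model = AdversarialSoftPrompt(encoder, hidden=32, n_domains=3)
x = torch.randn(8, 16, 32)                 # a batch of token embeddings
sent, dom = model(x, domain_id=1, mask_pos=0)
loss = F.cross_entropy(sent, torch.randint(2, (8,))) \
     + F.cross_entropy(dom, torch.full((8,), 1, dtype=torch.long))
loss.backward()                            # gradients reach prompts and heads only
```

Per the abstract, the paper's adversarial objective is computed between each source domain and the target domain; the sketch collapses that to a single multi-class discriminator over domain ids for brevity.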