@inproceedings{long-etal-2023-adapt,
title = "Adapt in Contexts: Retrieval-Augmented Domain Adaptation via In-Context Learning",
author = "Long, Quanyu and
Wang, Wenya and
Pan, Sinno",
editor = "Bouamor, Houda and
Pino, Juan and
Bali, Kalika",
booktitle = "Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing",
month = dec,
year = "2023",
address = "Singapore",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.emnlp-main.402/",
doi = "10.18653/v1/2023.emnlp-main.402",
pages = "6525--6542",
abstract = "Large language models (LLMs) have showcased their capability with few-shot inference known as in-context learning. However, in-domain demonstrations are not always readily available in real scenarios, leading to cross-domain in-context learning. Besides, LLMs are still facing challenges in long-tail knowledge in unseen and unfamiliar domains. The above limitations demonstrate the necessity of Unsupervised Domain Adaptation (UDA). In this paper, we study the UDA problem under an in-context learning setting to adapt language models from the source domain to the target domain without any target labels. The core idea is to retrieve a subset of cross-domain elements that are the most similar to the query, and elicit language model to adapt in an in-context manner by learning both target domain distribution and the discriminative task signal simultaneously with the augmented cross-domain in-context examples. We devise different prompting and training strategies, accounting for different LM architectures to learn the target distribution via language modeling. With extensive experiments on Sentiment Analysis (SA) and Named Entity Recognition (NER) tasks, we thoroughly study the effectiveness of ICL for domain transfer and demonstrate significant improvements over baseline models."
}
Markdown (Informal)
[Adapt in Contexts: Retrieval-Augmented Domain Adaptation via In-Context Learning](https://preview.aclanthology.org/jlcl-multiple-ingestion/2023.emnlp-main.402/) (Long et al., EMNLP 2023)
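As a rough illustration of the retrieval-augmented in-context setup the abstract describes (not the authors' implementation), the Python sketch below retrieves the unlabeled target-domain texts most similar to a query and combines them with labeled source-domain demonstrations in a single prompt. TF-IDF cosine similarity stands in for the paper's retriever, the example data is invented, and the final llm_generate call is a hypothetical placeholder.

# Hedged sketch of cross-domain retrieval-augmented in-context learning.
# TF-IDF similarity is a stand-in retriever; the data and `llm_generate`
# are hypothetical placeholders, not taken from the paper.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

# Labeled source-domain demonstrations (movie reviews) and an unlabeled
# target-domain corpus (product reviews), as toy examples.
source_examples = [
    ("The plot was dull and the acting was wooden.", "negative"),
    ("A moving, beautifully shot film.", "positive"),
]
target_corpus = [
    "The battery drains within hours of light use.",
    "Crisp display and the keyboard feels great.",
]

# Fit the retriever on the unlabeled target-domain corpus.
vectorizer = TfidfVectorizer().fit(target_corpus)
corpus_matrix = vectorizer.transform(target_corpus)

def retrieve(query, k=1):
    """Return the k target-domain texts most similar to the query."""
    sims = cosine_similarity(vectorizer.transform([query]), corpus_matrix)[0]
    return [target_corpus[i] for i in sims.argsort()[::-1][:k]]

def build_prompt(query):
    """Combine retrieved unlabeled target texts with labeled source demos."""
    lines = ["Unlabeled target-domain context:"]
    lines += retrieve(query)  # exposes the model to the target distribution
    for text, label in source_examples:
        lines.append(f"Review: {text}\nSentiment: {label}")
    lines.append(f"Review: {query}\nSentiment:")
    return "\n\n".join(lines)

prompt = build_prompt("The speakers sound tinny at high volume.")
print(prompt)
# answer = llm_generate(prompt)  # hypothetical call to an LLM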