@inproceedings{gong-etal-2025-cross,
title = "Cross-domain Rumor Detection via Test-Time Adaptation and Large Language Models",
author = "Gong, Yuxia and
Hu, Shuguo and
Zhang, Huaiwen",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.407/",
doi = "10.18653/v1/2025.emnlp-main.407",
pages = "8062--8077",
ISBN = "979-8-89176-332-6",
    abstract = "Rumor detection on social media has become crucial due to the rapid spread of misinformation. Existing approaches primarily focus on within-domain tasks, resulting in suboptimal performance in cross-domain scenarios due to domain shift. To address this limitation, we draw inspiration from the strong generalization capabilities of Test-Time Adaptation (TTA) and propose a novel framework to enhance rumor detection performance across different domains. Specifically, we introduce Test-Time Adaptation for Rumor Detection (T$^2$ARD), which incorporates both single-domain model adaptation and target-graph adaptation strategies tailored to the unique requirements of cross-domain rumor detection. T$^2$ARD utilizes a graph adaptation module that updates the graph structure and node attributes through multi-level self-supervised contrastive learning, aiming to derive invariant graph representations. To mitigate the impact of significant distribution shifts on self-supervised signals, T$^2$ARD performs model adaptation by using annotations from Large Language Models (LLMs) on the target graph to produce pseudo-labels as supervised signals. Experiments conducted on four widely used cross-domain datasets demonstrate that T$^2$ARD achieves state-of-the-art performance, surpassing existing methods in rumor detection."
}

Markdown (Informal)
[Cross-domain Rumor Detection via Test-Time Adaptation and Large Language Models](https://aclanthology.org/2025.emnlp-main.407/) (Gong et al., EMNLP 2025)
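The abstract describes two adaptation signals applied at test time: a self-supervised contrastive objective over augmented views of the target graph, and a supervised objective driven by LLM-produced pseudo-labels. The sketch below is a minimal conceptual illustration of that combination, not the authors' released code: the `encoder`/`classifier` interfaces, the feature-dropout views, the loss weight `alpha`, and the step count are all assumptions, and the paper's multi-level contrastive learning and graph-structure updates are omitted.

```python
# Conceptual sketch of test-time adaptation with a contrastive signal plus
# LLM pseudo-labels (hypothetical interfaces; not the T^2ARD implementation).
import torch
import torch.nn.functional as F


def contrastive_loss(z1, z2, temperature=0.5):
    """InfoNCE-style loss between two views of node embeddings."""
    z1, z2 = F.normalize(z1, dim=1), F.normalize(z2, dim=1)
    logits = z1 @ z2.t() / temperature                    # pairwise similarities
    labels = torch.arange(z1.size(0), device=z1.device)   # positives on the diagonal
    return F.cross_entropy(logits, labels)


def test_time_adapt(encoder, classifier, x, edge_index, llm_pseudo_labels,
                    steps=10, lr=1e-3, alpha=0.5):
    """Adapt on the target graph: self-supervised contrastive loss plus
    cross-entropy against LLM pseudo-labels (assumed already collected)."""
    params = list(encoder.parameters()) + list(classifier.parameters())
    opt = torch.optim.Adam(params, lr=lr)
    for _ in range(steps):
        # Two stochastic views of the target graph via feature dropout
        # (one simple augmentation choice; the paper's augmentations may differ).
        z1 = encoder(F.dropout(x, 0.2), edge_index)
        z2 = encoder(F.dropout(x, 0.2), edge_index)
        loss = contrastive_loss(z1, z2)
        loss = loss + alpha * F.cross_entropy(classifier(z1), llm_pseudo_labels)
        opt.zero_grad()
        loss.backward()
        opt.step()
    return encoder, classifier
```

Here `alpha` balances the two signals, mirroring the abstract's point that pseudo-label supervision compensates when distribution shift degrades the self-supervised signal alone.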