@article{pu-etal-2025-dear,
title = "{DEAR}: Disentangled Event-Agnostic Representation Learning for Early Fake News Detection",
author = "Pu, Xiao and
Wu, Hao and
Bi, Xiuli and
Wu, Yu and
Gao, Xinbo",
journal = "Transactions of the Association for Computational Linguistics",
volume = "13",
year = "2025",
address = "Cambridge, MA",
publisher = "MIT Press",
url = "https://preview.aclanthology.org/corrections-2025-07/2025.tacl-1.16/",
doi = "10.1162/tacl_a_00743",
pages = "343--356",
abstract = "Detecting fake news early is challenging due to the absence of labeled articles for emerging events in training data. To address this, we propose a Disentangled Event-Agnostic Representation (DEAR) learning approach. Our method begins with a BERT-based adaptive multi-grained semantic encoder that captures hierarchical and comprehensive textual representations of the input news content. To effectively separate latent authenticity-related and event-specific knowledge within the news content, we employ a disentanglement architecture. To further enhance the decoupling effect, we introduce a cross-perturbation mechanism that perturbs authenticity-related representation with the event-specific one, and vice versa, deriving a robust and discerning authenticity-related signal. Additionally, we implement a refinement learning scheme to minimize potential interactions between two decoupled representations, ensuring that the authenticity signal remains strong and unaffected by event-specific details. Experimental results demonstrate that our approach effectively mitigates the impact of event-specific influence, outperforming state-of-the-art methods. In particular, it achieves a 6.0{\%} improvement in accuracy on the PHEME dataset over MDDA, a similar approach that decouples latent content and style knowledge, in scenarios involving articles from unseen events different from the topics of the training set."
}
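For readers who want a concrete picture of the pipeline the abstract describes, below is a minimal, hypothetical PyTorch sketch of the disentangle-then-cross-perturb idea: one encoder vector is projected into authenticity-related and event-specific branches, each branch is perturbed with the other, and a penalty discourages interaction between the two. Everything here is an assumption for illustration (the additive mixing, the cosine-similarity penalty, and all names such as `DisentangleHeads` and `alpha`); it is not the authors' implementation.

```python
# Hypothetical sketch of the disentangle-and-cross-perturb idea from the
# abstract. All module and variable names are illustrative assumptions;
# this is NOT the authors' released implementation.
import torch
import torch.nn as nn
import torch.nn.functional as F

class DisentangleHeads(nn.Module):
    """Split one encoder vector into authenticity- and event-specific parts."""
    def __init__(self, dim: int = 768):
        super().__init__()
        self.auth_proj = nn.Linear(dim, dim)   # authenticity-related branch
        self.event_proj = nn.Linear(dim, dim)  # event-specific branch

    def forward(self, h: torch.Tensor):
        return self.auth_proj(h), self.event_proj(h)

def cross_perturb(z_auth, z_event, alpha: float = 0.1):
    """Perturb each representation with the other (assumed additive mixing)."""
    z_auth_p = z_auth + alpha * z_event.detach()
    z_event_p = z_event + alpha * z_auth.detach()
    return z_auth_p, z_event_p

def refinement_loss(z_auth, z_event):
    """Penalize interaction between branches (cosine-similarity proxy)."""
    return F.cosine_similarity(z_auth, z_event, dim=-1).abs().mean()

# Usage with a stand-in for the BERT-based encoder output:
h = torch.randn(4, 768)                  # [batch, hidden] pooled news encoding
heads = DisentangleHeads()
z_auth, z_event = heads(h)
z_auth_p, z_event_p = cross_perturb(z_auth, z_event)
loss = refinement_loss(z_auth_p, z_event_p)
```

In the paper's framing, only the refined authenticity-related branch would feed the fake-news classifier, keeping the decision event-agnostic.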