@inproceedings{chen-etal-2025-real,
title = "Real-time Factuality Assessment from Adversarial Feedback",
author = "Chen, Sanxing and
Huang, Yukun and
Dhingra, Bhuwan",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.81/",
pages = "1610--1630",
ISBN = "979-8-89176-251-0",
abstract = "We show that existing evaluations for assessing the factuality of news from conventional sources, such as claims on fact-checking websites, result in high accuracies over time for LLM-based detectors{---}even after their knowledge cutoffs. This suggests that recent popular false information from such sources can be easily identified due to its likely presence in pre-training/retrieval corpora or the emergence of salient, yet shallow, patterns in these datasets. Instead, we argue that a proper factuality evaluation dataset should test a model{'}s ability to reason about current events by retrieving and reading related evidence. To this end, we develop a novel pipeline that leverages natural language feedback from a RAG-based detector to iteratively modify real-time news into deceptive variants that challenge LLMs. Our iterative rewrite decreases the binary classification ROC-AUC by an absolute 17.5 percent for a strong RAG-based GPT-4o detector. Our experiments reveal the important role of RAG in both evaluating and generating challenging news examples, as retrieval-free LLM detectors are vulnerable to unseen events and adversarial attacks, while feedback from RAG-based evaluation helps discover more deceitful patterns."
}
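
For a concrete picture of the pipeline summarized in the abstract, here is a minimal Python sketch of the iterative adversarial-rewrite idea: a rewriter LLM produces a deceptive variant of a real-time news item, a RAG-based detector scores it and returns natural-language feedback, and the rewrite is repeated until the detector is fooled or a round budget runs out. All names (`adversarial_rewrite`, `Verdict`, the stopping threshold) are illustrative assumptions for this sketch, not the authors' released implementation.

```python
# Hypothetical sketch of the adversarial feedback loop described in the abstract.
# The rewriter and detector are passed in as callables so the loop itself stays
# model-agnostic; in the paper the detector is a retrieval-augmented (RAG) LLM.

from dataclasses import dataclass
from typing import Callable


@dataclass
class Verdict:
    prob_fake: float   # detector's probability that the article is fake
    feedback: str      # natural-language critique of implausible or conflicting claims


def adversarial_rewrite(
    article: str,
    rewrite_fn: Callable[[str, str], str],   # (article, feedback) -> deceptive rewrite
    detect_fn: Callable[[str], Verdict],     # RAG-based factuality detector
    max_rounds: int = 3,
    fooled_below: float = 0.5,               # stop once the detector leans "real"
) -> str:
    """Iteratively harden a deceptive rewrite against detector feedback (sketch)."""
    feedback = ""
    candidate = article
    for _ in range(max_rounds):
        candidate = rewrite_fn(candidate, feedback)
        verdict = detect_fn(candidate)
        if verdict.prob_fake < fooled_below:
            break                            # detector now judges the fake as real
        feedback = verdict.feedback          # feed the critique into the next rewrite
    return candidate


if __name__ == "__main__":
    # Dummy stand-ins so the sketch runs end to end without any LLM or retrieval calls.
    demo_rewrite = lambda text, fb: text + " [revised to address: " + (fb or "n/a") + "]"
    demo_detect = lambda text: Verdict(
        prob_fake=0.4 if "[revised" in text else 0.9,
        feedback="the dateline conflicts with retrieved coverage of the event",
    )
    print(adversarial_rewrite("A real-time news item.", demo_rewrite, demo_detect))
```

Passing the detector's textual feedback (rather than only its score) into the next rewrite is the key design point the abstract highlights: the RAG-based critique is what exposes the shallow deceptive patterns that retrieval-free detectors miss.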
Markdown (Informal):
[Real-time Factuality Assessment from Adversarial Feedback](https://aclanthology.org/2025.acl-long.81/) (Chen et al., ACL 2025)

ACL:
Sanxing Chen, Yukun Huang, and Bhuwan Dhingra. 2025. Real-time Factuality Assessment from Adversarial Feedback. In Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1610–1630, Vienna, Austria. Association for Computational Linguistics.