@inproceedings{schlichtkrull-2025-attacks,
  title         = {Attacks by Content: Automated Fact-checking is an {AI} Security Issue},
  author        = {Schlichtkrull, Michael Sejr},
  editor        = {Christodoulopoulos, Christos and
                   Chakraborty, Tanmoy and
                   Rose, Carolyn and
                   Peng, Violet},
  booktitle     = {Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing},
  month         = nov,
  year          = {2025},
  address       = {Suzhou, China},
  publisher     = {Association for Computational Linguistics},
  url           = {https://preview.aclanthology.org/ingest-luhme/2025.emnlp-main.431/},
  doi           = {10.18653/v1/2025.emnlp-main.431},
  pages         = {8561--8576},
  isbn          = {979-8-89176-332-6},
  abstract      = {When AI agents retrieve and reason over external documents, adversaries can manipulate the data they receive to subvert their behaviour. Previous research has studied indirect prompt injection, where the attacker injects malicious instructions. We argue that injection of instructions is not necessary to manipulate agents {--} attackers could instead supply biased, misleading, or false information. We term this an *attack by content*. Existing defenses, which focus on detecting hidden commands, are ineffective against attacks by content. To defend themselves and their users, agents must critically evaluate retrieved information, corroborating claims with external evidence and evaluating source trustworthiness. We argue that this is analogous to an existing NLP task, automated fact-checking, which we propose to repurpose as a cognitive self-defense tool for agents.},
  internal-note = {NOTE(review): url points at a preview/ingest staging host; confirm against the canonical ACL Anthology page resolved by the doi},
}

@comment{
  Stray citation-snippet residue scraped from the ACL Anthology export page,
  preserved here for reference:

  Markdown (Informal)
  [Attacks by Content: Automated Fact-checking is an AI Security Issue](https://preview.aclanthology.org/ingest-luhme/2025.emnlp-main.431/) (Schlichtkrull, EMNLP 2025)
  ACL
}