@inproceedings{jia-etal-2025-task,
title = "The Task Shield: Enforcing Task Alignment to Defend Against Indirect Prompt Injection in {LLM} Agents",
author = "Jia, Feiran and
Wu, Tong and
Qin, Xin and
Squicciarini, Anna",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.1435/",
pages = "29680--29697",
ISBN = "979-8-89176-251-0",
abstract = "Large Language Model (LLM) agents are increasingly being deployed as conversational assistants capable of performing complex real-world tasks through tool integration. This enhanced ability to interact with external systems and process various data sources, while powerful, introduces significant security vulnerabilities. In particular, indirect prompt injection attacks pose a critical threat, where malicious instructions embedded within external data sources can manipulate agents to deviate from user intentions. While existing defenses show promise, they struggle to maintain robust security while preserving task functionality. We propose a novel and orthogonal perspective that reframes agent security from preventing harmful actions to ensuring task alignment, requiring every agent action to serve user objectives. Based on this insight, we develop Task Shield, a test-time defense mechanism that systematically verifies whether each instruction and tool call contributes to user-specified goals. Through experiments on the AgentDojo benchmark, we demonstrate that Task Shield reduces attack success rates (2.07{\%}) while maintaining high task utility (69.79{\%}) on GPT-4o, significantly outperforming existing defenses in various real-world scenarios."
}
Markdown (Informal)
[The Task Shield: Enforcing Task Alignment to Defend Against Indirect Prompt Injection in LLM Agents](https://preview.aclanthology.org/ingestion-acl-25/2025.acl-long.1435/) (Jia et al., ACL 2025)
ACL