@inproceedings{fu-etal-2026-ensemble,
title = "Ensemble Privacy Defense for Knowledge-Intensive {LLM}s against Membership Inference Attacks",
author = "Fu, Haowei and
Ni, Bo and
Xu, Han and
Liu, Kunpeng and
Lin, Dan and
Derr, Tyler",
editor = "Demberg, Vera and
Inui, Kentaro and
Marquez, Llu{\'i}s",
booktitle = "Findings of the {A}ssociation for {C}omputational {L}inguistics: {EACL} 2026",
month = mar,
year = "2026",
address = "Rabat, Morocco",
publisher = "Association for Linguistics",
url = "https://preview.aclanthology.org/manual-author-scripts/2026.findings-eacl.145/",
pages = "2786--2799",
isbn = "979-8-89176-386-9",
abstract = "Retrieval-Augmented Generation (RAG) and Supervised Finetuning (SFT) have become the predominant paradigms for equipping Large Language Models (LLMs) with external knowledge for diverse, knowledge-intensive tasks. However, while such knowledge injection improves performance, it also exposes new attack surfaces. Membership Inference Attacks (MIAs), which aim to determine whether a given data sample was included in a model{'}s training set, pose serious threats to privacy and trust in sensitive domains. To this end, we first systematically evaluate the vulnerability of RAG- and SFT-based LLMs to various MIAs. Then, to address the privacy risk, we further introduce a novel, model-agnostic defense framework, Ensemble Privacy Defense (EPD), which aggregates and evaluates the outputs of a knowledge-injected LLM, a base LLM, and a dedicated judge model to enhance resistance against MIAs. Comprehensive experiments show that, on average, EPD reduces MIA success by up to 27.8{\%} for SFT and 526.3{\%} for RAG compared to inference-time baseline, while maintaining answer quality."
}

@comment{Informal citation text copied from the ACL Anthology export page,
kept here for reference but wrapped as a comment so no BibTeX tool parses it:
Markdown (Informal)
[Ensemble Privacy Defense for Knowledge-Intensive LLMs against Membership Inference Attacks](https://preview.aclanthology.org/manual-author-scripts/2026.findings-eacl.145/) (Fu et al., Findings 2026)
ACL}