@inproceedings{goswami-etal-2026-nlp,
    title = "{NLP} Privacy Risk Identification in Social Media ({NLP}-{PRISM}): A Survey",
    author = "Goswami, Dhiman and
      Kumar, Jai Kruthunz Naveen and
      Das, Sanchari",
    editor = "Demberg, Vera and
      Inui, Kentaro and
      Marquez, Llu{\'i}s",
    booktitle = "Findings of the {A}ssociation for {C}omputational {L}inguistics: {EACL} 2026",
    month = mar,
    year = "2026",
    address = "Rabat, Morocco",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2026.findings-eacl.78/",
    pages = "1519--1541",
    isbn = "979-8-89176-386-9",
    abstract = "Natural Language Processing (NLP) is integral to social media analytics but often processes content containing Personally Identifiable Information (PII), behavioral cues, and metadata raising privacy risks such as surveillance, profiling, and targeted advertising. To systematically assess these risks, we review 203 peer-reviewed papers and propose the{~}\textit{NLP Privacy Risk Identification in Social Media (NLP-PRISM)} framework, which evaluates vulnerabilities across six dimensions: data collection, preprocessing, visibility, fairness, computational risk, and regulatory compliance. Our analysis shows that transformer models achieve F1-scores ranging from 0.58{--}0.84, but incur a $1\% - 23\%$ drop under privacy-preserving fine-tuning. Using NLP-PRISM, we examine privacy coverage in six NLP tasks: sentiment analysis (16), emotion detection (14), offensive language identification (19), code-mixed processing (39), native language identification (29), and dialect detection (24) revealing substantial gaps in privacy research. We further found a ($\downarrow 2\%-9\%$) trade-off in model utility, MIA AUC (membership inference attacks) 0.81, AIA accuracy 0.75 (attribute inference attacks). Finally, we advocate for stronger anonymization, privacy-aware learning, and fairness-driven training to enable ethical NLP in social media contexts."
}
Markdown (Informal)
[NLP Privacy Risk Identification in Social Media (NLP-PRISM): A Survey](https://preview.aclanthology.org/ingest-eacl/2026.findings-eacl.78/) (Goswami et al., Findings 2026)
ACL