@inproceedings{java-etal-2025-towards,
  title     = {Towards Operationalizing Right to Data Protection},
  author    = {Java, Abhinav and
               Shahid, Simra and
               Agarwal, Chirag},
  editor    = {Chiruzzo, Luis and
               Ritter, Alan and
               Wang, Lu},
  booktitle = {Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)},
  month     = apr,
  year      = {2025},
  address   = {Albuquerque, New Mexico},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2025.naacl-long.416/},
  pages     = {8191--8205},
  isbn      = {979-8-89176-189-6},
  abstract  = {The widespread practice of indiscriminate data scraping to fine-tune language models (LMs) raises significant legal and ethical concerns, particularly regarding compliance with data protection laws such as the General Data Protection Regulation (GDPR). This practice often results in the unauthorized use of personal information, prompting growing debate within the academic and regulatory communities. Recent works have introduced the concept of generating unlearnable datasets (by adding imperceptible noise to the clean data), such that the underlying model achieves lower loss during training but fails to generalize to the unseen test setting. Though somewhat effective, these approaches are predominantly designed for images and are limited by several practical constraints like requiring knowledge of the target model. To this end, we introduce RegText, a framework that injects imperceptible spurious correlations into natural language datasets, effectively rendering them unlearnable without affecting semantic content. We demonstrate RegText's utility through rigorous empirical analysis of small and large LMs. Notably, RegText can restrict newer models like GPT-4o and Llama from learning on our generated data, resulting in a drop in their test accuracy compared to their zero-shot performance and paving the way for generating unlearnable text to protect public data.},
}
Markdown (Informal)
[Towards Operationalizing Right to Data Protection](https://aclanthology.org/2025.naacl-long.416/) (Java et al., NAACL 2025)
ACL
- Abhinav Java, Simra Shahid, and Chirag Agarwal. 2025. Towards Operationalizing Right to Data Protection. In Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers), pages 8191–8205, Albuquerque, New Mexico. Association for Computational Linguistics.