% Ghosh, Dabre & Bhattacharyya — Findings of the ACL: NAACL 2025 (pp. 8362-8396).
% Studies whether pre-trained LMs withstand linguistically grounded input
% perturbations, evaluated across several Indic languages and downstream tasks.
% URL normalized from the temporary preview host (preview.aclanthology.org/fix-sig-urls/)
% to the canonical ACL Anthology address for paper ID 2025.findings-naacl.468.
@inproceedings{ghosh-etal-2025-language,
    title     = {Are Language Models Agnostic to Linguistically Grounded Perturbations? A Case Study of {Indic} Languages},
    author    = {Ghosh, Poulami and
                 Dabre, Raj and
                 Bhattacharyya, Pushpak},
    editor    = {Chiruzzo, Luis and
                 Ritter, Alan and
                 Wang, Lu},
    booktitle = {Findings of the Association for Computational Linguistics: {NAACL} 2025},
    month     = apr,
    year      = {2025},
    address   = {Albuquerque, New Mexico},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.findings-naacl.468/},
    pages     = {8362--8396},
    isbn      = {979-8-89176-195-7},
    abstract  = {Pre-trained language models (PLMs) are known to be susceptible to perturbations to the input text, but existing works do not explicitly focus on linguistically grounded attacks, which are subtle and more prevalent in nature. In this paper, we study whether PLMs are agnostic to linguistically grounded attacks or not. To this end, we offer the first study addressing this, investigating different Indic languages and various downstream tasks. Our findings reveal that although PLMs are susceptible to linguistic perturbations, when compared to non-linguistic attacks, PLMs exhibit a slightly lower susceptibility to linguistic attacks. This highlights that even constrained attacks are effective. Moreover, we investigate the implications of these outcomes across a range of languages, encompassing diverse language families and different scripts.},
}
Markdown (Informal)
[Are Language Models Agnostic to Linguistically Grounded Perturbations? A Case Study of Indic Languages](https://aclanthology.org/2025.findings-naacl.468/) (Ghosh et al., Findings 2025)
ACL