@inproceedings{anschutz-etal-2024-simpler,
  title         = {Simpler Becomes Harder: Do {LLM}s Exhibit a Coherent Behavior on Simplified Corpora?},
  author        = {Ansch{\"u}tz, Miriam and Mosca, Edoardo and Groh, Georg},
  editor        = {Di Nunzio, Giorgio Maria and
                   Vezzani, Federica and
                   Ermakova, Liana and
                   Azarbonyad, Hosein and
                   Kamps, Jaap},
  booktitle     = {Proceedings of the Workshop on {DeTermIt!} Evaluating Text Difficulty in a Multilingual Context @ {LREC-COLING} 2024},
  month         = may,
  year          = {2024},
  address       = {Torino, Italia},
  publisher     = {ELRA and ICCL},
  url           = {https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.determit-1.17/},
  pages         = {185--195},
  abstract      = {Text simplification seeks to improve readability while retaining the original content and meaning. Our study investigates whether pre-trained classifiers also maintain such coherence by comparing their predictions on both original and simplified inputs. We conduct experiments using 11 pre-trained models, including BERT and OpenAI's GPT 3.5, across six datasets spanning three languages. Additionally, we conduct a detailed analysis of the correlation between prediction change rates and simplification types/strengths. Our findings reveal alarming inconsistencies across all languages and models. If not promptly addressed, simplified inputs can be easily exploited to craft zero-iteration model-agnostic adversarial attacks with success rates of up to 50{\%}.},
  internal-note = {NOTE(review): url is a preview-build link (jlcl-multiple-ingestion); presumably the canonical URL is https://aclanthology.org/2024.determit-1.17/ -- confirm before publishing},
}
@comment{
Markdown (Informal) citation, as exported from the ACL Anthology page:
[Simpler Becomes Harder: Do LLMs Exhibit a Coherent Behavior on Simplified Corpora?](https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.determit-1.17/) (Anschütz et al., DeTermIt 2024)
ACL
}