@inproceedings{al-ali-etal-2026-different,
    title = "Different Time, Different Language: Revisiting the Bias Against Non-Native Speakers in {GPT} Detectors",
    author = "Al Ali, Adnan and
      Helcl, Jind{\v{r}}ich and
      Libovick{\'y}, Jind{\v{r}}ich",
    editor = "Baez Santamaria, Selene and
      Somayajula, Sai Ashish and
      Yamaguchi, Atsuki",
    booktitle = "Proceedings of the 19th Conference of the {E}uropean Chapter of the {A}ssociation for {C}omputational {L}inguistics (Volume 4: Student Research Workshop)",
    month = mar,
    year = "2026",
    address = "Rabat, Morocco",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2026.eacl-srw.20/",
    pages = "277--291",
    isbn = "979-8-89176-383-8",
    abstract = "LLM-based assistants have been widely popularised after the release of ChatGPT. Concerns have been raised about their misuse in academia, given the difficulty of distinguishing between human-written and generated text. To combat this, automated techniques have been developed and shown to be effective, to some extent. However, prior work suggests that these methods often falsely flag essays from non-native speakers as generated, due to their low perplexity extracted from an LLM, which is supposedly a key feature of the detectors. We revisit these statements two years later, specifically in the Czech language setting. We show that the perplexity of texts from non-native speakers of Czech is not lower than that of native speakers. We further examine detectors from three separate families and find no systematic bias against non-native speakers. Finally, we demonstrate that contemporary detectors operate effectively without relying on perplexity."
}
Markdown (Informal)
@comment{
[Different Time, Different Language: Revisiting the Bias Against Non-Native Speakers in GPT Detectors](https://preview.aclanthology.org/ingest-eacl/2026.eacl-srw.20/) (Al Ali et al., EACL 2026)
ACL
}