@inproceedings{dawkins-etal-2025-detection,
title = "When Detection Fails: The Power of Fine-Tuned Models to Generate Human-Like Social Media Text",
author = "Dawkins, Hillary and
Fraser, Kathleen C. and
Kiritchenko, Svetlana",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.findings-acl.695/",
pages = "13494--13527",
ISBN = "979-8-89176-256-5",
abstract = "Detecting AI-generated text is a difficult problem to begin with; detecting AI-generated text on social media is made even more difficult due to the short text length and informal, idiosyncratic language of the internet. It is nonetheless important to tackle this problem, as social media represents a significant attack vector in online influence campaigns, which may be bolstered through the use of mass-produced AI-generated posts supporting (or opposing) particular policies, decisions, or events. We approach this problem with the mindset and resources of a reasonably sophisticated threat actor, and create a dataset of 505,159 AI-generated social media posts from a combination of open-source, closed-source, and fine-tuned LLMs, covering 11 different controversial topics. We show that while the posts can be detected under typical research assumptions about knowledge of and access to the generating models, under the more realistic assumption that an attacker will not release their fine-tuned model to the public, detectability drops dramatically. This result is confirmed with a human study. Ablation experiments highlight the vulnerability of various detection algorithms to fine-tuned LLMs. This result has implications across all detection domains, since fine-tuning is a generally applicable and realistic LLM use case."
}