@inproceedings{srivastava-2025-large,
  title     = {Large Language Models Threaten Language{'}s Epistemic and Communicative Foundations},
  author    = {Srivastava, Shashank},
  editor    = {Christodoulopoulos, Christos and
               Chakraborty, Tanmoy and
               Rose, Carolyn and
               Peng, Violet},
  booktitle = {Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing},
  month     = nov,
  year      = {2025},
  address   = {Suzhou, China},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1457/},
  pages     = {28650--28664},
  isbn      = {979-8-89176-332-6},
  abstract  = {Large language models are reshaping the norms of human communication, sometimes decoupling words from genuine human thought. This transformation is deep, and undermines norms historically tied to authorship of text. We draw from linguistic philosophy and AI ethics to detail how large-scale text generation can induce semantic drift, erode accountability, and obfuscate intent and authorship. Our work here introduces hybrid authorship graphs (modeling humans, LLMs, and texts in a provenance network), epistemic doppelg{\"a}ngers (LLM-generated texts that are indistinguishable from human-authored texts), and authorship entropy. We explore mechanisms such as ``proof-of-interaction'' authorship verification and educational reforms to restore confidence in language. LLMs' benefits (broader access, increased fluency, automation, etc.) are undeniable, but the upheavals they introduce to the linguistic landscape demand reckoning.},
}
Markdown (Informal)
[Large Language Models Threaten Language’s Epistemic and Communicative Foundations](https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1457/) (Srivastava, EMNLP 2025)
ACL