@inproceedings{dzhubaeva-etal-2025-unstructured,
    title     = "Unstructured Minds, Predictable Machines: A Comparative Study of Narrative Cohesion in Human and {LLM} Stream-of-Consciousness Writing",
    author    = "Dzhubaeva, Nellia and
      Trinley, Katharina and
      Pissani, Laura",
    editor    = "Zhao, Jin and
      Wang, Mingyang and
      Liu, Zhu",
    booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 4: Student Research Workshop)",
    month     = jul,
    year      = "2025",
    address   = "Vienna, Austria",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/2025.acl-srw.85/",
    pages     = "1079--1096",
    isbn      = "979-8-89176-254-1",
    abstract  = "This paper examines differences between stream-of-consciousness (SoC) narratives written by humans and those generated by large language models (LLMs) to assess narrative coherence and personality expression. We generated texts by prompting LLMs (Llama-3.1-8B {\&} DeepSeek-R1-Distill-Llama-8B) with the first half of SoC-essays while either providing the models with the personality characteristics (Big Five) or omitting them. Our analysis revealed consistently low similarity between LLM-generated continuations and original human texts, as measured by cosine similarity, perplexity, and BLEU scores. Including explicit personality traits significantly enhanced Llama-3.1-8B{'}s performance, particularly in BLEU scores. Further analysis of personality expression showed varying alignment patterns between LLMs and human texts. Specifically, Llama-3.1-8B exhibited higher extraversion but low agreeableness, while DeepSeek-R1-Distill-Llama-8B displayed dramatic personality shifts during its reasoning process, especially when prompted with personality traits, with all models consistently showing very low Openness."
}
Markdown (Informal)
[Unstructured Minds, Predictable Machines: A Comparative Study of Narrative Cohesion in Human and LLM Stream-of-Consciousness Writing](https://aclanthology.org/2025.acl-srw.85/) (Dzhubaeva et al., ACL 2025)
ACL