@inproceedings{rezapour-etal-2025-tales,
title = "Tales of Morality: Comparing Human- and {LLM}-Generated Moral Stories from Visual Cues",
author = "Rezapour, Rezvaneh and
Jeoung, Sullam and
You, Zhiwen and
Diesner, Jana",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2025",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/name-variant-enfa-fane/2025.findings-emnlp.1029/",
doi = "10.18653/v1/2025.findings-emnlp.1029",
pages = "18915--18933",
ISBN = "979-8-89176-335-7",
abstract = "Do moral values align between images, the stories humans write about them, and the narratives generated by large language models (LLMs)? This question matters because stories are central to how humans communicate moral values, yet little is known about how people and LLMs perform this task in a multimodal (text and image) setting. We present a systematic comparison of moral values represented in human- and LLM-generated narratives based on images annotated by humans for moral content. Our analysis shows that while human stories reflect a balanced distribution of moral foundations and coherent narrative arcs, LLMs disproportionately emphasize the Care foundation and often lack emotional resolution. Even with moral conditioning, these biases persist in LLMs. We introduce a novel dataset and framework for evaluating moral storytelling in vision-language models, highlighting key challenges in aligning AI with human moral reasoning across cultures."
}