@inproceedings{cheng-etal-2025-humt,
title = "{H}um{T} {D}um{T}: Measuring and controlling human-like language in {LLM}s",
author = "Cheng, Myra and
Yu, Sunny and
Jurafsky, Dan",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.acl-long.1261/",
pages = "25983--26008",
ISBN = "979-8-89176-251-0",
abstract = "Should LLMs generate language that makes them seem human? Human-like language might improve user experience, but might also lead to deception, overreliance, and stereotyping. Assessing these potential impacts requires a systematic way to measure human-like tone in LLM outputs. We introduce HumT and SocioT, metrics for human-like tone and other dimensions of social perceptions in text data based on relative probabilities from an LLM. By measuring HumT across preference and usage datasets, we find that users prefer less human-like outputs from LLMs in many contexts. HumT also offers insights into the perceptions and impacts of anthropomorphism: human-like LLM outputs are highly correlated with warmth, social closeness, femininity, and low status, which are closely linked to the aforementioned harms. We introduce DumT, a method using HumT to systematically control and reduce the degree of human-like tone while preserving model performance. DumT offers a practical approach for mitigating risks associated with anthropomorphic language generation."
}
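The abstract describes HumT as a human-likeness metric derived from relative probabilities under an LLM. As an illustrative sketch only, not the authors' implementation, the model name and the two framing prompts below are placeholders; a relative-probability tone score along these lines might look like:

```python
# Illustrative sketch: score a text by how much more probable it is under a
# "human speaker" framing than under a "machine speaker" framing.
# The model ("gpt2") and the framing prefixes are assumptions for this demo,
# not the prompts or normalization used in the paper.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = "gpt2"  # placeholder LM
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
model.eval()

def mean_logprob(text: str, prefix: str) -> float:
    """Mean log-probability of the tokens of `text`, conditioned on `prefix`."""
    prefix_ids = tokenizer(prefix, return_tensors="pt").input_ids
    text_ids = tokenizer(text, return_tensors="pt").input_ids
    input_ids = torch.cat([prefix_ids, text_ids], dim=1)
    with torch.no_grad():
        logits = model(input_ids).logits
    # Each position t predicts token t+1; keep only the text-token targets.
    log_probs = torch.log_softmax(logits[:, :-1], dim=-1)
    targets = input_ids[:, 1:]
    token_scores = log_probs.gather(2, targets.unsqueeze(-1)).squeeze(-1)
    return token_scores[:, prefix_ids.shape[1] - 1:].mean().item()

def toy_humt_score(text: str) -> float:
    """Relative log-probability under a human-framed vs. machine-framed prefix;
    larger values suggest a more human-like tone."""
    human_prefix = "A person said: "      # hypothetical framing prompt
    machine_prefix = "A computer said: "  # hypothetical framing prompt
    return mean_logprob(text, human_prefix) - mean_logprob(text, machine_prefix)

print(toy_humt_score("Honestly, I totally get how you feel!"))
print(toy_humt_score("The requested value is 42."))
```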
Markdown (Informal)
[HumT DumT: Measuring and controlling human-like language in LLMs](https://aclanthology.org/2025.acl-long.1261/) (Cheng et al., ACL 2025)
ACL
Myra Cheng, Sunny Yu, and Dan Jurafsky. 2025. [HumT DumT: Measuring and controlling human-like language in LLMs](https://aclanthology.org/2025.acl-long.1261/). In *Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)*, pages 25983–26008, Vienna, Austria. Association for Computational Linguistics.