@inproceedings{cho-etal-2025-modeling,
    title     = {Modeling Motivated Reasoning in Law: Evaluating Strategic Role Conditioning in {LLM} Summarization},
    author    = {Cho, Eunjung and
                 Hoyle, Alexander Miserlis and
                 Hermstr{\"u}wer, Yoan},
    editor    = {Aletras, Nikolaos and
                 Chalkidis, Ilias and
                 Barrett, Leslie and
                 Goanț{\u{a}}, C{\u{a}}t{\u{a}}lina and
                 Preoțiuc-Pietro, Daniel and
                 Spanakis, Gerasimos},
    booktitle = {Proceedings of the Natural Legal Language Processing Workshop 2025},
    month     = nov,
    year      = {2025},
    address   = {Suzhou, China},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2025.nllp-1.7/},
    doi       = {10.18653/v1/2025.nllp-1.7},
    pages     = {68--112},
    isbn      = {979-8-89176-338-8},
    abstract  = {Large Language Models (LLMs) are increasingly used to generate user-tailored summaries, adapting outputs to specific stakeholders. In legal contexts, this raises important questions about motivated reasoning {---} how models strategically frame information to align with a stakeholder{'}s position within the legal system. Building on theories of legal realism and recent trends in legal practice, we investigate how LLMs respond to prompts conditioned on different legal roles (e.g., judges, prosecutors, attorneys) when summarizing judicial decisions. We introduce an evaluation framework grounded in legal fact and reasoning inclusion, also considering favorability towards stakeholders. Our results show that even when prompts include balancing instructions, models exhibit selective inclusion patterns that reflect role-consistent perspectives. These findings raise broader concerns about how similar alignment may emerge as LLMs begin to infer user roles from prior interactions or context, even without explicit role instructions. Our results underscore the need for role-aware evaluation of LLM summarization behavior in high-stakes legal settings.}
}
@comment{Markdown (Informal)}
[Modeling Motivated Reasoning in Law: Evaluating Strategic Role Conditioning in LLM Summarization](https://aclanthology.org/2025.nllp-1.7/) (Cho et al., NLLP 2025)
ACL