@inproceedings{pichler-pagel-2026-evaluating-humanities,
    title         = {Evaluating Humanities Theory Alignment in Large Language Models: Incremental Prompting and Statistical Assessment},
    author        = {Pichler, Axel and
      Pagel, Janis},
    editor        = {Alves, Diego and
      Bizzoni, Yuri and
      Degaetano-Ortlieb, Stefania and
      Kazantseva, Anna and
      Pagel, Janis and
      Szpakowicz, Stan},
    booktitle     = {Proceedings of the 10th Joint {SIGHUM} Workshop on Computational Linguistics for Cultural Heritage, Social Sciences, Humanities and Literature 2026},
    month         = mar,
    year          = {2026},
    address       = {Rabat, Morocco},
    publisher     = {Association for Computational Linguistics},
    url           = {https://preview.aclanthology.org/ingest-eacl/2026.latechclfl-1.27/},
    pages         = {280--294},
    isbn          = {979-8-89176-373-9},
    abstract      = {We propose a method to evaluate the extent to which an LLM{'}s observable input{--}output behavior aligns with established theories in the humanities and cultural studies. We instantiate the framework on three humanities theories{---}Davidson{'}s truth-conditional semantics, Lewis{'}s truth in fiction, and Iser{'}s concept of textual gaps{---}using a top-down, theory-driven black-box framework. Core assumptions of these theories are reconstructed into testable behavioral rules and assessed via controlled classification tasks with systematic prompt comparisons and significance testing. Our experiments show that theory-uninformed classification prompts generally outperform theory-enriched prompts in Lewis and Iser settings, while theory-informed prompts help in the Davidson task. Gemini Flash consistently achieves the highest scores across tasks and corpora, while the Iser gap detection task remains substantially harder than binary truth-conditional judgments. Statistical tests confirm robust prompt effects and the failure of basic prompts. However, model behavior under incremental theory exposure is unstable and architecture-dependent.},
    internal-note = {NOTE(review): url is a preview/ingest mirror (preview.aclanthology.org/ingest-eacl); swap in the canonical https://aclanthology.org/2026.latechclfl-1.27/ once the volume is published there -- verify before release},
}

Trailing text below is webpage residue from the ACL Anthology export page; anything outside an @entry is ignored by BibTeX, so it is kept here verbatim rather than deleted.

Markdown (Informal)
[Evaluating Humanities Theory Alignment in Large Language Models: Incremental Prompting and Statistical Assessment](https://preview.aclanthology.org/ingest-eacl/2026.latechclfl-1.27/) (Pichler & Pagel, LaTeCH-CLfL 2026)
ACL