@inproceedings{murzaku-rambow-2025-zero,
    title = "Zero-Shot Belief: A Hard Problem for {LLM}s",
    author = "Murzaku, John and
      Rambow, Owen",
    editor = "Strube, Michael and
      Braud, Chloe and
      Hardmeier, Christian and
      Li, Junyi Jessy and
      Loaiciga, Sharid and
      Zeldes, Amir and
      Li, Chuyuan",
    booktitle = "Proceedings of the 6th Workshop on Computational Approaches to Discourse, Context and Document-Level Inferences (CODI 2025)",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://preview.aclanthology.org/ingest-emnlp/2025.codi-1.10/",
    pages = "107--119",
    isbn = "979-8-89176-343-2",
    abstract = "We present two LLM-based approaches to zero-shot source-and-target belief prediction on FactBank: a unified system that identifies events, sources, and belief labels in a single pass, and a hybrid approach that uses a fine-tuned DeBERTa tagger for event detection. We show that multiple open-sourced, closed-source, and reasoning-based LLMs struggle with the task. We then argue that careful source normalization is crucial and provide a few-shot normalization method that improves alignment between predicted and gold-standard sources. Using the hybrid approach, we achieve new state-of-the-art results on FactBank and offer a detailed error analysis. Our approach is then tested on the Italian belief corpus ModaFact. Although we fall short of prior fine-tuned baselines, our zero-shot methods substantially narrow the gap, emphasizing the promise of hybrid pipelines for belief prediction beyond English. We conclude that integrated event tagging, careful prompting, and robust source normalization all jointly enable effective zero-shot belief models.",
    internal-note = "NOTE(review): url points at the ACL Anthology ingest *preview* site (preview.aclanthology.org/ingest-emnlp/...); swap in the canonical aclanthology.org link once the paper is live there. Also confirm editor-name diacritics (e.g. Braud, Loaiciga) against the published front matter",
}
@comment{Markdown (Informal)}
@comment{
[Zero-Shot Belief: A Hard Problem for LLMs](https://preview.aclanthology.org/ingest-emnlp/2025.codi-1.10/) (Murzaku & Rambow, CODI 2025)
ACL
- John Murzaku and Owen Rambow. 2025. Zero-Shot Belief: A Hard Problem for LLMs. In Proceedings of the 6th Workshop on Computational Approaches to Discourse, Context and Document-Level Inferences (CODI 2025), pages 107–119, Suzhou, China. Association for Computational Linguistics.
}