@inproceedings{sterner-etal-2026-contrastive,
  title     = {Contrastive Learning with Narrative Twins for Modeling Story Salience},
  author    = {Sterner, Igor and
               Lascarides, Alex and
               Keller, Frank},
  editor    = {Demberg, Vera and
               Inui, Kentaro and
               Marquez, Llu{\'i}s},
  booktitle = {Proceedings of the 19th Conference of the {European} Chapter of the {Association} for {Computational Linguistics} (Volume 1: Long Papers)},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.71/},
  pages     = {1528--1550},
  isbn      = {979-8-89176-380-7},
  abstract  = {Understanding narratives requires identifying which events are most salient for a story{'}s progression. We present a contrastive learning framework for modeling narrative salience that learns story embeddings from narrative twins: stories that share the same plot but differ in surface form. Our model is trained to distinguish a story from both its narrative twin and a distractor with similar surface features but different plot. Using the resulting embeddings, we evaluate four narratologically motivated operations for inferring salience (deletion, shifting, disruption, and summarization). Experiments on short narratives from the ROCStories corpus and longer Wikipedia plot summaries show that contrastively learned story embeddings outperform a masked-language-model baseline, and that summarization is the most reliable operation for identifying salient sentences. If narrative twins are not available, random dropout can be used to generate the twins from a single story. Effective distractors can be obtained either by prompting LLMs or, in long-form narratives, by using different parts of the same story.},
}

@comment{Scraped page residue preserved from the original export --
Markdown (Informal)
[Contrastive Learning with Narrative Twins for Modeling Story Salience](https://preview.aclanthology.org/ingest-eacl/2026.eacl-long.71/) (Sterner et al., EACL 2026)
ACL}