@inproceedings{lo-mitkov-2025-anaphora,
title = "Does Anaphora Resolution Improve {LLM} Fine-Tuning for Summarisation?",
author = "Lo, Yi Chun and
Mitkov, Ruslan",
editor = "Picazo-Izquierdo, Alicia and
Estevanell-Valladares, Ernesto Luis and
Mitkov, Ruslan and
Guillena, Rafael Mu{\~n}oz and
Cerd{\'a}, Ra{\'u}l Garc{\'i}a",
booktitle = "Proceedings of the First Workshop on Comparative Performance Evaluation: From Rules to Language Models",
month = sep,
year = "2025",
address = "Varna, Bulgaria",
publisher = "INCOMA Ltd., Shoumen, Bulgaria",
url = "https://preview.aclanthology.org/corrections-2026-01/2025.r2lm-1.7/",
pages = "59--66",
    abstract = "This study investigates whether adding anaphora resolution as a preprocessing step before fine-tuning LLMs for text summarisation can improve the quality of the summary output. Two sets of training runs with the T5-base and BART-large models were conducted using the SAMSum dataset: one uses the original text and the other uses text processed by a simplified version of MARS (Mitkov{'}s Anaphora Resolution System). The experiments reveal that when the T5-base model is fine-tuned on the anaphora-resolved inputs, the ROUGE metrics improve. In contrast, the BART-large model shows only a slight improvement after fine-tuning under the same conditions, which is not statistically significant. Further analysis of the generated summaries indicates that anaphora resolution is helpful for semantic alignment."
}