@inproceedings{sajid-etal-2025-shot,
title = "Few-Shot Multilingual Coreference Resolution Using Long-Context Large Language Models",
author = "Sajid, Moiz and
Fraz, Muhammad and
Latif, Seemab and
Zafar, Zuhair",
editor = "Ogrodniczuk, Maciej and
Nov{\'a}k, Michal and
Poesio, Massimo and
Pradhan, Sameer and
Ng, Vincent",
booktitle = "Proceedings of the Eighth Workshop on Computational Models of Reference, Anaphora and Coreference",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/author-page-yu-wang-polytechnic/2025.crac-1.14/",
doi = "10.18653/v1/2025.crac-1.14",
pages = "154--162",
abstract = "In this work, we present our system, which ranked second in the CRAC 2025 Shared Task on Multilingual Coreference Resolution (LLM Track). For multilingual coreference resolution, our system mainly uses long-context large language models (LLMs) in a few-shot in-context learning setting. Among the various approaches we explored, few-shot prompting proved to be the most effective, particularly due to the complexity of the task and the availability of high-quality data with referential relationships provided as part of the competition. We employed Gemini 2.5 Pro, one of the best available closed-source long-context LLMs at the time of submission. Our system achieved a CoNLL F1 score of 61.74 on the mini-testset, demonstrating that performance improves significantly with the number of few-shot examples provided, thanks to the model{'}s extended context window. While this approach comes with trade-offs in terms of inference cost and response latency, it highlights the potential of long-context LLMs for tackling multilingual coreference without task-specific fine-tuning. Although direct comparisons with traditional supervised systems are not straightforward, our findings provide valuable insights and open avenues for future work, particularly in expanding support for low-resource languages."
}
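As a rough illustration of the few-shot in-context learning setup the abstract describes, the sketch below assembles a prompt from annotated example documents and queries a long-context Gemini model through the google-generativeai Python client. The bracketed cluster-ID output format, the `build_prompt` and `annotate_coreference` helpers, and the exact model identifier are illustrative assumptions, not the authors' actual pipeline or the shared task's output schema.

```python
# Minimal sketch of few-shot coreference prompting with a long-context LLM.
# Prompt format, helpers, and model name are assumptions for illustration only.
import google.generativeai as genai

genai.configure(api_key="YOUR_API_KEY")  # placeholder credential
model = genai.GenerativeModel("gemini-2.5-pro")  # assumed model identifier

# Hypothetical few-shot examples: (raw text, same text with coreference
# clusters marked inline as bracketed cluster IDs).
FEW_SHOT_EXAMPLES = [
    ("John met Mary. He greeted her.",
     "[1 John] met [2 Mary]. [1 He] greeted [2 her]."),
    # ... more annotated documents, ideally in multiple languages ...
]

def build_prompt(examples, target_text):
    """Concatenate annotated examples followed by the unannotated target."""
    parts = ["Mark coreference clusters with bracketed cluster IDs, "
             "following the examples below.\n"]
    for raw, annotated in examples:
        parts.append(f"Input:\n{raw}\nAnnotated:\n{annotated}\n")
    parts.append(f"Input:\n{target_text}\nAnnotated:")
    return "\n".join(parts)

def annotate_coreference(target_text):
    """Send the few-shot prompt to the model and return its annotation."""
    prompt = build_prompt(FEW_SHOT_EXAMPLES, target_text)
    response = model.generate_content(prompt)
    return response.text

if __name__ == "__main__":
    print(annotate_coreference("Anna lost her keys. She found them later."))
```

In this setup, a longer context window simply allows more annotated documents to be packed into `FEW_SHOT_EXAMPLES`, which is consistent with the abstract's observation that performance improves with the number of few-shot examples, at the cost of inference price and latency.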