@inproceedings{jon-bojar-2025-finetuning,
    title = "Finetuning {LLM}s for {E}va{C}un 2025 token prediction shared task",
    author = "Jon, Josef and
      Bojar, Ond{\v{r}}ej",
    editor = "Anderson, Adam and
      Gordin, Shai and
      Li, Bin and
      Liu, Yudong and
      Passarotti, Marco C. and
      Sprugnoli, Rachele",
    booktitle = "Proceedings of the Second Workshop on Ancient Language Processing",
    month = may,
    year = "2025",
    address = "Albuquerque, New Mexico, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.alp-1.29/",
    pages = "221--225",
    isbn = "979-8-89176-235-0",
    abstract = "In this paper, we present our submission for the token prediction task of EvaCun 2025. Our systems are based on LLMs (Command-R, Mistral, and Aya Expanse) fine-tuned on the task data provided by the organizers. As we only possess a very superficial knowledge of the subject field and the languages of the task, we simply used the training data without any task-specific adjustments, preprocessing, or filtering. We compare 3 different approaches (based on 3 different prompts) of obtaining the predictions, and we evaluate them on a held-out part of the data."
}
Markdown (Informal)
[Finetuning LLMs for EvaCun 2025 token prediction shared task](https://aclanthology.org/2025.alp-1.29/) (Jon & Bojar, ALP 2025)
ACL