@inproceedings{kunz-2025-train,
title = "Train More Parameters But Mind Their Placement: {Insights} into Language Adaptation with {PEFT}",
author = "Kunz, Jenny",
editor = "Johansson, Richard and
Stymne, Sara",
booktitle = "Proceedings of the Joint 25th Nordic Conference on Computational Linguistics and 11th Baltic Conference on Human Language Technologies (NoDaLiDa/Baltic-HLT 2025)",
month = mar,
year = "2025",
address = "Tallinn, Estonia",
publisher = "University of Tartu Library",
url = "https://preview.aclanthology.org/fix-sig-urls/2025.nodalida-1.35/",
pages = "323--330",
ISBN = "978-9908-53-109-0",
abstract = "Smaller LLMs still face significant challenges even in medium-resourced languages, particularly when it comes to language-specific knowledge {--} a problem not easily resolved with machine-translated data. In this case study on Icelandic, we aim to enhance the generation performance of an LLM by specialising it using unstructured text corpora. A key focus is on preventing interference with the models' capabilities of handling longer context during this adaptation. Through ablation studies using various parameter-efficient fine-tuning (PEFT) methods and setups, we find that increasing the number of trainable parameters leads to better and more robust language adaptation. LoRAs placed in the feed-forward layers and bottleneck adapters show promising results with sufficient parameters, while prefix tuning and (IA)$^3$ are not suitable. Although improvements are consistent in 0-shot summarisation, some adapted models struggle with longer context lengths, an issue that can be mitigated by adapting only the final layers."
}