@inproceedings{corral-etal-2025-pipeline,
title = "Pipeline Analysis for Developing Instruct {LLM}s in Low-Resource Languages: A Case Study on {B}asque",
author = "Corral, Ander and
Antero, Ixak Sarasua and
Saralegi, Xabier",
editor = "Chiruzzo, Luis and
Ritter, Alan and
Wang, Lu",
booktitle = "Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = apr,
year = "2025",
address = "Albuquerque, New Mexico",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.naacl-long.629/",
pages = "12636--12655",
ISBN = "979-8-89176-189-6",
abstract = "Large language models (LLMs) are typically optimized for resource-rich languages like English, exacerbating the gap between high-resource and underrepresented languages. This work presents a detailed analysis of strategies for developing a model capable of following instructions in a low-resource language, specifically Basque, by focusing on three key stages: pre-training, instruction tuning, and alignment with human preferences. Our findings demonstrate that continual pre-training with a high-quality Basque corpus of around 600 million words improves natural language understanding (NLU) of the foundational model by over 12 points. Moreover, instruction tuning and human preference alignment using automatically translated datasets proved highly effective, resulting in a 24-point improvement in instruction-following performance. The resulting models, Llama-eus-8B and Llama-eus-8B-instruct, establish a new state-of-the-art for Basque in the sub-10B parameter category."
}
Markdown (Informal)
[Pipeline Analysis for Developing Instruct LLMs in Low-Resource Languages: A Case Study on Basque](https://aclanthology.org/2025.naacl-long.629/) (Corral et al., NAACL 2025)
ACL
Ander Corral, Ixak Sarasua Antero, and Xabier Saralegi. 2025. [Pipeline Analysis for Developing Instruct LLMs in Low-Resource Languages: A Case Study on Basque](https://aclanthology.org/2025.naacl-long.629/). In *Proceedings of the 2025 Conference of the Nations of the Americas Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)*, pages 12636–12655, Albuquerque, New Mexico. Association for Computational Linguistics.