@inproceedings{li-etal-2025-context-learning,
title = "It{'}s All About In-Context Learning! Teaching Extremely Low-Resource Languages to {LLM}s",
author = "Li, Yue and
Zhao, Zhixue and
Scarton, Carolina",
editor = "Christodoulopoulos, Christos and
Chakraborty, Tanmoy and
Rose, Carolyn and
Peng, Violet",
booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2025",
address = "Suzhou, China",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/ingest-emnlp/2025.emnlp-main.1502/",
pages = "29532--29547",
ISBN = "979-8-89176-332-6",
abstract = "Extremely low-resource languages, especially those written in rare scripts, remain largely unsupported by large language models (LLMs). This is due in part to compounding factors such as the lack of training data. This paper delivers the first comprehensive analysis of whether LLMs can acquire such languages purely via in-context learning (ICL), with or without auxiliary alignment signals, and how these methods compare to parameter-efficient fine-tuning (PEFT). We systematically evaluate 20 under-represented languages across three state-of-the-art multilingual LLMs. Our findings highlight the limitation of PEFT when both language and its script are extremely under-represented by the LLM. In contrast, zero-shot ICL with language alignment is impressively effective on extremely low-resource languages, while few-shot ICL or PEFT is more beneficial for languages relatively better represented by LLMs. For LLM practitioners working on extremely low-resource languages, we summarise guidelines grounded by our results on adapting LLMs to low-resource languages, e.g., avoiding fine-tuning a multilingual model on languages of unseen scripts."
}