@inproceedings{aggarwal-etal-2025-improving,
    title = "Improving Cross Lingual Transfer by Pretraining with Active Forgetting",
    author = "Aggarwal, Divyanshu and
      Sathe, Ashutosh and
      Sitaram, Sunayana",
    editor = "Christodoulopoulos, Christos and
      Chakraborty, Tanmoy and
      Rose, Carolyn and
      Peng, Violet",
    booktitle = "Proceedings of the 2025 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2025",
    address = "Suzhou, China",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2025.emnlp-main.120/",
    pages = "2367--2378",
    isbn = "979-8-89176-332-6",
    abstract = "Large Language Models (LLMs) demonstrate exceptional capabilities in a multitude of NLP tasks. However, the efficacy of such models to languages other than English is often limited. Prior works have shown that encoder-only models such as BERT or XLM-RoBERTa show impressive cross lingual transfer of their capabilities from English to other languages. In this work, we propose a pretraining strategy that uses active forgetting to achieve similar cross lingual transfer in decoder-only LLMs. We show that LLMs pretrained with active forgetting are highly effective when adapting to new and unseen languages. Through extensive experimentation, we find that LLMs pretrained with active forgetting are able to learn better multilingual representations which translates to better performance in many downstream tasks."
}
[Improving Cross Lingual Transfer by Pretraining with Active Forgetting](https://aclanthology.org/2025.emnlp-main.120/) (Aggarwal et al., EMNLP 2025)
ACL