@inproceedings{wu-etal-2024-f,
title = "{F}-{MALLOC}: Feed-forward Memory Allocation for Continual Learning in Neural Machine Translation",
author = "Wu, Junhong and
Liu, Yuchen and
Zong, Chengqing",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2024.naacl-long.398/",
doi = "10.18653/v1/2024.naacl-long.398",
pages = "7180--7192",
abstract = "In the evolving landscape of Neural Machine Translation (NMT), the pretrain-then-finetune paradigm has yielded impressive results. However, the persistent challenge of Catastrophic Forgetting (CF) remains a hurdle. While previous work has introduced Continual Learning (CL) methods to address CF, these approaches grapple with the delicate balance between avoiding forgetting and maintaining system extensibility. To address this, we propose a CL method, named $\textbf{F-MALLOC}$ ($\textbf{F}$eed-forward $\textbf{M}$emory $\textbf{ALLOC}$ation). F-MALLOC is inspired by recent insights highlighting that feed-forward layers emulate neural memories and encapsulate crucial translation knowledge. It decomposes feed-forward layers into discrete memory cells and allocates these memories to different tasks. By learning to allocate and safeguard these memories, our method effectively alleviates CF while ensuring robust extendability. Besides, we propose a comprehensive assessment protocol for multi-stage CL of NMT systems. Experiments conducted following this new protocol showcase the superior performance of F-MALLOC, evidenced by higher BLEU scores and almost zero forgetting."
}
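The abstract's core mechanism, viewing each hidden unit of a Transformer feed-forward block as a discrete memory cell that can be allocated to, and reserved for, a specific task, can be sketched roughly as below. This is a minimal, hypothetical PyTorch illustration under my own assumptions, not the authors' implementation; the class `MaskedFFN`, the `allocate` helper, and all dimensions are invented for exposition.

```python
import torch
import torch.nn as nn

class MaskedFFN(nn.Module):
    """Hypothetical Transformer feed-forward block whose hidden units
    ("memory cells") are allocated to tasks via binary masks.
    Illustrative sketch only; not the F-MALLOC implementation."""

    def __init__(self, d_model: int = 512, d_ff: int = 2048):
        super().__init__()
        self.w_in = nn.Linear(d_model, d_ff)   # one "key" row per memory cell
        self.w_out = nn.Linear(d_ff, d_model)  # one "value" column per cell
        self.act = nn.ReLU()
        self.task_masks: dict[str, torch.Tensor] = {}  # task -> binary mask over cells

    def allocate(self, task: str, cell_ids: torch.Tensor) -> None:
        """Reserve the given hidden units for `task` (assumed allocation step)."""
        mask = torch.zeros(self.w_in.out_features)
        mask[cell_ids] = 1.0
        self.task_masks[task] = mask

    def forward(self, x: torch.Tensor, task: str) -> torch.Tensor:
        h = self.act(self.w_in(x))
        # Zero out memory cells not allocated to this task, so each task
        # reads and writes only its own slice of the feed-forward memory.
        h = h * self.task_masks[task]
        return self.w_out(h)
```

To "safeguard" memories of earlier tasks during continual training, one would additionally zero the gradients flowing into cells owned by previous tasks, so new tasks can only claim still-free capacity; how cells are learned to be allocated is the paper's contribution and is not shown here.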