@inproceedings{lee-etal-2024-commit,
title = "{COMMIT}: Code-Mixing {E}nglish-Centric Large Language Model for Multilingual Instruction Tuning",
author = "Lee, Jaeseong and
Jung, YeonJoon and
Hwang, Seung-won",
editor = "Duh, Kevin and
Gomez, Helena and
Bethard, Steven",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2024",
month = jun,
year = "2024",
address = "Mexico City, Mexico",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2024.findings-naacl.198/",
doi = "10.18653/v1/2024.findings-naacl.198",
pages = "3130--3137",
abstract = "Recently, instruction-tuned large language models (LLMs) are showing prominent performance on various tasks, such as question answering. However, the majority of instruction-tuned LLMs are English-centric, which hinders their application to low-resource language QA. In this paper, we propose COde-Mixed Multilingual Instruction Tuning (COMMIT) to adapt English-centric LLM to low-resource language QA. We point out two main causes of English-centricness: imbalance of unlabeled data, and English-centric instruction tuning datasets. To deviate from English-centric instruction tuning, we propose to specialize code-mixing for instruction tuning, which blocks code-mixing in English templates, to leverage the potential of its superiority. To overcome data imbalance, we perform cross-lingual alignment. The majority of cross-lingual alignment works focused on making representations similar, which is not desirable to decoder-based LLMs, such as LLaMA. Therefore, we propose code-mixed continual causal language modeling to align the decoder. COMMIT improves the exact match score of low-resourced language QA by up to 32x. Code is publicly available."
}