@inproceedings{ye-etal-2022-eliciting,
title = "Eliciting and Understanding Cross-task Skills with Task-level Mixture-of-Experts",
author = "Ye, Qinyuan and
Zha, Juan and
Ren, Xiang",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.findings-emnlp.189/",
doi = "10.18653/v1/2022.findings-emnlp.189",
pages = "2567--2592",
    abstract = "Recent works suggest that transformer models are capable of multi-tasking on diverse NLP tasks and adapting to new tasks efficiently. However, the potential of these multi-task models may be limited as they use the same set of parameters for all tasks. In contrast, humans tackle tasks in a more flexible way, by making appropriate assumptions about what skills and knowledge are relevant and executing only the necessary computations. Inspired by this, we propose to use task-level mixture-of-experts models, which have a collection of transformer layers (i.e., experts) and a router component to choose among these experts dynamically and flexibly. We find that these models help improve the average performance gain (ARG) metric by 2.6{\%} when adapting to unseen tasks in few-shot settings, and by 5.6{\%} in zero-shot generalization settings. Further, we show that the learned routing decisions and experts partly rediscover human categorization of NLP tasks {--} certain experts are strongly associated with extractive tasks, some with classification tasks, and some with tasks requiring world knowledge."
}
Markdown (Informal)
[Eliciting and Understanding Cross-task Skills with Task-level Mixture-of-Experts](https://preview.aclanthology.org/fix-sig-urls/2022.findings-emnlp.189/) (Ye et al., Findings 2022)
ACL
Qinyuan Ye, Juan Zha, and Xiang Ren. 2022. Eliciting and Understanding Cross-task Skills with Task-level Mixture-of-Experts. In *Findings of the Association for Computational Linguistics: EMNLP 2022*, pages 2567–2592, Abu Dhabi, United Arab Emirates. Association for Computational Linguistics.
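
As a quick illustration of the task-level mixture-of-experts idea described in the abstract, here is a minimal sketch (not the authors' released code): a router scores a set of expert transformer layers from a task-level embedding, and every example of a task is sent through the top-scoring expert. The PyTorch framework choice, class and variable names, and dimensions are all assumptions made for illustration only.

```python
# Minimal sketch of a task-level mixture-of-experts layer (illustrative only).
import torch
import torch.nn as nn
import torch.nn.functional as F


class TaskLevelMoELayer(nn.Module):
    """Holds several expert transformer layers and a task-level router."""

    def __init__(self, d_model=64, n_experts=4, n_heads=4):
        super().__init__()
        # Each expert is an independent transformer encoder layer.
        self.experts = nn.ModuleList(
            nn.TransformerEncoderLayer(d_model, n_heads, batch_first=True)
            for _ in range(n_experts)
        )
        # The router maps a task embedding to one score per expert.
        self.router = nn.Linear(d_model, n_experts)

    def forward(self, hidden, task_embedding):
        # hidden: (batch, seq_len, d_model); task_embedding: (d_model,)
        logits = self.router(task_embedding)        # (n_experts,)
        probs = F.softmax(logits, dim=-1)
        # Hard top-1 routing: all examples of this task share one expert.
        expert_idx = int(probs.argmax())
        return self.experts[expert_idx](hidden), probs


if __name__ == "__main__":
    layer = TaskLevelMoELayer()
    x = torch.randn(2, 10, 64)       # a toy batch from a single task
    task_emb = torch.randn(64)       # e.g., a learned per-task embedding
    out, routing = layer(x, task_emb)
    print(out.shape, routing)
```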