@inproceedings{mueller-etal-2022-text,
title = "Do Text-to-Text Multi-Task Learners Suffer from Task Conflict?",
author = "Mueller, David and
Andrews, Nicholas and
Dredze, Mark",
editor = "Goldberg, Yoav and
Kozareva, Zornitsa and
Zhang, Yue",
booktitle = "Findings of the Association for Computational Linguistics: EMNLP 2022",
month = dec,
year = "2022",
address = "Abu Dhabi, United Arab Emirates",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.findings-emnlp.206/",
doi = "10.18653/v1/2022.findings-emnlp.206",
pages = "2843--2858",
abstract = "Traditional multi-task learning architectures learn a single model across multiple tasks through a shared encoder followed by task-specific decoders. Learning these models often requires specialized training algorithms that address task-conflict in the shared parameter updates, which otherwise can lead to negative transfer. A new type of multi-task learning within NLP homogenizes multi-task architectures as a shared encoder and language model decoder, which does surprisingly well across a range of diverse tasks. Does this new architecture suffer from task-conflicts that require specialized training algorithms? We study how certain factors in the shift towards text-to-text models affects multi-task conflict and negative transfer, finding that both directional conflict and transfer are surprisingly constant across architectures."
}