@inproceedings{kerinec-etal-2018-deep,
  title     = {When does deep multi-task learning work for loosely related document classification tasks?},
  author    = {Kerinec, Emma and
               Braud, Chlo{\'e} and
               S{\o}gaard, Anders},
  editor    = {Linzen, Tal and
               Chrupa{\l}a, Grzegorz and
               Alishahi, Afra},
  booktitle = {Proceedings of the 2018 {EMNLP} Workshop {B}lackbox{NLP}: Analyzing and Interpreting Neural Networks for {NLP}},
  month     = nov,
  year      = {2018},
  address   = {Brussels, Belgium},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/W18-5401},
  doi       = {10.18653/v1/W18-5401},
  pages     = {1--8},
  abstract  = {This work aims to contribute to our understanding of \textit{when} multi-task learning through parameter sharing in deep neural networks leads to improvements over single-task learning. We focus on the setting of learning from \textit{loosely related} tasks, for which no theoretical guarantees exist. We therefore approach the question empirically, studying which properties of datasets and single-task learning characteristics correlate with improvements from multi-task learning. We are the first to study this in a text classification setting and across more than 500 different task pairs.},
}
Markdown (Informal)
[When does deep multi-task learning work for loosely related document classification tasks?](https://aclanthology.org/W18-5401) (Kerinec et al., BlackboxNLP Workshop at EMNLP 2018)
ACL