@inproceedings{bjerva-etal-2019-transductive,
title = "Transductive Auxiliary Task Self-Training for Neural Multi-Task Models",
author = "Bjerva, Johannes and
Kann, Katharina and
Augenstein, Isabelle",
editor = "Cherry, Colin and
Durrett, Greg and
Foster, George and
Haffari, Reza and
Khadivi, Shahram and
Peng, Nanyun and
Ren, Xiang and
Swayamdipta, Swabha",
booktitle = "Proceedings of the 2nd Workshop on Deep Learning Approaches for Low-Resource NLP (DeepLo 2019)",
month = nov,
year = "2019",
address = "Hong Kong, China",
publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/D19-6128/",
doi = "10.18653/v1/D19-6128",
pages = "253--258",
    abstract = "Multi-task learning and self-training are two common ways to improve a machine learning model's performance in settings with limited training data. Drawing heavily on ideas from those two approaches, we suggest transductive auxiliary task self-training: training a multi-task model on (i) a combination of main and auxiliary task training data, and (ii) test instances with auxiliary task labels which a single-task version of the model has previously generated. We perform extensive experiments on 86 combinations of languages and tasks. Our results are that, on average, transductive auxiliary task self-training improves absolute accuracy by up to 9.56{\%} over the pure multi-task model for dependency relation tagging and by up to 13.03{\%} for semantic tagging."
}
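
The abstract outlines a two-stage procedure: first pseudo-label the test instances with auxiliary-task tags using a single-task model, then train a multi-task model on the combined data. The sketch below illustrates that procedure with a toy PyTorch model (shared encoder, one head per task); the model sizes, random stand-in data, and training loop are illustrative assumptions, not the authors' implementation.

```python
# Illustrative sketch of transductive auxiliary task self-training
# (toy model and data; not the paper's code).
import torch
import torch.nn as nn

class MultiTaskTagger(nn.Module):
    """Shared encoder with one output head per task (main + auxiliary)."""
    def __init__(self, vocab, dim, n_main, n_aux):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Embedding(vocab, dim), nn.Linear(dim, dim), nn.ReLU())
        self.main_head = nn.Linear(dim, n_main)
        self.aux_head = nn.Linear(dim, n_aux)

    def forward(self, x):
        h = self.encoder(x)
        return self.main_head(h), self.aux_head(h)

def train(model, batches, steps=200, lr=1e-2):
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.CrossEntropyLoss()
    for _ in range(steps):
        for x, y, task in batches:  # task is "main" or "aux"
            main_logits, aux_logits = model(x)
            logits = main_logits if task == "main" else aux_logits
            loss = loss_fn(logits, y)
            opt.zero_grad()
            loss.backward()
            opt.step()
    return model

# Toy stand-ins for tokens and tags.
VOCAB, DIM, N_MAIN, N_AUX = 50, 16, 5, 7
main_train = (torch.randint(0, VOCAB, (32,)), torch.randint(0, N_MAIN, (32,)))
aux_train = (torch.randint(0, VOCAB, (32,)), torch.randint(0, N_AUX, (32,)))
test_x = torch.randint(0, VOCAB, (16,))

# (1) Single-task model trained on the auxiliary task only.
aux_only = train(MultiTaskTagger(VOCAB, DIM, N_MAIN, N_AUX),
                 [(aux_train[0], aux_train[1], "aux")])

# (2) Transductive step: label the *test* instances with auxiliary-task tags.
with torch.no_grad():
    aux_pseudo = aux_only(test_x)[1].argmax(-1)

# (3) Multi-task model trained on main + auxiliary training data plus the
#     pseudo-labelled test instances.
multi = train(MultiTaskTagger(VOCAB, DIM, N_MAIN, N_AUX), [
    (main_train[0], main_train[1], "main"),
    (aux_train[0], aux_train[1], "aux"),
    (test_x, aux_pseudo, "aux"),
])

# (4) Predict the main task on the test set.
with torch.no_grad():
    main_pred = multi(test_x)[0].argmax(-1)
print(main_pred)
```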