@inproceedings{sato-etal-2017-adversarial,
  title     = {Adversarial Training for Cross-Domain {Universal} {Dependency} Parsing},
  author    = {Sato, Motoki and
               Manabe, Hitoshi and
               Noji, Hiroshi and
               Matsumoto, Yuji},
  editor    = {Haji{\v{c}}, Jan and
               Zeman, Dan},
  booktitle = {Proceedings of the {CoNLL} 2017 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies},
  month     = aug,
  year      = {2017},
  address   = {Vancouver, Canada},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/K17-3007/},
  doi       = {10.18653/v1/K17-3007},
  pages     = {71--79},
  abstract  = {We describe our submission to the CoNLL 2017 shared task, which exploits the shared common knowledge of a language across different domains via a domain adaptation technique. Our approach is an extension to the recently proposed adversarial training technique for domain adaptation, which we apply on top of a graph-based neural dependency parsing model on bidirectional LSTMs. In our experiments, we find our baseline graph-based parser already outperforms the official baseline model (UDPipe) by a large margin. Further, by applying our technique to the treebanks of the same language with different domains, we observe an additional gain in the performance, in particular for the domains with less training data.},
}
Markdown (Informal)
[Adversarial Training for Cross-Domain Universal Dependency Parsing](https://aclanthology.org/K17-3007/) (Sato et al., CoNLL 2017)
ACL