@inproceedings{wu-etal-2018-multilingual,
title = "Multilingual {U}niversal {D}ependency Parsing from Raw Text with Low-Resource Language Enhancement",
author = "Wu, Yingting and
Zhao, Hai and
Tong, Jia-Jun",
editor = "Zeman, Daniel and
Haji{\v{c}}, Jan",
booktitle = "Proceedings of the {C}o{NLL} 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies",
month = oct,
year = "2018",
address = "Brussels, Belgium",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/K18-2007/",
doi = "10.18653/v1/K18-2007",
pages = "74--80",
abstract = "This paper describes the system of our team Phoenix for participating CoNLL 2018 Shared Task: Multilingual Parsing from Raw Text to Universal Dependencies. Given the annotated gold standard data in CoNLL-U format, we train the tokenizer, tagger and parser separately for each treebank based on an open source pipeline tool UDPipe. Our system reads the plain texts for input, performs the pre-processing steps (tokenization, lemmas, morphology) and finally outputs the syntactic dependencies. For the low-resource languages with no training data, we use cross-lingual techniques to build models with some close languages instead. In the official evaluation, our system achieves the macro-averaged scores of 65.61{\%}, 52.26{\%}, 55.71{\%} for LAS, MLAS and BLEX respectively."
}
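A minimal sketch of the UDPipe-based processing pipeline the abstract describes (raw text in, CoNLL-U dependencies out), using the ufal.udpipe Python bindings. The model file name and input sentence are placeholders, and this is not the authors' exact training or evaluation setup:

    # Sketch: run a trained UDPipe model on raw text and emit CoNLL-U,
    # mirroring the tokenize -> tag -> parse pipeline described above.
    from ufal.udpipe import Model, Pipeline, ProcessingError

    model = Model.load("english-ewt-ud-2.2.udpipe")  # placeholder model path
    if not model:
        raise RuntimeError("Cannot load UDPipe model")

    error = ProcessingError()
    # Input: raw text ("tokenize"); tagger and parser use the model's defaults;
    # output format: CoNLL-U.
    pipeline = Pipeline(model, "tokenize", Pipeline.DEFAULT, Pipeline.DEFAULT, "conllu")
    conllu = pipeline.process("The quick brown fox jumps over the lazy dog.", error)
    if error.occurred():
        raise RuntimeError(error.message)
    print(conllu)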