@inproceedings{lou-etal-2022-spa,
title = "Spa: On the Sparsity of Virtual Adversarial Training for Dependency Parsing",
author = "Lou, Chao and
Han, Wenjuan and
Tu, Kewei",
editor = "He, Yulan and
Ji, Heng and
Li, Sujian and
Liu, Yang and
Chang, Chia-Hui",
booktitle = "Findings of the Association for Computational Linguistics: AACL-IJCNLP 2022",
month = nov,
year = "2022",
address = "Online only",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2022.findings-aacl.2",
pages = "11--21",
abstract = "Virtual adversarial training (VAT) is a powerful approach to improving robustness and performance that leverages both labeled and unlabeled data to compensate for the scarcity of labeled data. It has been adopted in many vision and language classification tasks. However, for tasks with structured output (e.g., dependency parsing), applying VAT is nontrivial due to two intrinsic properties of structures: (1) the non-sparsity problem and (2) exponential complexity. Against this background, we propose the Sparse Parse Adjustment (Spa) algorithm and successfully apply VAT to the dependency parsing task. Spa is a learning algorithm that combines a graph-based dependency parsing model with VAT in an exact computational manner and enhances the dependency parser with controllable and adjustable sparsity. Empirical studies show that the TreeCRF parser optimized using Spa outperforms other methods without sparsity regularization.",
}
[Spa: On the Sparsity of Virtual Adversarial Training for Dependency Parsing](https://aclanthology.org/2022.findings-aacl.2) (Lou et al., Findings 2022)
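For readers unfamiliar with the underlying technique, the sketch below shows a generic VAT loss in the style of Miyato et al. (2018) for an unstructured toy classifier. It is not the paper's Spa algorithm, which handles structured tree outputs; the helper name `vat_loss`, the hyperparameters `xi`, `epsilon`, `n_power`, and the use of PyTorch are all illustrative assumptions. The idea is to find a small input perturbation that most changes the model's output distribution, then penalize the resulting divergence, which requires no gold labels and so works on unlabeled data.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

def vat_loss(model, x, xi=1e-6, epsilon=1.0, n_power=1):
    """Generic VAT loss (Miyato et al., 2018), not the paper's Spa.

    Approximates the adversarial direction by power iteration, then
    returns the KL divergence between the model's predictions on x
    and on the perturbed input x + r_adv.
    """
    with torch.no_grad():
        p = F.softmax(model(x), dim=-1)  # reference distribution, no grad

    # Power iteration: refine a random direction toward the one that
    # maximally changes the output distribution.
    d = torch.randn_like(x)
    for _ in range(n_power):
        d = (xi * F.normalize(d, dim=-1)).requires_grad_(True)
        p_hat = F.log_softmax(model(x + d), dim=-1)
        adv_div = F.kl_div(p_hat, p, reduction="batchmean")
        d = torch.autograd.grad(adv_div, d)[0].detach()

    # Final adversarial perturbation of norm epsilon.
    r_adv = epsilon * F.normalize(d, dim=-1)
    p_hat = F.log_softmax(model(x + r_adv), dim=-1)
    return F.kl_div(p_hat, p, reduction="batchmean")

# Toy usage: a linear classifier on random (unlabeled) features.
model = nn.Linear(16, 4)
x = torch.randn(8, 16)
loss = vat_loss(model, x)
loss.backward()
```

In the structured setting studied by the paper, the output is a distribution over exponentially many dependency trees rather than a handful of classes, which is exactly where the non-sparsity and complexity issues the abstract mentions arise.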