@inproceedings{wang-etal-2021-progressive,
    title     = {Progressive Self-Training with Discriminator for Aspect Term Extraction},
    author    = {Wang, Qianlong and
                 Wen, Zhiyuan and
                 Zhao, Qin and
                 Yang, Min and
                 Xu, Ruifeng},
    editor    = {Moens, Marie-Francine and
                 Huang, Xuanjing and
                 Specia, Lucia and
                 Yih, Scott Wen-tau},
    booktitle = {Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing},
    month     = nov,
    year      = {2021},
    address   = {Online and Punta Cana, Dominican Republic},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2021.emnlp-main.23/},
    doi       = {10.18653/v1/2021.emnlp-main.23},
    pages     = {257--268},
    abstract  = {Aspect term extraction aims to extract aspect terms from a review sentence that users have expressed opinions on. One of the remaining challenges for aspect term extraction resides in the lack of sufficient annotated data. While self-training is potentially an effective method to address this issue, the pseudo-labels it yields on unlabeled data could induce noise. In this paper, we use two means to alleviate the noise in the pseudo-labels. One is that inspired by the curriculum learning, we refine the conventional self-training to progressive self-training. Specifically, the base model infers pseudo-labels on a progressive subset at each iteration, where samples in the subset become harder and more numerous as the iteration proceeds. The other is that we use a discriminator to filter the noisy pseudo-labels. Experimental results on four SemEval datasets show that our model significantly outperforms the previous baselines and achieves state-of-the-art performance.},
}
Markdown (Informal)
[Progressive Self-Training with Discriminator for Aspect Term Extraction](https://preview.aclanthology.org/landing_page/2021.emnlp-main.23/) (Wang et al., EMNLP 2021)
ACL