@inproceedings{qin-etal-2021-improving,
title = "Improving {A}rabic Diacritization with Regularized Decoding and Adversarial Training",
author = "Qin, Han and
Chen, Guimin and
Tian, Yuanhe and
Song, Yan",
editor = "Zong, Chengqing and
Xia, Fei and
Li, Wenjie and
Navigli, Roberto",
booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers)",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2021.acl-short.68/",
doi = "10.18653/v1/2021.acl-short.68",
pages = "534--542",
abstract = "Arabic diacritization is a fundamental task for Arabic language processing. Previous studies have demonstrated that automatically generated knowledge can be helpful to this task. However, these studies regard the auto-generated knowledge instances as gold references, which limits their effectiveness since such knowledge is not always accurate and inferior instances can lead to incorrect predictions. In this paper, we propose to use regularized decoding and adversarial training to appropriately learn from such noisy knowledge for diacritization. Experimental results on two benchmark datasets show that, even with quite flawed auto-generated knowledge, our model can still learn adequate diacritics and outperform all previous studies, on both datasets."
}