@inproceedings{attia-elkahky-2019-segmentation,
    title     = "Segmentation for Domain Adaptation in {Arabic}",
    author    = "Attia, Mohammed and
      Elkahky, Ali",
    editor    = "El-Hajj, Wassim and
      Belguith, Lamia Hadrich and
      Bougares, Fethi and
      Magdy, Walid and
      Zitouni, Imed and
      Tomeh, Nadi and
      El-Haj, Mahmoud and
      Zaghouani, Wajdi",
    booktitle = "Proceedings of the Fourth Arabic Natural Language Processing Workshop",
    month     = aug,
    year      = "2019",
    address   = "Florence, Italy",
    publisher = "Association for Computational Linguistics",
    url       = "https://aclanthology.org/W19-4613/",
    doi       = "10.18653/v1/W19-4613",
    pages     = "119--129",
    abstract  = "Segmentation serves as an integral part in many NLP applications including Machine Translation, Parsing, and Information Retrieval. When a model trained on the standard language is applied to dialects, the accuracy drops dramatically. However, there are more lexical items shared by the standard language and dialects than can be found by mere surface word matching. This shared lexicon is obscured by a lot of cliticization, gemination, and character repetition. In this paper, we prove that segmentation and base normalization of dialects can help in domain adaptation by reducing data sparseness. Segmentation will improve a system performance by reducing the number of OOVs, help isolate the differences and allow better utilization of the commonalities. We show that adding a small amount of dialectal segmentation training data reduced OOVs by 5{\%} and remarkably improves POS tagging for dialects by 7.37{\%} f-score, even though no dialect-specific POS training data is included."
}
Markdown (Informal)
[Segmentation for Domain Adaptation in Arabic](https://aclanthology.org/W19-4613/) (Attia & Elkahky, WANLP 2019)
ACL
- Mohammed Attia and Ali Elkahky. 2019. Segmentation for Domain Adaptation in Arabic. In Proceedings of the Fourth Arabic Natural Language Processing Workshop, pages 119–129, Florence, Italy. Association for Computational Linguistics.