@inproceedings{xu-kim-2026-tokenization,
  title     = {Tokenization and Morphological Fidelity in {Uralic} {NLP}: A Cross-Lingual Evaluation},
  author    = {Xu, Nuo and
               Kim, Ahrii},
  editor    = {Hettiarachchi, Hansi and
               Ranasinghe, Tharindu and
               Plum, Alistair and
               Rayson, Paul and
               Mitkov, Ruslan and
               Gaber, Mohamed and
               Premasiri, Damith and
               Tan, Fiona Anting and
               Uyangodage, Lasitha},
  booktitle = {Proceedings of the Second Workshop on Language Models for Low-Resource Languages ({LoResLM} 2026)},
  month     = mar,
  year      = {2026},
  address   = {Rabat, Morocco},
  publisher = {Association for Computational Linguistics},
  url       = {https://preview.aclanthology.org/manual-author-scripts/2026.loreslm-1.43/},
  pages     = {493--503},
  isbn      = {979-8-89176-377-7},
  abstract  = {Subword tokenization critically affects Natural Language Processing (NLP) performance, yet its behavior in morphologically rich and low-resource language families remains under-explored. This study systematically compares three subword paradigms---Byte Pair Encoding (BPE), Overlap BPE (OBPE), and Unigram Language Model---across six Uralic languages with varying resource availability and typological diversity. Using part-of-speech (POS) tagging as a controlled downstream task, we show that OBPE consistently achieves stronger morphological alignment and higher tagging accuracy than conventional methods, particularly within the Latin-script group. These gains arise from reduced fragmentation in open-class categories and a better balance across the frequency spectrum. Transfer efficacy further depends on the downstream tagging architecture, interacting with both training volume and genealogical proximity. Taken together, these findings highlight that morphology-sensitive tokenization is not merely a preprocessing choice but a decisive factor in enabling effective cross-lingual transfer for agglutinative, low-resource languages.},
}

@comment{Informal Markdown citation from the ACL Anthology export page (kept for reference; not a BibTeX entry):
[Tokenization and Morphological Fidelity in Uralic NLP: A Cross-Lingual Evaluation](https://preview.aclanthology.org/manual-author-scripts/2026.loreslm-1.43/) (Xu & Kim, LoResLM 2026)
ACL}