@inproceedings{huang-wan-2025-triembed,
title = "{T}ri{E}mbed: Bridge the Gap between Text and Token Indices with Embedding Reparameterization",
author = "Huang, Baizhou and
Wan, Xiaojun",
editor = "Che, Wanxiang and
Nabende, Joyce and
Shutova, Ekaterina and
Pilehvar, Mohammad Taher",
booktitle = "Findings of the Association for Computational Linguistics: ACL 2025",
month = jul,
year = "2025",
address = "Vienna, Austria",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/display_plenaries/2025.findings-acl.275/",
pages = "5291--5297",
ISBN = "979-8-89176-256-5",
abstract = "The current paradigm of language modeling is a two-stage pipeline that first transforms raw text to token indices, where the distribution is then estimated. It inherently discards linguistic relations between tokens during tokenization, creating a fundamental gap. To address this, we propose \textbf{TriEmbed}, a reparameterization method for embeddings that incorporates the morphological relationships inherent in subword tokenizer algorithms. Specifically, by organizing the vocabulary into a Trie structure, we can encode these relations and reparametrize the embeddings, facilitating the recovery of other linguistic relationships during training. Empirical results across various settings demonstrate that TriEmbed outperforms conventional embeddings from the perspective of scaling, while offering more linguistically informative token embeddings."
}
Markdown (Informal)
[TriEmbed: Bridge the Gap between Text and Token Indices with Embedding Reparameterization](https://preview.aclanthology.org/display_plenaries/2025.findings-acl.275/) (Huang & Wan, Findings 2025)
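The abstract describes organizing the subword vocabulary into a trie and reparameterizing token embeddings through that structure. Below is a minimal, hypothetical sketch of that general idea, assuming a formulation in which each token's embedding is the sum of learnable vectors attached to the nodes on its character-prefix path, so that morphologically related tokens share parameters. The class name, composition rule, and all details are illustrative assumptions, not the paper's actual implementation.

```python
# Hypothetical sketch of trie-based embedding reparameterization.
# NOT the TriEmbed authors' code; the path-sum composition is an assumption.
import torch
import torch.nn as nn


class TrieReparamEmbedding(nn.Module):
    """Embeds each token as the sum of learnable vectors along its
    character-prefix path in a trie built over the subword vocabulary."""

    def __init__(self, vocab, dim):
        super().__init__()
        # Build a trie over the vocabulary; every distinct prefix gets a node id.
        node_of_prefix = {"": 0}
        paths = []  # token id -> list of trie-node ids on its path
        for token in vocab:
            path, prefix = [], ""
            for ch in token:
                prefix += ch
                if prefix not in node_of_prefix:
                    node_of_prefix[prefix] = len(node_of_prefix)
                path.append(node_of_prefix[prefix])
            paths.append(path)
        # One free parameter vector per trie node.
        self.node_emb = nn.Embedding(len(node_of_prefix), dim)
        # Pad paths to a fixed length so lookup is a single gather.
        max_len = max(len(p) for p in paths)
        padded = torch.zeros(len(vocab), max_len, dtype=torch.long)
        mask = torch.zeros(len(vocab), max_len)
        for i, p in enumerate(paths):
            padded[i, : len(p)] = torch.tensor(p)
            mask[i, : len(p)] = 1.0
        self.register_buffer("paths", padded)
        self.register_buffer("mask", mask)

    def forward(self, token_ids):
        node_ids = self.paths[token_ids]          # (..., max_len)
        vecs = self.node_emb(node_ids)            # (..., max_len, dim)
        m = self.mask[token_ids].unsqueeze(-1)    # (..., max_len, 1)
        # Tokens sharing a prefix (e.g. "play" / "playing") share node vectors.
        return (vecs * m).sum(dim=-2)


# Toy usage with an assumed vocabulary.
vocab = ["play", "playing", "played", "walk", "walking"]
emb = TrieReparamEmbedding(vocab, dim=8)
print(emb(torch.tensor([0, 1])).shape)  # torch.Size([2, 8])
```

The sketch only illustrates how a trie over the vocabulary can encode shared-prefix (morphological) structure in the embedding parameterization; consult the paper at the URL above for the actual method and its scaling results.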