BibTeX:
@inproceedings{acs-etal-2021-subword,
title = "Subword Pooling Makes a Difference",
author = "{\'A}cs, Judit and
K{\'a}d{\'a}r, {\'A}kos and
Kornai, Andras",
booktitle = "Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume",
month = apr,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.eacl-main.194",
doi = "10.18653/v1/2021.eacl-main.194",
pages = "2284--2295",
abstract = "Contextual word-representations became a standard in modern natural language processing systems. These models use subword tokenization to handle large vocabularies and unknown words. Word-level usage of such systems requires a way of pooling multiple subwords that correspond to a single word. In this paper we investigate how the choice of subword pooling affects the downstream performance on three tasks: morphological probing, POS tagging and NER, in 9 typologically diverse languages. We compare these in two massively multilingual models, mBERT and XLM-RoBERTa. For morphological tasks, the widely used {`}choose the first subword{'} is the worst strategy and the best results are obtained by using attention over the subwords. For POS tagging both of these strategies perform poorly and the best choice is to use a small LSTM over the subwords. The same strategy works best for NER and we show that mBERT is better than XLM-RoBERTa in all 9 languages. We publicly release all code, data and the full result tables at https://github.com/juditacs/subword-choice .",
}

MODS XML:
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="acs-etal-2021-subword">
    <titleInfo>
      <title>Subword Pooling Makes a Difference</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Judit</namePart>
      <namePart type="family">Ács</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ákos</namePart>
      <namePart type="family">Kádár</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Andras</namePart>
      <namePart type="family">Kornai</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2021-04</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume</title>
      </titleInfo>
      <originInfo>
        <publisher>Association for Computational Linguistics</publisher>
        <place>
          <placeTerm type="text">Online</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Contextual word-representations became a standard in modern natural language processing systems. These models use subword tokenization to handle large vocabularies and unknown words. Word-level usage of such systems requires a way of pooling multiple subwords that correspond to a single word. In this paper we investigate how the choice of subword pooling affects the downstream performance on three tasks: morphological probing, POS tagging and NER, in 9 typologically diverse languages. We compare these in two massively multilingual models, mBERT and XLM-RoBERTa. For morphological tasks, the widely used ‘choose the first subword’ is the worst strategy and the best results are obtained by using attention over the subwords. For POS tagging both of these strategies perform poorly and the best choice is to use a small LSTM over the subwords. The same strategy works best for NER and we show that mBERT is better than XLM-RoBERTa in all 9 languages. We publicly release all code, data and the full result tables at https://github.com/juditacs/subword-choice .</abstract>
    <identifier type="citekey">acs-etal-2021-subword</identifier>
    <identifier type="doi">10.18653/v1/2021.eacl-main.194</identifier>
    <location>
      <url>https://aclanthology.org/2021.eacl-main.194</url>
    </location>
    <part>
      <date>2021-04</date>
      <extent unit="page">
        <start>2284</start>
        <end>2295</end>
      </extent>
    </part>
  </mods>
</modsCollection>

Endnote:
%0 Conference Proceedings
%T Subword Pooling Makes a Difference
%A Ács, Judit
%A Kádár, Ákos
%A Kornai, Andras
%S Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume
%D 2021
%8 apr
%I Association for Computational Linguistics
%C Online
%F acs-etal-2021-subword
%X Contextual word-representations became a standard in modern natural language processing systems. These models use subword tokenization to handle large vocabularies and unknown words. Word-level usage of such systems requires a way of pooling multiple subwords that correspond to a single word. In this paper we investigate how the choice of subword pooling affects the downstream performance on three tasks: morphological probing, POS tagging and NER, in 9 typologically diverse languages. We compare these in two massively multilingual models, mBERT and XLM-RoBERTa. For morphological tasks, the widely used ‘choose the first subword’ is the worst strategy and the best results are obtained by using attention over the subwords. For POS tagging both of these strategies perform poorly and the best choice is to use a small LSTM over the subwords. The same strategy works best for NER and we show that mBERT is better than XLM-RoBERTa in all 9 languages. We publicly release all code, data and the full result tables at https://github.com/juditacs/subword-choice .
%R 10.18653/v1/2021.eacl-main.194
%U https://aclanthology.org/2021.eacl-main.194
%U https://doi.org/10.18653/v1/2021.eacl-main.194
%P 2284-2295

Markdown (Informal):
[Subword Pooling Makes a Difference](https://aclanthology.org/2021.eacl-main.194) (Ács et al., EACL 2021)

ACL:
Judit Ács, Ákos Kádár, and Andras Kornai. 2021. Subword Pooling Makes a Difference. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 2284–2295, Online. Association for Computational Linguistics.
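
For orientation, below is a minimal sketch of the three pooling strategies the abstract compares: 'choose the first subword', attention over the subwords, and a small LSTM over the subwords. This is an illustrative PyTorch reimplementation under assumed tensor shapes, not the authors' released code (see https://github.com/juditacs/subword-choice for the actual implementation); the function and class names here are hypothetical.

import torch
import torch.nn as nn

def first_pool(subword_vecs: torch.Tensor) -> torch.Tensor:
    # 'Choose the first subword': keep only the first subword's vector.
    # subword_vecs: (num_subwords, hidden) for a single word.
    return subword_vecs[0]

class AttentionPool(nn.Module):
    # Learned attention over a word's subword vectors.
    def __init__(self, hidden: int):
        super().__init__()
        self.scorer = nn.Linear(hidden, 1)  # one scalar score per subword

    def forward(self, subword_vecs: torch.Tensor) -> torch.Tensor:
        weights = torch.softmax(self.scorer(subword_vecs), dim=0)  # (n, 1)
        return (weights * subword_vecs).sum(dim=0)  # weighted sum -> (hidden,)

class LSTMPool(nn.Module):
    # Small LSTM over the subwords; the final hidden state is the word vector.
    def __init__(self, hidden: int, lstm_dim: int = 64):
        super().__init__()
        self.lstm = nn.LSTM(hidden, lstm_dim, batch_first=True)

    def forward(self, subword_vecs: torch.Tensor) -> torch.Tensor:
        _, (h_n, _) = self.lstm(subword_vecs.unsqueeze(0))  # add batch dim
        return h_n[-1, 0]  # (lstm_dim,)

if __name__ == "__main__":
    vecs = torch.randn(3, 768)  # e.g. one word split into 3 mBERT subwords
    print(first_pool(vecs).shape)          # torch.Size([768])
    print(AttentionPool(768)(vecs).shape)  # torch.Size([768])
    print(LSTMPool(768)(vecs).shape)       # torch.Size([64])

Per the abstract's findings, first-subword pooling is the weakest choice for morphological probing, attention pooling is the strongest there, and the small LSTM works best for POS tagging and NER.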