@inproceedings{wang-chen-2020-position,
title = "What Do Position Embeddings Learn? An Empirical Study of Pre-Trained Language Model Positional Encoding",
author = "Wang, Yu-An and
Chen, Yun-Nung",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.emnlp-main.555",
doi = "10.18653/v1/2020.emnlp-main.555",
pages = "6840--6849",
abstract = "In recent years, pre-trained Transformers have dominated the majority of NLP benchmark tasks. Many variants of pre-trained Transformers keep emerging, and most focus on designing different pre-training objectives or variants of self-attention. Embedding position information in the self-attention mechanism is also an indispensable factor in Transformers, yet it is often discussed only in passing. Hence, we carry out an empirical study on the position embeddings of mainstream pre-trained Transformers, focusing mainly on two questions: 1) Do position embeddings really learn the meaning of positions? 2) How do these different learned position embeddings affect Transformers on NLP tasks? This paper provides new insight into pre-trained position embeddings through feature-level analysis and empirical experiments on most of the iconic NLP tasks. We believe our experimental results can guide future work in choosing a suitable positional encoding function for specific tasks given their application properties.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="wang-chen-2020-position">
<titleInfo>
<title>What Do Position Embeddings Learn? An Empirical Study of Pre-Trained Language Model Positional Encoding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Yu-An</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Yun-Nung</namePart>
<namePart type="family">Chen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>In recent years, pre-trained Transformers have dominated the majority of NLP benchmark tasks. Many variants of pre-trained Transformers keep emerging, and most focus on designing different pre-training objectives or variants of self-attention. Embedding position information in the self-attention mechanism is also an indispensable factor in Transformers, yet it is often discussed only in passing. Hence, we carry out an empirical study on the position embeddings of mainstream pre-trained Transformers, focusing mainly on two questions: 1) Do position embeddings really learn the meaning of positions? 2) How do these different learned position embeddings affect Transformers on NLP tasks? This paper provides new insight into pre-trained position embeddings through feature-level analysis and empirical experiments on most of the iconic NLP tasks. We believe our experimental results can guide future work in choosing a suitable positional encoding function for specific tasks given their application properties.</abstract>
<identifier type="citekey">wang-chen-2020-position</identifier>
<identifier type="doi">10.18653/v1/2020.emnlp-main.555</identifier>
<location>
<url>https://aclanthology.org/2020.emnlp-main.555</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>6840</start>
<end>6849</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T What Do Position Embeddings Learn? An Empirical Study of Pre-Trained Language Model Positional Encoding
%A Wang, Yu-An
%A Chen, Yun-Nung
%S Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)
%D 2020
%8 nov
%I Association for Computational Linguistics
%C Online
%F wang-chen-2020-position
%X In recent years, pre-trained Transformers have dominated the majority of NLP benchmark tasks. Many variants of pre-trained Transformers keep emerging, and most focus on designing different pre-training objectives or variants of self-attention. Embedding position information in the self-attention mechanism is also an indispensable factor in Transformers, yet it is often discussed only in passing. Hence, we carry out an empirical study on the position embeddings of mainstream pre-trained Transformers, focusing mainly on two questions: 1) Do position embeddings really learn the meaning of positions? 2) How do these different learned position embeddings affect Transformers on NLP tasks? This paper provides new insight into pre-trained position embeddings through feature-level analysis and empirical experiments on most of the iconic NLP tasks. We believe our experimental results can guide future work in choosing a suitable positional encoding function for specific tasks given their application properties.
%R 10.18653/v1/2020.emnlp-main.555
%U https://aclanthology.org/2020.emnlp-main.555
%U https://doi.org/10.18653/v1/2020.emnlp-main.555
%P 6840-6849
Markdown (Informal)
[What Do Position Embeddings Learn? An Empirical Study of Pre-Trained Language Model Positional Encoding](https://aclanthology.org/2020.emnlp-main.555) (Wang & Chen, EMNLP 2020)
ACL
Yu-An Wang and Yun-Nung Chen. 2020. What Do Position Embeddings Learn? An Empirical Study of Pre-Trained Language Model Positional Encoding. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6840–6849, Online. Association for Computational Linguistics.