@inproceedings{nguyen-salazar-2019-transformers,
title = "Transformers without Tears: Improving the Normalization of Self-Attention",
author = "Nguyen, Toan Q. and
Salazar, Julian",
booktitle = "Proceedings of the 16th International Conference on Spoken Language Translation",
month = nov # " 2-3",
year = "2019",
address = "Hong Kong",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2019.iwslt-1.17",
abstract = "We evaluate three simple, normalization-centric changes to improve Transformer training. First, we show that pre-norm residual connections (PRENORM) and smaller initializations enable warmup-free, validation-based training with large learning rates. Second, we propose l2 normalization with a single scale parameter (SCALENORM) for faster training and better performance. Finally, we reaffirm the effectiveness of normalizing word embeddings to a fixed length (FIXNORM). On five low-resource translation pairs from TED Talks-based corpora, these changes always converge, giving an average +1.1 BLEU over state-of-the-art bilingual baselines and a new 32.8 BLEU on IWSLT '15 English-Vietnamese. We ob- serve sharper performance curves, more consistent gradient norms, and a linear relationship between activation scaling and decoder depth. Surprisingly, in the high-resource setting (WMT '14 English-German), SCALENORM and FIXNORM remain competitive but PRENORM degrades performance.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="nguyen-salazar-2019-transformers">
<titleInfo>
<title>Transformers without Tears: Improving the Normalization of Self-Attention</title>
</titleInfo>
<name type="personal">
<namePart type="given">Toan</namePart>
<namePart type="given">Q</namePart>
<namePart type="family">Nguyen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Julian</namePart>
<namePart type="family">Salazar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>November 2-3, 2019</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 16th International Conference on Spoken Language Translation</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Hong Kong</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We evaluate three simple, normalization-centric changes to improve Transformer training. First, we show that pre-norm residual connections (PRENORM) and smaller initializations enable warmup-free, validation-based training with large learning rates. Second, we propose l2 normalization with a single scale parameter (SCALENORM) for faster training and better performance. Finally, we reaffirm the effectiveness of normalizing word embeddings to a fixed length (FIXNORM). On five low-resource translation pairs from TED Talks-based corpora, these changes always converge, giving an average +1.1 BLEU over state-of-the-art bilingual baselines and a new 32.8 BLEU on IWSLT ’15 English-Vietnamese. We observe sharper performance curves, more consistent gradient norms, and a linear relationship between activation scaling and decoder depth. Surprisingly, in the high-resource setting (WMT ’14 English-German), SCALENORM and FIXNORM remain competitive but PRENORM degrades performance.</abstract>
<identifier type="citekey">nguyen-salazar-2019-transformers</identifier>
<location>
<url>https://aclanthology.org/2019.iwslt-1.17</url>
</location>
<part>
<date>November 2-3, 2019</date>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Transformers without Tears: Improving the Normalization of Self-Attention
%A Nguyen, Toan Q.
%A Salazar, Julian
%S Proceedings of the 16th International Conference on Spoken Language Translation
%D 2019
%8 November 2-3
%I Association for Computational Linguistics
%C Hong Kong
%F nguyen-salazar-2019-transformers
%X We evaluate three simple, normalization-centric changes to improve Transformer training. First, we show that pre-norm residual connections (PRENORM) and smaller initializations enable warmup-free, validation-based training with large learning rates. Second, we propose l2 normalization with a single scale parameter (SCALENORM) for faster training and better performance. Finally, we reaffirm the effectiveness of normalizing word embeddings to a fixed length (FIXNORM). On five low-resource translation pairs from TED Talks-based corpora, these changes always converge, giving an average +1.1 BLEU over state-of-the-art bilingual baselines and a new 32.8 BLEU on IWSLT ’15 English-Vietnamese. We observe sharper performance curves, more consistent gradient norms, and a linear relationship between activation scaling and decoder depth. Surprisingly, in the high-resource setting (WMT ’14 English-German), SCALENORM and FIXNORM remain competitive but PRENORM degrades performance.
%U https://aclanthology.org/2019.iwslt-1.17
Markdown (Informal)
[Transformers without Tears: Improving the Normalization of Self-Attention](https://aclanthology.org/2019.iwslt-1.17) (Nguyen & Salazar, IWSLT 2019)
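For reference, a minimal PyTorch sketch of SCALENORM as described in the abstract (l2 normalization followed by a single learned scale parameter g). This is an illustration based on the abstract, not the authors' released implementation; the sqrt(d) initialization follows the paper's recommendation, and the `eps` guard is an assumption for numerical stability.

```python
# Minimal sketch of SCALENORM (not the authors' released code):
# normalize each activation vector to unit l2 norm, then rescale
# by a single learned scalar g.
import torch
import torch.nn as nn


class ScaleNorm(nn.Module):
    def __init__(self, scale: float, eps: float = 1e-5):
        super().__init__()
        self.g = nn.Parameter(torch.tensor(scale))  # the single scale parameter
        self.eps = eps  # assumed guard against division by zero

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # l2 norm over the model (last) dimension, then rescale by g.
        norm = x.norm(dim=-1, keepdim=True).clamp(min=self.eps)
        return self.g * x / norm


# Usage: replace LayerNorm with ScaleNorm(d_model ** 0.5) inside a Transformer block.
# FIXNORM (fixed-length word embeddings) can be sketched the same way,
# with the scale held constant instead of learned.
```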