@inproceedings{csordas-etal-2021-devil,
title = "The Devil is in the Detail: Simple Tricks Improve Systematic Generalization of Transformers",
author = "Csord{\'a}s, R{\'o}bert and
Irie, Kazuki and
Schmidhuber, Juergen",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2021.emnlp-main.49/",
doi = "10.18653/v1/2021.emnlp-main.49",
pages = "619--634",
abstract = "Recently, many datasets have been proposed to test the systematic generalization ability of neural networks. The companion baseline Transformers, typically trained with default hyper-parameters from standard tasks, are shown to fail dramatically. Here we demonstrate that by revisiting model configurations as basic as scaling of embeddings, early stopping, relative positional embedding, and Universal Transformer variants, we can drastically improve the performance of Transformers on systematic generalization. We report improvements on five popular datasets: SCAN, CFQ, PCFG, COGS, and Mathematics dataset. Our models improve accuracy from 50{\%} to 85{\%} on the PCFG productivity split, and from 35{\%} to 81{\%} on COGS. On SCAN, relative positional embedding largely mitigates the EOS decision problem (Newman et al., 2020), yielding 100{\%} accuracy on the length split with a cutoff at 26. Importantly, performance differences between these models are typically invisible on the IID data split. This calls for proper generalization validation sets for developing neural networks that generalize systematically. We publicly release the code to reproduce our results."
}