@inproceedings{lin-etal-2022-improving,
title = "Improving Contextual Representation with Gloss Regularized Pre-training",
author = "Lin, Yu and
An, Zhecheng and
Wu, Peihao and
Ma, Zejun",
editor = "Carpuat, Marine and
de Marneffe, Marie-Catherine and
Meza Ruiz, Ivan Vladimir",
booktitle = "Findings of the Association for Computational Linguistics: NAACL 2022",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2022.findings-naacl.68/",
doi = "10.18653/v1/2022.findings-naacl.68",
pages = "907--920",
abstract = "Though achieving impressive results on many NLP tasks, the BERT-like masked language models (MLM) encounter the discrepancy between pre-training and inference. In light of this gap, we investigate the contextual representation of pre-training and inference from the perspective of word probability distribution. We discover that BERT risks neglecting the contextual word similarity in pre-training. To tackle this issue, we propose an auxiliary gloss regularizer module to BERT pre-training (GR-BERT), to enhance word semantic similarity. By predicting masked words and aligning contextual embeddings to corresponding glosses simultaneously, the word similarity can be explicitly modeled. We design two architectures for GR-BERT and evaluate our model in downstream tasks. Experimental results show that the gloss regularizer benefits BERT in word-level and sentence-level semantic representation. The GR-BERT achieves new state-of-the-art in lexical substitution task and greatly promotes BERT sentence representation in both unsupervised and supervised STS tasks."
}