@inproceedings{li-etal-2022-lingjing,
title = "{L}ing{J}ing at {S}em{E}val-2022 Task 1: Multi-task Self-supervised Pre-training for Multilingual Reverse Dictionary",
author = "Li, Bin and
Weng, Yixuan and
Xia, Fei and
He, Shizhu and
Sun, Bin and
Li, Shutao",
editor = "Emerson, Guy and
Schluter, Natalie and
Stanovsky, Gabriel and
Kumar, Ritesh and
Palmer, Alexis and
Schneider, Nathan and
Singh, Siddharth and
Ratan, Shyam",
booktitle = "Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2022.semeval-1.4/",
doi = "10.18653/v1/2022.semeval-1.4",
pages = "29--35",
abstract = "This paper introduces the approach of Team LingJing{'}s experiments on SemEval-2022 Task 1 Comparing Dictionaries and Word Embeddings (CODWOE). This task aims at comparing two types of semantic descriptions and including two sub-tasks: the definition modeling and reverse dictionary track. Our team focuses on the reverse dictionary track and adopts the multi-task self-supervised pre-training for multilingual reverse dictionaries. Specifically, the randomly initialized mDeBERTa-base model is used to perform multi-task pre-training on the multilingual training datasets. The pre-training step is divided into two stages, namely the MLM pre-training stage and the contrastive pre-training stage. The experimental results show that the proposed method has achieved good performance in the reverse dictionary track, where we rank the 1-st in the Sgns targets of the EN and RU languages. All the experimental codes are open-sourced at \url{https://github.com/WENGSYX/Semeval}."
}
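
The abstract describes a two-stage pre-training scheme (an MLM stage followed by a contrastive stage) on a randomly initialized mDeBERTa-base model. The snippet below is a minimal sketch of what such a pipeline could look like; the model/tokenizer names, masking rate, projection head, and InfoNCE-style contrastive loss are illustrative assumptions rather than the authors' implementation, which is available at https://github.com/WENGSYX/Semeval.

```python
# Hypothetical sketch of the two-stage pre-training described in the abstract.
# Hyperparameters and loss choices are assumptions, not the authors' code.
import torch
import torch.nn.functional as F
from transformers import AutoTokenizer, DebertaV2Config, DebertaV2ForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("microsoft/mdeberta-v3-base")
config = DebertaV2Config.from_pretrained("microsoft/mdeberta-v3-base")
model = DebertaV2ForMaskedLM(config)             # randomly initialized, per the abstract
proj = torch.nn.Linear(config.hidden_size, 300)  # assumed head mapping to the sgns dimension

def mlm_step(glosses):
    """Stage 1: masked language modeling on multilingual glosses."""
    batch = tokenizer(glosses, padding=True, truncation=True, return_tensors="pt")
    labels = batch["input_ids"].clone()
    mask = torch.rand(labels.shape) < 0.15            # mask ~15% of tokens (special tokens not excluded here)
    batch["input_ids"][mask] = tokenizer.mask_token_id
    labels[~mask] = -100                              # ignore unmasked positions in the loss
    return model(**batch, labels=labels).loss

def contrastive_step(glosses, target_vecs, temperature=0.05):
    """Stage 2: pull each gloss embedding toward its target word embedding (InfoNCE-style)."""
    batch = tokenizer(glosses, padding=True, truncation=True, return_tensors="pt")
    hidden = model.deberta(**batch).last_hidden_state[:, 0]   # [CLS] representation
    z = F.normalize(proj(hidden), dim=-1)
    t = F.normalize(target_vecs, dim=-1)
    logits = z @ t.T / temperature                   # in-batch negatives
    labels = torch.arange(len(glosses))
    return F.cross_entropy(logits, labels)
```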