@inproceedings{srivastava-vemulapati-2022-tldr,
    title     = {{TLDR} at {S}em{E}val-2022 Task 1: Using Transformers to Learn Dictionaries and Representations},
    author    = {Srivastava, Aditya and
                 Vemulapati, Harsha Vardhan},
    editor    = {Emerson, Guy and
                 Schluter, Natalie and
                 Stanovsky, Gabriel and
                 Kumar, Ritesh and
                 Palmer, Alexis and
                 Schneider, Nathan and
                 Singh, Siddharth and
                 Ratan, Shyam},
    booktitle = {Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)},
    month     = jul,
    year      = {2022},
    address   = {Seattle, United States},
    publisher = {Association for Computational Linguistics},
    url       = {https://aclanthology.org/2022.semeval-1.6/},
    doi       = {10.18653/v1/2022.semeval-1.6},
    pages     = {60--67},
    abstract  = {We propose a pair of deep learning models, which employ unsupervised pretraining, attention mechanisms and contrastive learning for representation learning from dictionary definitions, and definition modeling from such representations. Our systems, the Transformers for Learning Dictionaries and Representations (TLDR), were submitted to the SemEval 2022 Task 1: Comparing Dictionaries and Word Embeddings (CODWOE), where they officially ranked first on the definition modeling subtask, and achieved competitive performance on the reverse dictionary subtask. In this paper we describe our methodology and analyse our system design hypotheses.},
}
Markdown (Informal)
[TLDR at SemEval-2022 Task 1: Using Transformers to Learn Dictionaries and Representations](https://aclanthology.org/2022.semeval-1.6/) (Srivastava & Vemulapati, SemEval 2022)
ACL