@inproceedings{chu-etal-2022-hit,
title = "{HIT} at {S}em{E}val-2022 Task 2: Pre-trained Language Model for Idioms Detection",
author = "Chu, Zheng and
Yang, Ziqing and
Cui, Yiming and
Chen, Zhigang and
Liu, Ming",
editor = "Emerson, Guy and
Schluter, Natalie and
Stanovsky, Gabriel and
Kumar, Ritesh and
Palmer, Alexis and
Schneider, Nathan and
Singh, Siddharth and
Ratan, Shyam",
booktitle = "Proceedings of the 16th International Workshop on Semantic Evaluation (SemEval-2022)",
month = jul,
year = "2022",
address = "Seattle, United States",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2022.semeval-1.28/",
doi = "10.18653/v1/2022.semeval-1.28",
pages = "221--227",
abstract = "The same multi-word expressions may have different meanings in different sentences. They can be mainly divided into two categories, which are literal meaning and idiomatic meaning. Non-contextual-based methods perform poorly on this problem, and we need contextual embedding to understand the idiomatic meaning of multi-word expressions correctly. We use a pre-trained language model, which can provide a context-aware sentence embedding, to detect whether multi-word expression in the sentence is idiomatic usage."
}
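
To make the approach in the abstract concrete, here is a minimal sketch of framing idiomaticity detection as binary sentence classification with a pre-trained language model via Hugging Face `transformers`. The checkpoint name (`xlm-roberta-base`), the label convention, and the example sentence are illustrative assumptions, not details taken from the paper or the authors' released code.

```python
# Minimal sketch (not the authors' code): idiomaticity detection as binary
# sentence classification with a pre-trained encoder. The checkpoint, label
# mapping, and example sentence are assumptions for illustration only.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

MODEL_NAME = "xlm-roberta-base"  # assumed multilingual checkpoint

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(
    MODEL_NAME, num_labels=2  # 0 = idiomatic, 1 = literal (assumed convention)
)

sentence = "He decided to spill the beans about the surprise party."
inputs = tokenizer(sentence, return_tensors="pt", truncation=True)

with torch.no_grad():
    # The encoder produces a context-aware sentence representation,
    # which the classification head maps to two-way logits.
    logits = model(**inputs).logits

pred = logits.argmax(dim=-1).item()
print("idiomatic" if pred == 0 else "literal")
```

The point the abstract emphasizes is that the decision relies on context-aware embeddings rather than static word vectors, so the same multi-word expression string can receive different labels depending on the sentence it appears in.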