@inproceedings{lin-etal-2021-cate,
title = "{CATE}: A Contrastive Pre-trained Model for Metaphor Detection with Semi-supervised Learning",
author = "Lin, Zhenxi and
Ma, Qianli and
Yan, Jiangyue and
Chen, Jieyu",
editor = "Moens, Marie-Francine and
Huang, Xuanjing and
Specia, Lucia and
Yih, Scott Wen-tau",
booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
month = nov,
year = "2021",
address = "Online and Punta Cana, Dominican Republic",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/jlcl-multiple-ingestion/2021.emnlp-main.316/",
doi = "10.18653/v1/2021.emnlp-main.316",
pages = "3888--3898",
abstract = "Metaphors are ubiquitous in natural language, and detecting them requires contextual reasoning about whether a semantic incongruence actually exists. Most existing work addresses this problem using pre-trained contextualized models. Despite their success, these models require a large amount of labeled data and are not linguistically-based. In this paper, we proposed a ContrAstive pre-Trained modEl (CATE) for metaphor detection with semi-supervised learning. Our model first uses a pre-trained model to obtain a contextual representation of target words and employs a contrastive objective to promote an increased distance between target words' literal and metaphorical senses based on linguistic theories. Furthermore, we propose a simple strategy to collect large-scale candidate instances from the general corpus and generalize the model via self-training. Extensive experiments show that CATE achieves better performance against state-of-the-art baselines on several benchmark datasets."
}
Markdown (Informal)
[CATE: A Contrastive Pre-trained Model for Metaphor Detection with Semi-supervised Learning](https://aclanthology.org/2021.emnlp-main.316/) (Lin et al., EMNLP 2021)
ACL
Zhenxi Lin, Qianli Ma, Jiangyue Yan, and Jieyu Chen. 2021. [CATE: A Contrastive Pre-trained Model for Metaphor Detection with Semi-supervised Learning](https://aclanthology.org/2021.emnlp-main.316/). In *Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing*, pages 3888–3898, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
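
The abstract describes a contrastive objective that widens the gap between a target word's literal and metaphorical senses in a pre-trained encoder's representation space. As a rough illustration of that idea only (not the paper's actual implementation), the sketch below uses a triplet-style cosine-distance loss in PyTorch; the function name, the margin value, and the toy embeddings are all assumptions.

```python
# Illustrative sketch only: a generic contrastive objective that pushes apart
# contextual representations of a target word's literal and metaphorical uses.
# This is NOT the CATE authors' code; names and the margin value are assumed.
import torch
import torch.nn.functional as F

def contrastive_sense_loss(anchor, positive, negative, margin=1.0):
    """Triplet-style loss: pull the anchor toward a same-sense example
    (positive) and push it away from a different-sense example (negative)."""
    d_pos = 1.0 - F.cosine_similarity(anchor, positive, dim=-1)
    d_neg = 1.0 - F.cosine_similarity(anchor, negative, dim=-1)
    return F.relu(d_pos - d_neg + margin).mean()

if __name__ == "__main__":
    # Toy contextual embeddings for a target word (batch of 4, dim 8),
    # standing in for vectors from a pre-trained encoder such as BERT.
    anchor = torch.randn(4, 8)    # metaphorical uses
    positive = torch.randn(4, 8)  # other metaphorical uses (same sense)
    negative = torch.randn(4, 8)  # literal uses (different sense)
    print(contrastive_sense_loss(anchor, positive, negative).item())
```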