@inproceedings{niwa-etal-2021-predicting,
title = "Predicting Antonyms in Context using {BERT}",
author = "Niwa, Ayana and
Nishiguchi, Keisuke and
Okazaki, Naoaki",
editor = "Belz, Anya and
Fan, Angela and
Reiter, Ehud and
Sripada, Yaji",
booktitle = "Proceedings of the 14th International Conference on Natural Language Generation",
month = aug,
year = "2021",
address = "Aberdeen, Scotland, UK",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/add-emnlp-2024-awards/2021.inlg-1.6/",
doi = "10.18653/v1/2021.inlg-1.6",
pages = "48--54",
abstract = "We address the task of antonym prediction in a context, which is a fill-in-the-blanks problem. This task setting is unique and practical because it requires contrastiveness to the other word and naturalness as a text in filling a blank. We propose methods for fine-tuning pre-trained masked language models (BERT) for context-aware antonym prediction. The experimental results demonstrate that these methods have positive impacts on the prediction of antonyms within a context. Moreover, human evaluation reveals that more than 85{\%} of predictions using the proposed method are acceptable as antonyms."
}
Markdown (Informal)
[Predicting Antonyms in Context using BERT](https://aclanthology.org/2021.inlg-1.6/) (Niwa et al., INLG 2021)
ACL
Ayana Niwa, Keisuke Nishiguchi, and Naoaki Okazaki. 2021. Predicting Antonyms in Context using BERT. In Proceedings of the 14th International Conference on Natural Language Generation, pages 48–54, Aberdeen, Scotland, UK. Association for Computational Linguistics.
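
For a concrete picture of the fill-in-the-blank setting the abstract describes, the sketch below queries an off-the-shelf masked language model through the Hugging Face `transformers` fill-mask pipeline. It only illustrates the task format, not the paper's method: the model name, example sentence, and candidate ranking are assumptions, and a vanilla pre-trained BERT without the authors' fine-tuning is not guaranteed to rank an antonym of the contrasted word highest.

```python
# Minimal sketch of context-aware fill-in-the-blank prediction with a masked LM.
# NOT the authors' fine-tuned model; model choice and example are illustrative only.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="bert-base-uncased")

# The blank should be filled with a word that contrasts with "expensive"
# while still reading naturally in the sentence (the paper's two criteria:
# contrastiveness to the other word and naturalness as text).
sentence = "The old phone was expensive, but the new one is [MASK]."

# Inspect the top-ranked candidates the pre-trained model proposes for the blank.
for candidate in fill_mask(sentence, top_k=5):
    print(f"{candidate['token_str']:>12}  score={candidate['score']:.3f}")
```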