@inproceedings{mittal-modi-2021-recam,
  title     = {{R}e{CAM}@{IITK} at {S}em{E}val-2021 Task 4: {BERT} and {ALBERT} based Ensemble for Abstract Word Prediction},
  author    = {Mittal, Abhishek and
               Modi, Ashutosh},
  editor    = {Palmer, Alexis and
               Schneider, Nathan and
               Schluter, Natalie and
               Emerson, Guy and
               Herbelot, Aurelie and
               Zhu, Xiaodan},
  booktitle = {Proceedings of the 15th International Workshop on Semantic Evaluation (SemEval-2021)},
  month     = aug,
  year      = {2021},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.semeval-1.19/},
  doi       = {10.18653/v1/2021.semeval-1.19},
  pages     = {175--182},
  abstract  = {This paper describes our system for Task 4 of SemEval-2021: Reading Comprehension of Abstract Meaning (ReCAM). We participated in all subtasks where the main goal was to predict an abstract word missing from a statement. We fine-tuned the pre-trained masked language models namely BERT and ALBERT and used an Ensemble of these as our submitted system on Subtask 1 (ReCAM-Imperceptibility) and Subtask 2 (ReCAM-Nonspecificity). For Subtask 3 (ReCAM-Intersection), we submitted the ALBERT model as it gives the best results. We tried multiple approaches and found that Masked Language Modeling(MLM) based approach works the best.},
}
Markdown (Informal)
[ReCAM@IITK at SemEval-2021 Task 4: BERT and ALBERT based Ensemble for Abstract Word Prediction](https://aclanthology.org/2021.semeval-1.19/) (Mittal & Modi, SemEval 2021)
ACL