@inproceedings{zhong-etal-2021-larger,
title = "Are Larger Pretrained Language Models Uniformly Better? Comparing Performance at the Instance Level",
author = "Zhong, Ruiqi and
Ghosh, Dhruba and
Klein, Dan and
Steinhardt, Jacob",
editor = "Zong, Chengqing and
Xia, Fei and
Li, Wenjie and
Navigli, Roberto",
booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2021.findings-acl.334/",
doi = "10.18653/v1/2021.findings-acl.334",
pages = "3813--3827"
}
Markdown (Informal)
[Are Larger Pretrained Language Models Uniformly Better? Comparing Performance at the Instance Level](https://aclanthology.org/2021.findings-acl.334/) (Zhong et al., Findings of ACL-IJCNLP 2021)