@inproceedings{zhong-etal-2021-larger,
  title     = {Are Larger Pretrained Language Models Uniformly Better? Comparing Performance at the Instance Level},
  author    = {Zhong, Ruiqi and Ghosh, Dhruba and Klein, Dan and Steinhardt, Jacob},
  editor    = {Zong, Chengqing and Xia, Fei and Li, Wenjie and Navigli, Roberto},
  booktitle = {Findings of the Association for Computational Linguistics: {ACL}-{IJCNLP} 2021},
  month     = aug,
  year      = {2021},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.findings-acl.334/},
  doi       = {10.18653/v1/2021.findings-acl.334},
  pages     = {3813--3827},
}