@inproceedings{rogers-augenstein-2020-improve,
  title     = {What Can We Do to Improve Peer Review in {NLP}?},
  author    = {Rogers, Anna and
               Augenstein, Isabelle},
  editor    = {Cohn, Trevor and
               He, Yulan and
               Liu, Yang},
  booktitle = {Findings of the Association for Computational Linguistics: EMNLP 2020},
  month     = nov,
  year      = {2020},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2020.findings-emnlp.112/},
  doi       = {10.18653/v1/2020.findings-emnlp.112},
  pages     = {1256--1262},
  abstract  = {Peer review is our best tool for judging the quality of conference submissions, but it is becoming increasingly spurious. We argue that a part of the problem is that the reviewers and area chairs face a poorly defined task forcing apples-to-oranges comparisons. There are several potential ways forward, but the key difficulty is creating the incentives and mechanisms for their consistent implementation in the NLP community.},
}
Markdown (Informal)
[What Can We Do to Improve Peer Review in NLP?](https://aclanthology.org/2020.findings-emnlp.112/) (Rogers & Augenstein, Findings 2020)
ACL
- Anna Rogers and Isabelle Augenstein. 2020. What Can We Do to Improve Peer Review in NLP? In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1256–1262, Online. Association for Computational Linguistics.