@inproceedings{li-etal-2021-legoeval,
title = "{LEGOE}val: An Open-Source Toolkit for Dialogue System Evaluation via Crowdsourcing",
author = "Li, Yu and
Arnold, Josh and
Yan, Feifan and
Shi, Weiyan and
Yu, Zhou",
editor = "Ji, Heng and
Park, Jong C. and
Xia, Rui",
booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing: System Demonstrations",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2021.acl-demo.38/",
doi = "10.18653/v1/2021.acl-demo.38",
pages = "317--324",
abstract = "We present LEGOEval, an open-source toolkit that enables researchers to easily evaluate dialogue systems in a few lines of code using the online crowdsource platform, Amazon Mechanical Turk. Compared to existing toolkits, LEGOEval features a flexible task design by providing a Python API that maps to commonly used React.js interface components. Researchers can personalize their evaluation procedures easily with our built-in pages as if playing with LEGO blocks. Thus, LEGOEval provides a fast, consistent method for reproducing human evaluation results. Besides the flexible task design, LEGOEval also offers an easy API to review collected data."
}
Markdown (Informal)
[LEGOEval: An Open-Source Toolkit for Dialogue System Evaluation via Crowdsourcing](https://aclanthology.org/2021.acl-demo.38/) (Li et al., ACL-IJCNLP 2021)
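The abstract describes a Python API whose pages map to React.js interface components and snap together "like LEGO blocks" to build a Mechanical Turk evaluation in a few lines of code. As a rough illustration of that composition style only, here is a minimal sketch; every name below (`Page`, `TextPage`, `RatingPage`, `Task`, `add_page`, `launch`) is a hypothetical stand-in, not LEGOEval's actual API, which is documented in the paper and its repository.

```python
# Hypothetical sketch only: these classes and methods are illustrative
# assumptions, NOT LEGOEval's real interface. The point is the block-based
# composition style the abstract describes: a task assembled in Python from
# reusable interface components, then launched on Amazon Mechanical Turk.

class Page:
    """A reusable interface block (in the paper's design, each block maps
    to a React.js component)."""
    def __init__(self, **props):
        self.props = props

class TextPage(Page):
    """Shows instructions or a dialogue snippet to the worker."""

class RatingPage(Page):
    """Collects a Likert-scale judgment from the worker."""

class Task:
    """A crowdsourcing task assembled from Page blocks."""
    def __init__(self, name):
        self.name = name
        self.pages = []

    def add_page(self, page):
        self.pages.append(page)
        return self  # allow chaining, one block at a time

    def launch(self, sandbox=True):
        # A real toolkit would serialize the pages and create an MTurk HIT
        # here; this stub only reports the assembled task layout.
        mode = "sandbox" if sandbox else "live"
        print(f"Launching '{self.name}' ({mode}) with {len(self.pages)} pages")

# "A few lines of code" to set up a dialogue evaluation:
task = Task("dialogue-quality-eval")
task.add_page(TextPage(text="Read the conversation below."))
task.add_page(RatingPage(question="How coherent is the system's response?",
                         scale=5))
task.launch(sandbox=True)
```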