@inproceedings{santhanam-shaikh-2020-understanding,
title = "Understanding the Impact of Experiment Design for Evaluating Dialogue System Output",
author = "Santhanam, Sashank and
Shaikh, Samira",
editor = "Cunha, Rossana and
Shaikh, Samira and
Varis, Erika and
Georgi, Ryan and
Tsai, Alicia and
Anastasopoulos, Antonios and
Chandu, Khyathi Raghavi",
booktitle = "Proceedings of the Fourth Widening Natural Language Processing Workshop",
month = jul,
year = "2020",
address = "Seattle, USA",
publisher = "Association for Computational Linguistics",
url = "https://preview.aclanthology.org/fix-sig-urls/2020.winlp-1.33/",
doi = "10.18653/v1/2020.winlp-1.33",
pages = "124--127",
abstract = "Evaluation of output from natural language generation (NLG) systems is typically conducted via crowdsourced human judgments. To understand the impact of how experiment design might affect the quality and consistency of such human judgments, we designed a between-subjects study with four experimental conditions. Through our systematic study with 40 crowdsourced workers in each task, we find that using continuous scales achieves more consistent ratings than Likert scale or ranking-based experiment design. Additionally, we find that factors such as no prior experience of participating in similar studies of rating dialogue system output"
}