@inproceedings{kow-belz-2012-lg,
title = "{LG}-Eval: A Toolkit for Creating Online Language Evaluation Experiments",
author = "Kow, Eric and
Belz, Anja",
editor = "Calzolari, Nicoletta and
Choukri, Khalid and
Declerck, Thierry and
Do{\u{g}}an, Mehmet U{\u{g}}ur and
Maegaard, Bente and
Mariani, Joseph and
Moreno, Asuncion and
Odijk, Jan and
Piperidis, Stelios",
booktitle = "Proceedings of the Eighth International Conference on Language Resources and Evaluation ({LREC}'12)",
month = may,
year = "2012",
address = "Istanbul, Turkey",
publisher = "European Language Resources Association (ELRA)",
url = "https://aclanthology.org/L12-1570/",
pages = "4033--4037",
abstract = "In this paper we describe the LG-Eval toolkit for creating online language evaluation experiments. LG-Eval is the direct result of our work setting up and carrying out the human evaluation experiments in several of the Generation Challenges shared tasks. It provides tools for creating experiments with different kinds of rating tools, allocating items to evaluators, and collecting the evaluation scores."
}
Markdown (Informal)
[LG-Eval: A Toolkit for Creating Online Language Evaluation Experiments](https://aclanthology.org/L12-1570/) (Kow & Belz, LREC 2012)
ACL