@inproceedings{manzur-2021-mt,
title = "{MT} Human Evaluation {--} Insights {\&} Approaches",
author = "Manzur, Paula",
editor = "Campbell, Janice and
Huyck, Ben and
Larocca, Stephen and
Marciano, Jay and
Savenkov, Konstantin and
Yanishevsky, Alex",
booktitle = "Proceedings of Machine Translation Summit XVIII: Users and Providers Track",
month = aug,
year = "2021",
address = "Virtual",
publisher = "Association for Machine Translation in the Americas",
url = "https://aclanthology.org/2021.mtsummit-up.12/",
pages = "149--165",
abstract = "This session is designed to help companies and people in the business of translation evaluate MT output and to show how human translator feedback can be tweaked to make the process more objective and accurate. You will hear recommendations, insights, and takeaways on how to improve the procedure for human evaluation. When this is achieved, we can understand if the human eval study and machine metric result coheres. And we can think about what the future of translators looks like {--} the final ``human touch'' and automated MT review."
}
Markdown (Informal)
[MT Human Evaluation – Insights & Approaches](https://aclanthology.org/2021.mtsummit-up.12/) (Manzur, MTSummit 2021)
ACL
- Paula Manzur. 2021. MT Human Evaluation – Insights & Approaches. In Proceedings of Machine Translation Summit XVIII: Users and Providers Track, pages 149–165, Virtual. Association for Machine Translation in the Americas.