@inproceedings{avramidis-etal-2012-involving,
  title     = {Involving Language Professionals in the Evaluation of Machine Translation},
  author    = {Avramidis, Eleftherios and
               Burchardt, Aljoscha and
               Federmann, Christian and
               Popovi{\'c}, Maja and
               Tscherwinka, Cindy and
               Vilar, David},
  booktitle = {Proceedings of the Eighth International Conference on Language Resources and Evaluation ({LREC}'12)},
  month     = may,
  year      = {2012},
  address   = {Istanbul, Turkey},
  publisher = {European Language Resources Association (ELRA)},
  url       = {http://www.lrec-conf.org/proceedings/lrec2012/pdf/294_Paper.pdf},
  pages     = {1127--1130},
  abstract  = {Significant breakthroughs in machine translation only seem possible if human translators are taken into the loop. While automatic evaluation and scoring mechanisms such as BLEU have enabled the fast development of systems, it is not clear how systems can meet real-world (quality) requirements in industrial translation scenarios today. The taraX{\"U} project paves the way for wide usage of hybrid machine translation outputs through various feedback loops in system development. In a consortium of research and industry partners, the project integrates human translators into the development process for rating and post-editing of machine translation outputs thus collecting feedback for possible improvements.},
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="avramidis-etal-2012-involving">
<titleInfo>
<title>Involving Language Professionals in the Evaluation of Machine Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Eleftherios</namePart>
<namePart type="family">Avramidis</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Aljoscha</namePart>
<namePart type="family">Burchardt</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Christian</namePart>
<namePart type="family">Federmann</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Maja</namePart>
<namePart type="family">Popović</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Cindy</namePart>
<namePart type="family">Tscherwinka</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">David</namePart>
<namePart type="family">Vilar</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2012-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC’12)</title>
</titleInfo>
<originInfo>
<publisher>European Language Resources Association (ELRA)</publisher>
<place>
<placeTerm type="text">Istanbul, Turkey</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Significant breakthroughs in machine translation only seem possible if human translators are taken into the loop. While automatic evaluation and scoring mechanisms such as BLEU have enabled the fast development of systems, it is not clear how systems can meet real-world (quality) requirements in industrial translation scenarios today. The taraXÜ project paves the way for wide usage of hybrid machine translation outputs through various feedback loops in system development. In a consortium of research and industry partners, the project integrates human translators into the development process for rating and post-editing of machine translation outputs thus collecting feedback for possible improvements.</abstract>
<identifier type="citekey">avramidis-etal-2012-involving</identifier>
<location>
<url>http://www.lrec-conf.org/proceedings/lrec2012/pdf/294_Paper.pdf</url>
</location>
<part>
<date>2012-05</date>
<extent unit="page">
<start>1127</start>
<end>1130</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Involving Language Professionals in the Evaluation of Machine Translation
%A Avramidis, Eleftherios
%A Burchardt, Aljoscha
%A Federmann, Christian
%A Popović, Maja
%A Tscherwinka, Cindy
%A Vilar, David
%S Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC’12)
%D 2012
%8 may
%I European Language Resources Association (ELRA)
%C Istanbul, Turkey
%F avramidis-etal-2012-involving
%X Significant breakthroughs in machine translation only seem possible if human translators are taken into the loop. While automatic evaluation and scoring mechanisms such as BLEU have enabled the fast development of systems, it is not clear how systems can meet real-world (quality) requirements in industrial translation scenarios today. The taraXÜ project paves the way for wide usage of hybrid machine translation outputs through various feedback loops in system development. In a consortium of research and industry partners, the project integrates human translators into the development process for rating and post-editing of machine translation outputs thus collecting feedback for possible improvements.
%U http://www.lrec-conf.org/proceedings/lrec2012/pdf/294_Paper.pdf
%P 1127-1130
Markdown (Informal)
[Involving Language Professionals in the Evaluation of Machine Translation](http://www.lrec-conf.org/proceedings/lrec2012/pdf/294_Paper.pdf) (Avramidis et al., LREC 2012)
ACL
- Eleftherios Avramidis, Aljoscha Burchardt, Christian Federmann, Maja Popović, Cindy Tscherwinka, and David Vilar. 2012. Involving Language Professionals in the Evaluation of Machine Translation. In Proceedings of the Eighth International Conference on Language Resources and Evaluation (LREC'12), pages 1127–1130, Istanbul, Turkey. European Language Resources Association (ELRA).