@inproceedings{vanni-miller-2001-scaling,
    % Acronym {ISLE} is braced to survive style-driven sentence casing.
    title     = "Scaling the {ISLE} framework: validating tests of machine translation quality for multi-dimensional measurement",
    author    = "Vanni, Michelle and
      Miller, Keith J.",
    booktitle = "Workshop on MT Evaluation",
    % Month macro concatenated with the day range; "--" is the correct
    % typographic range dash (single "-" is a hyphenation error in BibTeX).
    month     = sep # " 18--22",
    year      = "2001",
    address   = "Santiago de Compostela, Spain",
    url       = "https://aclanthology.org/2001.mtsummit-eval.9",
    abstract  = "Work on comparing a set of linguistic test scores for MT output to a set of the same tests{'} scores for naturally-occurring target language text (Jones and Rusk 2000) broke new ground in automating MT Evaluation. However, the tests used were selected on an ad hoc basis. In this paper, we report on work to extend our understanding, through refinement and validation, of suitable linguistic tests in the context of our novel approach to MTE. This approach was introduced in Miller and Vanni (2001a) and employs standard, rather than randomly-chosen, tests of MT output quality selected from the ISLE framework as well as a scoring system for predicting the type of information processing task performable with the output. Since the intent is to automate the scoring system, this work can also be viewed as the preliminary steps of algorithm design.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="vanni-miller-2001-scaling">
<titleInfo>
<title>Scaling the ISLE framework: validating tests of machine translation quality for multi-dimensional measurement</title>
</titleInfo>
<name type="personal">
<namePart type="given">Michelle</namePart>
<namePart type="family">Vanni</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Keith</namePart>
<namePart type="given">J</namePart>
<namePart type="family">Miller</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2001-09</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Workshop on MT Evaluation</title>
</titleInfo>
<originInfo>
<place>
<placeTerm type="text">Santiago de Compostela, Spain</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>Work on comparing a set of linguistic test scores for MT output to a set of the same tests’ scores for naturally-occurring target language text (Jones and Rusk 2000) broke new ground in automating MT Evaluation. However, the tests used were selected on an ad hoc basis. In this paper, we report on work to extend our understanding, through refinement and validation, of suitable linguistic tests in the context of our novel approach to MTE. This approach was introduced in Miller and Vanni (2001a) and employs standard, rather than randomly-chosen, tests of MT output quality selected from the ISLE framework as well as a scoring system for predicting the type of information processing task performable with the output. Since the intent is to automate the scoring system, this work can also be viewed as the preliminary steps of algorithm design.</abstract>
<identifier type="citekey">vanni-miller-2001-scaling</identifier>
<location>
<url>https://aclanthology.org/2001.mtsummit-eval.9</url>
</location>
<part>
<date>September 18-22, 2001</date>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Scaling the ISLE framework: validating tests of machine translation quality for multi-dimensional measurement
%A Vanni, Michelle
%A Miller, Keith J.
%S Workshop on MT Evaluation
%D 2001
%8 September 18-22
%C Santiago de Compostela, Spain
%F vanni-miller-2001-scaling
%X Work on comparing a set of linguistic test scores for MT output to a set of the same tests’ scores for naturally-occurring target language text (Jones and Rusk 2000) broke new ground in automating MT Evaluation. However, the tests used were selected on an ad hoc basis. In this paper, we report on work to extend our understanding, through refinement and validation, of suitable linguistic tests in the context of our novel approach to MTE. This approach was introduced in Miller and Vanni (2001a) and employs standard, rather than randomly-chosen, tests of MT output quality selected from the ISLE framework as well as a scoring system for predicting the type of information processing task performable with the output. Since the intent is to automate the scoring system, this work can also be viewed as the preliminary steps of algorithm design.
%U https://aclanthology.org/2001.mtsummit-eval.9
Markdown (Informal)
[Scaling the ISLE framework: validating tests of machine translation quality for multi-dimensional measurement](https://aclanthology.org/2001.mtsummit-eval.9) (Vanni & Miller, MTSummit 2001)
ACL