@inproceedings{agarwal-etal-2020-automated,
title = "Automated Assessment of Noisy Crowdsourced Free-text Answers for {H}indi in Low Resource Setting",
author = "Agarwal, Dolly and
Gupta, Somya and
Baghel, Nishant",
booktitle = "Proceedings of the Sixth Workshop on Noisy User-generated Text (W-NUT 2020)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2020.wnut-1.17",
doi = "10.18653/v1/2020.wnut-1.17",
pages = "122--131",
abstract = "The requirement of performing assessments continually on a larger scale necessitates the implementation of automated systems for evaluation of the learners{'} responses to free-text questions. We target children of age group 8-14 years and use an ASR integrated assessment app to crowdsource learners{'} responses to free text questions in Hindi. The app helped collect 39641 user answers to 35 different questions of Science topics. Since the users are young children from rural India and may not be well-equipped with technology, it brings in various noise types in the answers. We describe these noise types and propose a preprocessing pipeline to denoise user{'}s answers. We showcase the performance of different similarity metrics on the noisy and denoised versions of user and model answers. Our findings have large-scale applications for automated answer assessment for school children in India in low resource settings.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="agarwal-etal-2020-automated">
<titleInfo>
<title>Automated Assessment of Noisy Crowdsourced Free-text Answers for Hindi in Low Resource Setting</title>
</titleInfo>
<name type="personal">
<namePart type="given">Dolly</namePart>
<namePart type="family">Agarwal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Somya</namePart>
<namePart type="family">Gupta</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nishant</namePart>
<namePart type="family">Baghel</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2020-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Sixth Workshop on Noisy User-generated Text (W-NUT 2020)</title>
</titleInfo>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Online</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>The requirement of performing assessments continually and on a larger scale necessitates automated systems for evaluating learners’ responses to free-text questions. We target children in the 8-14 age group and use an ASR-integrated assessment app to crowdsource learners’ responses to free-text questions in Hindi. The app helped collect 39641 user answers to 35 different questions on Science topics. Since the users are young children from rural India who may not be well-equipped with technology, various noise types appear in the answers. We describe these noise types and propose a preprocessing pipeline to denoise users’ answers. We showcase the performance of different similarity metrics on the noisy and denoised versions of user and model answers. Our findings have large-scale applications for automated answer assessment for school children in India in low resource settings.</abstract>
<identifier type="citekey">agarwal-etal-2020-automated</identifier>
<identifier type="doi">10.18653/v1/2020.wnut-1.17</identifier>
<location>
<url>https://aclanthology.org/2020.wnut-1.17</url>
</location>
<part>
<date>2020-11</date>
<extent unit="page">
<start>122</start>
<end>131</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Automated Assessment of Noisy Crowdsourced Free-text Answers for Hindi in Low Resource Setting
%A Agarwal, Dolly
%A Gupta, Somya
%A Baghel, Nishant
%S Proceedings of the Sixth Workshop on Noisy User-generated Text (W-NUT 2020)
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F agarwal-etal-2020-automated
%X The requirement of performing assessments continually and on a larger scale necessitates automated systems for evaluating learners’ responses to free-text questions. We target children in the 8-14 age group and use an ASR-integrated assessment app to crowdsource learners’ responses to free-text questions in Hindi. The app helped collect 39641 user answers to 35 different questions on Science topics. Since the users are young children from rural India who may not be well-equipped with technology, various noise types appear in the answers. We describe these noise types and propose a preprocessing pipeline to denoise users’ answers. We showcase the performance of different similarity metrics on the noisy and denoised versions of user and model answers. Our findings have large-scale applications for automated answer assessment for school children in India in low resource settings.
%R 10.18653/v1/2020.wnut-1.17
%U https://aclanthology.org/2020.wnut-1.17
%U https://doi.org/10.18653/v1/2020.wnut-1.17
%P 122-131
Markdown (Informal)
[Automated Assessment of Noisy Crowdsourced Free-text Answers for Hindi in Low Resource Setting](https://aclanthology.org/2020.wnut-1.17) (Agarwal et al., WNUT 2020)
ACL
Dolly Agarwal, Somya Gupta, and Nishant Baghel. 2020. Automated Assessment of Noisy Crowdsourced Free-text Answers for Hindi in Low Resource Setting. In Proceedings of the Sixth Workshop on Noisy User-generated Text (W-NUT 2020), pages 122–131, Online. Association for Computational Linguistics.
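
The core comparison described in the abstract above is scoring a learner's answer against a model answer with a similarity metric, before and after denoising. The sketch below is only a hypothetical illustration of that before/after pattern, not the authors' pipeline: the cleanup rules, the metric choices (difflib character ratio and token Jaccard), and the example strings are all assumptions.

# Hypothetical sketch: score a user answer against a model answer
# before and after a simple denoising step. This is NOT the paper's
# pipeline; the normalization rules below are illustrative assumptions.
import re
from difflib import SequenceMatcher

def denoise(text: str) -> str:
    """Toy cleanup: lowercase, drop danda/punctuation, collapse whitespace."""
    text = text.strip().lower()
    text = re.sub(r"[।.,!?]+", " ", text)   # strip Hindi danda and common punctuation
    text = re.sub(r"\s+", " ", text)        # collapse runs of whitespace
    return text.strip()

def char_similarity(a: str, b: str) -> float:
    """Character-level similarity ratio in [0, 1] from difflib."""
    return SequenceMatcher(None, a, b).ratio()

def token_jaccard(a: str, b: str) -> float:
    """Token-overlap (Jaccard) similarity in [0, 1]."""
    ta, tb = set(a.split()), set(b.split())
    return len(ta & tb) / len(ta | tb) if ta | tb else 0.0

# Illustrative strings (assumed, not from the dataset): a clean model answer
# and a noisy ASR-style user transcript with extra spaces and danda marks.
model_answer = "पौधे प्रकाश संश्लेषण से भोजन बनाते हैं"
user_answer = "पौधे   प्रकाश संश्लेषण से भोजन बनाते हैं ।। "

pairs = [
    ("noisy", user_answer, model_answer),
    ("denoised", denoise(user_answer), denoise(model_answer)),
]
for label, ua, ma in pairs:
    print(label,
          round(char_similarity(ua, ma), 3),
          round(token_jaccard(ua, ma), 3))

Swapping in the similarity metrics actually evaluated in the paper would follow the same noisy-versus-denoised comparison.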